diff --git a/build.zig b/build.zig
index 51e7416fd..b5eaca769 100644
--- a/build.zig
+++ b/build.zig
@@ -96,7 +96,6 @@ pub fn build(b: *std.build.Builder) !void {
     const exe_options_module = exe_options.createModule();
     const known_folders_module = b.dependency("known_folders", .{}).module("known-folders");
     const diffz_module = b.dependency("diffz", .{}).module("diffz");
-    const binned_allocator_module = b.dependency("binned_allocator", .{}).module("binned_allocator");
 
     const exe = b.addExecutable(.{
         .name = "zls",
@@ -111,7 +110,6 @@ pub fn build(b: *std.build.Builder) !void {
     exe.addModule("build_options", exe_options_module);
     exe.addModule("known-folders", known_folders_module);
     exe.addModule("diffz", diffz_module);
-    exe.addModule("binned_allocator", binned_allocator_module);
 
     if (enable_tracy) {
         const client_cpp = "src/tracy/public/TracyClient.cpp";
diff --git a/build.zig.zon b/build.zig.zon
index 614293884..1bdd9ae96 100644
--- a/build.zig.zon
+++ b/build.zig.zon
@@ -5,17 +5,12 @@
 
     .dependencies = .{
         .known_folders = .{
-            .url = "https://github.com/ziglibs/known-folders/archive/a564f582122326328dad6b59209d070d57c4e6ae.tar.gz",
-            .hash = "1220bb12c9bfe291eed1afe6a2070c7c39918ab1979f24a281bba39dfb23f5bcd544",
+            .url = "https://github.com/ziglibs/known-folders/archive/855473062efac722624737f1adc56f9c0dd92017.tar.gz",
+            .hash = "1220f9d9dd88b1352b1a2bde4a96f547a1591d66ad0a9d76854d0c2c7fa59eef2508",
         },
         .diffz = .{
-            .url = "https://github.com/ziglibs/diffz/archive/90353d401c59e2ca5ed0abe5444c29ad3d7489aa.tar.gz",
-            .hash = "122089a8247a693cad53beb161bde6c30f71376cd4298798d45b32740c3581405864",
-        },
-        .binned_allocator = .{
-            // upstream: https://gist.github.com/antlilja/8372900fcc09e38d7b0b6bbaddad3904
-            .url = "https://gist.github.com/antlilja/8372900fcc09e38d7b0b6bbaddad3904/archive/6c3321e0969ff2463f8335da5601986cf2108690.tar.gz",
-            .hash = "1220363c7e27b2d3f39de6ff6e90f9537a0634199860fea237a55ddb1e1717f5d6a5",
+            .url = "https://github.com/ziglibs/diffz/archive/86f5435f63961dcdba8b76e47b44e2381671fb09.tar.gz",
+            .hash = "122014b1776beda990cdc7bdbecc6960bdce9eb762d6dc7cc6664517f171becc17dd",
         },
     },
     .paths = .{""},
diff --git a/deps.nix b/deps.nix
index 832ae455c..55970cac7 100644
--- a/deps.nix
+++ b/deps.nix
@@ -3,13 +3,6 @@
 { linkFarm, fetchzip }:
 
 linkFarm "zig-packages" [
-  {
-    name = "1220363c7e27b2d3f39de6ff6e90f9537a0634199860fea237a55ddb1e1717f5d6a5";
-    path = fetchzip {
-      url = "https://gist.github.com/antlilja/8372900fcc09e38d7b0b6bbaddad3904/archive/6c3321e0969ff2463f8335da5601986cf2108690.tar.gz";
-      hash = "sha256-m/kr4kmkG2rLkAj5YwvM0HmXTd+chAiQHzYK6ozpWlw=";
-    };
-  }
   {
     name = "122089a8247a693cad53beb161bde6c30f71376cd4298798d45b32740c3581405864";
     path = fetchzip {
diff --git a/src/ComptimeInterpreter.zig b/src/ComptimeInterpreter.zig
index ffa0e73e8..e43806ce9 100644
--- a/src/ComptimeInterpreter.zig
+++ b/src/ComptimeInterpreter.zig
@@ -215,9 +215,9 @@ pub fn interpret(
                 continue;
             };
 
-            var init_value = try (try interpreter.interpret(container_field.ast.type_expr, container_namespace, .{})).getValue();
+            const init_value = try (try interpreter.interpret(container_field.ast.type_expr, container_namespace, .{})).getValue();
 
-            var default_value = if (container_field.ast.value_expr == 0)
+            const default_value = if (container_field.ast.value_expr == 0)
                 Index.none
             else
                 (try (try interpreter.interpret(container_field.ast.value_expr, container_namespace, .{})).getValue()).index; // TODO check ty
@@ -443,7 +443,7 @@ pub fn interpret(
             const field_name = tree.tokenSlice(data[node_idx].rhs);
 
             var ir = try interpreter.interpret(data[node_idx].lhs, namespace, options);
-            var ir_value = try ir.getValue();
+            const ir_value = try ir.getValue();
 
             const val_index = ir_value.index;
             const val = interpreter.ip.indexToKey(val_index);
@@ -883,7 +883,7 @@ pub fn interpret(
                 }
             };
 
-            var import_uri = (try interpreter.document_store.uriFromImportStr(interpreter.allocator, interpreter.getHandle().*, import_str[1 .. import_str.len - 1])) orelse return error.ImportFailure;
+            const import_uri = (try interpreter.document_store.uriFromImportStr(interpreter.allocator, interpreter.getHandle().*, import_str[1 .. import_str.len - 1])) orelse return error.ImportFailure;
             defer interpreter.allocator.free(import_uri);
 
             const import_handle = interpreter.document_store.getOrLoadHandle(import_uri) orelse return error.ImportFailure;
@@ -1248,7 +1248,7 @@ pub fn call(
     var arg_index: usize = 0;
     while (ast.nextFnParam(&arg_it)) |param| {
         if (arg_index >= arguments.len) return error.MissingArguments;
-        var tex = try (try interpreter.interpret(param.type_expr, fn_namespace, options)).getValue();
+        const tex = try (try interpreter.interpret(param.type_expr, fn_namespace, options)).getValue();
         const tex_ty = interpreter.ip.indexToKey(tex.index).typeOf();
         if (tex_ty != .type_type) {
             try interpreter.recordError(
diff --git a/src/DocumentScope.zig b/src/DocumentScope.zig
index 109d6d564..88361656f 100644
--- a/src/DocumentScope.zig
+++ b/src/DocumentScope.zig
@@ -678,7 +678,7 @@ noinline fn walkContainerDecl(
                     const doc = try Analyser.getDocComments(allocator, tree, decl);
                     errdefer if (doc) |d| allocator.free(d);
                     // TODO: Fix allocation; just store indices
-                    var gop_res = try context.doc_scope.enum_completions.getOrPut(allocator, .{
+                    const gop_res = try context.doc_scope.enum_completions.getOrPut(allocator, .{
                         .label = name,
                         .kind = .EnumMember,
                         .insertText = name,
diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig
index c45efb026..5e46f591a 100644
--- a/src/DocumentStore.zig
+++ b/src/DocumentStore.zig
@@ -409,7 +409,7 @@ pub const Handle = struct {
         var old_analysis_errors = self.analysis_errors;
         var old_document_scope = if (old_status.has_document_scope) self.impl.document_scope else null;
         var old_zir = if (old_status.has_zir) self.impl.zir else null;
-        var old_comptime_interpreter = if (old_status.has_comptime_interpreter) self.impl.comptime_interpreter else null;
+        const old_comptime_interpreter = if (old_status.has_comptime_interpreter) self.impl.comptime_interpreter else null;
 
         self.tree = new_tree;
         self.import_uris = .{};
@@ -877,7 +877,7 @@ pub fn loadBuildConfiguration(self: *DocumentStore, build_file_uri: Uri) !std.js
         self.allocator.free(args);
     }
 
-    var zig_run_result = blk: {
+    const zig_run_result = blk: {
         const tracy_zone2 = tracy.trace(@src());
         defer tracy_zone2.end();
         break :blk try std.process.Child.run(.{
@@ -1194,7 +1194,7 @@ fn collectCIncludes(allocator: std.mem.Allocator, tree: Ast) error{OutOfMemory}!
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
 
-    var cimport_nodes = try analysis.collectCImportNodes(allocator, tree);
+    const cimport_nodes = try analysis.collectCImportNodes(allocator, tree);
     defer allocator.free(cimport_nodes);
 
     var sources = std.MultiArrayList(CImportHandle){};
diff --git a/src/Server.zig b/src/Server.zig
index ffea7fe19..d3519c9fb 100644
--- a/src/Server.zig
+++ b/src/Server.zig
@@ -1606,7 +1606,7 @@ fn codeActionHandler(server: *Server, arena: std.mem.Allocator, request: types.C
     }
 
     const Result = getRequestMetadata("textDocument/codeAction").?.Result;
-    var result = try arena.alloc(std.meta.Child(std.meta.Child(Result)), actions.items.len);
+    const result = try arena.alloc(std.meta.Child(std.meta.Child(Result)), actions.items.len);
     for (actions.items, result) |action, *out| {
         out.* = .{ .CodeAction = action };
     }
diff --git a/src/ZigCompileServer.zig b/src/ZigCompileServer.zig
index c9c86c3c9..94eeb2d93 100644
--- a/src/ZigCompileServer.zig
+++ b/src/ZigCompileServer.zig
@@ -15,7 +15,7 @@ pub const Options = struct {
 };
 
 pub fn init(options: Options) Client {
-    var s: Client = .{
+    const s: Client = .{
         .in = options.in,
         .out = options.out,
         .pooler = std.io.poll(options.gpa, StreamEnum, .{ .in = options.in }),
diff --git a/src/analyser/InternPool.zig b/src/analyser/InternPool.zig
index b9abff6b2..0fc7cbe13 100644
--- a/src/analyser/InternPool.zig
+++ b/src/analyser/InternPool.zig
@@ -1836,14 +1836,14 @@ pub fn resolvePeerTypes(ip: *InternPool, gpa: Allocator, types: []const Index, t
 
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
-    var arena = arena_allocator.allocator();
+    const arena = arena_allocator.allocator();
 
     var chosen = types[0];
     // If this is non-null then it does the following thing, depending on the chosen zigTypeTag().
     //  * ErrorSet: this is an override
     //  * ErrorUnion: this is an override of the error set only
     //  * other: at the end we make an ErrorUnion with the other thing and this
-    var err_set_ty: Index = Index.none;
+    const err_set_ty: Index = Index.none;
     var any_are_null = false;
     var seen_const = false;
     var convert_to_slice = false;
diff --git a/src/analyser/encoding.zig b/src/analyser/encoding.zig
index 2a65bc623..2f18dd5e1 100644
--- a/src/analyser/encoding.zig
+++ b/src/analyser/encoding.zig
@@ -147,7 +147,7 @@ pub fn decode(extra: *[]const u8, comptime T: type) T {
         .Array => |info| blk: {
             var array: T = undefined;
             var i: usize = 0;
-            while (i < info.len) {
+            while (i < info.len) : (i += 1) {
                 array[i] = decode(extra, info.child);
             }
             break :blk array;
diff --git a/src/analysis.zig b/src/analysis.zig
index c8e2df0a9..efcfcec1a 100644
--- a/src/analysis.zig
+++ b/src/analysis.zig
@@ -196,7 +196,7 @@ pub fn getFunctionSnippet(
             try buf_stream.writeAll("...");
         } else if (param.type_expr != 0) {
             var curr_token = tree.firstToken(param.type_expr);
-            var end_token = ast.lastToken(tree, param.type_expr);
+            const end_token = ast.lastToken(tree, param.type_expr);
             while (curr_token <= end_token) : (curr_token += 1) {
                 const tag = token_tags[curr_token];
                 const is_comma = tag == .comma;
@@ -1493,7 +1493,7 @@ fn resolveTypeOfNodeUncached(analyser: *Analyser, node_handle: NodeWithHandle) e
             // Need at least one char between the quotes, eg "a"
             if (field_name.len < 2) return null;
 
-            var lhs = (try analyser.resolveTypeOfNodeInternal(.{
+            const lhs = (try analyser.resolveTypeOfNodeInternal(.{
                 .node = params[0],
                 .handle = handle,
             })) orelse return null;
@@ -2396,10 +2396,10 @@ pub fn getFieldAccessType(
         const curr_handle = if (current_type == null) handle else current_type.?.handle;
         if (std.mem.eql(u8, tokenizer.buffer[tok.loc.start..tok.loc.end], "@import")) {
             if (tokenizer.next().tag != .l_paren) return null;
-            var import_str_tok = tokenizer.next(); // should be the .string_literal
+            const import_str_tok = tokenizer.next(); // should be the .string_literal
             if (import_str_tok.tag != .string_literal) return null;
             if (import_str_tok.loc.end - import_str_tok.loc.start < 2) return null;
-            var import_str = offsets.locToSlice(tokenizer.buffer, .{
+            const import_str = offsets.locToSlice(tokenizer.buffer, .{
                 .start = import_str_tok.loc.start + 1,
                 .end = import_str_tok.loc.end - 1,
             });
@@ -2563,7 +2563,7 @@ pub fn getPositionContext(
 
     var line_loc = if (!lookahead) offsets.lineLocAtIndex(text, new_index) else offsets.lineLocUntilIndex(text, new_index);
-    var line = offsets.locToSlice(text, line_loc);
+    const line = offsets.locToSlice(text, line_loc);
     if (std.mem.startsWith(u8, std.mem.trimLeft(u8, line, " \t"), "//")) return .comment;
 
     // Check if the (trimmed) line starts with a '.', ie a continuation
@@ -2742,7 +2742,7 @@ pub fn getPositionContext(
 
     if (line.len == 0) return .empty;
 
-    var held_line = try allocator.dupeZ(u8, offsets.locToSlice(text, line_loc));
+    const held_line = try allocator.dupeZ(u8, offsets.locToSlice(text, line_loc));
     defer allocator.free(held_line);
 
     switch (line[0]) {
diff --git a/src/ast.zig b/src/ast.zig
index 4c0e14273..ac781098a 100644
--- a/src/ast.zig
+++ b/src/ast.zig
@@ -682,7 +682,7 @@ pub fn lastToken(tree: Ast, node: Ast.Node.Index) Ast.TokenIndex {
                 const token = lastToken(tree, datas[n].lhs) + 3; // rparen, lbrace, rbrace
                 return end_offset + (findMatchingRBrace(tree, token) orelse token);
             } else {
-                var token = lastToken(tree, tree.extra_data[cases.end - 1]) + 1; // for the rbrace
+                const token = lastToken(tree, tree.extra_data[cases.end - 1]) + 1; // for the rbrace
                 return end_offset + (findMatchingRBrace(tree, token) orelse token);
             }
         },
diff --git a/src/binned_allocator.zig b/src/binned_allocator.zig
new file mode 100644
index 000000000..5a945c17c
--- /dev/null
+++ b/src/binned_allocator.zig
@@ -0,0 +1,527 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+pub const Config = struct {
+    /// Whether to synchronize usage of this allocator.
+    /// For actual thread safety, the backing allocator must also be thread safe.
+    thread_safe: bool = !builtin.single_threaded,
+
+    /// Whether to warn about leaked memory on deinit.
+    /// This reporting is extremely limited; for proper leak checking use GeneralPurposeAllocator.
+    report_leaks: bool = true,
+};
+
+pub fn BinnedAllocator(comptime config: Config) type {
+    return struct {
+        backing_allocator: std.mem.Allocator = std.heap.page_allocator,
+        bins: Bins = .{},
+        large_count: if (config.report_leaks) Counter else u0 = if (config.report_leaks) Counter.init() else 0,
+
+        const Bins = struct {
+            Bin(16, 8) = .{},
+            Bin(64, 4) = .{},
+            Bin(256, 2) = .{},
+            Bin(1024, 0) = .{},
+            Bin(4096, 0) = .{},
+        };
+        comptime {
+            var prev: usize = 0;
+            for (Bins{}) |bin| {
+                std.debug.assert(bin.size > prev);
+                prev = bin.size;
+            }
+        }
+
+        const Self = @This();
+
+        pub fn deinit(self: *Self) void {
+            const log = std.log.scoped(.binned_allocator);
+
+            inline for (&self.bins) |*bin| {
+                if (config.report_leaks) {
+                    const leaks = bin.list.count() - bin.freeCount();
+                    if (leaks > 0) {
+                        log.warn("{} leaked blocks in {}-byte bin", .{ leaks, bin.size });
+                    }
+                }
+                bin.deinit(self.backing_allocator);
+            }
+
+            if (config.report_leaks) {
+                const large_count = self.large_count.load();
+                if (large_count > 0) {
+                    log.warn("{} large blocks leaked. Large leaks cannot be cleaned up!", .{large_count});
+                }
+            }
+        }
+
+        pub fn allocator(self: *Self) std.mem.Allocator {
+            return .{
+                .ptr = self,
+                .vtable = &.{
+                    .alloc = alloc,
+                    .resize = resize,
+                    .free = free,
+                },
+            };
+        }
+
+        fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ret_addr: usize) ?[*]u8 {
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            const align_ = @as(usize, 1) << @as(std.math.Log2Int(usize), @intCast(log2_align));
+            const size = @max(len, align_);
+            inline for (&self.bins) |*bin| {
+                if (size <= bin.size) {
+                    return bin.alloc(self.backing_allocator);
+                }
+            }
+
+            if (self.backing_allocator.rawAlloc(len, log2_align, ret_addr)) |ptr| {
+                if (config.report_leaks) {
+                    self.large_count.increment();
+                }
+                return ptr;
+            } else {
+                return null;
+            }
+        }
+
+        fn resize(ctx: *anyopaque, buf: []u8, log2_align: u8, new_len: usize, ret_addr: usize) bool {
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            const align_ = @as(usize, 1) << @as(std.math.Log2Int(usize), @intCast(log2_align));
+            comptime var prev_size: usize = 0;
+            inline for (&self.bins) |*bin| {
+                if (buf.len <= bin.size and align_ <= bin.size) {
+                    // Check it still fits
+                    return new_len > prev_size and new_len <= bin.size;
+                }
+                prev_size = bin.size;
+            }
+
+            // Assuming it's a large alloc
+            if (new_len <= prev_size) return false; // New size fits into a bin
+            return self.backing_allocator.rawResize(buf, log2_align, new_len, ret_addr);
+        }
+
+        fn free(ctx: *anyopaque, buf: []u8, log2_align: u8, ret_addr: usize) void {
+            const self: *Self = @ptrCast(@alignCast(ctx));
+
+            const align_ = @as(usize, 1) << @as(std.math.Log2Int(usize), @intCast(log2_align));
+            inline for (&self.bins) |*bin| {
+                if (buf.len <= bin.size and align_ <= bin.size) {
+                    bin.free(buf.ptr);
+                    return;
+                }
+            }
+
+            // Assuming it's a large alloc
+            self.backing_allocator.rawFree(buf, log2_align, ret_addr);
+            if (config.report_leaks) {
+                self.large_count.decrement();
+            }
+        }
+
+        const Mutex = if (config.thread_safe)
+            std.Thread.Mutex
+        else
+            struct {
+                fn lock(_: @This()) void {}
+                fn unlock(_: @This()) void {}
+            };
+
+        const Counter = if (config.thread_safe)
+            struct {
+                count: std.atomic.Atomic(usize),
+                fn init() @This() {
+                    return .{ .count = std.atomic.Atomic(usize).init(0) };
+                }
+                fn load(self: *const @This()) usize {
+                    return self.count.load(.Acquire);
+                }
+                fn increment(self: *@This()) void {
+                    _ = self.count.fetchAdd(1, .AcqRel);
+                }
+                fn decrement(self: *@This()) void {
+                    _ = self.count.fetchSub(1, .AcqRel);
+                }
+            }
+        else
+            struct {
+                count: usize,
+                fn init() @This() {
+                    return .{ .count = 0 };
+                }
+                fn load(self: @This()) usize {
+                    return self.count;
+                }
+                fn increment(self: *@This()) void {
+                    self.count += 1;
+                }
+                fn decrement(self: *@This()) void {
+                    self.count -= 1;
+                }
+            };
+
+        fn Bin(comptime slot_size: usize, comptime init_count: usize) type {
+            return struct {
+                mutex: Mutex = .{},
+                list: std.SegmentedList(Slot(slot_size), init_count) = .{},
+                free_head: ?*Slot(slot_size) = null,
+                comptime size: usize = slot_size,
+
+                fn deinit(self: *@This(), al: std.mem.Allocator) void {
+                    self.list.deinit(al);
+                }
+
+                fn alloc(self: *@This(), al: std.mem.Allocator) ?[*]u8 {
+                    self.mutex.lock();
+                    defer self.mutex.unlock();
+
+                    const slot = if (self.free_head) |s| blk: {
+                        self.free_head = s.next;
+                        break :blk s;
+                    } else self.list.addOne(al) catch return null;
+                    slot.* = .{ .buf = undefined };
+                    return &slot.buf;
+                }
+
+                fn free(self: *@This(), ptr: [*]u8) void {
+                    self.mutex.lock();
+                    defer self.mutex.unlock();
+
+                    const slot: *Slot(slot_size) = @ptrCast(@alignCast(ptr));
+                    slot.* = .{ .next = self.free_head };
+                    self.free_head = slot;
+                }
+
+                // Only public in case someone wants to dump out internal allocator debug info
+                pub fn freeCount(self: *@This()) usize {
+                    self.mutex.lock();
+                    defer self.mutex.unlock();
+
+                    var slot_opt = self.free_head;
+                    var count: usize = 0;
+                    while (slot_opt) |slot| : (slot_opt = slot.next) {
+                        count += 1;
+                    }
+                    return count;
+                }
+            };
+        }
+        fn Slot(comptime size: usize) type {
+            return extern union {
+                buf: [size]u8 align(size), // Allocated
+                next: ?*@This(), // Free
+
+                comptime {
+                    if (@sizeOf(@This()) != size or @alignOf(@This()) != size) {
+                        @compileError("Slot size too small!");
+                    }
+                }
+            };
+        }
+    };
+}
+
+test "small allocations - free in same order" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var list = std.ArrayList(*u64).init(std.testing.allocator);
+    defer list.deinit();
+
+    var i: usize = 0;
+    while (i < 513) : (i += 1) {
+        const ptr = try allocator.create(u64);
+        try list.append(ptr);
+    }
+
+    for (list.items) |ptr| {
+        allocator.destroy(ptr);
+    }
+}
+
+test "small allocations - free in reverse order" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var list = std.ArrayList(*u64).init(std.testing.allocator);
+    defer list.deinit();
+
+    var i: usize = 0;
+    while (i < 513) : (i += 1) {
+        const ptr = try allocator.create(u64);
+        try list.append(ptr);
+    }
+
+    while (list.popOrNull()) |ptr| {
+        allocator.destroy(ptr);
+    }
+}
+
+test "small allocations - alloc free alloc" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    const a = try allocator.create(u64);
+    allocator.destroy(a);
+    const b = try allocator.create(u64);
+    allocator.destroy(b);
+}
+
+test "large allocations" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    const ptr1 = try allocator.alloc(u64, 42768);
+    const ptr2 = try allocator.alloc(u64, 52768);
+    allocator.free(ptr1);
+    const ptr3 = try allocator.alloc(u64, 62768);
+    allocator.free(ptr3);
+    allocator.free(ptr2);
+}
+
+test "very large allocation" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, std.math.maxInt(usize)));
+}
+
+test "realloc" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
+    defer allocator.free(slice);
+    slice[0] = 0x12;
+
+    // This reallocation should keep its pointer address.
+    const old_slice = slice;
+    slice = try allocator.realloc(slice, 2);
+    try std.testing.expect(old_slice.ptr == slice.ptr);
+    try std.testing.expect(slice[0] == 0x12);
+    slice[1] = 0x34;
+
+    // This requires upgrading to a larger bin size
+    slice = try allocator.realloc(slice, 17);
+    try std.testing.expect(old_slice.ptr != slice.ptr);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[1] == 0x34);
+}
+
+test "shrink" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var slice = try allocator.alloc(u8, 20);
+    defer allocator.free(slice);
+
+    @memset(slice, 0x11);
+
+    try std.testing.expect(allocator.resize(slice, 17));
+    slice = slice[0..17];
+
+    for (slice) |b| {
+        try std.testing.expect(b == 0x11);
+    }
+
+    try std.testing.expect(!allocator.resize(slice, 16));
+
+    for (slice) |b| {
+        try std.testing.expect(b == 0x11);
+    }
+}
+
+test "large object - grow" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var slice1 = try allocator.alloc(u8, 8192 - 20);
+    defer allocator.free(slice1);
+
+    const old = slice1;
+    slice1 = try allocator.realloc(slice1, 8192 - 10);
+    try std.testing.expect(slice1.ptr == old.ptr);
+
+    slice1 = try allocator.realloc(slice1, 8192);
+    try std.testing.expect(slice1.ptr == old.ptr);
+
+    slice1 = try allocator.realloc(slice1, 8192 + 1);
+}
+
+test "realloc small object to large object" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var slice = try allocator.alloc(u8, 70);
+    defer allocator.free(slice);
+    slice[0] = 0x12;
+    slice[60] = 0x34;
+
+    // This requires upgrading to a large object
+    const large_object_size = 8192 + 50;
+    slice = try allocator.realloc(slice, large_object_size);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[60] == 0x34);
+}
+
+test "shrink large object to large object" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var slice = try allocator.alloc(u8, 8192 + 50);
+    defer allocator.free(slice);
+    slice[0] = 0x12;
+    slice[60] = 0x34;
+
+    if (!allocator.resize(slice, 8192 + 1)) return;
+    slice = slice.ptr[0 .. 8192 + 1];
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[60] == 0x34);
+
+    try std.testing.expect(allocator.resize(slice, 8192 + 1));
+    slice = slice[0 .. 8192 + 1];
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[60] == 0x34);
+
+    slice = try allocator.realloc(slice, 8192);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[60] == 0x34);
+}
+
+test "shrink large object to large object with larger alignment" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var debug_buffer: [1000]u8 = undefined;
+    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fba.allocator();
+
+    const alloc_size = 8192 + 50;
+    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+    defer allocator.free(slice);
+
+    const big_alignment: usize = switch (builtin.os.tag) {
+        .windows => 65536, // Windows aligns to 64K.
+        else => 8192,
+    };
+    // This loop allocates until we find a page that is not aligned to the big
+    // alignment. Then we shrink the allocation after the loop, but increase the
+    // alignment to the higher one, that we know will force it to realloc.
+    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+    while (std.mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
+        try stuff_to_free.append(slice);
+        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+    }
+    while (stuff_to_free.popOrNull()) |item| {
+        allocator.free(item);
+    }
+    slice[0] = 0x12;
+    slice[60] = 0x34;
+
+    slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[60] == 0x34);
+}
+
+test "realloc large object to small object" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var slice = try allocator.alloc(u8, 8192 + 50);
+    defer allocator.free(slice);
+    slice[0] = 0x12;
+    slice[16] = 0x34;
+
+    slice = try allocator.realloc(slice, 19);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[16] == 0x34);
+}
+
+test "non-page-allocator backing allocator" {
+    var binned = BinnedAllocator(.{}){ .backing_allocator = std.testing.allocator };
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    const ptr = try allocator.create(i32);
+    defer allocator.destroy(ptr);
+}
+
+test "realloc large object to larger alignment" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    var debug_buffer: [1000]u8 = undefined;
+    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fba.allocator();
+
+    var slice = try allocator.alignedAlloc(u8, 16, 8192 + 50);
+    defer allocator.free(slice);
+
+    const big_alignment: usize = switch (builtin.os.tag) {
+        .windows => 65536, // Windows aligns to 64K.
+        else => 8192,
+    };
+    // This loop allocates until we find a page that is not aligned to the big alignment.
+    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+    while (std.mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
+        try stuff_to_free.append(slice);
+        slice = try allocator.alignedAlloc(u8, 16, 8192 + 50);
+    }
+    while (stuff_to_free.popOrNull()) |item| {
+        allocator.free(item);
+    }
+    slice[0] = 0x12;
+    slice[16] = 0x34;
+
+    slice = try allocator.reallocAdvanced(slice, 32, 8192 + 100);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[16] == 0x34);
+
+    slice = try allocator.reallocAdvanced(slice, 32, 8192 + 25);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[16] == 0x34);
+
+    slice = try allocator.reallocAdvanced(slice, big_alignment, 8192 + 100);
+    try std.testing.expect(slice[0] == 0x12);
+    try std.testing.expect(slice[16] == 0x34);
+}
+
+test "large object does not shrink to small" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    const slice = try allocator.alloc(u8, 8192 + 50);
+    defer allocator.free(slice);
+
+    try std.testing.expect(!allocator.resize(slice, 4));
+}
+
+test "objects of size 1024 and 2048" {
+    var binned = BinnedAllocator(.{}){};
+    defer binned.deinit();
+    const allocator = binned.allocator();
+
+    const slice = try allocator.alloc(u8, 1025);
+    const slice2 = try allocator.alloc(u8, 3000);
+
+    allocator.free(slice);
+    allocator.free(slice2);
+}
diff --git a/src/build_runner/0.10.0.zig b/src/build_runner/0.10.0.zig
index db45759fb..02b6d178f 100644
--- a/src/build_runner/0.10.0.zig
+++ b/src/build_runner/0.10.0.zig
@@ -22,7 +22,7 @@ pub fn main() !void {
 
     const allocator = arena.allocator();
 
-    var args = try process.argsAlloc(allocator);
+    const args = try process.argsAlloc(allocator);
     defer process.argsFree(allocator, args);
 
     // skip my own exe name
diff --git a/src/build_runner/0.11.0.zig b/src/build_runner/0.11.0.zig
index 86d6323df..570af65fe 100644
--- a/src/build_runner/0.11.0.zig
+++ b/src/build_runner/0.11.0.zig
@@ -21,7 +21,7 @@ pub fn main() !void {
 
     const allocator = arena.allocator();
 
-    var args = try process.argsAlloc(allocator);
+    const args = try process.argsAlloc(allocator);
 
     // skip my own exe name
     var arg_idx: usize = 1;
diff --git a/src/build_runner/master.zig b/src/build_runner/master.zig
index 03fc72739..e7e5cb8cf 100644
--- a/src/build_runner/master.zig
+++ b/src/build_runner/master.zig
@@ -21,7 +21,7 @@ pub fn main() !void {
 
     const allocator = arena.allocator();
 
-    var args = try process.argsAlloc(allocator);
+    const args = try process.argsAlloc(allocator);
 
     // skip my own exe name
     var arg_idx: usize = 1;
@@ -143,7 +143,6 @@ pub fn main() !void {
     // pub const root_deps: []const struct { []const u8, []const u8 } = &.{
     //     .{ "known_folders", "1220bb12c9bfe291eed1afe6a2070c7c39918ab1979f24a281bba39dfb23f5bcd544" },
     //     .{ "diffz", "122089a8247a693cad53beb161bde6c30f71376cd4298798d45b32740c3581405864" },
-    //     .{ "binned_allocator", "1220363c7e27b2d3f39de6ff6e90f9537a0634199860fea237a55ddb1e1717f5d6a5" },
     // };
 
     var deps_build_roots: std.ArrayListUnmanaged(BuildConfig.DepsBuildRoots) = .{};
diff --git a/src/config_gen/config_gen.zig b/src/config_gen/config_gen.zig
index 0061baafa..7f3e9c8d7 100644
--- a/src/config_gen/config_gen.zig
+++ b/src/config_gen/config_gen.zig
@@ -267,7 +267,7 @@ fn generateVSCodeConfigFile(allocator: std.mem.Allocator, config: Config, path:
     }
 
     var buffered_writer = std.io.bufferedWriter(config_file.writer());
-    var writer = buffered_writer.writer();
+    const writer = buffered_writer.writer();
 
     try std.json.stringify(configuration, .{
         .whitespace = .indent_4,
@@ -553,7 +553,7 @@ fn collectBuiltinData(allocator: std.mem.Allocator, version: []const u8, langref
             else => unreachable,
         }
 
-        var content_token = tokenizer.next();
+        const content_token = tokenizer.next();
         std.debug.assert(content_token.id == .Content);
         const content = tokenizer.buffer[content_token.start..content_token.end];
         const writer = builtins.items[builtins.items.len - 1].documentation.writer(allocator);
@@ -846,7 +846,7 @@ fn generateVersionDataFile(allocator: std.mem.Allocator, version: []const u8, ou
     };
     defer allocator.free(langref_source);
 
-    var builtins = try collectBuiltinData(allocator, version, langref_source);
+    const builtins = try collectBuiltinData(allocator, version, langref_source);
     defer {
         for (builtins) |*builtin| {
             builtin.documentation.deinit(allocator);
@@ -884,7 +884,7 @@ fn generateVersionDataFile(allocator: std.mem.Allocator, version: []const u8, ou
         const snippet = try extractSnippetFromSignature(allocator, signature);
         defer allocator.free(snippet);
 
-        var arguments = try extractArgumentsFromSignature(allocator, signature[builtin.name.len + 1 ..]);
+        const arguments = try extractArgumentsFromSignature(allocator, signature[builtin.name.len + 1 ..]);
        defer allocator.free(arguments);
 
         try writer.print(
@@ -970,7 +970,7 @@ fn httpGET(allocator: std.mem.Allocator, uri: std.Uri) !Response {
 
 pub fn main() !void {
     var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
     defer std.debug.assert(general_purpose_allocator.deinit() == .ok);
-    var gpa = general_purpose_allocator.allocator();
+    const gpa = general_purpose_allocator.allocator();
 
     var stderr = std.io.getStdErr().writer();
diff --git a/src/diff.zig b/src/diff.zig
index 0264ee309..cbc895287 100644
--- a/src/diff.zig
+++ b/src/diff.zig
@@ -21,7 +21,7 @@ pub fn edits(
     var arena = std.heap.ArenaAllocator.init(allocator);
     defer arena.deinit();
 
-    var diffs = try dmp.diff(arena.allocator(), before, after, true);
+    const diffs = try dmp.diff(arena.allocator(), before, after, true);
 
     var edit_count: usize = 0;
     for (diffs.items) |diff| {
@@ -41,7 +41,7 @@ pub fn edits(
 
     var offset: usize = 0;
     for (diffs.items) |diff| {
-        var start = offset;
+        const start = offset;
         switch (diff.operation) {
             .delete => {
                 offset += diff.text.len;
@@ -122,7 +122,7 @@ pub fn applyTextEdits(
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
 
-    var text_edits_sortable = try allocator.dupe(types.TextEdit, text_edits);
+    const text_edits_sortable = try allocator.dupe(types.TextEdit, text_edits);
     defer allocator.free(text_edits_sortable);
 
     std.mem.sort(types.TextEdit, text_edits_sortable, {}, textEditLessThan);
diff --git a/src/features/code_actions.zig b/src/features/code_actions.zig
index aebeb25a2..9919a2e18 100644
--- a/src/features/code_actions.zig
+++ b/src/features/code_actions.zig
@@ -121,9 +121,9 @@ fn handleUnusedFunctionParameter(builder: *Builder, actions: *std.ArrayListUnman
     //     ) void { ... }
     // We have to be able to detect both cases.
     const fn_proto_param = payload.get(tree).?;
-    var param_end = offsets.tokenToLoc(tree, ast.paramLastToken(tree, fn_proto_param)).end;
+    const param_end = offsets.tokenToLoc(tree, ast.paramLastToken(tree, fn_proto_param)).end;
 
-    var found_comma = std.mem.startsWith(
+    const found_comma = std.mem.startsWith(
         u8,
         std.mem.trimLeft(u8, tree.source[param_end..], " \n"),
         ",",
@@ -292,7 +292,7 @@ fn handlePointlessDiscard(builder: *Builder, actions: *std.ArrayListUnmanaged(ty
 fn detectIndentation(source: []const u8) []const u8 {
     // Essentially I'm looking for the first indentation in the file.
     var i: usize = 0;
-    var len = source.len - 1; // I need 1 look-ahead
+    const len = source.len - 1; // I need 1 look-ahead
     while (i < len) : (i += 1) {
         if (source[i] != '\n') continue;
         i += 1;
diff --git a/src/features/completions.zig b/src/features/completions.zig
index 5155f256b..c447dfb48 100644
--- a/src/features/completions.zig
+++ b/src/features/completions.zig
@@ -601,8 +601,8 @@ fn formatDetailedLabel(item: *types.CompletionItem, arena: std.mem.Allocator) er
     // log.info("## label: {s} it: {s} kind: {} isValue: {}", .{item.label, it, item.kind, isValue});
 
     if (std.mem.startsWith(u8, it, "fn ") or std.mem.startsWith(u8, it, "@")) {
-        var s: usize = std.mem.indexOf(u8, it, "(") orelse return;
-        var e: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
+        const s: usize = std.mem.indexOf(u8, it, "(") orelse return;
+        const e: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
         if (e < s) {
             log.warn("something wrong when trying to build label detail for {s} kind: {}", .{ it, item.kind.? });
             return;
@@ -615,8 +615,8 @@ fn formatDetailedLabel(item: *types.CompletionItem, arena: std.mem.Allocator) er
         if (std.mem.indexOf(u8, it, "= struct")) |_| {
             item.labelDetails.?.description = "struct";
         } else if (std.mem.indexOf(u8, it, "= union")) |_| {
-            var us: usize = std.mem.indexOf(u8, it, "(") orelse return;
-            var ue: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
+            const us: usize = std.mem.indexOf(u8, it, "(") orelse return;
+            const ue: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
             if (ue < us) {
                 log.warn("something wrong when trying to build label detail for a .Constant|union {s}", .{it});
                 return;
@@ -636,7 +636,7 @@ fn formatDetailedLabel(item: *types.CompletionItem, arena: std.mem.Allocator) er
         if (eqlPos != null) {
             if (start > eqlPos.?) return;
         }
-        var e: usize = eqlPos orelse it.len;
+        const e: usize = eqlPos orelse it.len;
         item.labelDetails = .{
             .detail = "", // left
             .description = it[start + 1 .. e], // right
@@ -653,8 +653,8 @@ fn formatDetailedLabel(item: *types.CompletionItem, arena: std.mem.Allocator) er
             };
         }
     } else if (item.kind.? == .Variable) {
-        var s: usize = std.mem.indexOf(u8, it, ":") orelse return;
-        var e: usize = std.mem.indexOf(u8, it, "=") orelse return;
+        const s: usize = std.mem.indexOf(u8, it, ":") orelse return;
+        const e: usize = std.mem.indexOf(u8, it, "=") orelse return;
 
         if (e < s) {
             log.warn("something wrong when trying to build label detail for a .Variable {s}", .{it});
@@ -677,8 +677,8 @@ fn formatDetailedLabel(item: *types.CompletionItem, arena: std.mem.Allocator) er
             .description = it, // right
         };
     } else if (item.kind.? == .Constant or item.kind.? == .Field) {
-        var s: usize = std.mem.indexOf(u8, it, " ") orelse return;
-        var e: usize = std.mem.indexOf(u8, it, "=") orelse it.len;
+        const s: usize = std.mem.indexOf(u8, it, " ") orelse return;
+        const e: usize = std.mem.indexOf(u8, it, "=") orelse it.len;
         if (e < s) {
             log.warn("something wrong when trying to build label detail for a .Variable {s}", .{it});
             return;
@@ -693,16 +693,16 @@ fn formatDetailedLabel(item: *types.CompletionItem, arena: std.mem.Allocator) er
         };
 
         if (std.mem.indexOf(u8, it, "= union(")) |_| {
-            var us: usize = std.mem.indexOf(u8, it, "(") orelse return;
-            var ue: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
+            const us: usize = std.mem.indexOf(u8, it, "(") orelse return;
+            const ue: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
             if (ue < us) {
                 log.warn("something wrong when trying to build label detail for a .Constant|union {s}", .{it});
                 return;
             }
             item.labelDetails.?.description = it[us - 5 .. ue + 1];
         } else if (std.mem.indexOf(u8, it, "= enum(")) |_| {
-            var es: usize = std.mem.indexOf(u8, it, "(") orelse return;
-            var ee: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
+            const es: usize = std.mem.indexOf(u8, it, "(") orelse return;
+            const ee: usize = std.mem.lastIndexOf(u8, it, ")") orelse return;
             if (ee < es) {
                 log.warn("something wrong when trying to build label detail for a .Constant|enum {s}", .{it});
                 return;
@@ -775,7 +775,7 @@ fn completeDot(document_store: *DocumentStore, analyser: *Analyser, arena: std.m
     const token_tags = tree.tokens.items(.tag);
 
     // as invoked source_index points to the char/token after the `.`, do `- 1`
-    var dot_token_index = offsets.sourceIndexToTokenIndex(tree, source_index - 1);
+    const dot_token_index = offsets.sourceIndexToTokenIndex(tree, source_index - 1);
     if (dot_token_index < 2) return &.{};
 
     var completions = std.ArrayListUnmanaged(types.CompletionItem){};
@@ -805,7 +805,7 @@ fn completeDot(document_store: *DocumentStore, analyser: *Analyser, arena: std.m
     if (token_tags[dot_token_index - 1] == .number_literal or token_tags[dot_token_index - 1] != .equal) return &.{};
 
     // `var enum_val = .` or the get*Context logic failed because of syntax errors (parser didn't create the necessary node(s))
-    var enum_completions = try document_store.enumCompletionItems(arena, handle.*);
+    const enum_completions = try document_store.enumCompletionItems(arena, handle.*);
     return enum_completions;
 }
 
@@ -835,7 +835,7 @@ fn completeFileSystemStringLiteral(
             return &.{};
         };
     } else {
-        var document_path = try URI.parse(arena, handle.uri);
+        const document_path = try URI.parse(arena, handle.uri);
         try search_paths.append(arena, std.fs.path.dirname(document_path).?);
     }
 
@@ -1292,7 +1292,7 @@ fn collectVarAccessContainerNodes(
         try node_type.getAllTypesWithHandlesArrayList(arena, types_with_handles);
         return;
     }
-    var fn_param_decl = Analyser.Declaration{ .param_payload = .{
+    const fn_param_decl = Analyser.Declaration{ .param_payload = .{
        .func = fn_proto_node,
        .param_index = @intCast(dot_context.fn_arg_index),
     } };
diff --git a/src/features/folding_range.zig b/src/features/folding_range.zig
index 34e861b8b..61c396fc6 100644
--- a/src/features/folding_range.zig
+++ b/src/features/folding_range.zig
@@ -59,7 +59,7 @@ const Builder = struct {
         const tracy_zone = tracy.trace(@src());
         defer tracy_zone.end();
 
-        var result_locations = try builder.allocator.alloc(types.FoldingRange, builder.locations.items.len);
+        const result_locations = try builder.allocator.alloc(types.FoldingRange, builder.locations.items.len);
         errdefer builder.allocator.free(result_locations);
 
         for (builder.locations.items, result_locations) |folding_range, *result| {
diff --git a/src/features/inlay_hints.zig b/src/features/inlay_hints.zig
index 314b5fed4..0debfd6d9 100644
--- a/src/features/inlay_hints.zig
+++ b/src/features/inlay_hints.zig
@@ -67,7 +67,7 @@ const Builder = struct {
         var last_index: usize = 0;
         var last_position: types.Position = .{ .line = 0, .character = 0 };
 
-        var converted_hints = try self.arena.alloc(types.InlayHint, self.hints.items.len);
+        const converted_hints = try self.arena.alloc(types.InlayHint, self.hints.items.len);
         for (converted_hints, self.hints.items) |*converted_hint, hint| {
             const position = offsets.advancePosition(
                 self.handle.tree.source,
diff --git a/src/features/references.zig b/src/features/references.zig
index 2519a98c2..c59a3a8ec 100644
--- a/src/features/references.zig
+++ b/src/features/references.zig
@@ -162,7 +162,7 @@ fn gatherReferences(
 
         try dependencies.ensureUnusedCapacity(allocator, handle_dependencies.items.len);
         for (handle_dependencies.items) |uri| {
-            var gop = dependencies.getOrPutAssumeCapacity(uri);
+            const gop = dependencies.getOrPutAssumeCapacity(uri);
             if (gop.found_existing) {
                 allocator.free(uri);
             }
@@ -289,7 +289,7 @@ const CallBuilder = struct {
             .async_call_one_comma,
             => {
                 var buf: [1]Ast.Node.Index = undefined;
-                var call = tree.fullCall(&buf, node).?;
+                const call = tree.fullCall(&buf, node).?;
 
                 const called_node = call.ast.fn_expr;
 
diff --git a/src/features/selection_range.zig b/src/features/selection_range.zig
index a3c89dc1a..c4a1585cb 100644
--- a/src/features/selection_range.zig
+++ b/src/features/selection_range.zig
@@ -18,7 +18,7 @@ pub fn generateSelectionRanges(
     //
     // A faster algorithm would be to walk the tree starting from the root,
     // descending into the child containing the position at every step.
-    var result = try arena.alloc(types.SelectionRange, positions.len);
+    const result = try arena.alloc(types.SelectionRange, positions.len);
     var locs = try std.ArrayListUnmanaged(offsets.Loc).initCapacity(arena, 32);
     for (positions, result) |position, *out| {
         const index = offsets.positionToIndex(handle.tree.source, position, offset_encoding);
diff --git a/src/main.zig b/src/main.zig
index 0e8af0323..99a710461 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -9,7 +9,7 @@ const Server = @import("Server.zig");
 const Header = @import("Header.zig");
 const Transport = @import("Transport.zig");
 const debug = @import("debug.zig");
-const binned_allocator = @import("binned_allocator");
+const binned_allocator = @import("binned_allocator.zig");
 
 const logger = std.log.scoped(.zls_main);
 
diff --git a/src/offsets.zig b/src/offsets.zig
index 2cd3bdf1b..e3d89634d 100644
--- a/src/offsets.zig
+++ b/src/offsets.zig
@@ -113,7 +113,7 @@ pub fn sourceIndexToTokenIndex(tree: Ast, source_index: usize) Ast.TokenIndex {
         }
     }
     while (upper_index > 0) : (upper_index -= 1) {
-        var token_start = tokens_start[upper_index];
+        const token_start = tokens_start[upper_index];
         if (token_start > source_index) continue; // checking for equality here is suboptimal
         // Handle source_index being > than the last possible token_start (max_token_start < source_index < tree.source.len)
         if (upper_index == tokens_start.len - 1) break;
diff --git a/src/stage2/AstGen.zig b/src/stage2/AstGen.zig
index 06e9c6061..a72f90710 100644
--- a/src/stage2/AstGen.zig
+++ b/src/stage2/AstGen.zig
@@ -1226,7 +1226,7 @@ fn awaitExpr(
             try astgen.errNoteNode(gz.suspend_node, "suspend block here", .{}),
         });
     }
-    const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node);
+    const operand = try expr(gz, scope, .{ .rl = .ref }, rhs_node);
     const result = if (gz.nosuspend_node != 0)
         try gz.addExtendedPayload(.await_nosuspend, Zir.Inst.UnNode{
             .node = gz.nodeIndexToRelative(node),
@@ -1248,7 +1248,7 @@ fn resumeExpr(
     const tree = astgen.tree;
     const node_datas = tree.nodes.items(.data);
     const rhs_node = node_datas[node].lhs;
-    const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node);
+    const operand = try expr(gz, scope, .{ .rl = .ref }, rhs_node);
     const result = try gz.addUnNode(.@"resume", operand, node);
     return rvalue(gz, ri, result, node);
 }
@@ -1722,6 +1722,57 @@ fn structInitExpr(
         }
     }
 
+    {
+        var sfba = std.heap.stackFallback(256, astgen.arena);
+        const sfba_allocator = sfba.get();
+
+        var duplicate_names = std.AutoArrayHashMap(u32, ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator);
+        defer duplicate_names.deinit();
+        try duplicate_names.ensureTotalCapacity(@intCast(struct_init.ast.fields.len));
+
+        // When there aren't errors, use this to avoid a second iteration.
+        var any_duplicate = false;
+
+        for (struct_init.ast.fields) |field| {
+            const name_token = tree.firstToken(field) - 2;
+            const name_index = try astgen.identAsString(name_token);
+
+            const gop = try duplicate_names.getOrPut(name_index);
+
+            if (gop.found_existing) {
+                try gop.value_ptr.append(sfba_allocator, name_token);
+                any_duplicate = true;
+            } else {
+                gop.value_ptr.* = .{};
+                try gop.value_ptr.append(sfba_allocator, name_token);
+            }
+        }
+
+        if (any_duplicate) {
+            var it = duplicate_names.iterator();
+
+            while (it.next()) |entry| {
+                const record = entry.value_ptr.*;
+                if (record.items.len > 1) {
+                    var error_notes = std.ArrayList(u32).init(astgen.arena);
+
+                    for (record.items[1..]) |duplicate| {
+                        try error_notes.append(try astgen.errNoteTok(duplicate, "other field here", .{}));
+                    }
+
+                    try astgen.appendErrorTokNotes(
+                        record.items[0],
+                        "duplicate field",
+                        .{},
+                        error_notes.items,
+                    );
+                }
+            }
+
+            return error.AnalysisFail;
+        }
+    }
+
     if (struct_init.ast.type_expr != 0) {
         // Typed inits do not use RLS for language simplicity.
         const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
@@ -1920,6 +1971,17 @@ fn comptimeExpr(
         .block_two, .block_two_semicolon, .block, .block_semicolon => {
             const token_tags = tree.tokens.items(.tag);
             const lbrace = main_tokens[node];
+            // Careful! We can't pass in the real result location here, since it may
+            // refer to runtime memory. A runtime-to-comptime boundary has to remove
+            // result location information, compute the result, and copy it to the true
+            // result location at runtime. We do this below as well.
+            const ty_only_ri: ResultInfo = .{
+                .ctx = ri.ctx,
+                .rl = if (try ri.rl.resultType(gz, node)) |res_ty|
+                    .{ .coerced_ty = res_ty }
+                else
+                    .none,
+            };
             if (token_tags[lbrace - 1] == .colon and
                 token_tags[lbrace - 2] == .identifier)
             {
@@ -1934,17 +1996,13 @@
                 else
                     stmts[0..2];
 
-                // Careful! We can't pass in the real result location here, since it may
-                // refer to runtime memory. A runtime-to-comptime boundary has to remove
-                // result location information, compute the result, and copy it to the true
-                // result location at runtime. We do this below as well.
-                const block_ref = try labeledBlockExpr(gz, scope, .{ .rl = .none }, node, stmt_slice, true);
+                const block_ref = try labeledBlockExpr(gz, scope, ty_only_ri, node, stmt_slice, true);
                 return rvalue(gz, ri, block_ref, node);
             },
             .block, .block_semicolon => {
                 const stmts = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
                 // Replace result location and copy back later - see above.
-                const block_ref = try labeledBlockExpr(gz, scope, .{ .rl = .none }, node, stmts, true);
+                const block_ref = try labeledBlockExpr(gz, scope, ty_only_ri, node, stmts, true);
                 return rvalue(gz, ri, block_ref, node);
             },
             else => unreachable,
@@ -1962,7 +2020,14 @@
 
     const block_inst = try gz.makeBlockInst(.block_comptime, node);
     // Replace result location and copy back later - see above.
-    const block_result = try expr(&block_scope, scope, .{ .rl = .none }, node);
+    const ty_only_ri: ResultInfo = .{
+        .ctx = ri.ctx,
+        .rl = if (try ri.rl.resultType(gz, node)) |res_ty|
+            .{ .coerced_ty = res_ty }
+        else
+            .none,
+    };
+    const block_result = try expr(&block_scope, scope, ty_only_ri, node);
     if (!gz.refIsNoReturn(block_result)) {
         _ = try block_scope.addBreak(.@"break", block_inst, block_result);
     }
@@ -2891,11 +2956,19 @@ fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!v
                 const s = scope.cast(Scope.LocalPtr).?;
                 if (s.used == 0 and s.discarded == 0) {
                     try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)});
-                } else if (s.used != 0 and s.discarded != 0) {
-                    try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{
-                        try gz.astgen.errNoteTok(s.used, "used here", .{}),
-                    });
+                } else {
+                    if (s.used != 0 and s.discarded != 0) {
+                        try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{
+                            try astgen.errNoteTok(s.used, "used here", .{}),
+                        });
+                    }
+                    if (s.id_cat == .@"local variable" and !s.used_as_lvalue) {
+                        try astgen.appendErrorTokNotes(s.token_src, "local variable is never mutated", .{}, &.{
+                            try astgen.errNoteTok(s.token_src, "consider using 'const'", .{}),
+                        });
+                    }
                 }
+
                 scope = s.parent;
             },
             .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
@@ -4872,6 +4945,15 @@ fn structDeclInner(
         }
     };
 
+    var sfba = std.heap.stackFallback(256, astgen.arena);
+    const sfba_allocator = sfba.get();
+
+    var duplicate_names = std.AutoArrayHashMap(u32, std.ArrayListUnmanaged(Ast.TokenIndex)).init(sfba_allocator);
+    try duplicate_names.ensureTotalCapacity(field_count);
+
+    // When there aren't errors, use this to avoid a second iteration.
+    var any_duplicate = false;
+
     var known_non_opv = false;
     var known_comptime_only = false;
     var any_comptime_fields = false;
@@ -4884,11 +4966,21 @@
         };
 
         if (!is_tuple) {
+            const field_name = try astgen.identAsString(member.ast.main_token);
             member.convertToNonTupleLike(astgen.tree.nodes);
             assert(!member.ast.tuple_like);
-            const field_name = try astgen.identAsString(member.ast.main_token);
 
             wip_members.appendToField(field_name);
+
+            const gop = try duplicate_names.getOrPut(field_name);
+
+            if (gop.found_existing) {
+                try gop.value_ptr.append(sfba_allocator, member.ast.main_token);
+                any_duplicate = true;
+            } else {
+                gop.value_ptr.* = .{};
+                try gop.value_ptr.append(sfba_allocator, member.ast.main_token);
+            }
         } else if (!member.ast.tuple_like) {
             return astgen.failTok(member.ast.main_token, "tuple field has a name", .{});
         }
@@ -4970,6 +5062,34 @@
             }
         }
 
+    if (any_duplicate) {
+        var it = duplicate_names.iterator();
+
+        while (it.next()) |entry| {
+            const record = entry.value_ptr.*;
+            if (record.items.len > 1) {
+                var error_notes = std.ArrayList(u32).init(astgen.arena);
+
+                for (record.items[1..]) |duplicate| {
+                    try error_notes.append(try astgen.errNoteTok(duplicate, "other field here", .{}));
+                }
+
+                try error_notes.append(try astgen.errNoteNode(node, "struct declared here", .{}));
+
+                try astgen.appendErrorTokNotes(
+                    record.items[0],
+                    "duplicate struct field: '{s}'",
+                    .{try astgen.identifierTokenString(record.items[0])},
+                    error_notes.items,
+                );
+            }
+        }
+
+        return error.AnalysisFail;
+    }
+
+    duplicate_names.deinit();
+
     try gz.setStruct(decl_inst, .{
         .src_node = node,
         .layout = layout,
@@ -6591,7 +6711,7 @@ fn forExpr(
         };
     }
 
-    var then_node = for_full.ast.then_expr;
+    const then_node = for_full.ast.then_expr;
 
     var then_scope = parent_gz.makeSubBlock(&cond_scope.base);
     defer then_scope.unstack();
@@ -7471,7 +7591,10 @@ fn localVarRef(
                 );
 
                 switch (ri.rl) {
-                    .ref, .ref_coerced_ty => return ptr_inst,
+                    .ref, .ref_coerced_ty => {
+                        local_ptr.used_as_lvalue = true;
+                        return ptr_inst;
+                    },
                     else => {
                         const loaded = try gz.addUnNode(.load, ptr_inst, ident);
                         return rvalueNoCoercePreRef(gz, ri, loaded, ident);
@@ -8041,7 +8164,7 @@ fn typeOf(
     }
     const payload_size: u32 = std.meta.fields(Zir.Inst.TypeOfPeer).len;
     const payload_index = try reserveExtra(astgen, payload_size + args.len);
-    var args_index = payload_index + payload_size;
+    const args_index = payload_index + payload_size;
 
     const typeof_inst = try gz.addExtendedMultiOpPayloadIndex(.typeof_peer, payload_index, args.len);
 
@@ -10840,6 +10963,9 @@ const Scope = struct {
         /// Track the identifier where it is discarded, like this `_ = foo;`.
        /// 0 means never discarded.
        discarded: Ast.TokenIndex = 0,
+        /// Whether this value is used as an lvalue after initialization.
+        /// If not, we know it can be `const`, so will emit a compile error if it is `var`.
+        used_as_lvalue: bool = false,
         /// String table index.
         name: u32,
         id_cat: IdCat,
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 609c2d569..733936546 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -51,7 +51,7 @@ pub fn convertCInclude(allocator: std.mem.Allocator, tree: Ast, node: Ast.Node.I
 fn callConvertCIncludeInternal(allocator: std.mem.Allocator, args: anytype) error{ OutOfMemory, Unsupported }!void {
     if (zig_builtin.zig_backend == .other or zig_builtin.zig_backend == .stage1) {
         const FrameSize = @sizeOf(@Frame(convertCIncludeInternal));
-        var child_frame = try allocator.alignedAlloc(u8, std.Target.stack_align, FrameSize);
+        const child_frame = try allocator.alignedAlloc(u8, std.Target.stack_align, FrameSize);
         defer allocator.free(child_frame);
 
         return await @asyncCall(child_frame, {}, convertCIncludeInternal, args);
diff --git a/tests/language_features/comptime_interpreter.zig b/tests/language_features/comptime_interpreter.zig
index 62d6fe358..da42399ac 100644
--- a/tests/language_features/comptime_interpreter.zig
+++ b/tests/language_features/comptime_interpreter.zig
@@ -262,7 +262,7 @@ test "ComptimeInterpreter - call comptime argument" {
     try std.testing.expect(result1.ty.simple_type == .type);
     try std.testing.expectEqual(Key{ .int_type = .{ .signedness = .unsigned, .bits = 8 } }, result1.val.?);
 
-    var result2 = try context.call(context.findFn("Foo"), &.{KV{
+    const result2 = try context.call(context.findFn("Foo"), &.{KV{
         .ty = .{ .simple_type = .bool },
         .val = .{ .simple_value = .bool_false },
     }});
@@ -298,7 +298,7 @@ const Context = struct {
     interpreter: *ComptimeInterpreter,
 
     pub fn init(source: []const u8) !Context {
-        var config = try allocator.create(zls.Config);
+        const config = try allocator.create(zls.Config);
         errdefer allocator.destroy(config);
 
         var document_store = try allocator.create(zls.DocumentStore);
diff --git a/tests/lsp_features/completion.zig b/tests/lsp_features/completion.zig
index 6bbdac91a..e0188a6d9 100644
--- a/tests/lsp_features/completion.zig
+++ b/tests/lsp_features/completion.zig
@@ -1675,7 +1675,7 @@ fn testCompletion(source: []const u8, expected_completions: []const Completion)
     if (missing.count() != 0 or unexpected.count() != 0) {
         var buffer = std.ArrayListUnmanaged(u8){};
         defer buffer.deinit(allocator);
-        var out = buffer.writer(allocator);
+        const out = buffer.writer(allocator);
 
         try printLabels(out, found, "found");
         try printLabels(out, missing, "missing");
diff --git a/tests/utility/diff.zig b/tests/utility/diff.zig
index 18cb7b10f..34fbabaed 100644
--- a/tests/utility/diff.zig
+++ b/tests/utility/diff.zig
@@ -2,7 +2,7 @@ const std = @import("std");
 const zls = @import("zls");
 
 fn gen(alloc: std.mem.Allocator, rand: std.rand.Random) ![]const u8 {
-    var buffer = try alloc.alloc(u8, rand.intRangeAtMost(usize, 0, 256));
+    const buffer = try alloc.alloc(u8, rand.intRangeAtMost(usize, 0, 256));
     for (buffer) |*b| b.* = rand.intRangeAtMost(u8, ' ', '~');
     return buffer;
 }