diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 87d28bdff..8402e33fa 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -9,7 +9,7 @@ const log = std.log.scoped(.store); const lsp = @import("lsp"); const Ast = std.zig.Ast; const BuildAssociatedConfig = @import("BuildAssociatedConfig.zig"); -const BuildConfig = @import("build_runner/shared.zig").BuildConfig; +pub const BuildConfig = @import("build_runner/shared.zig").BuildConfig; const tracy = @import("tracy"); const translate_c = @import("translate_c.zig"); const DocumentScope = @import("DocumentScope.zig"); @@ -102,69 +102,68 @@ pub const BuildFile = struct { self.impl.mutex.unlock(); } - /// Usage example: - /// ```zig - /// const package_uris: std.ArrayList([]const u8) = .empty; - /// defer { - /// for (package_uris) |uri| allocator.free(uri); - /// package_uris.deinit(allocator); - /// } - /// const success = try build_file.collectBuildConfigPackageUris(allocator, &package_uris); - /// ``` - pub fn collectBuildConfigPackageUris( - self: *BuildFile, - allocator: std.mem.Allocator, - package_uris: *std.ArrayList(Uri), - ) error{OutOfMemory}!bool { + /// Returns whether the `Uri` is a dependency of the given `BuildFile`. + /// May return `null` to indicate an inconclusive result because + /// the required build config has not been resolved yet. + /// + /// invalidates any pointers into `build_files` + /// **Thread safe** takes an exclusive lock + fn isAssociatedWith( + build_file: *BuildFile, + uri: Uri, + document_store: *DocumentStore, + ) error{OutOfMemory}!union(enum) { + unknown, + no, + /// Stores the `root_source_file`. Caller owns returned memory. 
+ yes: []const u8, + } { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - const build_config = self.tryLockConfig() orelse return false; - defer self.unlockConfig(); + const allocator = document_store.allocator; + var arena_allocator: std.heap.ArenaAllocator = .init(allocator); + defer arena_allocator.deinit(); + + const arena = arena_allocator.allocator(); - try package_uris.ensureUnusedCapacity(allocator, build_config.packages.len); - for (build_config.packages) |package| { - package_uris.appendAssumeCapacity(try .fromPath(allocator, package.path)); + var module_root_source_file_paths: std.ArrayList([]const u8) = .empty; + + { + const build_config = build_file.tryLockConfig() orelse return .unknown; + defer build_file.unlockConfig(); + + const module_paths = build_config.modules.map.keys(); + + try module_root_source_file_paths.ensureUnusedCapacity(arena, module_paths.len); + for (module_paths) |module_path| { + module_root_source_file_paths.appendAssumeCapacity(try arena.dupe(u8, module_path)); + } } - return true; - } - /// Usage example: - /// ```zig - /// const include_paths: std.ArrayList([]u8) = .empty; - /// defer { - /// for (include_paths) |path| allocator.free(path); - /// include_paths.deinit(allocator); - /// } - /// const success = try build_file.collectBuildConfigIncludePaths(allocator, &include_paths); - /// ``` - pub fn collectBuildConfigIncludePaths( - self: *BuildFile, - allocator: std.mem.Allocator, - include_paths: *std.ArrayList([]const u8), - ) !bool { - const tracy_zone = tracy.trace(@src()); - defer tracy_zone.end(); + var found_uris: Uri.ArrayHashMap(void) = .empty; - const build_config = self.tryLockConfig() orelse return false; - defer self.unlockConfig(); - - try include_paths.ensureUnusedCapacity(allocator, build_config.include_dirs.len); - for (build_config.include_dirs) |include_path| { - const absolute_path = if (std.fs.path.isAbsolute(include_path)) - try allocator.dupe(u8, include_path) - else blk: { - const 
build_file_path = self.uri.toFsPath(allocator) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.UnsupportedScheme => continue, - }; - const build_file_dirname = std.fs.path.dirname(build_file_path) orelse continue; - break :blk try std.fs.path.join(allocator, &.{ build_file_dirname, include_path }); - }; + var i: usize = 0; - include_paths.appendAssumeCapacity(absolute_path); + for (module_root_source_file_paths.items) |root_source_file| { + try found_uris.put(arena, try .fromPath(arena, root_source_file), {}); + + while (i < found_uris.count()) : (i += 1) { + const source_uri = found_uris.keys()[i]; + if (uri.eql(source_uri)) { + return .{ .yes = try allocator.dupe(u8, root_source_file) }; + } + if (isInStd(source_uri)) continue; + + const handle = document_store.getOrLoadHandle(source_uri) orelse return .unknown; + + const import_uris = try handle.getImportUris(); + try found_uris.ensureUnusedCapacity(arena, import_uris.len); + for (import_uris) |import_uri| found_uris.putAssumeCapacity(try import_uri.dupe(arena), {}); + } } - return true; + + return .no; } fn deinit(self: *BuildFile, allocator: std.mem.Allocator) void { @@ -208,18 +207,28 @@ pub const Handle = struct { /// not the build file should be skipped because it has previously been /// found to be "unassociated" with the handle. has_been_checked: std.DynamicBitSetUnmanaged, - - fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - allocator.free(self.potential_build_files); - self.has_been_checked.deinit(allocator); - self.* = undefined; - } }, /// The Handle has no associated build file (build.zig). none, /// The associated build file (build.zig) has been successfully resolved. 
- resolved: *BuildFile, + resolved: GetAssociatedBuildFileResult.Resolved, + + fn deinit(self: *@This(), allocator: std.mem.Allocator) void { + switch (self.*) { + .init, .none => {}, + .unresolved => |*unresolved| { + allocator.free(unresolved.potential_build_files); + unresolved.has_been_checked.deinit(allocator); + }, + .resolved => |resolved| { + allocator.free(resolved.root_source_file); + }, + } + self.* = undefined; + } } = .init, + + associated_compilation_units: GetAssociatedCompilationUnitsResult = .unresolved, }, const ZirOrZoir = union(Ast.Mode) { @@ -298,10 +307,8 @@ pub const Handle = struct { for (self.cimports.items(.source)) |source| allocator.free(source); self.cimports.deinit(allocator); - switch (self.impl.associated_build_file) { - .init, .none, .resolved => {}, - .unresolved => |*payload| payload.deinit(allocator), - } + self.impl.associated_build_file.deinit(allocator); + self.impl.associated_compilation_units.deinit(allocator); self.* = undefined; } @@ -410,32 +417,25 @@ pub const Handle = struct { }); } - /// Returns the associated build file (build.zig) of the handle. - /// - /// `DocumentStore.build_files` is guaranteed to contain this Uri. - /// Uri memory managed by its build_file - pub fn getAssociatedBuildFileUri(self: *Handle, document_store: *DocumentStore) error{OutOfMemory}!?Uri { - comptime std.debug.assert(supports_build_system); - switch (try self.getAssociatedBuildFileUri2(document_store)) { - .none, - .unresolved, - => return null, - .resolved => |build_file| return build_file.uri, - } - } - - /// Returns the associated build file (build.zig) of the handle. - /// - /// `DocumentStore.build_files` is guaranteed to contain this Uri. - /// Uri memory managed by its build_file - pub fn getAssociatedBuildFileUri2(self: *Handle, document_store: *DocumentStore) error{OutOfMemory}!union(enum) { + pub const GetAssociatedBuildFileResult = union(enum) { /// The Handle has no associated build file (build.zig). 
none, /// The associated build file (build.zig) has not been resolved yet. unresolved, /// The associated build file (build.zig) has been successfully resolved. - resolved: *BuildFile, - } { + resolved: Resolved, + + pub const Resolved = struct { + build_file: *BuildFile, + root_source_file: []const u8, + }; + }; + + /// Returns the associated build file (build.zig) of the handle. + /// + /// `DocumentStore.build_files` is guaranteed to contain this Uri. + /// Uri memory managed by its build_file + pub fn getAssociatedBuildFile(self: *Handle, document_store: *DocumentStore) error{OutOfMemory}!GetAssociatedBuildFileResult { comptime std.debug.assert(supports_build_system); self.impl.lock.lock(); @@ -467,18 +467,9 @@ pub const Handle = struct { }, .unresolved => |*unresolved| unresolved, .none => return .none, - .resolved => |build_file| return .{ .resolved = build_file }, + .resolved => |resolved| return .{ .resolved = resolved }, }; - // special case when there is only one potential build file - if (unresolved.potential_build_files.len == 1) { - const build_file = unresolved.potential_build_files[0]; - log.debug("Resolved build file of '{s}' as '{s}'", .{ self.uri.raw, build_file.uri.raw }); - unresolved.deinit(document_store.allocator); - self.impl.associated_build_file = .{ .resolved = build_file }; - return .{ .resolved = build_file }; - } - var has_missing_build_config = false; var it = unresolved.has_been_checked.iterator(.{ @@ -487,21 +478,29 @@ pub const Handle = struct { }); while (it.next()) |i| { const build_file = unresolved.potential_build_files[i]; - const is_associated = try document_store.uriAssociatedWithBuild(build_file, self.uri) orelse { - has_missing_build_config = true; - continue; - }; - - if (!is_associated) { - // the build file should be skipped in future calls. 
- unresolved.has_been_checked.set(i); - continue; + switch (try build_file.isAssociatedWith(self.uri, document_store)) { + .unknown => { + has_missing_build_config = true; + continue; + }, + .no => { + // the build file should be skipped in future calls. + unresolved.has_been_checked.set(i); + continue; + }, + .yes => |root_source_file| { + // log.debug("Resolved build file of '{s}' as '{s}' root={s}", .{ self.uri.raw, build_file.uri, root_source_file.raw }); + errdefer comptime unreachable; + self.impl.associated_build_file.deinit(document_store.allocator); + self.impl.associated_build_file = .{ + .resolved = .{ + .build_file = build_file, + .root_source_file = root_source_file, + }, + }; + return .{ .resolved = self.impl.associated_build_file.resolved }; + }, } - - log.debug("Resolved build file of '{s}' as '{s}'", .{ self.uri.raw, build_file.uri.raw }); - unresolved.deinit(document_store.allocator); - self.impl.associated_build_file = .{ .resolved = build_file }; - return .{ .resolved = build_file }; } if (has_missing_build_config) { @@ -511,11 +510,98 @@ pub const Handle = struct { return .unresolved; } - unresolved.deinit(document_store.allocator); + self.impl.associated_build_file.deinit(document_store.allocator); self.impl.associated_build_file = .none; return .none; } + pub const GetAssociatedCompilationUnitsResult = union(enum) { + /// The Handle has no associated compilation unit. + none, + /// The associated compilation unit has not been resolved yet. + unresolved, + /// The associated compilation unit has been successfully resolved to a list of root module. + resolved: []const []const u8, + + fn deinit(result: *GetAssociatedCompilationUnitsResult, allocator: std.mem.Allocator) void { + switch (result.*) { + .none, .unresolved => {}, + .resolved => |root_source_files| { + allocator.free(root_source_files); + }, + } + result.* = undefined; + } + }; + + /// Returns the root source file of the root module of the given handle. Same as `@import("root")`. 
+ pub fn getAssociatedCompilationUnits(self: *Handle, document_store: *DocumentStore) error{OutOfMemory}!GetAssociatedCompilationUnitsResult { + const build_file, const target_root_source_file = switch (self.impl.associated_compilation_units) { + else => return self.impl.associated_compilation_units, + .unresolved => switch (try self.getAssociatedBuildFile(document_store)) { + .none => return .none, + .unresolved => return .unresolved, + .resolved => |resolved| .{ resolved.build_file, resolved.root_source_file }, + }, + }; + + const build_config = build_file.tryLockConfig() orelse return .none; + defer build_file.unlockConfig(); + + const allocator = document_store.allocator; + const modules = &build_config.modules.map; + + var visted: std.DynamicBitSetUnmanaged = try .initEmpty(allocator, modules.count()); + defer visted.deinit(allocator); + + var queue: std.ArrayList(usize) = try .initCapacity(allocator, 1); + defer queue.deinit(allocator); + + const target_index = modules.getIndex(target_root_source_file).?; + + // We only care about the root source file of each root module so we convert them to a set. 
+ var root_modules: std.StringArrayHashMapUnmanaged(void) = .empty; + defer root_modules.deinit(allocator); + + try root_modules.ensureTotalCapacity(allocator, build_config.compilations.len); + for (build_config.compilations) |compile| { + root_modules.putAssumeCapacity(compile.root_module, {}); + } + + var results: std.ArrayList([]const u8) = .empty; + defer results.deinit(allocator); + + // Do a graph search from root modules until we reach `root_source_file` + for (root_modules.keys()) |root_module| { + visted.unsetAll(); + queue.clearRetainingCapacity(); + queue.appendAssumeCapacity(modules.getIndex(root_module).?); + + while (queue.pop()) |index| { + if (index == target_index) { + try results.append(allocator, root_module); + break; + } + + if (visted.isSet(index)) continue; + visted.set(index); + + const imported_modules = modules.values()[index].import_table.map.values(); + try queue.ensureUnusedCapacity(allocator, imported_modules.len); + for (imported_modules) |root_source_file| { + queue.appendAssumeCapacity(modules.getIndex(root_source_file) orelse continue); + } + } + } + + if (results.items.len == 0) { + self.impl.associated_compilation_units = .none; + } else { + self.impl.associated_compilation_units = .{ .resolved = try results.toOwnedSlice(allocator) }; + } + return self.impl.associated_compilation_units; + } + fn getLazy( self: *Handle, comptime T: type, @@ -1161,19 +1247,13 @@ fn loadBuildConfiguration(self: *DocumentStore, build_file_uri: Uri, build_file_ .ignore_unknown_fields = true, .allocate = .alloc_always, }; - const build_config = std.json.parseFromSlice( + + return std.json.parseFromSlice( BuildConfig, self.allocator, zig_run_result.stdout, parse_options, ) catch return error.InvalidBuildConfig; - errdefer build_config.deinit(); - - for (build_config.value.packages) |*pkg| { - pkg.path = try std.fs.path.resolve(build_config.arena.allocator(), &.{ build_file_path, "..", pkg.path }); - } - - return build_config; } /// Checks if the 
build.zig file is accessible in dir. @@ -1187,7 +1267,7 @@ fn buildDotZigExists(dir_path: []const u8) bool { /// Walk down the tree towards the uri. When we hit `build.zig` files /// add them to the list of potential build files. /// `build.zig` files higher in the filesystem have precedence. -/// See `Handle.getAssociatedBuildFileUri`. +/// See `Handle.getAssociatedBuildFile`. /// Caller owns returned memory. fn collectPotentialBuildFiles(self: *DocumentStore, uri: Uri) error{OutOfMemory}![]*BuildFile { if (isInStd(uri)) return &.{}; @@ -1279,12 +1359,12 @@ fn uriAssociatedWithBuild( const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - var checked_uris: Uri.ArrayHashMap(void) = .empty; + var checked_uris: std.StringHashMapUnmanaged(void) = .empty; defer checked_uris.deinit(self.allocator); var package_uris: std.ArrayList(Uri) = .empty; defer { - for (package_uris.items) |package_uri| package_uri.deinit(self.allocator); + for (package_uris.items) |package_uri| self.allocator.free(package_uri); package_uris.deinit(self.allocator); } const success = try build_file.collectBuildConfigPackageUris(self.allocator, &package_uris); @@ -1350,7 +1430,11 @@ fn createAndStoreDocument( errdefer new_handle.deinit(); if (supports_build_system and isBuildFile(uri) and !isInStd(uri)) { - _ = self.getOrLoadBuildFile(uri); + if (self.getBuildFile(uri)) |build_file| { + self.invalidateBuildFile(build_file.uri); + } else { + _ = self.getOrLoadBuildFile(uri); + } } self.lock.lock(); @@ -1360,10 +1444,15 @@ fn createAndStoreDocument( errdefer if (!gop.found_existing) std.debug.assert(self.handles.swapRemove(uri)); if (gop.found_existing) { + std.debug.assert(new_handle.impl.associated_build_file == .init); + std.debug.assert(new_handle.impl.associated_compilation_units == .unresolved); if (lsp_synced) { new_handle.impl.associated_build_file = gop.value_ptr.*.impl.associated_build_file; gop.value_ptr.*.impl.associated_build_file = .init; + 
new_handle.impl.associated_compilation_units = gop.value_ptr.*.impl.associated_compilation_units; + gop.value_ptr.*.impl.associated_compilation_units = .unresolved; + new_handle.uri = gop.key_ptr.*; gop.value_ptr.*.deinit(); gop.value_ptr.*.* = new_handle; @@ -1460,9 +1549,20 @@ pub fn collectDependencies( } if (supports_build_system) no_build_file: { - const build_file_uri = try handle.getAssociatedBuildFileUri(store) orelse break :no_build_file; - const build_file = store.getBuildFile(build_file_uri) orelse break :no_build_file; - _ = try build_file.collectBuildConfigPackageUris(allocator, dependencies); + const build_file = switch (try handle.getAssociatedBuildFile(store)) { + .none, .unresolved => break :no_build_file, + .resolved => |resolved| resolved.build_file, + }; + + const build_config = build_file.tryLockConfig() orelse break :no_build_file; + defer build_file.unlockConfig(); + + const module_paths = build_config.modules.map.keys(); + + try dependencies.ensureUnusedCapacity(allocator, module_paths.len); + for (module_paths) |module_path| { + dependencies.appendAssumeCapacity(try .fromPath(allocator, module_path)); + } } } @@ -1477,6 +1577,9 @@ pub fn collectIncludeDirs( ) !bool { comptime std.debug.assert(supports_build_system); + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); + var arena_allocator: std.heap.ArenaAllocator = .init(allocator); defer arena_allocator.deinit(); @@ -1498,10 +1601,32 @@ pub fn collectIncludeDirs( include_dirs.appendAssumeCapacity(try allocator.dupe(u8, native_include_dir)); } - const collected_all = switch (try handle.getAssociatedBuildFileUri2(store)) { + const collected_all = switch (try handle.getAssociatedBuildFile(store)) { .none => true, .unresolved => false, - .resolved => |build_file| try build_file.collectBuildConfigIncludePaths(allocator, include_dirs), + .resolved => |resolved| collected_all: { + const build_config = resolved.build_file.tryLockConfig() orelse break :collected_all false; + defer 
resolved.build_file.unlockConfig(); + + const module = build_config.modules.map.get(resolved.root_source_file) orelse break :collected_all true; + + try include_dirs.ensureUnusedCapacity(allocator, module.include_dirs.len); + for (module.include_dirs) |include_path| { + const absolute_path = if (std.fs.path.isAbsolute(include_path)) + try allocator.dupe(u8, include_path) + else blk: { + const build_file_path = resolved.build_file.uri.toFsPath(allocator) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.UnsupportedScheme => continue, + }; + const build_file_dirname = std.fs.path.dirname(build_file_path) orelse continue; + break :blk try std.fs.path.join(allocator, &.{ build_file_dirname, include_path }); + }; + + include_dirs.appendAssumeCapacity(absolute_path); + } + break :collected_all true; + }, }; return collected_all; @@ -1518,18 +1643,20 @@ pub fn collectCMacros( ) !bool { comptime std.debug.assert(supports_build_system); - const collected_all = switch (try handle.getAssociatedBuildFileUri2(store)) { + const collected_all = switch (try handle.getAssociatedBuildFile(store)) { .none => true, .unresolved => false, - .resolved => |build_file| blk: { - const build_config = build_file.tryLockConfig() orelse break :blk false; - defer build_file.unlockConfig(); + .resolved => |resolved| collected_all: { + const build_config = resolved.build_file.tryLockConfig() orelse break :collected_all false; + defer resolved.build_file.unlockConfig(); + + const module = build_config.modules.map.get(resolved.root_source_file) orelse break :collected_all true; - try c_macros.ensureUnusedCapacity(allocator, build_config.c_macros.len); - for (build_config.c_macros) |c_macro| { + try c_macros.ensureUnusedCapacity(allocator, module.c_macros.len); + for (module.c_macros) |c_macro| { c_macros.appendAssumeCapacity(try allocator.dupe(u8, c_macro)); } - break :blk true; + break :collected_all true; }, }; @@ -1700,66 +1827,111 @@ fn 
publishCimportDiagnostics(self: *DocumentStore, handle: *Handle) !void { try self.diagnostics_collection.publishDiagnostics(); } +pub const UriFromImportStringResult = union(enum) { + none, + one: Uri, + many: []const Uri, + + pub fn deinit(result: *UriFromImportStringResult, allocator: std.mem.Allocator) void { + switch (result.*) { + .none => {}, + .one => |uri| uri.deinit(allocator), + .many => |uris| { + for (uris) |uri| uri.deinit(allocator); + allocator.free(uris); + }, + } + } +}; + /// takes the string inside a @import() node (without the quotation marks) /// and returns it's uri /// caller owns the returned memory /// **Thread safe** takes a shared lock -pub fn uriFromImportStr(self: *DocumentStore, allocator: std.mem.Allocator, handle: *Handle, import_str: []const u8) error{OutOfMemory}!?Uri { +pub fn uriFromImportStr( + self: *DocumentStore, + allocator: std.mem.Allocator, + handle: *Handle, + import_str: []const u8, +) error{OutOfMemory}!UriFromImportStringResult { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); + if (std.mem.endsWith(u8, import_str, ".zig") or std.mem.endsWith(u8, import_str, ".zon")) { + const base_path = handle.uri.toFsPath(allocator) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.UnsupportedScheme => return .none, + }; + defer allocator.free(base_path); + const uri = try resolveFileImportString(allocator, base_path, import_str) orelse return .none; + return .{ .one = uri }; + } + if (std.mem.eql(u8, import_str, "std")) { - const zig_lib_dir = self.config.zig_lib_dir orelse return null; + const zig_lib_dir = self.config.zig_lib_dir orelse return .none; const std_path = try zig_lib_dir.join(allocator, &.{ "std", "std.zig" }); defer allocator.free(std_path); - return try .fromPath(allocator, std_path); - } else if (std.mem.eql(u8, import_str, "builtin")) { + return .{ .one = try .fromPath(allocator, std_path) }; + } + + if (std.mem.eql(u8, import_str, "builtin")) { if 
(supports_build_system) { - if (try handle.getAssociatedBuildFileUri(self)) |build_file_uri| { - const build_file = self.getBuildFile(build_file_uri).?; - if (build_file.builtin_uri) |builtin_uri| { - return try builtin_uri.dupe(allocator); - } + switch (try handle.getAssociatedBuildFile(self)) { + .none, .unresolved => {}, + .resolved => |resolved| { + if (resolved.build_file.builtin_uri) |builtin_uri| { + return .{ .one = try builtin_uri.dupe(allocator) }; + } + }, } } if (self.config.builtin_path) |builtin_path| { - return try .fromPath(allocator, builtin_path); + return .{ .one = try .fromPath(allocator, builtin_path) }; } - return null; - } else if (!std.mem.endsWith(u8, import_str, ".zig")) { - if (!supports_build_system) return null; + return .none; + } - if (isBuildFile(handle.uri)) blk: { - const build_file = self.getBuildFile(handle.uri) orelse break :blk; - const build_config = build_file.tryLockConfig() orelse break :blk; - defer build_file.unlockConfig(); + if (!supports_build_system) return .none; - for (build_config.deps_build_roots) |dep_build_root| { - if (std.mem.eql(u8, import_str, dep_build_root.name)) { - return try .fromPath(allocator, dep_build_root.path); - } - } - } else if (try handle.getAssociatedBuildFileUri(self)) |build_file_uri| blk: { - const build_file = self.getBuildFile(build_file_uri).?; - const build_config = build_file.tryLockConfig() orelse break :blk; - defer build_file.unlockConfig(); + if (std.mem.eql(u8, import_str, "root")) { + const root_source_files = switch (try handle.getAssociatedCompilationUnits(self)) { + .none, .unresolved => return .none, + .resolved => |root_source_files| root_source_files, + }; + var uris: std.ArrayList(Uri) = try .initCapacity(allocator, root_source_files.len); + defer { + for (uris.items) |uri| uri.deinit(allocator); + uris.deinit(allocator); + } + for (root_source_files) |root_source_file| { + uris.appendAssumeCapacity(try .fromPath(allocator, root_source_file)); + } + return .{ .many = try 
uris.toOwnedSlice(allocator) }; + } - for (build_config.packages) |pkg| { - if (std.mem.eql(u8, import_str, pkg.name)) { - return try .fromPath(allocator, pkg.path); - } - } + if (isBuildFile(handle.uri)) blk: { + const build_file = self.getBuildFile(handle.uri) orelse break :blk; + const build_config = build_file.tryLockConfig() orelse break :blk; + defer build_file.unlockConfig(); + + if (build_config.dependencies.map.get(import_str)) |path| { + return .{ .one = try .fromPath(allocator, path) }; } - return null; - } else { - const base_path = handle.uri.toFsPath(allocator) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.UnsupportedScheme => return null, - }; - defer allocator.free(base_path); - return try resolveFileImportString(allocator, base_path, import_str); + return .none; + } + + switch (try handle.getAssociatedBuildFile(self)) { + .none, .unresolved => return .none, + .resolved => |resolved| { + const build_config = resolved.build_file.tryLockConfig() orelse return .none; + defer resolved.build_file.unlockConfig(); + + const module = build_config.modules.map.get(resolved.root_source_file) orelse return .none; + const imported_root_source_file = module.import_table.map.get(import_str) orelse return .none; + return .{ .one = try .fromPath(allocator, imported_root_source_file) }; + }, } } diff --git a/src/analysis.zig b/src/analysis.zig index bac65e1d3..8045c4973 100644 --- a/src/analysis.zig +++ b/src/analysis.zig @@ -2284,23 +2284,16 @@ fn resolveTypeOfNodeUncached(analyser: *Analyser, options: ResolveOptions) error const import_param = params[0]; if (tree.nodeTag(import_param) != .string_literal) return null; - const import_str = tree.tokenSlice(tree.nodeMainToken(import_param)); - const import_uri = (try analyser.store.uriFromImportStr( - analyser.arena, - handle, - import_str[1 .. 
import_str.len - 1], - )) orelse (try analyser.store.uriFromImportStr( - analyser.arena, - analyser.root_handle orelse return null, - import_str[1 .. import_str.len - 1], - )) orelse return null; - - const new_handle = analyser.store.getOrLoadHandle(import_uri) orelse return null; + const string_literal = tree.tokenSlice(tree.nodeMainToken(import_param)); + const import_string = string_literal[1 .. string_literal.len - 1]; + if (std.mem.endsWith(u8, import_string, ".zon")) { + // TODO + return null; + } - return .{ - .data = .{ .container = .root(new_handle) }, - .is_type_val = true, - }; + if (try analyser.resolveImportString(handle, import_string)) |ty| return ty; + if (try analyser.resolveImportString(analyser.root_handle orelse return null, import_string)) |ty| return ty; + return null; }, .c_import => { if (!DocumentStore.supports_build_system) return null; @@ -4407,6 +4400,34 @@ pub const ScopeWithHandle = struct { } }; +pub fn resolveImportString(analyser: *Analyser, handle: *DocumentStore.Handle, import_string: []const u8) error{OutOfMemory}!?Type { + const result = try analyser.store.uriFromImportStr(analyser.arena, handle, import_string); + switch (result) { + .none => return null, + .one => |uri| { + const node_handle = analyser.store.getOrLoadHandle(uri) orelse return null; + return .{ + .data = .{ .container = .root(node_handle) }, + .is_type_val = true, + }; + }, + .many => |uris| { + var entries: std.ArrayList(Type.Data.EitherEntry) = try .initCapacity(analyser.arena, uris.len); + for (uris) |uri| { + const node_handle = analyser.store.getOrLoadHandle(uri) orelse continue; + entries.appendAssumeCapacity(.{ + .type_data = .{ .container = .root(node_handle) }, + .descriptor = "", + }); + } + return .{ + .data = .{ .either = entries.items }, + .is_type_val = true, + }; + }, + } +} + /// Look up `type_name` in 'zig_lib_dir/std/builtin.zig' and return it as an instance /// Useful for functionality related to builtin fns pub fn 
instanceStdBuiltinType(analyser: *Analyser, type_name: []const u8) error{OutOfMemory}!?Type { @@ -4681,12 +4702,7 @@ pub fn getFieldAccessType( .start = import_str_tok.loc.start + 1, .end = import_str_tok.loc.end - 1, }); - const import_uri = try analyser.store.uriFromImportStr(analyser.arena, handle, import_str) orelse return null; - const node_handle = analyser.store.getOrLoadHandle(import_uri) orelse return null; - current_type = .{ - .data = .{ .container = .root(node_handle) }, - .is_type_val = true, - }; + current_type = try analyser.resolveImportString(handle, import_str) orelse return null; _ = tokenizer.next(); // eat the .r_paren continue; // Outermost `while` } diff --git a/src/build_runner/build_runner.zig b/src/build_runner/build_runner.zig index 70f782c44..9c0750e3d 100644 --- a/src/build_runner/build_runner.zig +++ b/src/build_runner/build_runner.zig @@ -15,17 +15,17 @@ //! `zig build --build-runner /path/to/zls/src/build_runner/build_runner.zig` (if the cwd contains build.zig) //! -const root = @import("@build"); const std = @import("std"); const builtin = @import("builtin"); -const assert = std.debug.assert; const mem = std.mem; const process = std.process; -const ArrayListManaged = if (@hasDecl(std, "array_list")) std.array_list.Managed else std.ArrayList; -const ArrayList = if (@hasDecl(std, "array_list")) std.ArrayList else std.ArrayList; +const File = std.fs.File; const Step = std.Build.Step; const Allocator = std.mem.Allocator; +const fatal = std.process.fatal; +const runner = @This(); +pub const root = @import("@build"); pub const dependencies = @import("@dependencies"); pub const std_options: std.Options = .{ @@ -34,18 +34,17 @@ pub const std_options: std.Options = .{ .crypto_fork_safety = false, }; -///! This is a modified build runner to extract information out of build.zig -///! 
Modified version of lib/build_runner.zig pub fn main() !void { - // Here we use an ArenaAllocator backed by a DirectAllocator because a build is a short-lived, - // one shot program. We don't need to waste time freeing memory and finding places to squish - // bytes into. So we free everything all at once at the very end. - var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); + // The build runner is often short-lived, but thanks to `--watch` and `--webui`, that's not + // always the case. So, we do need a true gpa for some things. + var debug_gpa_state: std.heap.DebugAllocator(.{}) = .init; + defer _ = debug_gpa_state.deinit(); + const gpa = debug_gpa_state.allocator(); + + // ...but we'll back our arena by `std.heap.page_allocator` for efficiency. + var single_threaded_arena: std.heap.ArenaAllocator = .init(std.heap.page_allocator); defer single_threaded_arena.deinit(); - - var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ - .child_allocator = single_threaded_arena.allocator(), - }; + var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ .child_allocator = single_threaded_arena.allocator() }; const arena = thread_safe_arena.allocator(); const args = try process.argsAlloc(arena); @@ -118,28 +117,26 @@ pub fn main() !void { dependencies.root_deps, ); - var targets = ArrayListManaged([]const u8).init(arena); - var debug_log_scopes = ArrayListManaged([]const u8).init(arena); + var targets: std.ArrayList([]const u8) = .empty; var thread_pool_options: std.Thread.Pool.Options = .{ .allocator = arena }; var install_prefix: ?[]const u8 = null; var dir_list: std.Build.DirList = .{}; var max_rss: u64 = 0; var skip_oom_steps = false; - var seed: u32 = 0; var output_tmp_nonce: ?[16]u8 = null; - var debounce_interval_ms: u16 = 50; var watch = false; var check_step_only = false; + var debounce_interval_ms: u16 = 50; while (nextArg(args, &arg_idx)) |arg| { if (mem.startsWith(u8, arg, "-Z")) { - if (arg.len != 18) fatal("bad argument: 
'{s}'", .{arg}); + if (arg.len != 18) fatalWithHint("bad argument: '{s}'", .{arg}); output_tmp_nonce = arg[2..18].*; } else if (mem.startsWith(u8, arg, "-D")) { const option_contents = arg[2..]; if (option_contents.len == 0) - fatal("expected option name after '-D'", .{}); + fatalWithHint("expected option name after '-D'", .{}); if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { const option_name = option_contents[0..name_end]; const option_value = option_contents[name_end + 1 ..]; @@ -169,7 +166,7 @@ pub fn main() !void { } else if (mem.startsWith(u8, arg, "--release=")) { const text = arg["--release=".len..]; builder.release_mode = std.meta.stringToEnum(std.Build.ReleaseMode, text) orelse { - fatal("expected [off|any|fast|safe|small] in '{s}', found '{s}'", .{ + fatalWithHint("expected [off|any|fast|safe|small] in '{s}', found '{s}'", .{ arg, text, }); }; @@ -198,46 +195,59 @@ pub fn main() !void { builder.libc_file = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--color")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected [auto|on|off] after '{s}'", .{arg}); + fatalWithHint("expected [auto|on|off] after '{s}'", .{arg}); _ = next_arg; } else if (mem.eql(u8, arg, "--summary")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected [all|new|failures|none] after '{s}'", .{arg}); + fatalWithHint("expected [all|new|failures|none] after '{s}'", .{arg}); _ = next_arg; } else if (mem.eql(u8, arg, "--seed")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected u32 after '{s}'", .{arg}); - seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| { + fatalWithHint("expected u32 after '{s}'", .{arg}); + graph.random_seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| { fatal("unable to parse seed '{s}' as unsigned 32-bit integer: {s}\n", .{ next_arg, @errorName(err), }); }; + } else if (mem.eql(u8, arg, "--build-id")) { + builder.build_id = .fast; + } else if (mem.startsWith(u8, arg, 
"--build-id=")) { + const style = arg["--build-id=".len..]; + builder.build_id = std.zig.BuildId.parse(style) catch |err| { + fatal("unable to parse --build-id style '{s}': {s}", .{ + style, @errorName(err), + }); + }; } else if (mem.eql(u8, arg, "--debounce")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected u16 after '{s}'", .{arg}); + fatalWithHint("expected u16 after '{s}'", .{arg}); debounce_interval_ms = std.fmt.parseUnsigned(u16, next_arg, 0) catch |err| { fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {s}\n", .{ next_arg, @errorName(err), }); }; + } else if (mem.eql(u8, arg, "--webui")) { + fatal("argument '{s}' is not available", .{arg}); + } else if (mem.startsWith(u8, arg, "--webui=")) { + fatal("argument '{s}' is not available", .{arg}); } else if (mem.eql(u8, arg, "--debug-log")) { - const next_arg = nextArgOrFatal(args, &arg_idx); - try debug_log_scopes.append(next_arg); + fatal("argument '{s}' is not available", .{arg}); } else if (mem.eql(u8, arg, "--debug-pkg-config")) { builder.debug_pkg_config = true; + } else if (mem.eql(u8, arg, "--debug-rt")) { + graph.debug_compiler_runtime_libs = true; } else if (mem.eql(u8, arg, "--debug-compile-errors")) { builder.debug_compile_errors = true; + } else if (mem.eql(u8, arg, "--debug-incremental")) { + builder.debug_incremental = true; } else if (mem.eql(u8, arg, "--system")) { // The usage text shows another argument after this parameter // but it is handled by the parent process. The build runner // only sees this flag. graph.system_package_mode = true; } else if (mem.eql(u8, arg, "--libc-runtimes") or mem.eql(u8, arg, "--glibc-runtimes")) { - if (@hasField(std.Build, "glibc_runtimes_dir")) { - builder.glibc_runtimes_dir = nextArgOrFatal(args, &arg_idx); - } else { - builder.libc_runtimes_dir = nextArgOrFatal(args, &arg_idx); - } + // --glibc-runtimes was the old name of the flag; kept for compatibility for now. 
+ builder.libc_runtimes_dir = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--verbose-link")) { builder.verbose_link = true; } else if (mem.eql(u8, arg, "--verbose-air")) { @@ -246,7 +256,7 @@ pub fn main() !void { builder.verbose_llvm_ir = "-"; } else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) { builder.verbose_llvm_ir = arg["--verbose-llvm-ir=".len..]; - } else if (mem.eql(u8, arg, "--verbose-llvm-bc=")) { + } else if (mem.startsWith(u8, arg, "--verbose-llvm-bc=")) { builder.verbose_llvm_bc = arg["--verbose-llvm-bc=".len..]; } else if (mem.eql(u8, arg, "--verbose-cimport")) { builder.verbose_cimport = true; @@ -260,6 +270,10 @@ pub fn main() !void { watch = true; } else if (mem.eql(u8, arg, "--check-only")) { // ZLS only check_step_only = true; + } else if (mem.eql(u8, arg, "--time-report")) { + fatal("argument '{s}' is not available", .{arg}); + } else if (mem.eql(u8, arg, "--fuzz")) { + fatal("argument '{s}' is not available", .{arg}); } else if (mem.eql(u8, arg, "-fincremental")) { graph.incremental = true; } else if (mem.eql(u8, arg, "-fno-incremental")) { @@ -284,6 +298,10 @@ pub fn main() !void { builder.enable_darling = true; } else if (mem.eql(u8, arg, "-fno-darling")) { builder.enable_darling = false; + } else if (mem.eql(u8, arg, "-fallow-so-scripts")) { + graph.allow_so_scripts = true; + } else if (mem.eql(u8, arg, "-fno-allow-so-scripts")) { + graph.allow_so_scripts = false; } else if (mem.eql(u8, arg, "-freference-trace")) { builder.reference_trace = 256; } else if (mem.startsWith(u8, arg, "-freference-trace=")) { @@ -311,10 +329,10 @@ pub fn main() !void { builder.args = argsRest(args, arg_idx); break; } else { - fatal("unrecognized argument: '{s}'", .{arg}); + fatalWithHint("unrecognized argument: '{s}'", .{arg}); } } else { - try targets.append(arg); + try targets.append(arena, arg); } } @@ -323,7 +341,6 @@ pub fn main() !void { }); defer main_progress_node.end(); - builder.debug_log_scopes = debug_log_scopes.items; 
builder.resolveInstallPrefix(install_prefix, dir_list); { var prog_node = main_progress_node.start("Configure", 0); @@ -333,7 +350,7 @@ pub fn main() !void { } if (graph.needed_lazy_dependencies.entries.len != 0) { - var buffer: ArrayList(u8) = .{}; + var buffer: std.ArrayList(u8) = .{}; for (graph.needed_lazy_dependencies.keys()) |k| { try buffer.appendSlice(arena, k); try buffer.append(arena, '\n'); @@ -361,16 +378,16 @@ pub fn main() !void { validateSystemLibraryOptions(builder); var run: Run = .{ + .gpa = gpa, .max_rss = max_rss, .max_rss_is_default = false, .max_rss_mutex = .{}, .skip_oom_steps = skip_oom_steps, - .memory_blocked_steps = .init(arena), + .watch = watch, + .memory_blocked_steps = .empty, .thread_pool = undefined, // set below - .claimed_rss = 0, - .watch = watch, .cycle = 0, }; @@ -384,14 +401,12 @@ pub fn main() !void { if (!watch) { try extractBuildInformation( - arena, builder, arena, main_progress_node, &run, - seed, ); - return; + std.process.exit(0); } var w = try Watch.init(); @@ -400,7 +415,7 @@ pub fn main() !void { fn do(ww: *Watch) void { while (true) { var buffer: [1]u8 = undefined; - const amt = std.fs.File.stdin().read(&buffer) catch process.exit(1); + const amt = File.stdin().read(&buffer) catch process.exit(1); if (amt == 0) process.exit(0); switch (buffer[0]) { '\x00' => ww.trigger(), @@ -411,30 +426,28 @@ pub fn main() !void { }.do, .{&w}); message_thread.detach(); - const gpa = arena; - - var step_stack = try stepNamesToStepStack(gpa, builder, targets.items, check_step_only); + var step_stack = try resolveStepNames(gpa, builder, targets.items, check_step_only); + defer step_stack.deinit(gpa); if (step_stack.count() == 0) { // This means that `enable_build_on_save == null` and the project contains no "check" step. 
return; } - prepare(gpa, builder, &step_stack, &run, seed) catch |err| switch (err) { - error.UncleanExit => process.exit(1), - else => return err, + const starting_steps = try gpa.dupe(*Step, step_stack.keys()); + defer gpa.free(starting_steps); + + prepare(builder, &step_stack, &run) catch |err| switch (err) { + error.DependencyLoopDetected => process.exit(1), + else => |e| return e, }; rebuild: while (true) : (run.cycle += 1) { - runSteps( - gpa, + try runSteps( builder, &step_stack, main_progress_node, &run, - ) catch |err| switch (err) { - error.UncleanExit => process.exit(1), - else => return err, - }; + ); try w.update(gpa, step_stack.keys()); @@ -522,69 +535,68 @@ const Watch = struct { }; const Run = struct { + gpa: Allocator, max_rss: u64, max_rss_is_default: bool, max_rss_mutex: std.Thread.Mutex, skip_oom_steps: bool, - memory_blocked_steps: ArrayListManaged(*Step), + watch: bool, + /// Allocated into `gpa`. + memory_blocked_steps: std.ArrayList(*Step), thread_pool: std.Thread.Pool, - claimed_rss: usize, - watch: bool, cycle: u32, }; -fn stepNamesToStepStack( +fn resolveStepNames( gpa: Allocator, b: *std.Build, step_names: []const []const u8, check_step_only: bool, ) !std.AutoArrayHashMapUnmanaged(*Step, void) { - var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; - errdefer step_stack.deinit(gpa); + var starting_steps: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; + errdefer starting_steps.deinit(gpa); if (step_names.len == 0) { if (b.top_level_steps.get("check")) |tls| { - try step_stack.put(gpa, &tls.step, {}); + try starting_steps.put(gpa, &tls.step, {}); } else if (!check_step_only) { - try step_stack.put(gpa, b.default_step, {}); + try starting_steps.put(gpa, b.default_step, {}); } } else { - try step_stack.ensureUnusedCapacity(gpa, step_names.len); + try starting_steps.ensureUnusedCapacity(gpa, step_names.len); for (0..step_names.len) |i| { const step_name = step_names[step_names.len - i - 1]; const s = 
b.top_level_steps.get(step_name) orelse { std.debug.print("no step named '{s}'\n access the help menu with 'zig build -h'\n", .{step_name}); process.exit(1); }; - step_stack.putAssumeCapacity(&s.step, {}); + starting_steps.putAssumeCapacity(&s.step, {}); } } - return step_stack; + return starting_steps; } fn prepare( - gpa: Allocator, b: *std.Build, - step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), + unpopulated_step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), run: *Run, - seed: u32, -) error{ OutOfMemory, UncleanExit }!void { - const starting_steps = try gpa.dupe(*Step, step_stack.keys()); +) error{ OutOfMemory, DependencyLoopDetected }!void { + const gpa = run.gpa; + + const starting_steps = try gpa.dupe(*Step, unpopulated_step_stack.keys()); defer gpa.free(starting_steps); - var rng = std.Random.DefaultPrng.init(seed); + var rng = std.Random.DefaultPrng.init(b.graph.random_seed); const rand = rng.random(); rand.shuffle(*Step, starting_steps); for (starting_steps) |s| { - constructGraphAndCheckForDependencyLoop(b, s, step_stack, rand) catch |err| switch (err) { - error.DependencyLoopDetected => return uncleanExit(), - else => |e| return e, - }; + try constructGraphAndCheckForDependencyLoop(gpa, b, s, unpopulated_step_stack, rand); } + const step_stack = unpopulated_step_stack; { // Check that we have enough memory to complete the build. 
@@ -606,22 +618,19 @@ fn prepare( if (run.max_rss_is_default) { std.debug.print("note: use --maxrss to override the default", .{}); } - return uncleanExit(); } } } fn runSteps( - gpa: std.mem.Allocator, b: *std.Build, steps_stack: *const std.AutoArrayHashMapUnmanaged(*Step, void), parent_prog_node: std.Progress.Node, run: *Run, -) error{ OutOfMemory, UncleanExit }!void { +) error{}!void { const thread_pool = &run.thread_pool; - const steps = steps_stack.keys(); - var step_prog = parent_prog_node.start("steps", steps.len); + var step_prog = parent_prog_node.start("steps", steps_stack.count()); defer step_prog.end(); var wait_group: std.Thread.WaitGroup = .{}; @@ -630,23 +639,12 @@ fn runSteps( // Here we spawn the initial set of tasks with a nice heuristic - // dependency order. Each worker when it finishes a step will then // check whether it should run any dependants. - for (steps) |step| { + for (steps_stack.keys()) |step| { if (step.state == .skipped_oom) continue; - wait_group.start(); - thread_pool.spawn(workerMakeOneStep, .{ - &wait_group, gpa, b, steps_stack, step, step_prog, run, - }) catch @panic("OOM"); - } - - if (run.watch) { - for (steps) |step| { - const step_id: u32 = @intCast(steps_stack.getIndex(step).?); - // missing fields: - // - result_error_msgs - // - result_stderr - serveWatchErrorBundle(step_id, run.cycle, step.result_error_bundle) catch @panic("failed to send watch errors"); - } + thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{ + &wait_group, b, steps_stack, step, step_prog, run, + }); } } @@ -662,27 +660,34 @@ fn runSteps( /// when it finishes executing in `workerMakeOneStep`, it spawns next steps /// to run in random order fn constructGraphAndCheckForDependencyLoop( + gpa: Allocator, b: *std.Build, s: *Step, step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), rand: std.Random, -) error{ OutOfMemory, DependencyLoopDetected }!void { +) !void { switch (s.state) { - .precheck_started => return error.DependencyLoopDetected, + 
.precheck_started => { + return error.DependencyLoopDetected; + }, .precheck_unstarted => { s.state = .precheck_started; - try step_stack.ensureUnusedCapacity(b.allocator, s.dependencies.items.len); + try step_stack.ensureUnusedCapacity(gpa, s.dependencies.items.len); // We dupe to avoid shuffling the steps in the summary, it depends // on s.dependencies' order. - const deps = b.allocator.dupe(*Step, s.dependencies.items) catch @panic("OOM"); + const deps = gpa.dupe(*Step, s.dependencies.items) catch @panic("OOM"); + defer gpa.free(deps); + rand.shuffle(*Step, deps); for (deps) |dep| { - try step_stack.put(b.allocator, dep, {}); + try step_stack.put(gpa, dep, {}); try dep.dependants.append(b.allocator, s); - try constructGraphAndCheckForDependencyLoop(b, dep, step_stack, rand); + constructGraphAndCheckForDependencyLoop(gpa, b, dep, step_stack, rand) catch |err| { + return err; + }; } s.state = .precheck_done; @@ -690,26 +695,23 @@ fn constructGraphAndCheckForDependencyLoop( .precheck_done => {}, // These don't happen until we actually run the step graph. - .dependency_failure, - .running, - .success, - .failure, - .skipped, - .skipped_oom, - => {}, + .dependency_failure => unreachable, + .running => unreachable, + .success => unreachable, + .failure => unreachable, + .skipped => unreachable, + .skipped_oom => unreachable, } } fn workerMakeOneStep( wg: *std.Thread.WaitGroup, - gpa: std.mem.Allocator, b: *std.Build, steps_stack: *const std.AutoArrayHashMapUnmanaged(*Step, void), s: *Step, prog_node: std.Progress.Node, run: *Run, ) void { - defer wg.finish(); const thread_pool = &run.thread_pool; // First, check the conditions for running this step. If they are not met, @@ -745,7 +747,7 @@ fn workerMakeOneStep( if (new_claimed_rss > run.max_rss) { // Running this step right now could possibly exceed the allotted RSS. // Add this step to the queue of memory-blocked steps. 
- run.memory_blocked_steps.append(s) catch @panic("OOM"); + run.memory_blocked_steps.append(run.gpa, s) catch @panic("OOM"); return; } @@ -759,17 +761,17 @@ fn workerMakeOneStep( } } - var sub_prog_node = prog_node.start(s.name, 0); + const sub_prog_node = prog_node.start(s.name, 0); defer sub_prog_node.end(); - const make_result = s.make(structInitIgnoreUnknown(std.Build.Step.MakeOptions, .{ + const make_result = s.make(structInitIgnoreUnknown(Step.MakeOptions, .{ .progress_node = sub_prog_node, .thread_pool = thread_pool, - .watch = true, - .gpa = gpa, + .watch = run.watch, .web_server = null, .ttyconf = .no_color, .unit_test_timeout_ns = null, + .gpa = run.gpa, })); if (run.watch) { @@ -786,17 +788,19 @@ fn workerMakeOneStep( } else |err| switch (err) { error.MakeFailed => { @atomicStore(Step.State, &s.state, .failure, .seq_cst); + std.Progress.setStatus(.failure_working); break :handle_result; }, - error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst), + error.MakeSkipped => { + @atomicStore(Step.State, &s.state, .skipped, .seq_cst); + }, } // Successful completion of a step, so we queue up its dependants as well. 
for (s.dependants.items) |dep| { - wg.start(); - thread_pool.spawn(workerMakeOneStep, .{ - wg, gpa, b, steps_stack, dep, prog_node, run, - }) catch @panic("OOM"); + thread_pool.spawnWg(wg, workerMakeOneStep, .{ + wg, b, steps_stack, dep, prog_node, run, + }); } } @@ -815,14 +819,13 @@ fn workerMakeOneStep( var j: usize = 0; while (j < run.memory_blocked_steps.items.len) : (j += 1) { const dep = run.memory_blocked_steps.items[j]; - assert(dep.max_rss != 0); + std.debug.assert(dep.max_rss != 0); if (dep.max_rss <= remaining) { remaining -= dep.max_rss; - wg.start(); - thread_pool.spawn(workerMakeOneStep, .{ - wg, gpa, b, steps_stack, dep, prog_node, run, - }) catch @panic("OOM"); + thread_pool.spawnWg(wg, workerMakeOneStep, .{ + wg, b, steps_stack, dep, prog_node, run, + }); } else { run.memory_blocked_steps.items[i] = dep; i += 1; @@ -866,8 +869,8 @@ fn uncleanExit() error{UncleanExit} { process.exit(1); } -fn fatal(comptime f: []const u8, args: anytype) noreturn { - std.debug.print(f ++ "\n", args); +fn fatalWithHint(comptime f: []const u8, args: anytype) noreturn { + std.debug.print(f ++ "\n access the help menu with 'zig build -h'\n", args); process.exit(1); } @@ -945,7 +948,7 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void { step.dependOn(&other.step); }, - .config_header_step => |config_header| step.dependOn(&config_header.step), + .config_header_step => |other| step.dependOn(&other.step), }; for (mod.lib_paths.items) |lp| lp.addStepDependencies(step); for (mod.rpaths.items) |rpath| switch (rpath) { @@ -978,79 +981,12 @@ const shared = @import("shared.zig"); const Transport = shared.Transport; const BuildConfig = shared.BuildConfig; -const Packages = struct { - allocator: std.mem.Allocator, - - /// Outer key is the package name, inner key is the file path. - packages: std.StringArrayHashMapUnmanaged(std.StringArrayHashMapUnmanaged(void)) = .{}, - - /// Returns true if the package was already present. 
- pub fn addPackage(self: *Packages, name: []const u8, path: []const u8) !bool { - const name_gop_result = try self.packages.getOrPutValue(self.allocator, name, .{}); - const path_gop_result = try name_gop_result.value_ptr.getOrPut(self.allocator, path); - return path_gop_result.found_existing; - } - - pub fn toPackageList(self: *Packages) ![]BuildConfig.Package { - var result: ArrayList(BuildConfig.Package) = .{}; - errdefer result.deinit(self.allocator); - - const Context = struct { - keys: [][]const u8, - - pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { - return std.mem.lessThan(u8, ctx.keys[a_index], ctx.keys[b_index]); - } - }; - - self.packages.sort(Context{ .keys = self.packages.keys() }); - - for (self.packages.keys(), self.packages.values()) |name, path_hashmap| { - for (path_hashmap.keys()) |path| { - try result.append(self.allocator, .{ .name = name, .path = path }); - } - } - - return try result.toOwnedSlice(self.allocator); - } - - pub fn deinit(self: *Packages) void { - for (self.packages.values()) |*path_hashmap| { - path_hashmap.deinit(self.allocator); - } - self.packages.deinit(self.allocator); - } -}; - fn extractBuildInformation( - gpa: Allocator, b: *std.Build, arena: Allocator, main_progress_node: std.Progress.Node, run: *Run, - seed: u32, ) !void { - var steps = std.AutoArrayHashMapUnmanaged(*Step, void){}; - defer steps.deinit(gpa); - - // collect the set of all steps - { - var stack: ArrayList(*Step) = .{}; - defer stack.deinit(gpa); - - try stack.ensureUnusedCapacity(gpa, b.top_level_steps.count()); - for (b.top_level_steps.values()) |tls| { - stack.appendAssumeCapacity(&tls.step); - } - - while (stack.pop()) |step| { - const gop = try steps.getOrPut(gpa, step); - if (gop.found_existing) continue; - - try stack.appendSlice(gpa, step.dependencies.items); - } - } - const helper = struct { fn addLazyPathStepDependencies(allocator: Allocator, set: *std.AutoArrayHashMapUnmanaged(*Step, void), lazy_path: std.Build.LazyPath) 
!void { switch (lazy_path) { @@ -1080,38 +1016,30 @@ fn extractBuildInformation( .config_header_step => |config_header| try set.put(allocator, &config_header.step, {}), } } - + /// Only adds the necessary dependencies to resolve the `root_source_file` and `include_dirs`. Does not include dependencies of imported modules. fn addModuleDependencies(allocator: Allocator, set: *std.AutoArrayHashMapUnmanaged(*Step, void), module: *std.Build.Module) !void { if (module.root_source_file) |root_source_file| { try addLazyPathStepDependencies(allocator, set, root_source_file); } - for (module.import_table.values()) |import| { - if (import.root_source_file) |root_source_file| { - try addLazyPathStepDependencies(allocator, set, root_source_file); - } - } - for (module.include_dirs.items) |include_dir| { try addIncludeDirStepDependencies(allocator, set, include_dir); } } - - fn processItem( + fn processModule( allocator: Allocator, + modules: *std.StringArrayHashMapUnmanaged(shared.BuildConfig.Module), module: *std.Build.Module, - compile: ?*std.Build.Step.Compile, - name: []const u8, - packages: *Packages, - include_dirs: *std.StringArrayHashMapUnmanaged(void), - c_macros: *std.StringArrayHashMapUnmanaged(void), + compile: ?*Step.Compile, + cwd: []const u8, ) !void { - if (module.root_source_file) |root_source_file| { - _ = try packages.addPackage(name, root_source_file.getPath(module.owner)); - } + const root_source_file = module.root_source_file orelse return; + + var include_dirs: std.StringArrayHashMapUnmanaged(void) = .empty; + var c_macros: std.StringArrayHashMapUnmanaged(void) = .empty; if (compile) |exe| { - try processPkgConfig(allocator, include_dirs, c_macros, exe); + try processPkgConfig(allocator, &include_dirs, &c_macros, exe); } try c_macros.ensureUnusedCapacity(allocator, module.c_macros.items.len); @@ -1156,83 +1084,141 @@ fn extractBuildInformation( }, } } + + const root_source_file_path = try std.fs.path.resolve(allocator, &.{ cwd, 
root_source_file.getPath2(module.owner, null) }); + + // All modules with the same root source file are merged. This limitation may be lifted in the future. + const gop = try modules.getOrPutValue(allocator, root_source_file_path, .{ + .import_table = .{}, + .c_macros = &.{}, + .include_dirs = &.{}, + }); + + for (module.import_table.keys(), module.import_table.values()) |name, import| { + const gop_import = try gop.value_ptr.import_table.map.getOrPut(allocator, name); + // This does not account for the possibility of collisions (i.e. modules with same root source file import different modules under the same name). + if (!gop_import.found_existing) { + gop_import.value_ptr.* = try std.fs.path.resolve(allocator, &.{ cwd, import.root_source_file.?.getPath2(import.owner, null) }); + } + } + gop.value_ptr.c_macros = try std.mem.concat(allocator, []const u8, &.{ gop.value_ptr.c_macros, c_macros.keys() }); + gop.value_ptr.include_dirs = try std.mem.concat(allocator, []const u8, &.{ gop.value_ptr.include_dirs, include_dirs.keys() }); } }; + const gpa = run.gpa; - var step_dependencies: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; - defer step_dependencies.deinit(gpa); + // The value tracks whether the step is a decendant of the "install" step. + var all_steps: std.AutoArrayHashMapUnmanaged(*Step, bool) = .empty; + defer all_steps.deinit(gpa); - // collect step dependencies + // collect all steps that are decendants of the "install" step. 
{ - var modules: std.AutoArrayHashMapUnmanaged(*std.Build.Module, void) = .{}; - defer modules.deinit(gpa); + try all_steps.putNoClobber(gpa, b.getInstallStep(), true); - // collect root modules of `Step.Compile` - for (steps.keys()) |step| { - const compile = step.cast(Step.Compile) orelse continue; - const graph = compile.root_module.getGraph(); - try modules.ensureUnusedCapacity(gpa, graph.modules.len); - for (graph.modules) |module| modules.putAssumeCapacity(module, {}); + var i: usize = 0; + while (i < all_steps.count()) : (i += 1) { + const step = all_steps.keys()[i]; + + try all_steps.ensureUnusedCapacity(gpa, step.dependencies.items.len); + for (step.dependencies.items) |other_step| { + all_steps.putAssumeCapacity(other_step, true); + } + } + } + + // collect all other steps + { + var i: usize = all_steps.count(); + + try all_steps.ensureUnusedCapacity(gpa, b.top_level_steps.count()); + for (b.top_level_steps.values()) |tls| { + all_steps.putAssumeCapacity(&tls.step, true); + } + + while (i < all_steps.count()) : (i += 1) { + const step = all_steps.keys()[i]; + + try all_steps.ensureUnusedCapacity(gpa, step.dependencies.items.len); + for (step.dependencies.items) |other_step| { + all_steps.putAssumeCapacity(other_step, false); + } } + } + + // Collect all steps that need to be run so that we can resolve the lazy paths we are interested in (e.g. root_source_file). 
+ { + var needed_steps: std.AutoArrayHashMapUnmanaged(*Step, void) = .empty; + defer needed_steps.deinit(gpa); + + var modules: std.AutoArrayHashMapUnmanaged(*std.Build.Module, void) = .empty; + defer modules.deinit(gpa); - // collect public modules + try modules.ensureUnusedCapacity(gpa, b.modules.count()); for (b.modules.values()) |root_module| { - const graph = root_module.getGraph(); + modules.putAssumeCapacity(root_module, {}); + } + + // collect all modules of `Step.Compile` + for (all_steps.keys()) |step| { + const compile = step.cast(Step.Compile) orelse continue; + const graph = compile.root_module.getGraph(); try modules.ensureUnusedCapacity(gpa, graph.modules.len); for (graph.modules) |module| modules.putAssumeCapacity(module, {}); } // collect all dependencies of all found modules for (modules.keys()) |module| { - try helper.addModuleDependencies(gpa, &step_dependencies, module); + try helper.addModuleDependencies(gpa, &needed_steps, module); } - } - - prepare(gpa, b, &step_dependencies, run, seed) catch |err| switch (err) { - error.UncleanExit => process.exit(1), - else => return err, - }; - // run all steps that are dependencies - try runSteps( - gpa, - b, - &step_dependencies, - main_progress_node, - run, - ); + prepare(b, &needed_steps, run) catch |err| switch (err) { + error.DependencyLoopDetected => process.exit(1), + else => |e| return e, + }; - var include_dirs: std.StringArrayHashMapUnmanaged(void) = .{}; - defer include_dirs.deinit(gpa); + try runSteps( + b, + &needed_steps, + main_progress_node, + run, + ); + } - var c_macros: std.StringArrayHashMapUnmanaged(void) = .{}; - defer c_macros.deinit(gpa); + // We collect modules in the following order: + // - public modules (`std.Build.addModule`) + // - modules that are reachable from the "install" step + // - all other reachable modules + var modules: std.StringArrayHashMapUnmanaged(BuildConfig.Module) = .empty; + const cwd = try std.process.getCwdAlloc(arena); + + for (b.modules.values()) 
|root_module| { + const graph = root_module.getGraph(); + for (graph.modules) |module| { + try helper.processModule(arena, &modules, module, null, cwd); + } + } - var packages: Packages = .{ .allocator = gpa }; - defer packages.deinit(); + // We loop twice through all steps so that decendants of the "install" step are processed first. + for ([_]bool{ true, false }) |want_install_step_decendant| { + for (all_steps.keys(), all_steps.values()) |step, is_install_step_decendant| { + if (is_install_step_decendant != want_install_step_decendant) continue; - // extract packages and include paths - { - for (steps.keys()) |step| { const compile = step.cast(Step.Compile) orelse continue; const graph = compile.root_module.getGraph(); - try helper.processItem(gpa, compile.root_module, compile, "root", &packages, &include_dirs, &c_macros); for (graph.modules) |module| { - for (module.import_table.keys(), module.import_table.values()) |name, import| { - try helper.processItem(gpa, import, null, name, &packages, &include_dirs, &c_macros); - } + try helper.processModule(arena, &modules, module, compile, cwd); } } + } - for (b.modules.values()) |root_module| { - const graph = root_module.getGraph(); - try helper.processItem(gpa, root_module, null, "root", &packages, &include_dirs, &c_macros); - for (graph.modules) |module| { - for (module.import_table.keys(), module.import_table.values()) |name, import| { - try helper.processItem(gpa, import, null, name, &packages, &include_dirs, &c_macros); - } - } - } + var compilations: std.ArrayList(BuildConfig.Compile) = .empty; + for (all_steps.keys()) |step| { + const compile = step.cast(Step.Compile) orelse continue; + const root_source_file = compile.root_module.root_source_file orelse continue; + const root_source_file_path = try std.fs.path.resolve(arena, &.{ cwd, root_source_file.getPath2(compile.root_module.owner, null) }); + try compilations.append(arena, .{ + .root_module = root_source_file_path, + }); } // Sample `@dependencies` 
structure: @@ -1249,50 +1235,49 @@ fn extractBuildInformation( // .{ "diffz", "122089a8247a693cad53beb161bde6c30f71376cd4298798d45b32740c3581405864" }, // }; - var deps_build_roots: ArrayList(BuildConfig.DepsBuildRoots) = .{}; + // Collect the dependencies from `build.zig.zon` + var root_dependencies: std.StringArrayHashMapUnmanaged([]const u8) = .empty; for (dependencies.root_deps) |root_dep| { inline for (comptime std.meta.declarations(dependencies.packages)) |package| blk: { if (std.mem.eql(u8, package.name, root_dep[1])) { const package_info = @field(dependencies.packages, package.name); if (!@hasDecl(package_info, "build_root")) break :blk; if (!@hasDecl(package_info, "build_zig")) break :blk; - try deps_build_roots.append(arena, .{ - .name = root_dep[0], - .path = try std.fs.path.join(arena, &.{ package_info.build_root, "build.zig" }), - }); + try root_dependencies.put( + arena, + root_dep[0], + try std.fs.path.join(arena, &.{ package_info.build_root, "build.zig" }), + ); } } } - var available_options: std.json.ArrayHashMap(BuildConfig.AvailableOption) = .{}; - try available_options.map.ensureTotalCapacity(arena, b.available_options_map.count()); + var available_options: std.StringArrayHashMapUnmanaged(BuildConfig.AvailableOption) = .empty; + try available_options.ensureTotalCapacity(arena, b.available_options_map.count()); var it = b.available_options_map.iterator(); while (it.next()) |available_option| { - available_options.map.putAssumeCapacityNoClobber(available_option.key_ptr.*, available_option.value_ptr.*); + available_options.putAssumeCapacityNoClobber(available_option.key_ptr.*, available_option.value_ptr.*); } - const stringifyValueAlloc = if (@hasDecl(std.json, "Stringify")) std.json.Stringify.valueAlloc else std.json.stringifyAlloc; - - const stringified_build_config = try stringifyValueAlloc( + const stringified_build_config = try std.json.Stringify.valueAlloc( gpa, BuildConfig{ - .deps_build_roots = deps_build_roots.items, - .packages = try 
packages.toPackageList(), - .include_dirs = include_dirs.keys(), + .dependencies = .{ .map = root_dependencies }, + .modules = .{ .map = modules }, + .compilations = compilations.items, .top_level_steps = b.top_level_steps.keys(), - .available_options = available_options, - .c_macros = c_macros.keys(), + .available_options = .{ .map = available_options }, }, .{ .whitespace = .indent_2 }, ); - var file_writer = std.fs.File.stdout().writer(&.{}); + var file_writer = File.stdout().writer(&.{}); file_writer.interface.writeAll(stringified_build_config) catch return file_writer.err.?; } fn processPkgConfig( - allocator: std.mem.Allocator, + allocator: Allocator, include_dirs: *std.StringArrayHashMapUnmanaged(void), c_macros: *std.StringArrayHashMapUnmanaged(void), exe: *Step.Compile, @@ -1398,7 +1383,7 @@ const copied_from_zig = struct { else => return err, }; - var zig_args = ArrayListManaged([]const u8).init(b.allocator); + var zig_args = std.array_list.Managed([]const u8).init(b.allocator); defer zig_args.deinit(); var it = mem.tokenizeAny(u8, stdout, " \r\n\t"); @@ -1433,7 +1418,7 @@ const copied_from_zig = struct { fn execPkgConfigList(self: *std.Build, out_code: *u8) (std.Build.PkgConfigError || std.Build.RunError)![]const std.Build.PkgConfigPkg { const stdout = try self.runAllowFail(&.{ "pkg-config", "--list-all" }, out_code, .Ignore); - var list = ArrayListManaged(std.Build.PkgConfigPkg).init(self.allocator); + var list = std.array_list.Managed(std.Build.PkgConfigPkg).init(self.allocator); errdefer list.deinit(); var line_it = mem.tokenizeAny(u8, stdout, "\r\n"); while (line_it.next()) |line| { @@ -1498,7 +1483,7 @@ fn serveWatchErrorBundle( std.mem.byteSwapAllElements(u32, @constCast(error_bundle.extra)); // trust me bro } - var file_writer = std.fs.File.stdout().writer(&.{}); + var file_writer = File.stdout().writer(&.{}); const writer = &file_writer.interface; var data = [_][]const u8{ diff --git a/src/build_runner/shared.zig b/src/build_runner/shared.zig 
index 745e54925..3ed7de68e 100644 --- a/src/build_runner/shared.zig +++ b/src/build_runner/shared.zig @@ -2,22 +2,33 @@ const std = @import("std"); const builtin = @import("builtin"); -const native_endian = builtin.target.cpu.arch.endian(); -const need_bswap = native_endian != .little; pub const BuildConfig = struct { - deps_build_roots: []DepsBuildRoots, - packages: []Package, - include_dirs: []const []const u8, + /// The `dependencies` in `build.zig.zon`. + dependencies: std.json.ArrayHashMap([]const u8), + /// The key is the `root_source_file`. + /// All modules with the same root source file are merged. This limitation may be lifted in the future. + modules: std.json.ArrayHashMap(Module), + /// List of all compilations units. + compilations: []const Compile, + /// The names of all top level steps. top_level_steps: []const []const u8, available_options: std.json.ArrayHashMap(AvailableOption), - c_macros: []const []const u8 = &.{}, - pub const DepsBuildRoots = Package; - pub const Package = struct { - name: []const u8, - path: []const u8, + pub const Module = struct { + import_table: std.json.ArrayHashMap([]const u8), + c_macros: []const []const u8, + include_dirs: []const []const u8, }; + + pub const Compile = struct { + /// Key in `BuildConfig.modules`. + root_module: []const u8, + + // may contain additional information in the future like `target` or `link_libc`. + }; + + /// Equivalent to `std.Build.AvailableOption` which is not accessible because it non-pub. 
pub const AvailableOption = @FieldType(@FieldType(std.Build, "available_options_map").KV, "value"); }; diff --git a/src/features/completions.zig b/src/features/completions.zig index 7f51b559b..ed8374563 100644 --- a/src/features/completions.zig +++ b/src/features/completions.zig @@ -570,10 +570,6 @@ fn completeFieldAccess(builder: *Builder, loc: offsets.Loc) error{OutOfMemory}!v fn kindToSortScore(kind: types.completion.Item.Kind) []const u8 { return switch (kind) { - .Module => "1", // used for packages - .Folder => "2", - .File => "3", - .Operator => "1", .Field, .EnumMember => "2", .Method => "3", @@ -588,6 +584,11 @@ fn kindToSortScore(kind: types.completion.Item.Kind) []const u8 { .Snippet => "6", .Keyword => "7", + // Used for `@import` completions. Must be set in `completeFileSystemStringLiteral`. + .Module => unreachable, + .Folder => unreachable, + .File => unreachable, + else => unreachable, }; } @@ -685,7 +686,6 @@ fn completeDot(builder: *Builder, loc: offsets.Loc) error{OutOfMemory}!void { /// - `.embedfile_string_literal` /// - `.string_literal` fn completeFileSystemStringLiteral(builder: *Builder, pos_context: Analyser.PositionContext) error{OutOfMemory}!void { - var completions: CompletionSet = .empty; const store = &builder.server.document_store; const source = builder.orig_handle.tree.source; @@ -711,6 +711,89 @@ fn completeFileSystemStringLiteral(builder: *Builder, pos_context: Analyser.Posi const completing = offsets.locToSlice(source, .{ .start = string_content_loc.start, .end = previous_separator_index orelse string_content_loc.start }); + const after_separator_index = if (previous_separator_index) |index| index + 1 else string_content_loc.start; + const insert_loc: offsets.Loc = .{ .start = after_separator_index, .end = builder.source_index }; + const replace_loc: offsets.Loc = .{ .start = after_separator_index, .end = next_separator_index orelse string_content_loc.end }; + + const insert_range = offsets.locToRange(source, insert_loc, 
builder.server.offset_encoding); + const replace_range = offsets.locToRange(source, replace_loc, builder.server.offset_encoding); + + if (pos_context == .import_string_literal) { + try builder.completions.ensureUnusedCapacity(builder.arena, 2); + if (store.config.zig_lib_dir) |zig_lib_dir| { + builder.completions.appendAssumeCapacity(.{ + .label = "std", + .kind = .Module, + .detail = zig_lib_dir.path, + .sortText = "1", + }); + } + if (store.config.builtin_path) |builtin_path| { + builder.completions.appendAssumeCapacity(.{ + .label = "builtin", + .kind = .Module, + .detail = builtin_path, + .sortText = "2", + }); + } + + if (!DocumentStore.supports_build_system) { + // no build system modules + } else if (DocumentStore.isBuildFile(builder.orig_handle.uri)) blk: { + const build_file = store.getBuildFile(builder.orig_handle.uri) orelse break :blk; + const build_config = build_file.tryLockConfig() orelse break :blk; + defer build_file.unlockConfig(); + + try builder.completions.ensureUnusedCapacity(builder.arena, build_config.dependencies.map.count()); + for (build_config.dependencies.map.keys(), build_config.dependencies.map.values()) |name, path| { + try builder.completions.append(builder.arena, .{ + .label = name, + .kind = .Module, + .detail = path, + .sortText = "4", + }); + } + } else switch (try builder.orig_handle.getAssociatedBuildFile(store)) { + .none, .unresolved => {}, + .resolved => |resolved| blk: { + const build_config = resolved.build_file.tryLockConfig() orelse break :blk; + defer resolved.build_file.unlockConfig(); + + const module = build_config.modules.map.get(resolved.root_source_file) orelse break :blk; + + try builder.completions.ensureUnusedCapacity(builder.arena, 1 + module.import_table.map.count()); + + builder.completions.appendAssumeCapacity(.{ + .label = "root", + .kind = .Module, + .detail = try builder.arena.dupe(u8, resolved.root_source_file), + .sortText = "3", + }); + + for (module.import_table.map.keys(), 
module.import_table.map.values()) |name, root_source_file| { + builder.completions.appendAssumeCapacity(.{ + .label = try builder.arena.dupe(u8, name), + .kind = .Module, + .detail = try builder.arena.dupe(u8, root_source_file), + .sortText = "4", + }); + } + }, + } + + const string_content_range = offsets.locToRange(source, string_content_loc, builder.server.offset_encoding); + + // completions on module replace the entire string literal + for (builder.completions.items) |*item| { + if (item.kind == .Module and item.textEdit == null) { + item.textEdit = if (builder.server.client_capabilities.supports_completion_insert_replace_support) + .{ .insert_replace_edit = .{ .newText = item.label, .insert = insert_range, .replace = string_content_range } } + else + .{ .text_edit = .{ .newText = item.label, .range = insert_range } }; + } + } + } + var search_paths: std.ArrayList([]const u8) = .empty; if (std.fs.path.isAbsolute(completing) and pos_context != .import_string_literal) { try search_paths.append(builder.arena, completing); @@ -728,13 +811,6 @@ fn completeFileSystemStringLiteral(builder: *Builder, pos_context: Analyser.Posi try search_paths.append(builder.arena, std.fs.path.dirname(document_path).?); } - const after_separator_index = if (previous_separator_index) |index| index + 1 else string_content_loc.start; - const insert_loc: offsets.Loc = .{ .start = after_separator_index, .end = builder.source_index }; - const replace_loc: offsets.Loc = .{ .start = after_separator_index, .end = next_separator_index orelse string_content_loc.end }; - - const insert_range = offsets.locToRange(source, insert_loc, builder.server.offset_encoding); - const replace_range = offsets.locToRange(source, replace_loc, builder.server.offset_encoding); - for (search_paths.items) |path| { if (!std.fs.path.isAbsolute(path)) continue; const dir_path = if (std.fs.path.isAbsolute(completing)) path else try std.fs.path.join(builder.arena, &.{ path, completing }); @@ -766,7 +842,7 @@ fn 
completeFileSystemStringLiteral(builder: *Builder, pos_context: Analyser.Posi else label; - _ = try completions.getOrPut(builder.arena, .{ + try builder.completions.append(builder.arena, .{ .label = label, .kind = if (entry.kind == .file) .File else .Folder, .detail = if (pos_context == .cinclude_string_literal) path else null, @@ -774,73 +850,10 @@ fn completeFileSystemStringLiteral(builder: *Builder, pos_context: Analyser.Posi .{ .insert_replace_edit = .{ .newText = insert_text, .insert = insert_range, .replace = replace_range } } else .{ .text_edit = .{ .newText = insert_text, .range = insert_range } }, + .sortText = if (entry.kind == .file) "6" else "5", }); } } - - if (completing.len == 0 and pos_context == .import_string_literal) { - no_modules: { - if (!DocumentStore.supports_build_system) break :no_modules; - - if (DocumentStore.isBuildFile(builder.orig_handle.uri)) { - const build_file = store.getBuildFile(builder.orig_handle.uri) orelse break :no_modules; - const build_config = build_file.tryLockConfig() orelse break :no_modules; - defer build_file.unlockConfig(); - - try completions.ensureUnusedCapacity(builder.arena, build_config.deps_build_roots.len); - for (build_config.deps_build_roots) |dbr| { - completions.putAssumeCapacity(.{ - .label = dbr.name, - .kind = .Module, - .detail = dbr.path, - }, {}); - } - } else if (try builder.orig_handle.getAssociatedBuildFileUri(store)) |uri| { - const build_file = store.getBuildFile(uri).?; - const build_config = build_file.tryLockConfig() orelse break :no_modules; - defer build_file.unlockConfig(); - - try completions.ensureUnusedCapacity(builder.arena, build_config.packages.len); - for (build_config.packages) |pkg| { - completions.putAssumeCapacity(.{ - .label = pkg.name, - .kind = .Module, - .detail = pkg.path, - }, {}); - } - } - } - - try completions.ensureUnusedCapacity(builder.arena, 2); - if (store.config.zig_lib_dir) |zig_lib_dir| { - completions.putAssumeCapacity(.{ - .label = "std", - .kind = .Module, 
- .detail = zig_lib_dir.path, - }, {}); - } - if (store.config.builtin_path) |builtin_path| { - completions.putAssumeCapacity(.{ - .label = "builtin", - .kind = .Module, - .detail = builtin_path, - }, {}); - } - - const string_content_range = offsets.locToRange(source, string_content_loc, builder.server.offset_encoding); - - // completions on module replace the entire string literal - for (completions.keys()) |*item| { - if (item.kind == .Module and item.textEdit == null) { - item.textEdit = if (builder.server.client_capabilities.supports_completion_insert_replace_support) - .{ .insert_replace_edit = .{ .newText = item.label, .insert = insert_range, .replace = string_content_range } } - else - .{ .text_edit = .{ .newText = item.label, .range = insert_range } }; - } - } - } - - try builder.completions.appendSlice(builder.arena, completions.keys()); } pub fn completionAtIndex( @@ -919,8 +932,10 @@ pub fn completionAtIndex( } } - const score = kindToSortScore(item.kind.?); - item.sortText = try std.fmt.allocPrint(arena, "{s}_{s}", .{ score, item.label }); + if (item.sortText == null) { + const score = kindToSortScore(item.kind.?); + item.sortText = try std.fmt.allocPrint(arena, "{s}_{s}", .{ score, item.label }); + } } return .{ .isIncomplete = false, .items = completions }; diff --git a/src/features/goto.zig b/src/features/goto.zig index 73f6c5752..21c0a8554 100644 --- a/src/features/goto.zig +++ b/src/features/goto.zig @@ -232,7 +232,7 @@ fn gotoDefinitionString( pos_context: Analyser.PositionContext, handle: *DocumentStore.Handle, offset_encoding: offsets.Encoding, -) error{OutOfMemory}!?types.Definition.Link { +) error{OutOfMemory}!?[]const types.Definition.Link { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); @@ -240,14 +240,17 @@ fn gotoDefinitionString( if (loc.start == loc.end) return null; const import_str = offsets.locToSlice(handle.tree.source, loc); - const uri: ?Uri = switch (pos_context) { + const result: 
DocumentStore.UriFromImportStringResult = switch (pos_context) { .import_string_literal, .embedfile_string_literal, => try document_store.uriFromImportStr(arena, handle, import_str), .cinclude_string_literal => blk: { if (!DocumentStore.supports_build_system) return null; - if (std.fs.path.isAbsolute(import_str)) break :blk try Uri.fromPath(arena, import_str); + if (std.fs.path.isAbsolute(import_str)) { + break :blk .{ .one = try .fromPath(arena, import_str) }; + } + var include_dirs: std.ArrayList([]const u8) = .empty; _ = document_store.collectIncludeDirs(arena, handle, &include_dirs) catch |err| { log.err("failed to resolve include paths: {}", .{err}); @@ -256,7 +259,7 @@ fn gotoDefinitionString( for (include_dirs.items) |dir| { const path = try std.fs.path.join(arena, &.{ dir, import_str }); std.fs.accessAbsolute(path, .{}) catch continue; - break :blk try Uri.fromPath(arena, path); + break :blk .{ .one = try .fromPath(arena, path) }; } return null; }, @@ -267,12 +270,27 @@ fn gotoDefinitionString( .start = .{ .line = 0, .character = 0 }, .end = .{ .line = 0, .character = 0 }, }; - return .{ - .originSelectionRange = offsets.locToRange(handle.tree.source, loc, offset_encoding), - .targetUri = if (uri) |u| u.raw else return null, - .targetRange = target_range, - .targetSelectionRange = target_range, - }; + switch (result) { + .none => return null, + .one => |uri| return try arena.dupe(types.Definition.Link, &.{.{ + .originSelectionRange = offsets.locToRange(handle.tree.source, loc, offset_encoding), + .targetUri = uri.raw, + .targetRange = target_range, + .targetSelectionRange = target_range, + }}), + .many => |uris| { + const links = try arena.alloc(types.Definition.Link, uris.len); + for (links, uris) |*link, uri| { + link.* = .{ + .originSelectionRange = offsets.locToRange(handle.tree.source, loc, offset_encoding), + .targetUri = uri.raw, + .targetRange = target_range, + .targetSelectionRange = target_range, + }; + } + return links; + }, + } } pub fn 
gotoHandler( @@ -294,10 +312,13 @@ pub fn gotoHandler( const source_index = offsets.positionToIndex(handle.tree.source, request.position, server.offset_encoding); const pos_context = try Analyser.getPositionContext(arena, &handle.tree, source_index, true); - const response = switch (pos_context) { + const response: types.Definition.Link = blk: switch (pos_context) { .builtin => |loc| try gotoDefinitionBuiltin(&analyser, handle, loc, server.offset_encoding), .var_access => try gotoDefinitionGlobal(&analyser, handle, source_index, kind, server.offset_encoding), - .field_access => |loc| blk: { + .label_access, .label_decl => |loc| try gotoDefinitionLabel(&analyser, handle, source_index, loc, kind, server.offset_encoding), + .enum_literal => try gotoDefinitionEnumLiteral(&analyser, handle, source_index, kind, server.offset_encoding), + + .field_access => |loc| { const links = try gotoDefinitionFieldAccess(&analyser, arena, handle, source_index, loc, kind, server.offset_encoding) orelse return null; if (server.client_capabilities.supports_textDocument_definition_linkSupport) { return .{ .definition_links = links }; @@ -311,10 +332,28 @@ pub fn gotoHandler( .import_string_literal, .cinclude_string_literal, .embedfile_string_literal, - => try gotoDefinitionString(&server.document_store, arena, pos_context, handle, server.offset_encoding), - .label_access, .label_decl => |loc| try gotoDefinitionLabel(&analyser, handle, source_index, loc, kind, server.offset_encoding), - .enum_literal => try gotoDefinitionEnumLiteral(&analyser, handle, source_index, kind, server.offset_encoding), - else => null, + => { + const links = try gotoDefinitionString(&server.document_store, arena, pos_context, handle, server.offset_encoding) orelse return null; + if (server.client_capabilities.supports_textDocument_definition_linkSupport) { + return .{ .definition_links = links }; + } + switch (links.len) { + 0 => unreachable, + 1 => break :blk links[0], + else => return null, + } + }, + + 
.string_literal, + .number_literal, + .char_literal, + .parens_expr, + .keyword, + .global_error_set, + .comment, + .other, + .empty, + => return null, } orelse return null; if (server.client_capabilities.supports_textDocument_definition_linkSupport) { @@ -323,10 +362,8 @@ pub fn gotoHandler( }; } - return .{ - .definition = .{ .location = .{ - .uri = response.targetUri, - .range = response.targetSelectionRange, - } }, - }; + return .{ .definition = .{ .location = .{ + .uri = response.targetUri, + .range = response.targetSelectionRange, + } } }; } diff --git a/src/snippets.zig b/src/snippets.zig index 2a84bf7b9..807b65bab 100644 --- a/src/snippets.zig +++ b/src/snippets.zig @@ -10,6 +10,7 @@ pub const Snipped = struct { pub const top_level_decl_data = [_]Snipped{ .{ .label = "std", .kind = .Snippet, .text = "const std = @import(\"std\");" }, + .{ .label = "builtin", .kind = .Snippet, .text = "const builtin = @import(\"builtin\");" }, .{ .label = "root", .kind = .Snippet, .text = "const root = @import(\"root\");" }, .{ .label = "import", .kind = .Snippet, .text = "const $1 = @import(\"$2\")" }, .{ .label = "fn", .kind = .Snippet, .text = "fn ${1:name}($2) ${3:!void} {$0}" }, diff --git a/tests/add_build_runner_cases.zig b/tests/add_build_runner_cases.zig index 799023db3..1cced1273 100644 --- a/tests/add_build_runner_cases.zig +++ b/tests/add_build_runner_cases.zig @@ -58,13 +58,19 @@ pub fn addCases( build_cmd.addArg("--zig-lib-dir"); build_cmd.addDirectoryArg(.{ .cwd_relative = b.fmt("{f}", .{b.graph.zig_lib_directory}) }); + build_cmd.addFileInput(b.path("src/build_runner/shared.zig")); + const actual_build_config_json = build_cmd.captureStdOut(.{}); const run_diff = b.addRunArtifact(check_exe); run_diff.setName(b.fmt("run {s} ({s})", .{ check_exe.name, entry.name })); + run_diff.setCwd(cases_dir); run_diff.addFileArg(expected_build_config_json); run_diff.addFileArg(actual_build_config_json); - run_diff.addDirectoryArg(cases_dir); + run_diff.addArg("--cache-dir"); 
+ run_diff.addDirectoryArg(.{ .cwd_relative = b.fmt("{f}", .{b.cache_root}) }); + run_diff.addArg("--global-cache-dir"); + run_diff.addDirectoryArg(.{ .cwd_relative = b.fmt("{f}", .{b.graph.global_cache_root}) }); test_step.dependOn(&run_diff.step); } diff --git a/tests/build_runner_cases/add_module.json b/tests/build_runner_cases/add_module.json index a51926a19..bbd40dca0 100644 --- a/tests/build_runner_cases/add_module.json +++ b/tests/build_runner_cases/add_module.json @@ -1,16 +1,16 @@ { - "deps_build_roots": [], - "packages": [ - { - "name": "root", - "path": "root.zig" + "dependencies": {}, + "modules": { + "root.zig": { + "import_table": {}, + "c_macros": [], + "include_dirs": [] } - ], - "include_dirs": [], + }, + "compilations": [], "top_level_steps": [ "install", "uninstall" ], - "available_options": {}, - "c_macros": [] + "available_options": {} } \ No newline at end of file diff --git a/tests/build_runner_cases/define_c_macro.json b/tests/build_runner_cases/define_c_macro.json index b61f3eff0..080938f0e 100644 --- a/tests/build_runner_cases/define_c_macro.json +++ b/tests/build_runner_cases/define_c_macro.json @@ -1,18 +1,18 @@ { - "deps_build_roots": [], - "packages": [ - { - "name": "root", - "path": "root.zig" + "dependencies": {}, + "modules": { + "root.zig": { + "import_table": {}, + "c_macros": [ + "-Dkey=value" + ], + "include_dirs": [] } - ], - "include_dirs": [], + }, + "compilations": [], "top_level_steps": [ "install", "uninstall" ], - "available_options": {}, - "c_macros": [ - "-Dkey=value" - ] + "available_options": {} } \ No newline at end of file diff --git a/tests/build_runner_cases/empty.json b/tests/build_runner_cases/empty.json index bc46f1e1c..8faf4b959 100644 --- a/tests/build_runner_cases/empty.json +++ b/tests/build_runner_cases/empty.json @@ -1,11 +1,10 @@ { - "deps_build_roots": [], - "packages": [], - "include_dirs": [], + "dependencies": {}, + "modules": {}, + "compilations": [], "top_level_steps": [ "install", "uninstall" ], 
- "available_options": {}, - "c_macros": [] + "available_options": {} } \ No newline at end of file diff --git a/tests/build_runner_cases/module_root_source_file_collision.json b/tests/build_runner_cases/module_root_source_file_collision.json new file mode 100644 index 000000000..33e14abbd --- /dev/null +++ b/tests/build_runner_cases/module_root_source_file_collision.json @@ -0,0 +1,39 @@ +{ + "dependencies": {}, + "modules": { + "main.zig": { + "import_table": { + "collision": "first.zig", + "first": "first.zig", + "second": "second.zig" + }, + "c_macros": [], + "include_dirs": [] + }, + "first.zig": { + "import_table": {}, + "c_macros": [], + "include_dirs": [] + }, + "second.zig": { + "import_table": {}, + "c_macros": [], + "include_dirs": [] + }, + "third.zig": { + "import_table": {}, + "c_macros": [], + "include_dirs": [] + } + }, + "compilations": [ + { + "root_module": "main.zig" + } + ], + "top_level_steps": [ + "install", + "uninstall" + ], + "available_options": {} +} \ No newline at end of file diff --git a/tests/build_runner_cases/module_root_source_file_collision.zig b/tests/build_runner_cases/module_root_source_file_collision.zig new file mode 100644 index 000000000..cb5b4d5b2 --- /dev/null +++ b/tests/build_runner_cases/module_root_source_file_collision.zig @@ -0,0 +1,37 @@ +//! There are three different modules with `main.zig` as their root source file +//! and every one of them defines an import with the name `collision`. 
+ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const first = b.createModule(.{ .root_source_file = b.path("first.zig") }); + const second = b.createModule(.{ .root_source_file = b.path("second.zig") }); + const third = b.createModule(.{ .root_source_file = b.path("third.zig") }); + + const exe = b.addExecutable(.{ + .name = "exe", + .root_module = b.createModule(.{ + .root_source_file = b.path("main.zig"), + .target = b.graph.host, + .imports = &.{ + .{ .name = "collision", .module = third }, + }, + }), + }); + b.installArtifact(exe); + + _ = b.addModule("foo", .{ + .root_source_file = b.path("main.zig"), + .imports = &.{ + .{ .name = "collision", .module = first }, + .{ .name = "first", .module = first }, + }, + }); + _ = b.addModule("bar", .{ + .root_source_file = b.path("main.zig"), + .imports = &.{ + .{ .name = "collision", .module = second }, + .{ .name = "second", .module = second }, + }, + }); +} diff --git a/tests/build_runner_cases/module_self_import.json b/tests/build_runner_cases/module_self_import.json index 2c9d4df8c..13e4c5c8a 100644 --- a/tests/build_runner_cases/module_self_import.json +++ b/tests/build_runner_cases/module_self_import.json @@ -1,20 +1,18 @@ { - "deps_build_roots": [], - "packages": [ - { - "name": "bar", - "path": "root.zig" - }, - { - "name": "root", - "path": "root.zig" + "dependencies": {}, + "modules": { + "root.zig": { + "import_table": { + "bar": "root.zig" + }, + "c_macros": [], + "include_dirs": [] } - ], - "include_dirs": [], + }, + "compilations": [], "top_level_steps": [ "install", "uninstall" ], - "available_options": {}, - "c_macros": [] + "available_options": {} } \ No newline at end of file diff --git a/tests/build_runner_cases/multiple_module_import_names.json b/tests/build_runner_cases/multiple_module_import_names.json index 35d25ffb0..5072d1e2c 100644 --- a/tests/build_runner_cases/multiple_module_import_names.json +++ b/tests/build_runner_cases/multiple_module_import_names.json @@ -1,40 
+1,33 @@ { - "deps_build_roots": [], - "packages": [ - { - "name": "bar_in_foo", - "path": "bar.zig" + "dependencies": {}, + "modules": { + "foo.zig": { + "import_table": { + "bar_in_foo": "bar.zig" + }, + "c_macros": [], + "include_dirs": [] }, - { - "name": "bar_in_main", - "path": "bar.zig" + "bar.zig": { + "import_table": { + "foo_in_bar": "foo.zig" + }, + "c_macros": [], + "include_dirs": [] }, - { - "name": "foo_in_bar", - "path": "foo.zig" - }, - { - "name": "foo_in_main", - "path": "foo.zig" - }, - { - "name": "root", - "path": "foo.zig" - }, - { - "name": "root", - "path": "bar.zig" - }, - { - "name": "root", - "path": "main.zig" + "main.zig": { + "import_table": { + "foo_in_main": "foo.zig", + "bar_in_main": "bar.zig" + }, + "c_macros": [], + "include_dirs": [] } - ], - "include_dirs": [], + }, + "compilations": [], "top_level_steps": [ "install", "uninstall" ], - "available_options": {}, - "c_macros": [] + "available_options": {} } \ No newline at end of file diff --git a/tests/build_runner_cases/public_module_with_generated_file.json b/tests/build_runner_cases/public_module_with_generated_file.json new file mode 100644 index 000000000..f170bf379 --- /dev/null +++ b/tests/build_runner_cases/public_module_with_generated_file.json @@ -0,0 +1,16 @@ +{ + "dependencies": {}, + "modules": { + ".zig-local-cache/generated.zig": { + "import_table": {}, + "c_macros": [], + "include_dirs": [] + } + }, + "compilations": [], + "top_level_steps": [ + "install", + "uninstall" + ], + "available_options": {} +} \ No newline at end of file diff --git a/tests/build_runner_cases/public_module_with_generated_file.zig b/tests/build_runner_cases/public_module_with_generated_file.zig new file mode 100644 index 000000000..e03c2f002 --- /dev/null +++ b/tests/build_runner_cases/public_module_with_generated_file.zig @@ -0,0 +1,10 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const write_files = b.addWriteFiles(); + const generated = 
write_files.add("generated.zig", ""); + + _ = b.addModule("root", .{ + .root_source_file = generated, + }); +} diff --git a/tests/build_runner_check.zig b/tests/build_runner_check.zig index 11671536d..95c73cb4e 100644 --- a/tests/build_runner_check.zig +++ b/tests/build_runner_check.zig @@ -10,44 +10,106 @@ pub fn main() !u8 { defer _ = debug_allocator.deinit(); const gpa = debug_allocator.allocator(); + const cwd = try std.process.getCwdAlloc(gpa); + defer gpa.free(cwd); + const args = try std.process.argsAlloc(gpa); defer std.process.argsFree(gpa, args); - if (args.len != 4) @panic("invalid arguments"); + if (args.len != 7) @panic("invalid arguments"); const expected = std.fs.cwd().readFileAlloc(args[1], gpa, .limited(16 * 1024 * 1024)) catch |err| std.debug.panic("could no open/read file '{s}': {}", .{ args[1], err }); defer gpa.free(expected); - const actual_unsanitized = std.fs.cwd().readFileAlloc(args[2], gpa, .limited(16 * 1024 * 1024)) catch |err| + const actual = std.fs.cwd().readFileAlloc(args[2], gpa, .limited(16 * 1024 * 1024)) catch |err| std.debug.panic("could no open/read file '{s}': {}", .{ args[2], err }); - defer gpa.free(actual_unsanitized); - - const actual = blk: { - var aw: std.Io.Writer.Allocating = .init(gpa); - defer aw.deinit(); - - try std.json.Stringify.encodeJsonStringChars(args[3], .{}, &aw.writer); - try std.json.Stringify.encodeJsonStringChars(&.{std.fs.path.sep}, .{}, &aw.writer); - - // The build runner will produce absolute paths in the output so we remove them here. - const actual = try std.mem.replaceOwned(u8, gpa, actual_unsanitized, aw.written(), ""); - - // We also convert windows style '\\' path separators to posix style '/'. 
- switch (std.fs.path.sep) { - '/' => break :blk actual, - '\\' => { - defer gpa.free(actual); - break :blk try std.mem.replaceOwned(u8, gpa, actual, "\\\\", "/"); - }, - else => unreachable, + defer gpa.free(actual); + + std.debug.assert(std.mem.eql(u8, args[3], "--cache-dir")); + const local_cache_dir = try std.fs.path.resolve(gpa, &.{ cwd, args[4] }); + defer gpa.free(local_cache_dir); + + std.debug.assert(std.mem.eql(u8, args[5], "--global-cache-dir")); + const global_cache_dir = try std.fs.path.resolve(gpa, &.{ cwd, args[6] }); + defer gpa.free(global_cache_dir); + + const actual_sanitized = sanitized: { + const parsed = try std.json.parseFromSlice(zls.DocumentStore.BuildConfig, gpa, actual, .{}); + defer parsed.deinit(); + + var new: zls.DocumentStore.BuildConfig = parsed.value; + const arena = parsed.arena.allocator(); + + for (new.dependencies.map.keys()) |*str| str.* = try sanitizePath(arena, str.*, cwd, local_cache_dir, global_cache_dir); + try new.dependencies.map.reIndex(arena); + + for (new.modules.map.keys()) |*str| str.* = try sanitizePath(arena, str.*, cwd, local_cache_dir, global_cache_dir); + try new.modules.map.reIndex(arena); + + for (new.modules.map.values()) |*mod| { + for (mod.import_table.map.values()) |*str| str.* = try sanitizePath(arena, str.*, cwd, local_cache_dir, global_cache_dir); + try mod.import_table.map.reIndex(arena); } + + for (new.compilations) |*compile| { + @as(*[]const u8, @constCast(&compile.root_module)).* = try sanitizePath(arena, compile.root_module, cwd, local_cache_dir, global_cache_dir); + } + + break :sanitized try std.json.Stringify.valueAlloc( + gpa, + new, + .{ .whitespace = .indent_2 }, + ); }; - defer gpa.free(actual); + defer gpa.free(actual_sanitized); - if (std.mem.eql(u8, expected, actual)) return 0; + if (std.mem.eql(u8, expected, actual_sanitized)) return 0; - zls.testing.renderLineDiff(gpa, expected, actual); + zls.testing.renderLineDiff(gpa, expected, actual_sanitized); return 1; } + +fn 
stripBasePath(base_dir: []const u8, path: []const u8) ?[]const u8 { + if (!std.mem.startsWith(u8, path, base_dir)) return null; + if (!std.mem.startsWith(u8, path[base_dir.len..], std.fs.path.sep_str)) return null; + return path[base_dir.len + std.fs.path.sep_str.len ..]; +} + +fn sanitizePath( + arena: std.mem.Allocator, + path: []const u8, + cwd: []const u8, + local_cache_dir: []const u8, + global_cache_dir: []const u8, +) error{OutOfMemory}![]const u8 { + const new = try arena.dupe(u8, new: { + if (stripBasePath(cwd, path)) |foo| { + break :new foo; + } + if (stripBasePath(local_cache_dir, path)) |to| { + var it = std.fs.path.componentIterator(to); + std.debug.assert(std.mem.eql(u8, it.next().?.name, "o")); + std.debug.assert(it.next().?.name.len == std.Build.Cache.hex_digest_len); + break :new try std.fmt.allocPrint(arena, ".zig-local-cache/{s}", .{to[it.end_index + 1 ..]}); + } + if (stripBasePath(global_cache_dir, path)) |to| { + var it = std.fs.path.componentIterator(to); + std.debug.assert(std.mem.eql(u8, it.next().?.name, "o")); + std.debug.assert(it.next().?.name.len == std.Build.Cache.hex_digest_len); + break :new try std.fmt.allocPrint(arena, ".zig-global-cache/{s}", .{to[it.end_index + 1 ..]}); + } + std.debug.assert(!std.fs.path.isAbsolute(path)); // got an absolute path that is not in cwd or any cache dir + break :new path; + }); + + // Convert windows style '\\' path separators to posix style '/'. + if (std.fs.path.sep == '\\') { + for (new) |*c| { + if (c.* == std.fs.path.sep) c.* = '/'; + } + } + + return new; +}