From 22ff26fe70b437b6678305bd84347718a4b5336d Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Mon, 9 Dec 2024 15:10:53 -0500 Subject: [PATCH 1/8] Added mremap support --- lib/std/c.zig | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lib/std/c.zig b/lib/std/c.zig index c6a5a8552110..f4c0d5f4b0a8 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -7640,6 +7640,25 @@ pub const MAP = switch (native_os) { else => void, }; +pub const REMAP = blk: { + if (native_os.isGnuLibC(native_abi)) { + if (versionCheck(.{ .major = 2, .minor = 4 })) { + packed struct(c_uint) { + MAYMOVE: bool = false, + FIXED: bool = false, + DONTUNMAP: bool = false, + _: u29 = 0, + }; + } else { + break :blk packed struct(c_uint) { + MAYMOVE: bool = false, + _: u31 = 0, + }; + } + } + break :blk void; +}; + /// Used by libc to communicate failure. Not actually part of the underlying syscall. pub const MAP_FAILED: *anyopaque = @ptrFromInt(maxInt(usize)); From 89b0958b22bb2ff9293bbdaeeebab4293a1bb52b Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Mon, 9 Dec 2024 15:16:16 -0500 Subject: [PATCH 2/8] Basic mremap support in std.c --- lib/std/c.zig | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/lib/std/c.zig b/lib/std/c.zig index f4c0d5f4b0a8..818670e94786 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -7640,23 +7640,12 @@ pub const MAP = switch (native_os) { else => void, }; -pub const REMAP = blk: { - if (native_os.isGnuLibC(native_abi)) { - if (versionCheck(.{ .major = 2, .minor = 4 })) { - packed struct(c_uint) { - MAYMOVE: bool = false, - FIXED: bool = false, - DONTUNMAP: bool = false, - _: u29 = 0, - }; - } else { - break :blk packed struct(c_uint) { - MAYMOVE: bool = false, - _: u31 = 0, - }; - } - } - break :blk void; +pub const REMAP = switch (native_os.isGnuLibC(native_abi)) { + true => packed struct(u32) { + MAYMOVE: bool = false, + _: u31 = 0, + }, + false => void, }; /// Used by libc to communicate failure. Not actually part of the underlying syscall. 
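Aside (not part of the patch series): the REMAP packed struct introduced above is laid out so that a plain @bitCast reproduces the Linux MREMAP_* flag values (MREMAP_MAYMOVE = 1, MREMAP_FIXED = 2, MREMAP_DONTUNMAP = 4). A minimal sketch follows, mirroring the three-flag layout that PATCH 3 later uses for std.os.linux.REMAP; the name Remap and the test are illustrative only and do not appear in the patches.

const std = @import("std");

// Standalone mirror of the REMAP layout from the patches, named Remap here
// only for illustration. Bit 0 = MAYMOVE, bit 1 = FIXED, bit 2 = DONTUNMAP.
const Remap = packed struct(u32) {
    MAYMOVE: bool = false,
    FIXED: bool = false,
    DONTUNMAP: bool = false,
    _: u29 = 0,
};

test "flag layout matches the Linux MREMAP_* constants" {
    try std.testing.expectEqual(@as(u32, 1), @as(u32, @bitCast(Remap{ .MAYMOVE = true })));
    try std.testing.expectEqual(@as(u32, 2), @as(u32, @bitCast(Remap{ .FIXED = true })));
    try std.testing.expectEqual(@as(u32, 4), @as(u32, @bitCast(Remap{ .DONTUNMAP = true })));
}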
@@ -9292,6 +9281,7 @@ pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize; pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize; pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque; pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int; +pub extern "c" fn mremap(addr: *align(page_size) const anyopaque, old_len: usize, new_len: usize, flags: REMAP) *anyopaque; pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int; pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int; pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int; From f6025f8c4b4b640520b799c49ea66e72a5359b44 Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Mon, 9 Dec 2024 15:21:45 -0500 Subject: [PATCH 3/8] Added mremap support to std.os.linux --- lib/std/os/linux.zig | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index e9a84d806256..9f51f2b6c3fe 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -305,6 +305,38 @@ pub const MAP = switch (native_arch) { else => @compileError("missing std.os.linux.MAP constants for this architecture"), }; +pub const REMAP = switch (native_arch) { + .x86_64, + .x86, + .aarch64, + .aarch64_be, + .arm, + .armeb, + .thumb, + .thumbeb, + .riscv32, + .riscv64, + .loongarch64, + .sparc64, + .mips, + .mipsel, + .mips64, + .mips64el, + .powerpc, + .powerpcle, + .powerpc64, + .powerpc64le, + .hexagon, + .s390x, + => packed struct(u32) { + MAYMOVE: bool = false, + FIXED: bool = false, + DONTUNMAP: bool = false, + _: u29 = 0, + }, + else => @compileError("missing std.os.linux.REMAP constants for this architecture"), +}; + pub const O = switch (native_arch) { .x86_64 => packed struct(u32) { ACCMODE: ACCMODE = .RDONLY, @@ -930,6 +962,16 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, of } } +pub fn mremap(address: ?[*]u8, old_length: usize, new_length: usize, flags: REMAP, new_addr: [*]u8) usize { + return syscall5( + @intFromPtr(address), + old_length, + new_length, + @as(u32, @bitCast(flags)), + @intFromPtr(new_addr), + ); +} + pub fn mprotect(address: [*]const u8, length: usize, protection: usize) usize { return syscall3(.mprotect, @intFromPtr(address), length, protection); } From 528fc5b6f9575b4441e9e886bf24acd3780b6d6d Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Mon, 9 Dec 2024 15:38:15 -0500 Subject: [PATCH 4/8] Added std.posix.mremap --- lib/std/posix.zig | 61 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 39b29ea76f02..6dc1519d33c7 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -24,6 +24,7 @@ const maxInt = std.math.maxInt; const cast = std.math.cast; const assert = std.debug.assert; const native_os = builtin.os.tag; +const native_abi = builtin.abi; test { _ = @import("posix/test.zig"); @@ -4767,6 +4768,66 @@ pub fn munmap(memory: []align(mem.page_size) const u8) void { } } +pub const MRemapError = error{ + /// "Segmanetation Fault". + /// The passed in slice is not a valid virtual address + /// for the process. 
+ PageFault, + + LockedMemoryLimitExceeded, + OutOfMemory, +} || UnexpectedError; + +pub fn mremap( + memory: []align(mem.page_size) const u8, + new_len: usize, + may_move: bool, +) MRemapError![]align(mem.page_size) u8 { + const err: E = blk: { + if (use_libc) { + if (native_os.isGnuLibC(native_abi)) { + const rc = system.mremap( + memory.ptr, + memory.len, + new_len, + .{ .MAYMOVE = may_move }, + ); + if (rc != std.c.MAP_FAILED) { + const head: [*]align(mem.page_size) u8 = @ptrCast(@alignCast(rc)); + return head[0..new_len]; + } else { + break :blk @enumFromInt(system._errno().*); + } + } + } else if (native_os == .linux) { + const rc = system.mremap( + memory.ptr, + memory.len, + new_len, + .{ .MAYMOVE = may_move }, + undefined, + ); + switch (errno(rc)) { + .SUCCESS => { + const head: [*]align(mem.page_size) u8 = @ptrFromInt(rc); + return head[0..new_len]; + }, + else => |err| break :blk err, + } + } + @compileError("mremap is not available on this target"); + }; + + return switch (err) { + .SUCCESS => unreachable, // Handled above + .INVAL => unreachable, // Invalid parameters + .AGAIN => error.LockedMemoryLimitExceeded, + .FAULT => error.PageFault, + .NOMEM => error.OutOfMemory, + else => unexpectedErrno(err), + }; +} + pub const MSyncError = error{ UnmappedMemory, PermissionDenied, From bc3194662b91485b4c9414db7a754a191ff1d761 Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Mon, 9 Dec 2024 17:21:14 -0500 Subject: [PATCH 5/8] Refactored PageAllocator.zig, allowing it to use mremap. Additionally fixed a typo in linux.zig --- lib/std/heap.zig | 3 - lib/std/heap/PageAllocator.zig | 187 +++++++++++++++++++++------------ lib/std/os/linux.zig | 1 + lib/std/posix.zig | 6 +- 4 files changed, 123 insertions(+), 74 deletions(-) diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 3d19d8daa6b2..f691786ba095 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -29,9 +29,6 @@ pub const MemoryPoolAligned = memory_pool.MemoryPoolAligned; pub const MemoryPoolExtra = memory_pool.MemoryPoolExtra; pub const MemoryPoolOptions = memory_pool.Options; -/// TODO Utilize this on Windows. -pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null; - const CAllocator = struct { comptime { if (!builtin.link_libc) { diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 4188c255285c..e54beb722bc7 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -8,46 +8,118 @@ const native_os = builtin.os.tag; const windows = std.os.windows; const posix = std.posix; +pub var next_mmap_addr_hint = std.atomic.Value(?[*]align(mem.page_size) u8).init(null); + pub const vtable = Allocator.VTable{ .alloc = alloc, .resize = resize, .free = free, }; -fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { - _ = ra; - _ = log2_align; - assert(n > 0); - if (n > maxInt(usize) - (mem.page_size - 1)) return null; - - if (native_os == .windows) { - const addr = windows.VirtualAlloc( - null, +/// Whether `posix.mremap` may be used +const use_mremap = @hasDecl(posix.system, "REMAP") and posix.system.REMAP != void; - // VirtualAlloc will round the length to a multiple of page size. - // VirtualAlloc docs: If the lpAddress parameter is NULL, this value is rounded up to the next page boundary - n, +/// Whether an invalid `next_mmap_addr_hint` will cause `mapGet` to fail +const invalid_hint_fails = switch (native_os) { + .windows => true, + else => false, +}; +/// Allocated pages of memory. The size of the allocation is rounded up to the page. 
+/// `hint` is a hint for where the allocation should be mapped to. +/// If `invalid_hint_fails`, then this function fails when an allocation cannot be made exactly at `hint`. +/// Otherwise, this function may return an address other than `hint`. +fn mapGet(bytes: usize, hint: ?[*]align(mem.page_size) u8) ![*]align(mem.page_size) u8 { + return switch (native_os) { + .windows => @ptrCast(@alignCast(try windows.VirtualAlloc( + @ptrCast(hint), + bytes, windows.MEM_COMMIT | windows.MEM_RESERVE, windows.PAGE_READWRITE, - ) catch return null; - return @ptrCast(addr); + ))), + else => (try posix.mmap( + hint, + bytes, + posix.PROT.READ | posix.PROT.WRITE, + .{ .TYPE = .PRIVATE, .ANONYMOUS = true }, + -1, + 0, + )).ptr, + }; +} + +/// Unmaps allocated memory. The size of the allocation is rounded up to the page. +/// Using `free` frees the allocation entirely, +/// while using `shrink` may simply decommit memory for later use. +fn mapFree(memory: []align(mem.page_size) u8, kind: enum { free, shrink }) void { + switch (native_os) { + .windows => windows.VirtualFree( + @ptrCast(memory.ptr), + memory.len, + switch (kind) { + .free => windows.MEM_RELEASE, + .shrink => windows.MEM_DECOMMIT, + }, + ), + else => posix.munmap(memory), } +} + +/// Attempts to shrink memory acquired for `mapGet` to `new_size` bytes. +/// Returns true of success, and false on failure. +fn mapShrink(memory: []align(mem.page_size) u8, new_size: usize) bool { + const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); + const old_size_aligned = mem.alignForward(usize, memory.len, mem.page_size); + + assert(new_size_aligned < old_size_aligned); + + mapFree(@alignCast(memory.ptr[new_size_aligned..old_size_aligned]), .shrink); + return true; +} + +/// Attempts to grow memory acquired for `mapGet` to `new_size` bytes. +/// Returns true of success, and false on failure. 
+fn mapGrow(memory: []align(mem.page_size) u8, new_size: usize) bool { + assert(new_size > memory.len); + + const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); + const old_size_aligned = mem.alignForward(usize, memory.len, mem.page_size); + + assert(new_size_aligned > old_size_aligned); + + if (use_mremap) { + const slice = posix.mremap( + memory[0..old_size_aligned], + new_size_aligned, + false, + ) catch return false; + assert(slice.ptr == memory.ptr); + return true; + } else { + return false; + } +} + +fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { + _ = ra; + _ = log2_align; + assert(n > 0); + if (n > maxInt(usize) - (mem.page_size - 1)) return null; const aligned_len = mem.alignForward(usize, n, mem.page_size); - const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered); - const slice = posix.mmap( - hint, - aligned_len, - posix.PROT.READ | posix.PROT.WRITE, - .{ .TYPE = .PRIVATE, .ANONYMOUS = true }, - -1, - 0, - ) catch return null; - assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size)); - const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len); - _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic); - return slice.ptr; + const hint = next_mmap_addr_hint.load(.unordered); + const head = mapGet(aligned_len, hint) catch blk: { + if (invalid_hint_fails and next_mmap_addr_hint.rmw(.Xchg, null, .monotonic) != null) { + // for systems where an invalid hint causes allocation failure, + // if we encounter an error, first attempt to retry without a hint + break :blk mapGet(aligned_len, null) catch return null; + } else { + return null; + } + }; + assert(mem.isAligned(@intFromPtr(head), mem.page_size)); + _ = next_mmap_addr_hint.cmpxchgStrong(hint, @alignCast(head + aligned_len), .monotonic, .monotonic); + return head; } fn resize( @@ -60,54 +132,29 @@ fn resize( _ = log2_buf_align; _ = return_address; const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); - - if (native_os == .windows) { - if (new_size <= buf_unaligned.len) { - const base_addr = @intFromPtr(buf_unaligned.ptr); - const old_addr_end = base_addr + buf_unaligned.len; - const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size); - if (old_addr_end > new_addr_end) { - // For shrinking that is not releasing, we will only - // decommit the pages not needed anymore. 
- windows.VirtualFree( - @as(*anyopaque, @ptrFromInt(new_addr_end)), - old_addr_end - new_addr_end, - windows.MEM_DECOMMIT, - ); - } - return true; - } - const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size); - if (new_size_aligned <= old_size_aligned) { - return true; - } - return false; + const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size); + const buf: []align(mem.page_size) u8 = @alignCast(buf_unaligned.ptr[0..old_size_aligned]); + const ordering = std.math.order(old_size_aligned, new_size_aligned); + const result = switch (ordering) { + .lt => mapGrow(buf, new_size_aligned), + .eq => true, + .gt => mapShrink(buf, new_size_aligned), + }; + if (result and ordering != .eq) { + const old_end: [*]align(mem.page_size) u8 = @alignCast(buf.ptr + old_size_aligned); + const new_end: [*]align(mem.page_size) u8 = @alignCast(buf.ptr + new_size_aligned); + _ = next_mmap_addr_hint.cmpxchgStrong(old_end, new_end, .monotonic, .monotonic); } - - const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size); - if (new_size_aligned == buf_aligned_len) - return true; - - if (new_size_aligned < buf_aligned_len) { - const ptr = buf_unaligned.ptr + new_size_aligned; - // TODO: if the next_mmap_addr_hint is within the unmapped range, update it - posix.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned])); - return true; - } - - // TODO: call mremap - // TODO: if the next_mmap_addr_hint is within the remapped range, update it - return false; + return result; } fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void { _ = log2_buf_align; _ = return_address; - if (native_os == .windows) { - windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE); - } else { - const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); - posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len])); - } + const aligned_len = mem.alignForward(usize, slice.len, mem.page_size); + const head: []align(mem.page_size) u8 = @alignCast(slice[0..aligned_len]); + mapFree(head, .free); + const tail: [*]align(mem.page_size) u8 = @alignCast(head.ptr + head.len); + _ = next_mmap_addr_hint.cmpxchgStrong(tail, head.ptr, .monotonic, .monotonic); } diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 9f51f2b6c3fe..dfec85728a9f 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -964,6 +964,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, of pub fn mremap(address: ?[*]u8, old_length: usize, new_length: usize, flags: REMAP, new_addr: [*]u8) usize { return syscall5( + .mremap, @intFromPtr(address), old_length, new_length, diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 6dc1519d33c7..c933652f5b03 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -4778,11 +4778,15 @@ pub const MRemapError = error{ OutOfMemory, } || UnexpectedError; +/// Change the size of memory which was mapped via `mmap`. +/// Neither `memory.len` nor `new_len` does not need to be aligned. +/// Only available on linux or glibc targets. 
pub fn mremap( - memory: []align(mem.page_size) const u8, + memory: []align(mem.page_size) u8, new_len: usize, may_move: bool, ) MRemapError![]align(mem.page_size) u8 { + // TODO: better support for REMAP_FIXED const err: E = blk: { if (use_libc) { if (native_os.isGnuLibC(native_abi)) { From 2934607ff76c37628a85258185de35c6ee39e33c Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Mon, 9 Dec 2024 17:27:53 -0500 Subject: [PATCH 6/8] Fixed potential out of bounds access on slice --- lib/std/heap/PageAllocator.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index e54beb722bc7..13dfdac3d229 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -89,7 +89,7 @@ fn mapGrow(memory: []align(mem.page_size) u8, new_size: usize) bool { if (use_mremap) { const slice = posix.mremap( - memory[0..old_size_aligned], + memory.ptr[0..old_size_aligned], new_size_aligned, false, ) catch return false; @@ -153,7 +153,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v _ = return_address; const aligned_len = mem.alignForward(usize, slice.len, mem.page_size); - const head: []align(mem.page_size) u8 = @alignCast(slice[0..aligned_len]); + const head: []align(mem.page_size) u8 = @alignCast(slice.ptr[0..aligned_len]); mapFree(head, .free); const tail: [*]align(mem.page_size) u8 = @alignCast(head.ptr + head.len); _ = next_mmap_addr_hint.cmpxchgStrong(tail, head.ptr, .monotonic, .monotonic); From d8be3fad099af2dd7e1dee5bf77ff3099df71689 Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Mon, 9 Dec 2024 18:45:43 -0500 Subject: [PATCH 7/8] Disabled problemativ usage of the mmap hint in windows. --- lib/std/heap/PageAllocator.zig | 192 ++++++++++++++++----------------- 1 file changed, 93 insertions(+), 99 deletions(-) diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 13dfdac3d229..0229fb62abbd 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -8,6 +8,7 @@ const native_os = builtin.os.tag; const windows = std.os.windows; const posix = std.posix; +/// TODO: utilize this on windows pub var next_mmap_addr_hint = std.atomic.Value(?[*]align(mem.page_size) u8).init(null); pub const vtable = Allocator.VTable{ @@ -19,85 +20,19 @@ pub const vtable = Allocator.VTable{ /// Whether `posix.mremap` may be used const use_mremap = @hasDecl(posix.system, "REMAP") and posix.system.REMAP != void; -/// Whether an invalid `next_mmap_addr_hint` will cause `mapGet` to fail -const invalid_hint_fails = switch (native_os) { - .windows => true, - else => false, -}; - -/// Allocated pages of memory. The size of the allocation is rounded up to the page. -/// `hint` is a hint for where the allocation should be mapped to. -/// If `invalid_hint_fails`, then this function fails when an allocation cannot be made exactly at `hint`. -/// Otherwise, this function may return an address other than `hint`. 
-fn mapGet(bytes: usize, hint: ?[*]align(mem.page_size) u8) ![*]align(mem.page_size) u8 { - return switch (native_os) { - .windows => @ptrCast(@alignCast(try windows.VirtualAlloc( - @ptrCast(hint), - bytes, - windows.MEM_COMMIT | windows.MEM_RESERVE, - windows.PAGE_READWRITE, - ))), - else => (try posix.mmap( - hint, - bytes, - posix.PROT.READ | posix.PROT.WRITE, - .{ .TYPE = .PRIVATE, .ANONYMOUS = true }, - -1, - 0, - )).ptr, - }; +fn mmapAlloc(bytes: usize, hint: ?[*]align(mem.page_size) u8) ![]align(mem.page_size) u8 { + return posix.mmap( + hint, + bytes, + posix.PROT.READ | posix.PROT.WRITE, + .{ .TYPE = .PRIVATE, .ANONYMOUS = true }, + -1, + 0, + ); } -/// Unmaps allocated memory. The size of the allocation is rounded up to the page. -/// Using `free` frees the allocation entirely, -/// while using `shrink` may simply decommit memory for later use. -fn mapFree(memory: []align(mem.page_size) u8, kind: enum { free, shrink }) void { - switch (native_os) { - .windows => windows.VirtualFree( - @ptrCast(memory.ptr), - memory.len, - switch (kind) { - .free => windows.MEM_RELEASE, - .shrink => windows.MEM_DECOMMIT, - }, - ), - else => posix.munmap(memory), - } -} - -/// Attempts to shrink memory acquired for `mapGet` to `new_size` bytes. -/// Returns true of success, and false on failure. -fn mapShrink(memory: []align(mem.page_size) u8, new_size: usize) bool { - const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); - const old_size_aligned = mem.alignForward(usize, memory.len, mem.page_size); - - assert(new_size_aligned < old_size_aligned); - - mapFree(@alignCast(memory.ptr[new_size_aligned..old_size_aligned]), .shrink); - return true; -} - -/// Attempts to grow memory acquired for `mapGet` to `new_size` bytes. -/// Returns true of success, and false on failure. -fn mapGrow(memory: []align(mem.page_size) u8, new_size: usize) bool { - assert(new_size > memory.len); - - const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); - const old_size_aligned = mem.alignForward(usize, memory.len, mem.page_size); - - assert(new_size_aligned > old_size_aligned); - - if (use_mremap) { - const slice = posix.mremap( - memory.ptr[0..old_size_aligned], - new_size_aligned, - false, - ) catch return false; - assert(slice.ptr == memory.ptr); - return true; - } else { - return false; - } +fn mapUnget(memory: []align(mem.page_size) u8) void { + std.posix.munmap(memory); } fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { @@ -106,20 +41,40 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { assert(n > 0); if (n > maxInt(usize) - (mem.page_size - 1)) return null; + if (native_os == .windows) { + const addr = windows.VirtualAlloc( + null, + + // VirtualAlloc will round the length to a multiple of page size. 
+ // VirtualAlloc docs: If the lpAddress parameter is NULL, this value is rounded up to the next page boundary + n, + + windows.MEM_COMMIT | windows.MEM_RESERVE, + windows.PAGE_READWRITE, + ) catch return null; + return @ptrCast(addr); + } + const aligned_len = mem.alignForward(usize, n, mem.page_size); const hint = next_mmap_addr_hint.load(.unordered); - const head = mapGet(aligned_len, hint) catch blk: { - if (invalid_hint_fails and next_mmap_addr_hint.rmw(.Xchg, null, .monotonic) != null) { - // for systems where an invalid hint causes allocation failure, - // if we encounter an error, first attempt to retry without a hint - break :blk mapGet(aligned_len, null) catch return null; - } else { - return null; - } + + const slice = posix.mmap( + hint, + aligned_len, + posix.PROT.READ | posix.PROT.WRITE, + .{ .TYPE = .PRIVATE, .ANONYMOUS = true }, + -1, + 0, + ) catch { + _ = next_mmap_addr_hint.cmpxchgStrong(hint, null, .monotonic, .monotonic); + return null; }; - assert(mem.isAligned(@intFromPtr(head), mem.page_size)); - _ = next_mmap_addr_hint.cmpxchgStrong(hint, @alignCast(head + aligned_len), .monotonic, .monotonic); - return head; + + assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size)); + const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len); + _ = next_mmap_addr_hint.cmpxchgStrong(hint, new_hint, .monotonic, .monotonic); + + return slice.ptr; } fn resize( @@ -131,16 +86,51 @@ fn resize( ) bool { _ = log2_buf_align; _ = return_address; + const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size); + + if (native_os == .windows) { + if (new_size <= buf_unaligned.len) { + const base_addr = @intFromPtr(buf_unaligned.ptr); + const old_addr_end = base_addr + buf_unaligned.len; + const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size); + if (old_addr_end > new_addr_end) { + // For shrinking that is not releasing, we will only + // decommit the pages not needed anymore. 
+ windows.VirtualFree( + @as(*anyopaque, @ptrFromInt(new_addr_end)), + old_addr_end - new_addr_end, + windows.MEM_DECOMMIT, + ); + } + return true; + } + return new_size_aligned <= old_size_aligned; + } + const buf: []align(mem.page_size) u8 = @alignCast(buf_unaligned.ptr[0..old_size_aligned]); - const ordering = std.math.order(old_size_aligned, new_size_aligned); - const result = switch (ordering) { - .lt => mapGrow(buf, new_size_aligned), - .eq => true, - .gt => mapShrink(buf, new_size_aligned), + const result = switch (std.math.order(old_size_aligned, new_size_aligned)) { + .lt => grow: { + if (use_mremap) { + const slice = posix.mremap( + buf.ptr[0..old_size_aligned], + new_size_aligned, + false, + ) catch break :grow false; + assert(slice.ptr == buf.ptr); + break :grow true; + } else { + break :grow false; + } + }, + .eq => return true, // return now and don't set the hint + .gt => shrink: { + posix.munmap(@alignCast(buf.ptr[new_size_aligned..old_size_aligned])); + break :shrink true; + }, }; - if (result and ordering != .eq) { + if (result) { const old_end: [*]align(mem.page_size) u8 = @alignCast(buf.ptr + old_size_aligned); const new_end: [*]align(mem.page_size) u8 = @alignCast(buf.ptr + new_size_aligned); _ = next_mmap_addr_hint.cmpxchgStrong(old_end, new_end, .monotonic, .monotonic); @@ -152,9 +142,13 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(usize, slice.len, mem.page_size); - const head: []align(mem.page_size) u8 = @alignCast(slice.ptr[0..aligned_len]); - mapFree(head, .free); - const tail: [*]align(mem.page_size) u8 = @alignCast(head.ptr + head.len); - _ = next_mmap_addr_hint.cmpxchgStrong(tail, head.ptr, .monotonic, .monotonic); + if (native_os == .windows) { + windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE); + } else { + const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); + const head: []align(mem.page_size) u8 = @alignCast(slice.ptr[0..buf_aligned_len]); + posix.munmap(head); + const tail: [*]align(mem.page_size) u8 = @alignCast(head.ptr + head.len); + _ = next_mmap_addr_hint.cmpxchgStrong(tail, head.ptr, .monotonic, .monotonic); + } } From 1f7f7de950ff2a1149facdb182bc7a6e2af4e356 Mon Sep 17 00:00:00 2001 From: seanthegleaming Date: Tue, 10 Dec 2024 09:29:28 -0500 Subject: [PATCH 8/8] Removed redundant unused functions from PageAllocator.zig --- lib/std/heap/PageAllocator.zig | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 0229fb62abbd..d7d44b631b67 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -20,21 +20,6 @@ pub const vtable = Allocator.VTable{ /// Whether `posix.mremap` may be used const use_mremap = @hasDecl(posix.system, "REMAP") and posix.system.REMAP != void; -fn mmapAlloc(bytes: usize, hint: ?[*]align(mem.page_size) u8) ![]align(mem.page_size) u8 { - return posix.mmap( - hint, - bytes, - posix.PROT.READ | posix.PROT.WRITE, - .{ .TYPE = .PRIVATE, .ANONYMOUS = true }, - -1, - 0, - ); -} - -fn mapUnget(memory: []align(mem.page_size) u8) void { - std.posix.munmap(memory); -} - fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { _ = ra; _ = log2_align;
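Aside (not part of the patch series): a minimal usage sketch of the std.posix.mremap wrapper added in PATCH 4 and adjusted in PATCH 5, assuming a Linux or glibc target where posix.system.REMAP is non-void. The program structure below is illustrative only and is not taken from the patches; only the mremap signature (memory slice, new_len, may_move) comes from the series.

const std = @import("std");
const posix = std.posix;
const mem = std.mem;

pub fn main() !void {
    const page = mem.page_size;

    // Map one page of anonymous, read/write memory.
    const initial = try posix.mmap(
        null,
        page,
        posix.PROT.READ | posix.PROT.WRITE,
        .{ .TYPE = .PRIVATE, .ANONYMOUS = true },
        -1,
        0,
    );
    // If the remap below fails, the original mapping is still valid and
    // should be released.
    errdefer posix.munmap(initial);

    // Grow the mapping to four pages. Passing may_move = true corresponds to
    // MREMAP_MAYMOVE, so the returned slice may start at a different address.
    const grown = try posix.mremap(initial, 4 * page, true);
    defer posix.munmap(grown);

    // The full new length is usable.
    grown[4 * page - 1] = 0xaa;
}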