diff --git a/CMakeLists.txt b/CMakeLists.txt index 47bd0da03046..2407b99c8a49 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -392,6 +392,7 @@ set(ZIG_STAGE2_SOURCES lib/std/Thread/Pool.zig lib/std/Thread/ResetEvent.zig lib/std/Thread/WaitGroup.zig + lib/std/alloc/arena.zig lib/std/array_hash_map.zig lib/std/array_list.zig lib/std/ascii.zig @@ -428,7 +429,6 @@ set(ZIG_STAGE2_SOURCES lib/std/hash/wyhash.zig lib/std/hash_map.zig lib/std/heap.zig - lib/std/heap/arena_allocator.zig lib/std/io.zig lib/std/io/Reader.zig lib/std/io/Writer.zig diff --git a/lib/std/alloc.zig b/lib/std/alloc.zig new file mode 100644 index 000000000000..05f9d2a9027a --- /dev/null +++ b/lib/std/alloc.zig @@ -0,0 +1,769 @@ +const std = @import("std.zig"); +const builtin = @import("builtin"); +const root = @import("root"); +const assert = std.debug.assert; +const testing = std.testing; +const mem = std.mem; +const c = std.c; +const Allocator = std.mem.Allocator; +const windows = std.os.windows; + +pub const Logging = @import("alloc/logging.zig"); +pub const LogToWriter = @import("alloc/log_to_writer.zig"); +pub const Arena = @import("alloc/arena.zig"); +pub const GeneralPurpose = @import("alloc/general_purpose.zig"); +pub const Wasm = @import("alloc/Wasm.zig"); +pub const WasmPage = @import("alloc/WasmPage.zig"); +pub const Page = @import("alloc/Page.zig"); +pub const ThreadSafe = @import("alloc/ThreadSafe.zig"); +pub const Sbrk = @import("alloc/sbrk.zig"); + +pub const MemoryPool = @import("alloc/memory_pool.zig"); + +const CAllocator = struct { + comptime { + if (!builtin.link_libc) { + @compileError("C allocator is only available when linking against libc"); + } + } + + pub const supports_malloc_size = @TypeOf(malloc_size) != void; + pub const malloc_size = if (@TypeOf(c.malloc_size) != void) + c.malloc_size + else if (@TypeOf(c.malloc_usable_size) != void) + c.malloc_usable_size + else if (@TypeOf(c._msize) != void) + c._msize + else {}; + + pub const supports_posix_memalign = switch (builtin.os.tag) { + .dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => true, + else => false, + }; + + fn getHeader(ptr: [*]u8) *[*]u8 { + return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize))); + } + + fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 { + const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); + if (supports_posix_memalign) { + // The posix_memalign only accepts alignment values that are a + // multiple of the pointer size + const eff_alignment = @max(alignment, @sizeOf(usize)); + + var aligned_ptr: ?*anyopaque = undefined; + if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0) + return null; + + return @as([*]u8, @ptrCast(aligned_ptr)); + } + + // Thin wrapper around regular malloc, overallocate to account for + // alignment padding and store the original malloc()'ed pointer before + // the aligned address. 
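The comment above describes the fallback path when posix_memalign is unavailable: over-allocate, round up past a usize-sized slot, and stash the original malloc pointer just below the aligned address so alignedFree and alignedAllocSize can recover it. A minimal sketch of that pointer arithmetic, using made-up example values rather than anything from the patch:

    const std = @import("std");

    test "header-before-aligned-pointer arithmetic" {
        // Hypothetical values: malloc returned 0x1003 and 16-byte alignment was requested.
        const unaligned_addr: usize = 0x1003;
        const alignment: usize = 16;
        // Skip at least one usize so the original pointer fits just below the aligned address.
        const aligned_addr = std.mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment);
        try std.testing.expect(std.mem.isAligned(aligned_addr, alignment));
        // alignedFree later reads the header at aligned_addr - @sizeOf(usize) to recover
        // the original allocation.
        try std.testing.expect(aligned_addr - @sizeOf(usize) >= unaligned_addr);
    }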
+ const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null)); + const unaligned_addr = @intFromPtr(unaligned_ptr); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); + const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); + getHeader(aligned_ptr).* = unaligned_ptr; + + return aligned_ptr; + } + + fn alignedFree(ptr: [*]u8) void { + if (supports_posix_memalign) { + return c.free(ptr); + } + + const unaligned_ptr = getHeader(ptr).*; + c.free(unaligned_ptr); + } + + fn alignedAllocSize(ptr: [*]u8) usize { + if (supports_posix_memalign) { + return CAllocator.malloc_size(ptr); + } + + const unaligned_ptr = getHeader(ptr).*; + const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr); + return CAllocator.malloc_size(unaligned_ptr) - delta; + } + + fn alloc( + _: *anyopaque, + len: usize, + log2_align: u8, + return_address: usize, + ) ?[*]u8 { + _ = return_address; + assert(len > 0); + return alignedAlloc(len, log2_align); + } + + fn resize( + _: *anyopaque, + buf: []u8, + log2_buf_align: u8, + new_len: usize, + return_address: usize, + ) bool { + _ = log2_buf_align; + _ = return_address; + if (new_len <= buf.len) { + return true; + } + if (CAllocator.supports_malloc_size) { + const full_len = alignedAllocSize(buf.ptr); + if (new_len <= full_len) { + return true; + } + } + return false; + } + + fn free( + _: *anyopaque, + buf: []u8, + log2_buf_align: u8, + return_address: usize, + ) void { + _ = log2_buf_align; + _ = return_address; + alignedFree(buf.ptr); + } +}; + +/// Supports the full Allocator interface, including alignment, and exploiting +/// `malloc_usable_size` if available. For an allocator that directly calls +/// `malloc`/`free`, see `raw_c_allocator`. +pub const c_allocator = Allocator{ + .ptr = undefined, + .vtable = &c_allocator_vtable, +}; +const c_allocator_vtable = Allocator.VTable{ + .alloc = CAllocator.alloc, + .resize = CAllocator.resize, + .free = CAllocator.free, +}; + +/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls +/// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`. +/// This allocator is safe to use as the backing allocator with +/// `ArenaAllocator` for example and is more optimal in such a case +/// than `c_allocator`. +pub const raw_c_allocator = Allocator{ + .ptr = undefined, + .vtable = &raw_c_allocator_vtable, +}; +const raw_c_allocator_vtable = Allocator.VTable{ + .alloc = rawCAlloc, + .resize = rawCResize, + .free = rawCFree, +}; + +fn rawCAlloc( + _: *anyopaque, + len: usize, + log2_ptr_align: u8, + ret_addr: usize, +) ?[*]u8 { + _ = ret_addr; + assert(log2_ptr_align <= comptime std.math.log2_int(usize, @alignOf(std.c.max_align_t))); + // Note that this pointer cannot be aligncasted to max_align_t because if + // len is < max_align_t then the alignment can be smaller. For example, if + // max_align_t is 16, but the user requests 8 bytes, there is no built-in + // type in C that is size 8 and has 16 byte alignment, so the alignment may + // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc + // is allowed to return a 1-byte aligned pointer. 
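The doc comment above recommends raw_c_allocator as an arena backing allocator, since the arena never needs the alignment bookkeeping or malloc_usable_size probing that c_allocator adds. A short usage sketch under this patch's layout (std.alloc.Arena, std.alloc.raw_c_allocator); buildReport is a hypothetical caller and libc is assumed to be linked:

    const std = @import("std");

    // Hypothetical helper: all scratch memory comes from malloc via raw_c_allocator
    // and is released in one arena.deinit().
    fn buildReport(items: []const u32) !usize {
        var arena = std.alloc.Arena.init(std.alloc.raw_c_allocator);
        defer arena.deinit();
        const a = arena.allocator();

        const scratch = try a.alloc(u32, items.len);
        @memcpy(scratch, items);
        return scratch.len;
    }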
+ return @as(?[*]u8, @ptrCast(c.malloc(len))); +} + +fn rawCResize( + _: *anyopaque, + buf: []u8, + log2_old_align: u8, + new_len: usize, + ret_addr: usize, +) bool { + _ = log2_old_align; + _ = ret_addr; + + if (new_len <= buf.len) + return true; + + if (CAllocator.supports_malloc_size) { + const full_len = CAllocator.malloc_size(buf.ptr); + if (new_len <= full_len) return true; + } + + return false; +} + +fn rawCFree( + _: *anyopaque, + buf: []u8, + log2_old_align: u8, + ret_addr: usize, +) void { + _ = log2_old_align; + _ = ret_addr; + c.free(buf.ptr); +} + +/// This allocator makes a syscall directly for every allocation and free. +/// Thread-safe and lock-free. +pub const page_allocator = if (@hasDecl(root, "os") and + @hasDecl(root.os, "alloc") and + @hasDecl(root.os.alloc, "page_allocator")) + root.os.alloc.page_allocator +else if (builtin.target.isWasm()) + Allocator{ + .ptr = undefined, + .vtable = &WasmPage.vtable, + } +else if (builtin.target.os.tag == .plan9) + Allocator{ + .ptr = undefined, + .vtable = &Sbrk(std.os.plan9.sbrk).vtable, + } +else + Allocator{ + .ptr = undefined, + .vtable = &Page.vtable, + }; + +/// This allocator is fast, small, and specific to WebAssembly. In the future, +/// this will be the implementation automatically selected by +/// `GeneralPurposeAllocator` when compiling in `ReleaseSmall` mode for wasm32 +/// and wasm64 architectures. +/// Until then, it is available here to play with. +pub const wasm_allocator = Allocator{ + .ptr = undefined, + .vtable = &std.alloc.Wasm.vtable, +}; + +/// Verifies that the adjusted length will still map to the full length +pub fn alignPageAllocLen(full_len: usize, len: usize) usize { + const aligned_len = mem.alignAllocLen(full_len, len); + assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len); + return aligned_len; +} + +fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool { + return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and + @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len); +} + +fn sliceContainsSlice(container: []u8, slice: []u8) bool { + return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and + (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len); +} + +pub const FixedBuffer = struct { + end_index: usize, + buffer: []u8, + + pub fn init(buffer: []u8) FixedBuffer { + return FixedBuffer{ + .buffer = buffer, + .end_index = 0, + }; + } + + /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe + pub fn allocator(self: *FixedBuffer) Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .free = free, + }, + }; + } + + /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator` + /// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe + pub fn threadSafeAllocator(self: *FixedBuffer) Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = threadSafeAlloc, + .resize = Allocator.noResize, + .free = Allocator.noFree, + }, + }; + } + + pub fn ownsPtr(self: *FixedBuffer, ptr: [*]u8) bool { + return sliceContainsPtr(self.buffer, ptr); + } + + pub fn ownsSlice(self: *FixedBuffer, slice: []u8) bool { + return sliceContainsSlice(self.buffer, slice); + } + + /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index + /// then we won't be able to determine what the last allocation was. 
This is because + /// the alignForward operation done in alloc is not reversible. + pub fn isLastAllocation(self: *FixedBuffer, buf: []u8) bool { + return buf.ptr + buf.len == self.buffer.ptr + self.end_index; + } + + fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { + const self: *FixedBuffer = @ptrCast(@alignCast(ctx)); + _ = ra; + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); + const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null; + const adjusted_index = self.end_index + adjust_off; + const new_end_index = adjusted_index + n; + if (new_end_index > self.buffer.len) return null; + self.end_index = new_end_index; + return self.buffer.ptr + adjusted_index; + } + + fn resize( + ctx: *anyopaque, + buf: []u8, + log2_buf_align: u8, + new_size: usize, + return_address: usize, + ) bool { + const self: *FixedBuffer = @ptrCast(@alignCast(ctx)); + _ = log2_buf_align; + _ = return_address; + assert(@inComptime() or self.ownsSlice(buf)); + + if (!self.isLastAllocation(buf)) { + if (new_size > buf.len) return false; + return true; + } + + if (new_size <= buf.len) { + const sub = buf.len - new_size; + self.end_index -= sub; + return true; + } + + const add = new_size - buf.len; + if (add + self.end_index > self.buffer.len) return false; + + self.end_index += add; + return true; + } + + fn free( + ctx: *anyopaque, + buf: []u8, + log2_buf_align: u8, + return_address: usize, + ) void { + const self: *FixedBuffer = @ptrCast(@alignCast(ctx)); + _ = log2_buf_align; + _ = return_address; + assert(@inComptime() or self.ownsSlice(buf)); + + if (self.isLastAllocation(buf)) { + self.end_index -= buf.len; + } + } + + fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { + const self: *FixedBuffer = @ptrCast(@alignCast(ctx)); + _ = ra; + const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); + var end_index = @atomicLoad(usize, &self.end_index, .seq_cst); + while (true) { + const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null; + const adjusted_index = end_index + adjust_off; + const new_end_index = adjusted_index + n; + if (new_end_index > self.buffer.len) return null; + end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse + return self.buffer[adjusted_index..new_end_index].ptr; + } + } + + pub fn reset(self: *FixedBuffer) void { + self.end_index = 0; + } +}; + +/// Returns a `StackFallbackAllocator` allocating using either a +/// `FixedBufferAllocator` on an array of size `size` and falling back to +/// `fallback_allocator` if that fails. +pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) { + return StackFallbackAllocator(size){ + .buffer = undefined, + .fallback_allocator = fallback_allocator, + .fixed_buffer_allocator = undefined, + }; +} + +/// An allocator that attempts to allocate using a +/// `FixedBufferAllocator` using an array of size `size`. If the +/// allocation fails, it will fall back to using +/// `fallback_allocator`. Easily created with `stackFallback`. 
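A brief usage sketch for the stack-fallback allocator described above, assuming the patch's std.alloc.stackFallback entry point; note that get() both hands out the interface and resets the internal fixed buffer, which is why the type deliberately has no .allocator() method:

    const std = @import("std");

    test "stackFallback sketch" {
        // Requests that fit in the 32-byte stack buffer are served from it;
        // anything larger silently falls back to the testing allocator.
        var sfa = std.alloc.stackFallback(32, std.testing.allocator);
        const a = sfa.get();

        const small = try a.alloc(u8, 16); // served by the fixed buffer
        defer a.free(small);
        const big = try a.alloc(u8, 1024); // exceeds the buffer, uses the fallback
        defer a.free(big);
    }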
+pub fn StackFallbackAllocator(comptime size: usize) type { + return struct { + const Self = @This(); + + buffer: [size]u8, + fallback_allocator: Allocator, + fixed_buffer_allocator: FixedBuffer, + get_called: if (std.debug.runtime_safety) bool else void = + if (std.debug.runtime_safety) false else {}, + + /// This function both fetches a `Allocator` interface to this + /// allocator *and* resets the internal buffer allocator. + pub fn get(self: *Self) Allocator { + if (std.debug.runtime_safety) { + assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();` + self.get_called = true; + } + self.fixed_buffer_allocator = FixedBuffer.init(self.buffer[0..]); + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .free = free, + }, + }; + } + + /// Unlike most std allocators `StackFallbackAllocator` modifies + /// its internal state before returning an implementation of + /// the`Allocator` interface and therefore also doesn't use + /// the usual `.allocator()` method. + pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead"); + + fn alloc( + ctx: *anyopaque, + len: usize, + log2_ptr_align: u8, + ra: usize, + ) ?[*]u8 { + const self: *Self = @ptrCast(@alignCast(ctx)); + return FixedBuffer.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse + return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra); + } + + fn resize( + ctx: *anyopaque, + buf: []u8, + log2_buf_align: u8, + new_len: usize, + ra: usize, + ) bool { + const self: *Self = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBuffer.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra); + } else { + return self.fallback_allocator.rawResize(buf, log2_buf_align, new_len, ra); + } + } + + fn free( + ctx: *anyopaque, + buf: []u8, + log2_buf_align: u8, + ra: usize, + ) void { + const self: *Self = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBuffer.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra); + } else { + return self.fallback_allocator.rawFree(buf, log2_buf_align, ra); + } + } + }; +} + +test "c_allocator" { + if (builtin.link_libc) { + try testAllocator(c_allocator); + try testAllocatorAligned(c_allocator); + try testAllocatorLargeAlignment(c_allocator); + try testAllocatorAlignedShrink(c_allocator); + } +} + +test "raw_c_allocator" { + if (builtin.link_libc) { + try testAllocator(raw_c_allocator); + } +} + +test "PageAllocator" { + const allocator = page_allocator; + try testAllocator(allocator); + try testAllocatorAligned(allocator); + if (!builtin.target.isWasm()) { + try testAllocatorLargeAlignment(allocator); + try testAllocatorAlignedShrink(allocator); + } + + if (builtin.os.tag == .windows) { + const slice = try allocator.alignedAlloc(u8, mem.page_size, 128); + slice[0] = 0x12; + slice[127] = 0x34; + allocator.free(slice); + } + { + var buf = try allocator.alloc(u8, mem.page_size + 1); + defer allocator.free(buf); + buf = try allocator.realloc(buf, 1); // shrink past the page boundary + } +} + +test "Arena" { + var arena_allocator = Arena.init(page_allocator); + defer arena_allocator.deinit(); + const allocator = arena_allocator.allocator(); + + try testAllocator(allocator); + try testAllocatorAligned(allocator); + try testAllocatorLargeAlignment(allocator); + try testAllocatorAlignedShrink(allocator); +} + +var test_fixed_buffer_allocator_memory: [800000 * 
@sizeOf(u64)]u8 = undefined; +test "FixedBufferAllocator" { + var fixed_buffer_allocator = mem.validationWrap(FixedBuffer.init(test_fixed_buffer_allocator_memory[0..])); + const allocator = fixed_buffer_allocator.allocator(); + + try testAllocator(allocator); + try testAllocatorAligned(allocator); + try testAllocatorLargeAlignment(allocator); + try testAllocatorAlignedShrink(allocator); +} + +test "FixedBufferAllocator.reset" { + var buf: [8]u8 align(@alignOf(u64)) = undefined; + var fba = FixedBuffer.init(buf[0..]); + const allocator = fba.allocator(); + + const X = 0xeeeeeeeeeeeeeeee; + const Y = 0xffffffffffffffff; + + const x = try allocator.create(u64); + x.* = X; + try testing.expectError(error.OutOfMemory, allocator.create(u64)); + + fba.reset(); + const y = try allocator.create(u64); + y.* = Y; + + // we expect Y to have overwritten X. + try testing.expect(x.* == y.*); + try testing.expect(y.* == Y); +} + +test "StackFallbackAllocator" { + { + var stack_allocator = stackFallback(4096, std.testing.allocator); + try testAllocator(stack_allocator.get()); + } + { + var stack_allocator = stackFallback(4096, std.testing.allocator); + try testAllocatorAligned(stack_allocator.get()); + } + { + var stack_allocator = stackFallback(4096, std.testing.allocator); + try testAllocatorLargeAlignment(stack_allocator.get()); + } + { + var stack_allocator = stackFallback(4096, std.testing.allocator); + try testAllocatorAlignedShrink(stack_allocator.get()); + } +} + +test "FixedBuffer Reuse memory on realloc" { + var small_fixed_buffer: [10]u8 = undefined; + // check if we re-use the memory + { + var fixed_buffer_allocator = FixedBuffer.init(small_fixed_buffer[0..]); + const allocator = fixed_buffer_allocator.allocator(); + + const slice0 = try allocator.alloc(u8, 5); + try testing.expect(slice0.len == 5); + const slice1 = try allocator.realloc(slice0, 10); + try testing.expect(slice1.ptr == slice0.ptr); + try testing.expect(slice1.len == 10); + try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11)); + } + // check that we don't re-use the memory if it's not the most recent block + { + var fixed_buffer_allocator = FixedBuffer.init(small_fixed_buffer[0..]); + const allocator = fixed_buffer_allocator.allocator(); + + var slice0 = try allocator.alloc(u8, 2); + slice0[0] = 1; + slice0[1] = 2; + const slice1 = try allocator.alloc(u8, 2); + const slice2 = try allocator.realloc(slice0, 4); + try testing.expect(slice0.ptr != slice2.ptr); + try testing.expect(slice1.ptr != slice2.ptr); + try testing.expect(slice2[0] == 1); + try testing.expect(slice2[1] == 2); + } +} + +test "Thread safe FixedBuffer" { + var fixed_buffer_allocator = FixedBuffer.init(test_fixed_buffer_allocator_memory[0..]); + + try testAllocator(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator()); +} + +/// This one should not try alignments that exceed what C malloc can handle. +pub fn testAllocator(base_allocator: mem.Allocator) !void { + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = validationAllocator.allocator(); + + var slice = try allocator.alloc(*i32, 100); + try testing.expect(slice.len == 100); + for (slice, 0..) 
|*item, i| { + item.* = try allocator.create(i32); + item.*.* = @as(i32, @intCast(i)); + } + + slice = try allocator.realloc(slice, 20000); + try testing.expect(slice.len == 20000); + + for (slice[0..100], 0..) |item, i| { + try testing.expect(item.* == @as(i32, @intCast(i))); + allocator.destroy(item); + } + + if (allocator.resize(slice, 50)) { + slice = slice[0..50]; + if (allocator.resize(slice, 25)) { + slice = slice[0..25]; + try testing.expect(allocator.resize(slice, 0)); + slice = slice[0..0]; + slice = try allocator.realloc(slice, 10); + try testing.expect(slice.len == 10); + } + } + allocator.free(slice); + + // Zero-length allocation + const empty = try allocator.alloc(u8, 0); + allocator.free(empty); + // Allocation with zero-sized types + const zero_bit_ptr = try allocator.create(u0); + zero_bit_ptr.* = 0; + allocator.destroy(zero_bit_ptr); + + const oversize = try allocator.alignedAlloc(u32, null, 5); + try testing.expect(oversize.len >= 5); + for (oversize) |*item| { + item.* = 0xDEADBEEF; + } + allocator.free(oversize); +} + +pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void { + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = validationAllocator.allocator(); + + // Test a few alignment values, smaller and bigger than the type's one + inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| { + // initial + var slice = try allocator.alignedAlloc(u8, alignment, 10); + try testing.expect(slice.len == 10); + // grow + slice = try allocator.realloc(slice, 100); + try testing.expect(slice.len == 100); + if (allocator.resize(slice, 10)) { + slice = slice[0..10]; + } + try testing.expect(allocator.resize(slice, 0)); + slice = slice[0..0]; + // realloc from zero + slice = try allocator.realloc(slice, 100); + try testing.expect(slice.len == 100); + if (allocator.resize(slice, 10)) { + slice = slice[0..10]; + } + try testing.expect(allocator.resize(slice, 0)); + } +} + +pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void { + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = validationAllocator.allocator(); + + const large_align: usize = mem.page_size / 2; + + var align_mask: usize = undefined; + align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0]; + + var slice = try allocator.alignedAlloc(u8, large_align, 500); + try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); + + if (allocator.resize(slice, 100)) { + slice = slice[0..100]; + } + + slice = try allocator.realloc(slice, 5000); + try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); + + if (allocator.resize(slice, 10)) { + slice = slice[0..10]; + } + + slice = try allocator.realloc(slice, 20000); + try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); + + allocator.free(slice); +} + +pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = validationAllocator.allocator(); + + var debug_buffer: [1000]u8 = undefined; + var fib = FixedBuffer.init(&debug_buffer); + const debug_allocator = fib.allocator(); + + const alloc_size = mem.page_size * 2 + 50; + var slice = try allocator.alignedAlloc(u8, 16, alloc_size); + defer allocator.free(slice); + + var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator); + // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary, + // which is 16 
pages, hence the 32. This test may require to increase + // the size of the allocations feeding the `allocator` parameter if they + // fail, because of this high over-alignment we want to have. + while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), mem.page_size * 32)) { + try stuff_to_free.append(slice); + slice = try allocator.alignedAlloc(u8, 16, alloc_size); + } + while (stuff_to_free.popOrNull()) |item| { + allocator.free(item); + } + slice[0] = 0x12; + slice[60] = 0x34; + + slice = try allocator.reallocAdvanced(slice, alloc_size / 2, 0); + try testing.expect(slice[0] == 0x12); + try testing.expect(slice[60] == 0x34); +} + +test { + _ = Logging; + _ = LogToWriter; + _ = Logging.ScopedAllocator; + _ = MemoryPool; + _ = Arena; + _ = GeneralPurpose; + if (comptime builtin.target.isWasm()) { + _ = Wasm; + _ = WasmPage; + } +} diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/alloc/Page.zig similarity index 91% rename from lib/std/heap/PageAllocator.zig rename to lib/std/alloc/Page.zig index 4188c255285c..1a7ffaa9387d 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/alloc/Page.zig @@ -1,6 +1,5 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); -const Allocator = std.mem.Allocator; const mem = std.mem; const maxInt = std.math.maxInt; const assert = std.debug.assert; @@ -8,7 +7,10 @@ const native_os = builtin.os.tag; const windows = std.os.windows; const posix = std.posix; -pub const vtable = Allocator.VTable{ +// TODO Utilize this on Windows. +var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null; + +pub const vtable = mem.Allocator.VTable{ .alloc = alloc, .resize = resize, .free = free, @@ -35,7 +37,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { } const aligned_len = mem.alignForward(usize, n, mem.page_size); - const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered); + const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .unordered); const slice = posix.mmap( hint, aligned_len, @@ -46,7 +48,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { ) catch return null; assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size)); const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len); - _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic); + _ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic); return slice.ptr; } diff --git a/lib/std/heap/ThreadSafeAllocator.zig b/lib/std/alloc/ThreadSafe.zig similarity index 72% rename from lib/std/heap/ThreadSafeAllocator.zig rename to lib/std/alloc/ThreadSafe.zig index 12bb095b30fd..252934cc367f 100644 --- a/lib/std/heap/ThreadSafeAllocator.zig +++ b/lib/std/alloc/ThreadSafe.zig @@ -1,9 +1,10 @@ //! Wraps a non-thread-safe allocator and makes it thread-safe. 
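The thread-safe wrapper keeps its old shape after the rename: one mutex serializing the child allocator's vtable calls. A usage sketch under the patch's std.alloc.ThreadSafe and std.alloc.FixedBuffer names; the buffer size is arbitrary:

    const std = @import("std");

    test "ThreadSafe wrapper sketch" {
        var buffer: [4096]u8 = undefined;
        var fba = std.alloc.FixedBuffer.init(&buffer);

        // Serializes alloc/resize/free so several threads could share `fba`.
        var ts = std.alloc.ThreadSafe{ .child_allocator = fba.allocator() };
        const a = ts.allocator();

        const slice = try a.alloc(u32, 8);
        defer a.free(slice);
    }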
- -child_allocator: Allocator, +child_allocator: std.mem.Allocator, mutex: std.Thread.Mutex = .{}, -pub fn allocator(self: *ThreadSafeAllocator) Allocator { +const Allocator = @This(); + +pub fn allocator(self: *Allocator) std.mem.Allocator { return .{ .ptr = self, .vtable = &.{ @@ -15,7 +16,7 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator { } fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); + const self: *Allocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); @@ -23,7 +24,7 @@ fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { } fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); + const self: *Allocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); @@ -32,7 +33,7 @@ fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_ad } fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { - const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx)); + const self: *Allocator = @ptrCast(@alignCast(ctx)); self.mutex.lock(); defer self.mutex.unlock(); @@ -41,5 +42,3 @@ fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { } const std = @import("../std.zig"); -const ThreadSafeAllocator = @This(); -const Allocator = std.mem.Allocator; diff --git a/lib/std/heap/WasmAllocator.zig b/lib/std/alloc/Wasm.zig similarity index 95% rename from lib/std/heap/WasmAllocator.zig rename to lib/std/alloc/Wasm.zig index 61ad6247153f..cd206d9f5afe 100644 --- a/lib/std/heap/WasmAllocator.zig +++ b/lib/std/alloc/Wasm.zig @@ -2,7 +2,6 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); -const Allocator = std.mem.Allocator; const mem = std.mem; const assert = std.debug.assert; const wasm = std.wasm; @@ -14,13 +13,13 @@ comptime { } } -pub const vtable = Allocator.VTable{ +pub const vtable = mem.Allocator.VTable{ .alloc = alloc, .resize = resize, .free = free, }; -pub const Error = Allocator.Error; +pub const Error = mem.Allocator.Error; const max_usize = math.maxInt(usize); const ushift = math.Log2Int(usize); @@ -47,7 +46,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[* _ = ctx; _ = return_address; // Make room for the freelist next pointer. - const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); + const alignment = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_align)); const actual_len = @max(len +| @sizeOf(usize), alignment); const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null; const class = math.log2(slot_size) - min_class; @@ -91,7 +90,7 @@ fn resize( _ = return_address; // We don't want to move anything from one size class to another, but we // can recover bytes in between powers of two. 
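The comment above is the heart of the wasm allocator's resize policy: every allocation lives in a power-of-two slot (with one usize reserved for the freelist pointer), so a resize can only succeed while the new length still rounds up to the same slot. A simplified sketch of that check; resizeStaysInSlot is a made-up name and it ignores the separate big-page classes handled later in this file:

    const std = @import("std");

    /// True when `new_len` still fits the power-of-two slot chosen for `old_len`.
    fn resizeStaysInSlot(old_len: usize, new_len: usize, buf_align: usize) bool {
        const old_actual = @max(old_len + @sizeOf(usize), buf_align);
        const new_actual = @max(new_len +| @sizeOf(usize), buf_align);
        const old_slot = std.math.ceilPowerOfTwoAssert(usize, old_actual);
        const new_slot = std.math.ceilPowerOfTwo(usize, new_actual) catch return false;
        return new_slot == old_slot;
    }

    test resizeStaysInSlot {
        try std.testing.expect(resizeStaysInSlot(20, 24, 8)); // both round up to a 32-byte slot
        try std.testing.expect(!resizeStaysInSlot(20, 40, 8)); // 32 -> 64 would change size class
    }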
- const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); + const buf_align = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_buf_align)); const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align); const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align); const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len); @@ -116,7 +115,7 @@ fn free( ) void { _ = ctx; _ = return_address; - const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); + const buf_align = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_buf_align)); const actual_len = @max(buf.len + @sizeOf(usize), buf_align); const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len); const class = math.log2(slot_size) - min_class; @@ -157,7 +156,7 @@ fn allocBigPages(n: usize) usize { return @as(usize, @intCast(page_index)) * wasm.page_size; } -const test_ally = Allocator{ +const test_ally = mem.Allocator{ .ptr = undefined, .vtable = &vtable, }; @@ -310,6 +309,6 @@ test "objects of size 1024 and 2048" { } test "standard allocator tests" { - try std.heap.testAllocator(test_ally); - try std.heap.testAllocatorAligned(test_ally); + try std.alloc.testAllocator(test_ally); + try std.alloc.testAllocatorAligned(test_ally); } diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/alloc/WasmPage.zig similarity index 99% rename from lib/std/heap/WasmPageAllocator.zig rename to lib/std/alloc/WasmPage.zig index ca625e43ed6b..acb71fbcba12 100644 --- a/lib/std/heap/WasmPageAllocator.zig +++ b/lib/std/alloc/WasmPage.zig @@ -1,7 +1,6 @@ const WasmPageAllocator = @This(); const std = @import("../std.zig"); const builtin = @import("builtin"); -const Allocator = std.mem.Allocator; const mem = std.mem; const maxInt = std.math.maxInt; const assert = std.debug.assert; @@ -12,7 +11,7 @@ comptime { } } -pub const vtable = Allocator.VTable{ +pub const vtable = mem.Allocator.VTable{ .alloc = alloc, .resize = resize, .free = free, diff --git a/lib/std/alloc/arena.zig b/lib/std/alloc/arena.zig new file mode 100644 index 000000000000..7480d4c2a1f6 --- /dev/null +++ b/lib/std/alloc/arena.zig @@ -0,0 +1,285 @@ +//! This allocator takes an existing allocator, wraps it, and provides an interface +//! where you can allocate without freeing, and then free it all together. +child_allocator: mem.Allocator, +state: State, + +const Arena = @This(); + +/// Inner state of Allocator. Can be stored rather than the entire Allocator +/// as a memory-saving optimization. +pub const State = struct { + buffer_list: std.SinglyLinkedList(usize) = .{}, + end_index: usize = 0, + + pub fn promote(self: State, child_allocator: mem.Allocator) Arena { + return .{ + .child_allocator = child_allocator, + .state = self, + }; + } +}; + +pub fn allocator(self: *Arena) mem.Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .free = free, + }, + }; +} + +const BufNode = std.SinglyLinkedList(usize).Node; + +pub fn init(child_allocator: mem.Allocator) Arena { + return (State{}).promote(child_allocator); +} + +pub fn deinit(self: Arena) void { + // NOTE: When changing this, make sure `reset()` is adjusted accordingly! 
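The State doc comment near the top of this hunk describes storing only the arena's State and promoting it on demand as a memory-saving optimization. A sketch of that pattern; Thing and its methods are invented for illustration:

    const std = @import("std");

    const Thing = struct {
        // Storing only the State avoids keeping a copy of the child allocator here;
        // promote() rebuilds a full Arena whenever one is needed.
        arena_state: std.alloc.Arena.State = .{},

        fn addScratch(self: *Thing, gpa: std.mem.Allocator, n: usize) ![]u8 {
            var arena = self.arena_state.promote(gpa);
            defer self.arena_state = arena.state;
            return arena.allocator().alloc(u8, n);
        }

        fn deinit(self: *Thing, gpa: std.mem.Allocator) void {
            self.arena_state.promote(gpa).deinit();
        }
    };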
+ + var it = self.state.buffer_list.first; + while (it) |node| { + // this has to occur before the free because the free frees node + const next_it = node.next; + const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); + const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; + self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); + it = next_it; + } +} + +pub const ResetMode = union(enum) { + /// Releases all allocated memory in the arena. + free_all, + /// This will pre-heat the arena for future allocations by allocating a + /// large enough buffer for all previously done allocations. + /// Preheating will speed up the allocation process by invoking the backing allocator + /// less often than before. If `reset()` is used in a loop, this means that after the + /// biggest operation, no memory allocations are performed anymore. + retain_capacity, + /// This is the same as `retain_capacity`, but the memory will be shrunk to + /// this value if it exceeds the limit. + retain_with_limit: usize, +}; +/// Queries the current memory use of this arena. +/// This will **not** include the storage required for internal keeping. +pub fn queryCapacity(self: Arena) usize { + var size: usize = 0; + var it = self.state.buffer_list.first; + while (it) |node| : (it = node.next) { + // Compute the actually allocated size excluding the + // linked list node. + size += node.data - @sizeOf(BufNode); + } + return size; +} +/// Resets the arena allocator and frees all allocated memory. +/// +/// `mode` defines how the currently allocated memory is handled. +/// See the variant documentation for `ResetMode` for the effects of each mode. +/// +/// The function will return whether the reset operation was successful or not. +/// If the reallocation failed `false` is returned. The arena will still be fully +/// functional in that case, all memory is released. Future allocations just might +/// be slower. +/// +/// NOTE: If `mode` is `free_all`, the function will always return `true`. +pub fn reset(self: *Arena, mode: ResetMode) bool { + // Some words on the implementation: + // The reset function can be implemented with two basic approaches: + // - Counting how much bytes were allocated since the last reset, and storing that + // information in State. This will make reset fast and alloc only a teeny tiny bit + // slower. + // - Counting how much bytes were allocated by iterating the chunk linked list. This + // will make reset slower, but alloc() keeps the same speed when reset() as if reset() + // would not exist. + // + // The second variant was chosen for implementation, as with more and more calls to reset(), + // the function will get faster and faster. At one point, the complexity of the function + // will drop to amortized O(1), as we're only ever having a single chunk that will not be + // reallocated, and we're not even touching the backing allocator anymore. + // + // Thus, only the first hand full of calls to reset() will actually need to iterate the linked + // list, all future calls are just taking the first node, and only resetting the `end_index` + // value. 
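The amortized behaviour described in the comment above is what makes reset-in-a-loop cheap: after the first few iterations the arena settles on a single buffer sized for the largest round and stops calling the child allocator. A usage sketch; processAll and its per-iteration work are hypothetical:

    const std = @import("std");

    fn processAll(gpa: std.mem.Allocator, inputs: []const []const u8) !void {
        var arena = std.alloc.Arena.init(gpa);
        defer arena.deinit();

        for (inputs) |input| {
            // Scratch allocations live only for this iteration.
            const scratch = try arena.allocator().dupe(u8, input);
            _ = scratch;
            // Keep the capacity from the biggest iteration so later rounds
            // can skip the child allocator entirely.
            _ = arena.reset(.retain_capacity);
        }
    }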
+ const requested_capacity = switch (mode) { + .retain_capacity => self.queryCapacity(), + .retain_with_limit => |limit| @min(limit, self.queryCapacity()), + .free_all => 0, + }; + if (requested_capacity == 0) { + // just reset when we don't have anything to reallocate + self.deinit(); + self.state = State{}; + return true; + } + const total_size = requested_capacity + @sizeOf(BufNode); + const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); + // Free all nodes except for the last one + var it = self.state.buffer_list.first; + const maybe_first_node = while (it) |node| { + // this has to occur before the free because the free frees node + const next_it = node.next; + if (next_it == null) + break node; + const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; + self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); + it = next_it; + } else null; + std.debug.assert(maybe_first_node == null or maybe_first_node.?.next == null); + // reset the state before we try resizing the buffers, so we definitely have reset the arena to 0. + self.state.end_index = 0; + if (maybe_first_node) |first_node| { + self.state.buffer_list.first = first_node; + // perfect, no need to invoke the child_allocator + if (first_node.data == total_size) + return true; + const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data]; + if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) { + // successful resize + first_node.data = total_size; + } else { + // manual realloc + const new_ptr = self.child_allocator.rawAlloc(total_size, align_bits, @returnAddress()) orelse { + // we failed to preheat the arena properly, signal this to the user. + return false; + }; + self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress()); + const node: *BufNode = @ptrCast(@alignCast(new_ptr)); + node.* = .{ .data = total_size }; + self.state.buffer_list.first = node; + } + } + return true; +} + +fn createNode(self: *Arena, prev_len: usize, minimum_size: usize) ?*BufNode { + const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16); + const big_enough_len = prev_len + actual_min_size; + const len = big_enough_len + big_enough_len / 2; + const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); + const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse + return null; + const buf_node: *BufNode = @ptrCast(@alignCast(ptr)); + buf_node.* = .{ .data = len }; + self.state.buffer_list.prepend(buf_node); + self.state.end_index = 0; + return buf_node; +} + +fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { + const self: *Arena = @ptrCast(@alignCast(ctx)); + _ = ra; + + const ptr_align = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_ptr_align)); + var cur_node = if (self.state.buffer_list.first) |first_node| + first_node + else + (self.createNode(0, n + ptr_align) orelse return null); + while (true) { + const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data]; + const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; + const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index; + const adjusted_addr = mem.alignForward(usize, addr, ptr_align); + const adjusted_index = self.state.end_index + (adjusted_addr - addr); + const new_end_index = adjusted_index + n; + + if (new_end_index <= cur_buf.len) { + const result = cur_buf[adjusted_index..new_end_index]; + self.state.end_index = new_end_index; + return result.ptr; + } + + const bigger_buf_size = 
@sizeOf(BufNode) + new_end_index; + const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); + if (self.child_allocator.rawResize(cur_alloc_buf, log2_align, bigger_buf_size, @returnAddress())) { + cur_node.data = bigger_buf_size; + } else { + // Allocate a new node if that's not possible + cur_node = self.createNode(cur_buf.len, n + ptr_align) orelse return null; + } + } +} + +fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { + const self: *Arena = @ptrCast(@alignCast(ctx)); + _ = log2_buf_align; + _ = ret_addr; + + const cur_node = self.state.buffer_list.first orelse return false; + const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; + if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) { + // It's not the most recent allocation, so it cannot be expanded, + // but it's fine if they want to make it smaller. + return new_len <= buf.len; + } + + if (buf.len >= new_len) { + self.state.end_index -= buf.len - new_len; + return true; + } else if (cur_buf.len - self.state.end_index >= new_len - buf.len) { + self.state.end_index += new_len - buf.len; + return true; + } else { + return false; + } +} + +fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { + _ = log2_buf_align; + _ = ret_addr; + + const self: *Arena = @ptrCast(@alignCast(ctx)); + + const cur_node = self.state.buffer_list.first orelse return; + const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; + + if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) { + self.state.end_index -= buf.len; + } +} + +test "reset with preheating" { + var arena_allocator = init(std.testing.allocator); + defer arena_allocator.deinit(); + // provides some variance in the allocated data + var rng_src = std.Random.DefaultPrng.init(std.testing.random_seed); + const random = rng_src.random(); + var rounds: usize = 25; + while (rounds > 0) { + rounds -= 1; + _ = arena_allocator.reset(.retain_capacity); + var alloced_bytes: usize = 0; + const total_size: usize = random.intRangeAtMost(usize, 256, 16384); + while (alloced_bytes < total_size) { + const size = random.intRangeAtMost(usize, 16, 256); + const alignment = 32; + const slice = try arena_allocator.allocator().alignedAlloc(u8, alignment, size); + try std.testing.expect(std.mem.isAligned(@intFromPtr(slice.ptr), alignment)); + try std.testing.expectEqual(size, slice.len); + alloced_bytes += slice.len; + } + } +} + +test "reset while retaining a buffer" { + var arena_allocator = init(std.testing.allocator); + defer arena_allocator.deinit(); + const a = arena_allocator.allocator(); + + // Create two internal buffers + _ = try a.alloc(u8, 1); + _ = try a.alloc(u8, 1000); + + // Check that we have at least two buffers + try std.testing.expect(arena_allocator.state.buffer_list.first.?.next != null); + + // This retains the first allocated buffer + try std.testing.expect(arena_allocator.reset(.{ .retain_with_limit = 1 })); +} + +const std = @import("../std.zig"); +const assert = std.debug.assert; +const mem = std.mem; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/alloc/general_purpose.zig similarity index 96% rename from lib/std/heap/general_purpose_allocator.zig rename to lib/std/alloc/general_purpose.zig index b760c9d85de4..8bd1d8750f23 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/alloc/general_purpose.zig @@ -17,7 +17,7 @@ //! //! 
* Do not re-use memory slots, so that memory safety is upheld. For small //! allocations, this is handled here; for larger ones it is handled in the -//! backing allocator (by default `std.heap.page_allocator`). +//! backing allocator (by default `std.alloc.page_allocator`). //! //! * Make pointer math errors unlikely to harm memory from //! unrelated allocations. @@ -98,7 +98,6 @@ const log = std.log.scoped(.gpa); const math = std.math; const assert = std.debug.assert; const mem = std.mem; -const Allocator = std.mem.Allocator; const page_size = std.mem.page_size; const StackTrace = std.builtin.StackTrace; @@ -158,9 +157,9 @@ pub const Config = struct { pub const Check = enum { ok, leak }; /// Default initialization of this struct is deprecated; use `.init` instead. -pub fn GeneralPurposeAllocator(comptime config: Config) type { +pub fn Allocator(comptime config: Config) type { return struct { - backing_allocator: Allocator = std.heap.page_allocator, + backing_allocator: mem.Allocator = std.heap.page_allocator, buckets: [small_bucket_count]Buckets = [1]Buckets{Buckets{}} ** small_bucket_count, cur_buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count, large_allocations: LargeAllocTable = .{}, @@ -175,7 +174,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const Self = @This(); - /// The initial state of a `GeneralPurposeAllocator`, containing no allocations and backed by the system page allocator. + /// The initial state of a `Allocator`, containing no allocations and backed by the system page allocator. pub const init: Self = .{ .backing_allocator = std.heap.page_allocator, .buckets = [1]Buckets{.{}} ** small_bucket_count, @@ -316,7 +315,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } }; - pub fn allocator(self: *Self) Allocator { + pub fn allocator(self: *Self) mem.Allocator { return .{ .ptr = self, .vtable = &.{ @@ -722,7 +721,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { ret_addr: usize, ) bool { const self: *Self = @ptrCast(@alignCast(ctx)); - const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8)); + const log2_old_align = @as(mem.Allocator.Log2Align, @intCast(log2_old_align_u8)); self.mutex.lock(); defer self.mutex.unlock(); @@ -840,7 +839,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { ret_addr: usize, ) void { const self: *Self = @ptrCast(@alignCast(ctx)); - const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8)); + const log2_old_align = @as(mem.Allocator.Log2Align, @intCast(log2_old_align_u8)); self.mutex.lock(); defer self.mutex.unlock(); @@ -982,16 +981,16 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.mutex.lock(); defer self.mutex.unlock(); if (!self.isAllocationAllowed(len)) return null; - return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null; + return allocInner(self, len, @as(mem.Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null; } fn allocInner( self: *Self, len: usize, - log2_ptr_align: Allocator.Log2Align, + log2_ptr_align: mem.Allocator.Log2Align, ret_addr: usize, - ) Allocator.Error![*]u8 { - const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align))); + ) mem.Allocator.Error![*]u8 { + const new_aligned_size = @max(len, @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_ptr_align))); if (new_aligned_size > largest_bucket_object_size) { try 
self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1); const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse @@ -1065,7 +1064,7 @@ const TraceKind = enum { const test_config = Config{}; test "small allocations - free in same order" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1084,7 +1083,7 @@ test "small allocations - free in same order" { } test "small allocations - free in reverse order" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1103,7 +1102,7 @@ test "small allocations - free in reverse order" { } test "large allocations" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1116,7 +1115,7 @@ test "large allocations" { } test "very large allocation" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1124,7 +1123,7 @@ test "very large allocation" { } test "realloc" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1146,7 +1145,7 @@ test "realloc" { } test "shrink" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1171,7 +1170,7 @@ test "shrink" { } test "large object - grow" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1189,7 +1188,7 @@ test "large object - grow" { } test "realloc small object to large object" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1206,7 +1205,7 @@ test "realloc small object to large object" { } test "shrink large object to large object" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1231,7 +1230,7 @@ test "shrink large object to large object" { } test "shrink large object to large object with larger alignment" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1267,7 +1266,7 @@ test "shrink large object to large object with larger alignment" { } test "realloc large object to small object" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1282,7 +1281,7 @@ test "realloc large object to small object" { } test "overridable mutexes" { - var gpa = GeneralPurposeAllocator(.{ .MutexType = 
std.Thread.Mutex }){ + var gpa = Allocator(.{ .MutexType = std.Thread.Mutex }){ .backing_allocator = std.testing.allocator, .mutex = std.Thread.Mutex{}, }; @@ -1294,7 +1293,7 @@ test "overridable mutexes" { } test "non-page-allocator backing allocator" { - var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator }; + var gpa = Allocator(.{}){ .backing_allocator = std.testing.allocator }; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1303,7 +1302,7 @@ test "non-page-allocator backing allocator" { } test "realloc large object to larger alignment" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1345,7 +1344,7 @@ test "realloc large object to larger alignment" { test "large object shrinks to small but allocation fails during shrink" { var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, .{ .fail_index = 3 }); - var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.allocator() }; + var gpa = Allocator(.{}){ .backing_allocator = failing_allocator.allocator() }; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1354,7 +1353,7 @@ test "large object shrinks to small but allocation fails during shrink" { slice[0] = 0x12; slice[3] = 0x34; - // Next allocation will fail in the backing allocator of the GeneralPurposeAllocator + // Next allocation will fail in the backing allocator of the Allocator try std.testing.expect(allocator.resize(slice, 4)); slice = slice[0..4]; @@ -1363,7 +1362,7 @@ test "large object shrinks to small but allocation fails during shrink" { } test "objects of size 1024 and 2048" { - var gpa = GeneralPurposeAllocator(test_config){}; + var gpa = Allocator(test_config){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1375,7 +1374,7 @@ test "objects of size 1024 and 2048" { } test "setting a memory cap" { - var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; + var gpa = Allocator(.{ .enable_memory_limit = true }){}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1402,10 +1401,10 @@ test "setting a memory cap" { test "double frees" { // use a GPA to back a GPA to check for leaks of the latter's metadata - var backing_gpa = GeneralPurposeAllocator(.{ .safety = true }){}; + var backing_gpa = Allocator(.{ .safety = true }){}; defer std.testing.expect(backing_gpa.deinit() == .ok) catch @panic("leak"); - const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true }); + const GPA = Allocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true }); var gpa = GPA{ .backing_allocator = backing_gpa.allocator() }; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1441,7 +1440,7 @@ test "double frees" { } test "empty bucket size class" { - const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true }); + const GPA = Allocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true }); var gpa = GPA{}; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); @@ -1459,7 +1458,7 @@ test "empty bucket size class" { test 
"bug 9995 fix, large allocs count requested size not backing size" { // with AtLeast, buffer likely to be larger than requested, especially when shrinking - var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; + var gpa = Allocator(.{ .enable_memory_limit = true }){}; const allocator = gpa.allocator(); var buf = try allocator.alignedAlloc(u8, 1, page_size + 1); @@ -1471,7 +1470,7 @@ test "bug 9995 fix, large allocs count requested size not backing size" { } test "retain metadata and never unmap" { - var gpa = std.heap.GeneralPurposeAllocator(.{ + var gpa = Allocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true, diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/alloc/log_to_writer.zig similarity index 76% rename from lib/std/heap/log_to_writer_allocator.zig rename to lib/std/alloc/log_to_writer.zig index b5c86c9bebbf..1df98db1cb63 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/alloc/log_to_writer.zig @@ -1,23 +1,23 @@ const std = @import("../std.zig"); -const Allocator = std.mem.Allocator; +const mem = std.mem; /// This allocator is used in front of another allocator and logs to the provided writer /// on every call to the allocator. Writer errors are ignored. -pub fn LogToWriterAllocator(comptime Writer: type) type { +pub fn Allocator(comptime Writer: type) type { return struct { - parent_allocator: Allocator, + parent_allocator: mem.Allocator, writer: Writer, const Self = @This(); - pub fn init(parent_allocator: Allocator, writer: Writer) Self { + pub fn init(parent_allocator: mem.Allocator, writer: Writer) Self { return Self{ .parent_allocator = parent_allocator, .writer = writer, }; } - pub fn allocator(self: *Self) Allocator { + pub fn allocator(self: *Self) mem.Allocator { return .{ .ptr = self, .vtable = &.{ @@ -86,27 +86,27 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { /// This allocator is used in front of another allocator and logs to the provided writer /// on every call to the allocator. Writer errors are ignored. -pub fn logToWriterAllocator( - parent_allocator: Allocator, +pub fn allocator( + parent_allocator: mem.Allocator, writer: anytype, -) LogToWriterAllocator(@TypeOf(writer)) { - return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer); +) Allocator(@TypeOf(writer)) { + return Allocator(@TypeOf(writer)).init(parent_allocator, writer); } -test "LogToWriterAllocator" { +test Allocator { var log_buf: [255]u8 = undefined; var fbs = std.io.fixedBufferStream(&log_buf); var allocator_buf: [10]u8 = undefined; - var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); - var allocator_state = logToWriterAllocator(fixedBufferAllocator.allocator(), fbs.writer()); - const allocator = allocator_state.allocator(); + var fixedBufferAllocator = std.mem.validationWrap(std.alloc.FixedBuffer.init(&allocator_buf)); + var allocator_state = allocator(fixedBufferAllocator.allocator(), fbs.writer()); + const alloc = allocator_state.allocator(); - var a = try allocator.alloc(u8, 10); - try std.testing.expect(allocator.resize(a, 5)); + var a = try alloc.alloc(u8, 10); + try std.testing.expect(alloc.resize(a, 5)); a = a[0..5]; - try std.testing.expect(!allocator.resize(a, 20)); - allocator.free(a); + try std.testing.expect(!alloc.resize(a, 20)); + alloc.free(a); try std.testing.expectEqualSlices(u8, \\alloc : 10 success! 
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/alloc/logging.zig similarity index 84% rename from lib/std/heap/logging_allocator.zig rename to lib/std/alloc/logging.zig index 706f2ac544e3..04745891aa85 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/alloc/logging.zig @@ -1,20 +1,20 @@ const std = @import("../std.zig"); -const Allocator = std.mem.Allocator; +const mem = std.mem; /// This allocator is used in front of another allocator and logs to `std.log` /// on every call to the allocator. -/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator` -pub fn LoggingAllocator( +/// For logging to a `std.io.Writer` see `std.alloc.LogToWriter` +pub fn Allocator( comptime success_log_level: std.log.Level, comptime failure_log_level: std.log.Level, ) type { - return ScopedLoggingAllocator(.default, success_log_level, failure_log_level); + return ScopedAllocator(.default, success_log_level, failure_log_level); } /// This allocator is used in front of another allocator and logs to `std.log` /// with the given scope on every call to the allocator. -/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator` -pub fn ScopedLoggingAllocator( +/// For logging to a `std.io.Writer` see `std.alloc.LogToWriter.Allocator` +pub fn ScopedAllocator( comptime scope: @Type(.enum_literal), comptime success_log_level: std.log.Level, comptime failure_log_level: std.log.Level, @@ -22,17 +22,17 @@ pub fn ScopedLoggingAllocator( const log = std.log.scoped(scope); return struct { - parent_allocator: Allocator, + parent_allocator: mem.Allocator, const Self = @This(); - pub fn init(parent_allocator: Allocator) Self { + pub fn init(parent_allocator: mem.Allocator) Self { return .{ .parent_allocator = parent_allocator, }; } - pub fn allocator(self: *Self) Allocator { + pub fn allocator(self: *Self) mem.Allocator { return .{ .ptr = self, .vtable = &.{ @@ -127,7 +127,7 @@ pub fn ScopedLoggingAllocator( /// This allocator is used in front of another allocator and logs to `std.log` /// on every call to the allocator. -/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator` -pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) { - return LoggingAllocator(.debug, .err).init(parent_allocator); +/// For logging to a `std.io.Writer` see `std.alloc.LogToWriter.Allocator` +pub fn allocator(parent_allocator: mem.Allocator) Allocator(.debug, .err) { + return Allocator(.debug, .err).init(parent_allocator); } diff --git a/lib/std/heap/memory_pool.zig b/lib/std/alloc/memory_pool.zig similarity index 78% rename from lib/std/heap/memory_pool.zig rename to lib/std/alloc/memory_pool.zig index 5bfc4a60f6ce..cc1ad571b19d 100644 --- a/lib/std/heap/memory_pool.zig +++ b/lib/std/alloc/memory_pool.zig @@ -2,23 +2,20 @@ const std = @import("../std.zig"); const debug_mode = @import("builtin").mode == .Debug; -pub const MemoryPoolError = error{OutOfMemory}; +pub const Error = error{OutOfMemory}; /// A memory pool that can allocate objects of a single type very quickly. /// Use this when you need to allocate a lot of objects of the same type, /// because It outperforms general purpose allocators. -pub fn MemoryPool(comptime Item: type) type { - return MemoryPoolAligned(Item, @alignOf(Item)); +pub fn Auto(comptime Item: type) type { + return Aligned(Item, @alignOf(Item)); } -/// A memory pool that can allocate objects of a single type very quickly. 
-/// Use this when you need to allocate a lot of objects of the same type, -/// because It outperforms general purpose allocators. -pub fn MemoryPoolAligned(comptime Item: type, comptime alignment: u29) type { +pub fn Aligned(comptime Item: type, comptime alignment: u29) type { if (@alignOf(Item) == alignment) { - return MemoryPoolExtra(Item, .{}); + return Extra(Item, .{}); } else { - return MemoryPoolExtra(Item, .{ .alignment = alignment }); + return Extra(Item, .{ .alignment = alignment }); } } @@ -31,10 +28,7 @@ pub const Options = struct { growable: bool = true, }; -/// A memory pool that can allocate objects of a single type very quickly. -/// Use this when you need to allocate a lot of objects of the same type, -/// because It outperforms general purpose allocators. -pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type { +pub fn Extra(comptime Item: type, comptime pool_options: Options) type { return struct { const Pool = @This(); @@ -55,18 +49,18 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type const NodePtr = *align(item_alignment) Node; const ItemPtr = *align(item_alignment) Item; - arena: std.heap.ArenaAllocator, + arena: std.alloc.Arena, free_list: ?NodePtr = null, /// Creates a new memory pool. pub fn init(allocator: std.mem.Allocator) Pool { - return .{ .arena = std.heap.ArenaAllocator.init(allocator) }; + return .{ .arena = std.alloc.Arena.init(allocator) }; } /// Creates a new memory pool and pre-allocates `initial_size` items. /// This allows the up to `initial_size` active allocations before a /// `OutOfMemory` error happens when calling `create()`. - pub fn initPreheated(allocator: std.mem.Allocator, initial_size: usize) MemoryPoolError!Pool { + pub fn initPreheated(allocator: std.mem.Allocator, initial_size: usize) Error!Pool { var pool = init(allocator); errdefer pool.deinit(); @@ -89,7 +83,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type pool.* = undefined; } - pub const ResetMode = std.heap.ArenaAllocator.ResetMode; + pub const ResetMode = std.alloc.Arena.ResetMode; /// Resets the memory pool and destroys all allocated items. /// This can be used to batch-destroy all objects without invalidating the memory pool. 
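The renames in this file are mechanical: MemoryPool becomes Auto, MemoryPoolAligned becomes Aligned, MemoryPoolExtra becomes Extra, and MemoryPoolError becomes Error, while the signatures stay put. A minimal sketch of the post-rename API, not part of the patch, assuming std.alloc.MemoryPool re-exports these declarations as this diff describes:

const std = @import("std");

test "renamed memory pool entry points" {
    const Node = struct { value: u64, next: ?*@This() = null };

    // std.alloc.MemoryPool.Auto(T) is the former std.heap.MemoryPool(T);
    // initPreheated, create, and deinit keep their existing signatures.
    var pool = try std.alloc.MemoryPool.Auto(Node).initPreheated(std.testing.allocator, 2);
    defer pool.deinit();

    const a = try pool.create();
    const b = try pool.create();
    b.* = .{ .value = 2 };
    a.* = .{ .value = 1, .next = b };

    try std.testing.expect(a.next.?.value == 2);
}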
@@ -138,7 +132,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type pool.free_list = node; } - fn allocNew(pool: *Pool) MemoryPoolError!*align(item_alignment) [item_size]u8 { + fn allocNew(pool: *Pool) Error!*align(item_alignment) [item_size]u8 { const mem = try pool.arena.allocator().alignedAlloc(u8, item_alignment, item_size); return mem[0..item_size]; // coerce slice to array pointer } @@ -146,7 +140,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type } test "basic" { - var pool = MemoryPool(u32).init(std.testing.allocator); + var pool = Auto(u32).init(std.testing.allocator); defer pool.deinit(); const p1 = try pool.create(); @@ -166,7 +160,7 @@ test "basic" { } test "preheating (success)" { - var pool = try MemoryPool(u32).initPreheated(std.testing.allocator, 4); + var pool = try Auto(u32).initPreheated(std.testing.allocator, 4); defer pool.deinit(); _ = try pool.create(); @@ -176,11 +170,11 @@ test "preheating (success)" { test "preheating (failure)" { const failer = std.testing.failing_allocator; - try std.testing.expectError(error.OutOfMemory, MemoryPool(u32).initPreheated(failer, 5)); + try std.testing.expectError(error.OutOfMemory, Auto(u32).initPreheated(failer, 5)); } test "growable" { - var pool = try MemoryPoolExtra(u32, .{ .growable = false }).initPreheated(std.testing.allocator, 4); + var pool = try Extra(u32, .{ .growable = false }).initPreheated(std.testing.allocator, 4); defer pool.deinit(); _ = try pool.create(); @@ -196,7 +190,7 @@ test "greater than pointer default alignment" { data: u64 align(16), }; - var pool = MemoryPool(Foo).init(std.testing.allocator); + var pool = Auto(Foo).init(std.testing.allocator); defer pool.deinit(); const foo: *Foo = try pool.create(); @@ -208,7 +202,7 @@ test "greater than pointer manual alignment" { data: u64, }; - var pool = MemoryPoolAligned(Foo, 16).init(std.testing.allocator); + var pool = Aligned(Foo, 16).init(std.testing.allocator); defer pool.deinit(); const foo: *align(16) Foo = try pool.create(); diff --git a/lib/std/heap/sbrk_allocator.zig b/lib/std/alloc/sbrk.zig similarity index 93% rename from lib/std/heap/sbrk_allocator.zig rename to lib/std/alloc/sbrk.zig index 08933fed5257..2d8be3ab83dc 100644 --- a/lib/std/heap/sbrk_allocator.zig +++ b/lib/std/alloc/sbrk.zig @@ -1,19 +1,18 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); const math = std.math; -const Allocator = std.mem.Allocator; const mem = std.mem; const assert = std.debug.assert; -pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type { +pub fn Allocator(comptime sbrk: *const fn (n: usize) usize) type { return struct { - pub const vtable: Allocator.VTable = .{ + pub const vtable: mem.Allocator.VTable = .{ .alloc = alloc, .resize = resize, .free = free, }; - pub const Error = Allocator.Error; + pub const Error = mem.Allocator.Error; const max_usize = math.maxInt(usize); const ushift = math.Log2Int(usize); @@ -44,7 +43,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type { lock.lock(); defer lock.unlock(); // Make room for the freelist next pointer. 
- const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); + const alignment = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_align)); const actual_len = @max(len +| @sizeOf(usize), alignment); const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null; const class = math.log2(slot_size) - min_class; @@ -91,7 +90,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type { defer lock.unlock(); // We don't want to move anything from one size class to another, but we // can recover bytes in between powers of two. - const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); + const buf_align = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_buf_align)); const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align); const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align); const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len); @@ -118,7 +117,7 @@ pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type { _ = return_address; lock.lock(); defer lock.unlock(); - const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align)); + const buf_align = @as(usize, 1) << @as(mem.Allocator.Log2Align, @intCast(log2_buf_align)); const actual_len = @max(buf.len + @sizeOf(usize), buf_align); const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len); const class = math.log2(slot_size) - min_class; diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 3d19d8daa6b2..09c1807357a2 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -8,259 +8,64 @@ const c = std.c; const Allocator = std.mem.Allocator; const windows = std.os.windows; -pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator; -pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator; -pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator; -pub const LogToWriterAllocator = @import("heap/log_to_writer_allocator.zig").LogToWriterAllocator; -pub const logToWriterAllocator = @import("heap/log_to_writer_allocator.zig").logToWriterAllocator; -pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator; -pub const GeneralPurposeAllocatorConfig = @import("heap/general_purpose_allocator.zig").Config; -pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator; -pub const Check = @import("heap/general_purpose_allocator.zig").Check; -pub const WasmAllocator = @import("heap/WasmAllocator.zig"); -pub const WasmPageAllocator = @import("heap/WasmPageAllocator.zig"); -pub const PageAllocator = @import("heap/PageAllocator.zig"); -pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig"); -pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator; - -const memory_pool = @import("heap/memory_pool.zig"); -pub const MemoryPool = memory_pool.MemoryPool; -pub const MemoryPoolAligned = memory_pool.MemoryPoolAligned; -pub const MemoryPoolExtra = memory_pool.MemoryPoolExtra; -pub const MemoryPoolOptions = memory_pool.Options; - -/// TODO Utilize this on Windows. 
-pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null; - -const CAllocator = struct { - comptime { - if (!builtin.link_libc) { - @compileError("C allocator is only available when linking against libc"); - } - } - - pub const supports_malloc_size = @TypeOf(malloc_size) != void; - pub const malloc_size = if (@TypeOf(c.malloc_size) != void) - c.malloc_size - else if (@TypeOf(c.malloc_usable_size) != void) - c.malloc_usable_size - else if (@TypeOf(c._msize) != void) - c._msize - else {}; - - pub const supports_posix_memalign = switch (builtin.os.tag) { - .dragonfly, .netbsd, .freebsd, .solaris, .openbsd, .linux, .macos, .ios, .tvos, .watchos, .visionos => true, - else => false, - }; - - fn getHeader(ptr: [*]u8) *[*]u8 { - return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize))); - } - - fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 { - const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); - if (supports_posix_memalign) { - // The posix_memalign only accepts alignment values that are a - // multiple of the pointer size - const eff_alignment = @max(alignment, @sizeOf(usize)); - - var aligned_ptr: ?*anyopaque = undefined; - if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0) - return null; - - return @as([*]u8, @ptrCast(aligned_ptr)); - } - - // Thin wrapper around regular malloc, overallocate to account for - // alignment padding and store the original malloc()'ed pointer before - // the aligned address. - const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null)); - const unaligned_addr = @intFromPtr(unaligned_ptr); - const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); - const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); - getHeader(aligned_ptr).* = unaligned_ptr; - - return aligned_ptr; - } - - fn alignedFree(ptr: [*]u8) void { - if (supports_posix_memalign) { - return c.free(ptr); - } - - const unaligned_ptr = getHeader(ptr).*; - c.free(unaligned_ptr); - } - - fn alignedAllocSize(ptr: [*]u8) usize { - if (supports_posix_memalign) { - return CAllocator.malloc_size(ptr); - } - - const unaligned_ptr = getHeader(ptr).*; - const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr); - return CAllocator.malloc_size(unaligned_ptr) - delta; - } - - fn alloc( - _: *anyopaque, - len: usize, - log2_align: u8, - return_address: usize, - ) ?[*]u8 { - _ = return_address; - assert(len > 0); - return alignedAlloc(len, log2_align); - } - - fn resize( - _: *anyopaque, - buf: []u8, - log2_buf_align: u8, - new_len: usize, - return_address: usize, - ) bool { - _ = log2_buf_align; - _ = return_address; - if (new_len <= buf.len) { - return true; - } - if (CAllocator.supports_malloc_size) { - const full_len = alignedAllocSize(buf.ptr); - if (new_len <= full_len) { - return true; - } - } - return false; - } - - fn free( - _: *anyopaque, - buf: []u8, - log2_buf_align: u8, - return_address: usize, - ) void { - _ = log2_buf_align; - _ = return_address; - alignedFree(buf.ptr); - } -}; - -/// Supports the full Allocator interface, including alignment, and exploiting -/// `malloc_usable_size` if available. For an allocator that directly calls -/// `malloc`/`free`, see `raw_c_allocator`. 
-pub const c_allocator = Allocator{ - .ptr = undefined, - .vtable = &c_allocator_vtable, -}; -const c_allocator_vtable = Allocator.VTable{ - .alloc = CAllocator.alloc, - .resize = CAllocator.resize, - .free = CAllocator.free, -}; - -/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls -/// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`. -/// This allocator is safe to use as the backing allocator with -/// `ArenaAllocator` for example and is more optimal in such a case -/// than `c_allocator`. -pub const raw_c_allocator = Allocator{ - .ptr = undefined, - .vtable = &raw_c_allocator_vtable, -}; -const raw_c_allocator_vtable = Allocator.VTable{ - .alloc = rawCAlloc, - .resize = rawCResize, - .free = rawCFree, -}; - -fn rawCAlloc( - _: *anyopaque, - len: usize, - log2_ptr_align: u8, - ret_addr: usize, -) ?[*]u8 { - _ = ret_addr; - assert(log2_ptr_align <= comptime std.math.log2_int(usize, @alignOf(std.c.max_align_t))); - // Note that this pointer cannot be aligncasted to max_align_t because if - // len is < max_align_t then the alignment can be smaller. For example, if - // max_align_t is 16, but the user requests 8 bytes, there is no built-in - // type in C that is size 8 and has 16 byte alignment, so the alignment may - // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc - // is allowed to return a 1-byte aligned pointer. - return @as(?[*]u8, @ptrCast(c.malloc(len))); -} - -fn rawCResize( - _: *anyopaque, - buf: []u8, - log2_old_align: u8, - new_len: usize, - ret_addr: usize, -) bool { - _ = log2_old_align; - _ = ret_addr; - - if (new_len <= buf.len) - return true; - - if (CAllocator.supports_malloc_size) { - const full_len = CAllocator.malloc_size(buf.ptr); - if (new_len <= full_len) return true; - } - - return false; -} - -fn rawCFree( - _: *anyopaque, - buf: []u8, - log2_old_align: u8, - ret_addr: usize, -) void { - _ = log2_old_align; - _ = ret_addr; - c.free(buf.ptr); -} - -/// This allocator makes a syscall directly for every allocation and free. -/// Thread-safe and lock-free. -pub const page_allocator = if (@hasDecl(root, "os") and - @hasDecl(root.os, "heap") and - @hasDecl(root.os.heap, "page_allocator")) - root.os.heap.page_allocator -else if (builtin.target.isWasm()) - Allocator{ - .ptr = undefined, - .vtable = &WasmPageAllocator.vtable, - } -else if (builtin.target.os.tag == .plan9) - Allocator{ - .ptr = undefined, - .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable, - } -else - Allocator{ - .ptr = undefined, - .vtable = &PageAllocator.vtable, - }; - -/// This allocator is fast, small, and specific to WebAssembly. In the future, -/// this will be the implementation automatically selected by -/// `GeneralPurposeAllocator` when compiling in `ReleaseSmall` mode for wasm32 -/// and wasm64 architectures. -/// Until then, it is available here to play with. 
-pub const wasm_allocator = Allocator{ - .ptr = undefined, - .vtable = &std.heap.WasmAllocator.vtable, -}; - -/// Verifies that the adjusted length will still map to the full length -pub fn alignPageAllocLen(full_len: usize, len: usize) usize { - const aligned_len = mem.alignAllocLen(full_len, len); - assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len); - return aligned_len; -} +pub const alloc = @import("alloc.zig"); + +/// This has moved into std.alloc and is provided here only for compatibility +pub const LoggingAllocator = alloc.Logging.Allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const loggingAllocator = alloc.Logging.allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const ScopedLoggingAllocator = alloc.Logging.ScopedAllocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const LogToWriterAllocator = alloc.LogToWriter.Allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const logToWriterAllocator = alloc.LogToWriter.allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const ArenaAllocator = alloc.Arena; +/// This has moved into std.alloc and is provided here only for compatibility +pub const GeneralPurposeAllocatorConfig = alloc.GeneralPurpose.Config; +/// This has moved into std.alloc and is provided here only for compatibility +pub const GeneralPurposeAllocator = alloc.GeneralPurpose.Allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const Check = alloc.GeneralPurpose.Check; +/// This has moved into std.alloc and is provided here only for compatibility +pub const WasmAllocator = alloc.Wasm; +/// This has moved into std.alloc and is provided here only for compatibility +pub const WasmPageAllocator = alloc.WasmPage; +/// This has moved into std.alloc and is provided here only for compatibility +pub const PageAllocator = alloc.Page; +/// This has moved into std.alloc and is provided here only for compatibility +pub const ThreadSafeAllocator = alloc.ThreadSafe; +/// This has moved into std.alloc and is provided here only for compatibility +pub const SbrkAllocator = alloc.Sbrk.Allocator; + +/// This has moved into std.alloc and is provided here only for compatibility +pub const MemoryPool = alloc.MemoryPool.Auto; +/// This has moved into std.alloc and is provided here only for compatibility +pub const MemoryPoolAligned = alloc.MemoryPool.Aligned; +/// This has moved into std.alloc and is provided here only for compatibility +pub const MemoryPoolExtra = alloc.MemoryPool.Extra; +/// This has moved into std.alloc and is provided here only for compatibility +pub const MemoryPoolOptions = alloc.MemoryPool.Options; + +/// This has moved into std.alloc and is provided here only for compatibility +const CAllocator = alloc.CAllocator; + +/// This has moved into std.alloc and is provided here only for compatibility +pub const c_allocator = alloc.c_allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const raw_c_allocator = alloc.raw_c_allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const page_allocator = alloc.page_allocator; +/// This has moved into std.alloc and is provided here only for compatibility +pub const wasm_allocator = alloc.wasm_allocator; + +/// This has moved into std.alloc and is provided here only for compatibility 
+pub const FixedBufferAllocator = alloc.FixedBuffer; +/// This has moved into std.alloc and is provided here only for compatibility +pub const stackFallback = alloc.stackFallback; +/// This has moved into std.alloc and is provided here only for compatibility +pub const StackFallbackAllocator = alloc.StackFallbackAllocator; pub const HeapAllocator = switch (builtin.os.tag) { .windows => struct { @@ -278,7 +83,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { return .{ .ptr = self, .vtable = &.{ - .alloc = alloc, + .alloc = HeapAllocator.alloc, .resize = resize, .free = free, }, @@ -362,273 +167,6 @@ pub const HeapAllocator = switch (builtin.os.tag) { else => @compileError("Unsupported OS"), }; -fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool { - return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and - @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len); -} - -fn sliceContainsSlice(container: []u8, slice: []u8) bool { - return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and - (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len); -} - -pub const FixedBufferAllocator = struct { - end_index: usize, - buffer: []u8, - - pub fn init(buffer: []u8) FixedBufferAllocator { - return FixedBufferAllocator{ - .buffer = buffer, - .end_index = 0, - }; - } - - /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe - pub fn allocator(self: *FixedBufferAllocator) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .free = free, - }, - }; - } - - /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator` - /// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe - pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = threadSafeAlloc, - .resize = Allocator.noResize, - .free = Allocator.noFree, - }, - }; - } - - pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool { - return sliceContainsPtr(self.buffer, ptr); - } - - pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool { - return sliceContainsSlice(self.buffer, slice); - } - - /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index - /// then we won't be able to determine what the last allocation was. This is because - /// the alignForward operation done in alloc is not reversible. 
- pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool { - return buf.ptr + buf.len == self.buffer.ptr + self.end_index; - } - - fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); - _ = ra; - const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); - const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null; - const adjusted_index = self.end_index + adjust_off; - const new_end_index = adjusted_index + n; - if (new_end_index > self.buffer.len) return null; - self.end_index = new_end_index; - return self.buffer.ptr + adjusted_index; - } - - fn resize( - ctx: *anyopaque, - buf: []u8, - log2_buf_align: u8, - new_size: usize, - return_address: usize, - ) bool { - const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); - _ = log2_buf_align; - _ = return_address; - assert(@inComptime() or self.ownsSlice(buf)); - - if (!self.isLastAllocation(buf)) { - if (new_size > buf.len) return false; - return true; - } - - if (new_size <= buf.len) { - const sub = buf.len - new_size; - self.end_index -= sub; - return true; - } - - const add = new_size - buf.len; - if (add + self.end_index > self.buffer.len) return false; - - self.end_index += add; - return true; - } - - fn free( - ctx: *anyopaque, - buf: []u8, - log2_buf_align: u8, - return_address: usize, - ) void { - const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); - _ = log2_buf_align; - _ = return_address; - assert(@inComptime() or self.ownsSlice(buf)); - - if (self.isLastAllocation(buf)) { - self.end_index -= buf.len; - } - } - - fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx)); - _ = ra; - const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); - var end_index = @atomicLoad(usize, &self.end_index, .seq_cst); - while (true) { - const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null; - const adjusted_index = end_index + adjust_off; - const new_end_index = adjusted_index + n; - if (new_end_index > self.buffer.len) return null; - end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse - return self.buffer[adjusted_index..new_end_index].ptr; - } - } - - pub fn reset(self: *FixedBufferAllocator) void { - self.end_index = 0; - } -}; - -/// Returns a `StackFallbackAllocator` allocating using either a -/// `FixedBufferAllocator` on an array of size `size` and falling back to -/// `fallback_allocator` if that fails. -pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) { - return StackFallbackAllocator(size){ - .buffer = undefined, - .fallback_allocator = fallback_allocator, - .fixed_buffer_allocator = undefined, - }; -} - -/// An allocator that attempts to allocate using a -/// `FixedBufferAllocator` using an array of size `size`. If the -/// allocation fails, it will fall back to using -/// `fallback_allocator`. Easily created with `stackFallback`. 
-pub fn StackFallbackAllocator(comptime size: usize) type { - return struct { - const Self = @This(); - - buffer: [size]u8, - fallback_allocator: Allocator, - fixed_buffer_allocator: FixedBufferAllocator, - get_called: if (std.debug.runtime_safety) bool else void = - if (std.debug.runtime_safety) false else {}, - - /// This function both fetches a `Allocator` interface to this - /// allocator *and* resets the internal buffer allocator. - pub fn get(self: *Self) Allocator { - if (std.debug.runtime_safety) { - assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();` - self.get_called = true; - } - self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]); - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .free = free, - }, - }; - } - - /// Unlike most std allocators `StackFallbackAllocator` modifies - /// its internal state before returning an implementation of - /// the`Allocator` interface and therefore also doesn't use - /// the usual `.allocator()` method. - pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead"); - - fn alloc( - ctx: *anyopaque, - len: usize, - log2_ptr_align: u8, - ra: usize, - ) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(ctx)); - return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse - return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra); - } - - fn resize( - ctx: *anyopaque, - buf: []u8, - log2_buf_align: u8, - new_len: usize, - ra: usize, - ) bool { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra); - } else { - return self.fallback_allocator.rawResize(buf, log2_buf_align, new_len, ra); - } - } - - fn free( - ctx: *anyopaque, - buf: []u8, - log2_buf_align: u8, - ra: usize, - ) void { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra); - } else { - return self.fallback_allocator.rawFree(buf, log2_buf_align, ra); - } - } - }; -} - -test "c_allocator" { - if (builtin.link_libc) { - try testAllocator(c_allocator); - try testAllocatorAligned(c_allocator); - try testAllocatorLargeAlignment(c_allocator); - try testAllocatorAlignedShrink(c_allocator); - } -} - -test "raw_c_allocator" { - if (builtin.link_libc) { - try testAllocator(raw_c_allocator); - } -} - -test "PageAllocator" { - const allocator = page_allocator; - try testAllocator(allocator); - try testAllocatorAligned(allocator); - if (!builtin.target.isWasm()) { - try testAllocatorLargeAlignment(allocator); - try testAllocatorAlignedShrink(allocator); - } - - if (builtin.os.tag == .windows) { - const slice = try allocator.alignedAlloc(u8, mem.page_size, 128); - slice[0] = 0x12; - slice[127] = 0x34; - allocator.free(slice); - } - { - var buf = try allocator.alloc(u8, mem.page_size + 1); - defer allocator.free(buf); - buf = try allocator.realloc(buf, 1); // shrink past the page boundary - } -} - test "HeapAllocator" { if (builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/13702 @@ -638,260 +176,9 @@ test "HeapAllocator" { defer heap_allocator.deinit(); const allocator = heap_allocator.allocator(); - try testAllocator(allocator); - try testAllocatorAligned(allocator); - try 
testAllocatorLargeAlignment(allocator); - try testAllocatorAlignedShrink(allocator); - } -} - -test "ArenaAllocator" { - var arena_allocator = ArenaAllocator.init(page_allocator); - defer arena_allocator.deinit(); - const allocator = arena_allocator.allocator(); - - try testAllocator(allocator); - try testAllocatorAligned(allocator); - try testAllocatorLargeAlignment(allocator); - try testAllocatorAlignedShrink(allocator); -} - -var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined; -test "FixedBufferAllocator" { - var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..])); - const allocator = fixed_buffer_allocator.allocator(); - - try testAllocator(allocator); - try testAllocatorAligned(allocator); - try testAllocatorLargeAlignment(allocator); - try testAllocatorAlignedShrink(allocator); -} - -test "FixedBufferAllocator.reset" { - var buf: [8]u8 align(@alignOf(u64)) = undefined; - var fba = FixedBufferAllocator.init(buf[0..]); - const allocator = fba.allocator(); - - const X = 0xeeeeeeeeeeeeeeee; - const Y = 0xffffffffffffffff; - - const x = try allocator.create(u64); - x.* = X; - try testing.expectError(error.OutOfMemory, allocator.create(u64)); - - fba.reset(); - const y = try allocator.create(u64); - y.* = Y; - - // we expect Y to have overwritten X. - try testing.expect(x.* == y.*); - try testing.expect(y.* == Y); -} - -test "StackFallbackAllocator" { - { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocator(stack_allocator.get()); - } - { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorAligned(stack_allocator.get()); - } - { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorLargeAlignment(stack_allocator.get()); - } - { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorAlignedShrink(stack_allocator.get()); - } -} - -test "FixedBufferAllocator Reuse memory on realloc" { - var small_fixed_buffer: [10]u8 = undefined; - // check if we re-use the memory - { - var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); - const allocator = fixed_buffer_allocator.allocator(); - - const slice0 = try allocator.alloc(u8, 5); - try testing.expect(slice0.len == 5); - const slice1 = try allocator.realloc(slice0, 10); - try testing.expect(slice1.ptr == slice0.ptr); - try testing.expect(slice1.len == 10); - try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11)); - } - // check that we don't re-use the memory if it's not the most recent block - { - var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); - const allocator = fixed_buffer_allocator.allocator(); - - var slice0 = try allocator.alloc(u8, 2); - slice0[0] = 1; - slice0[1] = 2; - const slice1 = try allocator.alloc(u8, 2); - const slice2 = try allocator.realloc(slice0, 4); - try testing.expect(slice0.ptr != slice2.ptr); - try testing.expect(slice1.ptr != slice2.ptr); - try testing.expect(slice2[0] == 1); - try testing.expect(slice2[1] == 2); - } -} - -test "Thread safe FixedBufferAllocator" { - var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]); - - try testAllocator(fixed_buffer_allocator.threadSafeAllocator()); - try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator()); - try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator()); - try 
testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator()); -} - -/// This one should not try alignments that exceed what C malloc can handle. -pub fn testAllocator(base_allocator: mem.Allocator) !void { - var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.allocator(); - - var slice = try allocator.alloc(*i32, 100); - try testing.expect(slice.len == 100); - for (slice, 0..) |*item, i| { - item.* = try allocator.create(i32); - item.*.* = @as(i32, @intCast(i)); - } - - slice = try allocator.realloc(slice, 20000); - try testing.expect(slice.len == 20000); - - for (slice[0..100], 0..) |item, i| { - try testing.expect(item.* == @as(i32, @intCast(i))); - allocator.destroy(item); - } - - if (allocator.resize(slice, 50)) { - slice = slice[0..50]; - if (allocator.resize(slice, 25)) { - slice = slice[0..25]; - try testing.expect(allocator.resize(slice, 0)); - slice = slice[0..0]; - slice = try allocator.realloc(slice, 10); - try testing.expect(slice.len == 10); - } - } - allocator.free(slice); - - // Zero-length allocation - const empty = try allocator.alloc(u8, 0); - allocator.free(empty); - // Allocation with zero-sized types - const zero_bit_ptr = try allocator.create(u0); - zero_bit_ptr.* = 0; - allocator.destroy(zero_bit_ptr); - - const oversize = try allocator.alignedAlloc(u32, null, 5); - try testing.expect(oversize.len >= 5); - for (oversize) |*item| { - item.* = 0xDEADBEEF; - } - allocator.free(oversize); -} - -pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void { - var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.allocator(); - - // Test a few alignment values, smaller and bigger than the type's one - inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| { - // initial - var slice = try allocator.alignedAlloc(u8, alignment, 10); - try testing.expect(slice.len == 10); - // grow - slice = try allocator.realloc(slice, 100); - try testing.expect(slice.len == 100); - if (allocator.resize(slice, 10)) { - slice = slice[0..10]; - } - try testing.expect(allocator.resize(slice, 0)); - slice = slice[0..0]; - // realloc from zero - slice = try allocator.realloc(slice, 100); - try testing.expect(slice.len == 100); - if (allocator.resize(slice, 10)) { - slice = slice[0..10]; - } - try testing.expect(allocator.resize(slice, 0)); - } -} - -pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void { - var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.allocator(); - - const large_align: usize = mem.page_size / 2; - - var align_mask: usize = undefined; - align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0]; - - var slice = try allocator.alignedAlloc(u8, large_align, 500); - try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); - - if (allocator.resize(slice, 100)) { - slice = slice[0..100]; - } - - slice = try allocator.realloc(slice, 5000); - try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); - - if (allocator.resize(slice, 10)) { - slice = slice[0..10]; - } - - slice = try allocator.realloc(slice, 20000); - try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); - - allocator.free(slice); -} - -pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { - var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.allocator(); - 
- var debug_buffer: [1000]u8 = undefined; - var fib = FixedBufferAllocator.init(&debug_buffer); - const debug_allocator = fib.allocator(); - - const alloc_size = mem.page_size * 2 + 50; - var slice = try allocator.alignedAlloc(u8, 16, alloc_size); - defer allocator.free(slice); - - var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator); - // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary, - // which is 16 pages, hence the 32. This test may require to increase - // the size of the allocations feeding the `allocator` parameter if they - // fail, because of this high over-alignment we want to have. - while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), mem.page_size * 32)) { - try stuff_to_free.append(slice); - slice = try allocator.alignedAlloc(u8, 16, alloc_size); - } - while (stuff_to_free.popOrNull()) |item| { - allocator.free(item); - } - slice[0] = 0x12; - slice[60] = 0x34; - - slice = try allocator.reallocAdvanced(slice, alloc_size / 2, 0); - try testing.expect(slice[0] == 0x12); - try testing.expect(slice[60] == 0x34); -} - -test { - _ = LoggingAllocator; - _ = LogToWriterAllocator; - _ = ScopedLoggingAllocator; - _ = @import("heap/memory_pool.zig"); - _ = ArenaAllocator; - _ = GeneralPurposeAllocator; - if (comptime builtin.target.isWasm()) { - _ = WasmAllocator; - _ = WasmPageAllocator; + try alloc.testAllocator(allocator); + try alloc.testAllocatorAligned(allocator); + try alloc.testAllocatorLargeAlignment(allocator); + try alloc.testAllocatorAlignedShrink(allocator); } } diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig deleted file mode 100644 index 3cff6b439fbc..000000000000 --- a/lib/std/heap/arena_allocator.zig +++ /dev/null @@ -1,286 +0,0 @@ -const std = @import("../std.zig"); -const assert = std.debug.assert; -const mem = std.mem; -const Allocator = std.mem.Allocator; - -/// This allocator takes an existing allocator, wraps it, and provides an interface -/// where you can allocate without freeing, and then free it all together. -pub const ArenaAllocator = struct { - child_allocator: Allocator, - state: State, - - /// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator - /// as a memory-saving optimization. - pub const State = struct { - buffer_list: std.SinglyLinkedList(usize) = .{}, - end_index: usize = 0, - - pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator { - return .{ - .child_allocator = child_allocator, - .state = self, - }; - } - }; - - pub fn allocator(self: *ArenaAllocator) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .free = free, - }, - }; - } - - const BufNode = std.SinglyLinkedList(usize).Node; - - pub fn init(child_allocator: Allocator) ArenaAllocator { - return (State{}).promote(child_allocator); - } - - pub fn deinit(self: ArenaAllocator) void { - // NOTE: When changing this, make sure `reset()` is adjusted accordingly! - - var it = self.state.buffer_list.first; - while (it) |node| { - // this has to occur before the free because the free frees node - const next_it = node.next; - const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); - const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; - self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); - it = next_it; - } - } - - pub const ResetMode = union(enum) { - /// Releases all allocated memory in the arena. 
- free_all, - /// This will pre-heat the arena for future allocations by allocating a - /// large enough buffer for all previously done allocations. - /// Preheating will speed up the allocation process by invoking the backing allocator - /// less often than before. If `reset()` is used in a loop, this means that after the - /// biggest operation, no memory allocations are performed anymore. - retain_capacity, - /// This is the same as `retain_capacity`, but the memory will be shrunk to - /// this value if it exceeds the limit. - retain_with_limit: usize, - }; - /// Queries the current memory use of this arena. - /// This will **not** include the storage required for internal keeping. - pub fn queryCapacity(self: ArenaAllocator) usize { - var size: usize = 0; - var it = self.state.buffer_list.first; - while (it) |node| : (it = node.next) { - // Compute the actually allocated size excluding the - // linked list node. - size += node.data - @sizeOf(BufNode); - } - return size; - } - /// Resets the arena allocator and frees all allocated memory. - /// - /// `mode` defines how the currently allocated memory is handled. - /// See the variant documentation for `ResetMode` for the effects of each mode. - /// - /// The function will return whether the reset operation was successful or not. - /// If the reallocation failed `false` is returned. The arena will still be fully - /// functional in that case, all memory is released. Future allocations just might - /// be slower. - /// - /// NOTE: If `mode` is `free_all`, the function will always return `true`. - pub fn reset(self: *ArenaAllocator, mode: ResetMode) bool { - // Some words on the implementation: - // The reset function can be implemented with two basic approaches: - // - Counting how much bytes were allocated since the last reset, and storing that - // information in State. This will make reset fast and alloc only a teeny tiny bit - // slower. - // - Counting how much bytes were allocated by iterating the chunk linked list. This - // will make reset slower, but alloc() keeps the same speed when reset() as if reset() - // would not exist. - // - // The second variant was chosen for implementation, as with more and more calls to reset(), - // the function will get faster and faster. At one point, the complexity of the function - // will drop to amortized O(1), as we're only ever having a single chunk that will not be - // reallocated, and we're not even touching the backing allocator anymore. - // - // Thus, only the first hand full of calls to reset() will actually need to iterate the linked - // list, all future calls are just taking the first node, and only resetting the `end_index` - // value. 
- const requested_capacity = switch (mode) { - .retain_capacity => self.queryCapacity(), - .retain_with_limit => |limit| @min(limit, self.queryCapacity()), - .free_all => 0, - }; - if (requested_capacity == 0) { - // just reset when we don't have anything to reallocate - self.deinit(); - self.state = State{}; - return true; - } - const total_size = requested_capacity + @sizeOf(BufNode); - const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); - // Free all nodes except for the last one - var it = self.state.buffer_list.first; - const maybe_first_node = while (it) |node| { - // this has to occur before the free because the free frees node - const next_it = node.next; - if (next_it == null) - break node; - const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; - self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); - it = next_it; - } else null; - std.debug.assert(maybe_first_node == null or maybe_first_node.?.next == null); - // reset the state before we try resizing the buffers, so we definitely have reset the arena to 0. - self.state.end_index = 0; - if (maybe_first_node) |first_node| { - self.state.buffer_list.first = first_node; - // perfect, no need to invoke the child_allocator - if (first_node.data == total_size) - return true; - const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data]; - if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) { - // successful resize - first_node.data = total_size; - } else { - // manual realloc - const new_ptr = self.child_allocator.rawAlloc(total_size, align_bits, @returnAddress()) orelse { - // we failed to preheat the arena properly, signal this to the user. - return false; - }; - self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress()); - const node: *BufNode = @ptrCast(@alignCast(new_ptr)); - node.* = .{ .data = total_size }; - self.state.buffer_list.first = node; - } - } - return true; - } - - fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) ?*BufNode { - const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16); - const big_enough_len = prev_len + actual_min_size; - const len = big_enough_len + big_enough_len / 2; - const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); - const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse - return null; - const buf_node: *BufNode = @ptrCast(@alignCast(ptr)); - buf_node.* = .{ .data = len }; - self.state.buffer_list.prepend(buf_node); - self.state.end_index = 0; - return buf_node; - } - - fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); - _ = ra; - - const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); - var cur_node = if (self.state.buffer_list.first) |first_node| - first_node - else - (self.createNode(0, n + ptr_align) orelse return null); - while (true) { - const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data]; - const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; - const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index; - const adjusted_addr = mem.alignForward(usize, addr, ptr_align); - const adjusted_index = self.state.end_index + (adjusted_addr - addr); - const new_end_index = adjusted_index + n; - - if (new_end_index <= cur_buf.len) { - const result = cur_buf[adjusted_index..new_end_index]; - self.state.end_index = new_end_index; - return result.ptr; - } - - const 
bigger_buf_size = @sizeOf(BufNode) + new_end_index; - const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); - if (self.child_allocator.rawResize(cur_alloc_buf, log2_align, bigger_buf_size, @returnAddress())) { - cur_node.data = bigger_buf_size; - } else { - // Allocate a new node if that's not possible - cur_node = self.createNode(cur_buf.len, n + ptr_align) orelse return null; - } - } - } - - fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); - _ = log2_buf_align; - _ = ret_addr; - - const cur_node = self.state.buffer_list.first orelse return false; - const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; - if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) { - // It's not the most recent allocation, so it cannot be expanded, - // but it's fine if they want to make it smaller. - return new_len <= buf.len; - } - - if (buf.len >= new_len) { - self.state.end_index -= buf.len - new_len; - return true; - } else if (cur_buf.len - self.state.end_index >= new_len - buf.len) { - self.state.end_index += new_len - buf.len; - return true; - } else { - return false; - } - } - - fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { - _ = log2_buf_align; - _ = ret_addr; - - const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); - - const cur_node = self.state.buffer_list.first orelse return; - const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; - - if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) { - self.state.end_index -= buf.len; - } - } -}; - -test "reset with preheating" { - var arena_allocator = ArenaAllocator.init(std.testing.allocator); - defer arena_allocator.deinit(); - // provides some variance in the allocated data - var rng_src = std.Random.DefaultPrng.init(std.testing.random_seed); - const random = rng_src.random(); - var rounds: usize = 25; - while (rounds > 0) { - rounds -= 1; - _ = arena_allocator.reset(.retain_capacity); - var alloced_bytes: usize = 0; - const total_size: usize = random.intRangeAtMost(usize, 256, 16384); - while (alloced_bytes < total_size) { - const size = random.intRangeAtMost(usize, 16, 256); - const alignment = 32; - const slice = try arena_allocator.allocator().alignedAlloc(u8, alignment, size); - try std.testing.expect(std.mem.isAligned(@intFromPtr(slice.ptr), alignment)); - try std.testing.expectEqual(size, slice.len); - alloced_bytes += slice.len; - } - } -} - -test "reset while retaining a buffer" { - var arena_allocator = ArenaAllocator.init(std.testing.allocator); - defer arena_allocator.deinit(); - const a = arena_allocator.allocator(); - - // Create two internal buffers - _ = try a.alloc(u8, 1); - _ = try a.alloc(u8, 1000); - - // Check that we have at least two buffers - try std.testing.expect(arena_allocator.state.buffer_list.first.?.next != null); - - // This retains the first allocated buffer - try std.testing.expect(arena_allocator.reset(.{ .retain_with_limit = 1 })); -} diff --git a/lib/std/std.zig b/lib/std/std.zig index cc61111746aa..5ba3f38caddb 100644 --- a/lib/std/std.zig +++ b/lib/std/std.zig @@ -45,6 +45,7 @@ pub const Treap = @import("treap.zig").Treap; pub const Tz = tz.Tz; pub const Uri = @import("Uri.zig"); +pub const alloc = @import("alloc.zig"); pub const array_hash_map = @import("array_hash_map.zig"); pub const atomic = @import("atomic.zig"); pub const base64 
= @import("base64.zig");