From 90d65cdcd1db1e2d044c0bf2bc70b3c71552932a Mon Sep 17 00:00:00 2001 From: playX18 Date: Fri, 14 Feb 2025 12:56:12 +0700 Subject: [PATCH 1/3] feat: add tracking functionality for memory management in various allocators and resources --- Cargo.toml | 5 + src/plan/global.rs | 4 + src/policy/largeobjectspace.rs | 2 + src/policy/marksweepspace/native_ms/block.rs | 19 ++- src/policy/space.rs | 2 +- src/util/alloc/bumpallocator.rs | 7 +- src/util/alloc/free_list_allocator.rs | 4 + src/util/alloc/large_object_allocator.rs | 6 +- src/util/heap/blockpageresource.rs | 15 ++ src/util/heap/externalpageresource.rs | 4 + src/util/heap/freelistpageresource.rs | 15 ++ src/util/heap/monotonepageresource.rs | 9 +- src/util/heap/pageresource.rs | 4 + src/util/memory.rs | 16 ++ src/util/metadata/vo_bit/helper.rs | 13 ++ src/util/mod.rs | 1 + src/util/sanity/mod.rs | 1 + src/util/track.rs | 151 +++++++++++++++++++ 18 files changed, 272 insertions(+), 6 deletions(-) create mode 100644 src/util/track.rs diff --git a/Cargo.toml b/Cargo.toml index 9fa74fa2f7..73ba0c7cd6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,8 @@ static_assertions = "1.1.0" strum = "0.26.2" strum_macros = "0.26.2" sysinfo = "0.30.9" +crabgrind = { version = "0.1.12", optional = true } + [dev-dependencies] paste = "1.0.8" @@ -218,4 +220,7 @@ eager_sweeping = [] # normal heap range, we will have to use chunk-based SFT table. Turning on this feature will use a different SFT map implementation on 64bits, # and will affect all the plans in the build. Please be aware of the consequence, and this is only meant to be experimental use. malloc_mark_sweep = [] +# Enable Valgrind support in MMTk. This will invoke Valgrind interfaces on allocation and deallocation. At the moment only MarkSweep +# space is supported +crabgrind = ["dep:crabgrind"] # Group:end diff --git a/src/plan/global.rs b/src/plan/global.rs index a744c5a9f2..d0190c6f20 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -98,10 +98,14 @@ pub fn create_plan( let sft_map: &mut dyn crate::policy::sft_map::SFTMap = unsafe { crate::mmtk::SFT_MAP.get_mut() }.as_mut(); plan.for_each_space(&mut |s| { + sft_map.notify_space_creation(s.as_sft()); s.initialize_sft(sft_map); + // after SFT is initialized, we can also initialize mempool tracking + s.get_page_resource().track(); }); + plan } diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index cdeb87a9c0..7d2e09f11c 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -10,6 +10,7 @@ use crate::util::heap::{FreeListPageResource, PageResource}; use crate::util::metadata; use crate::util::object_enum::ObjectEnumerator; use crate::util::opaque_pointer::*; +use crate::util::track::track_free; use crate::util::treadmill::TreadMill; use crate::util::{Address, ObjectReference}; use crate::vm::ObjectModel; @@ -288,6 +289,7 @@ impl LargeObjectSpace { let sweep = |object: ObjectReference| { #[cfg(feature = "vo_bit")] crate::util::metadata::vo_bit::unset_vo_bit(object); + track_free(object.to_object_start::(), 0 /* TODO: Size */); self.pr .release_pages(get_super_page(object.to_object_start::())); }; diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 5bef9a3e52..d81e40a6ae 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -297,6 +297,13 @@ impl Block { if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC .is_marked::(potential_object, Ordering::SeqCst) { + 
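+                // This cell's object was not marked in this GC, so the cell is about to be
+                // reclaimed: report the stale object as freed to the memory checker before
+                // its VO bit is cleared below.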
#[cfg(feature="crabgrind")] + { + let vo_bit = crate::util::metadata::vo_bit::is_vo_bit_set(potential_object); + if vo_bit { + crabgrind::memcheck::alloc::free(cell.to_mut_ptr(), 0); + } + } // clear VO bit if it is ever set. It is possible that the VO bit is never set for this cell (i.e. there was no object in this cell before this GC), // we unset the bit anyway. #[cfg(feature = "vo_bit")] @@ -304,6 +311,7 @@ impl Block { unsafe { cell.store::
(last);
                 }
+                last = cell;
             }
             cell += cell_size;
@@ -365,7 +373,14 @@ impl Block {
                     "{:?} Free cell: {}, last cell in freelist is {}",
                     self, cell, last
                 );
-
+                #[cfg(feature="crabgrind")]
+                {
+                    let vo_bit = crate::util::metadata::vo_bit::is_vo_bit_set(potential_object_ref);
+                    if vo_bit {
+                        println!("free");
+                        crabgrind::memcheck::alloc::free(cell.to_mut_ptr(), 0);
+                    }
+                }
                 // Clear VO bit: we don't know where the object reference actually is, so we bulk zero the cell.
                 #[cfg(feature = "vo_bit")]
                 crate::util::metadata::vo_bit::bzero_vo_bit(cell, cell_size);
@@ -376,8 +391,10 @@
                     cell.store::<Address>
(last); } last = cell; + cell += cell_size; debug_assert_eq!(cursor, cell); + } } } diff --git a/src/policy/space.rs b/src/policy/space.rs index 3ce1ee3fe1..6c1494b6c3 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -6,6 +6,7 @@ use crate::util::metadata::side_metadata::{ SideMetadataContext, SideMetadataSanity, SideMetadataSpec, }; use crate::util::object_enum::ObjectEnumerator; +use crate::util::track::track_mempool_alloc; use crate::util::Address; use crate::util::ObjectReference; @@ -214,7 +215,6 @@ pub trait Space: 'static + SFT + Sync + Downcast { self.common().descriptor ); } - debug!("Space.acquire(), returned = {}", res.start); res.start } diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 76cc628c89..f124bf16bb 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use crate::util::track::track_malloc; use crate::util::Address; use crate::util::alloc::Allocator; @@ -116,13 +117,17 @@ impl Allocator for BumpAllocator { self.bump_pointer.cursor, self.bump_pointer.limit ); + track_malloc(result, size, false); result + } } fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address { trace!("alloc_slow"); - self.acquire_block(size, align, offset, false) + let block = self.acquire_block(size, align, offset, false); + track_malloc(block, size, false); + block } /// Slow path for allocation if precise stress testing has been enabled. diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 28958a1759..58ab529b80 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -6,6 +6,7 @@ use crate::policy::marksweepspace::native_ms::*; use crate::util::alloc::allocator; use crate::util::alloc::Allocator; use crate::util::linear_scan::Region; +use crate::util::track::track_malloc; use crate::util::Address; use crate::util::VMThread; use crate::vm::VMBinding; @@ -76,6 +77,7 @@ impl Allocator for FreeListAllocator { size, align, offset, cell, cell_size, res + size, cell + cell_size ); } + return res; } } @@ -179,6 +181,8 @@ impl FreeListAllocator { } } + track_malloc(cell, cell_size, true); + cell } diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index baf0738274..c736175c34 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -4,6 +4,7 @@ use crate::policy::largeobjectspace::LargeObjectSpace; use crate::policy::space::Space; use crate::util::alloc::{allocator, Allocator}; use crate::util::opaque_pointer::*; +use crate::util::track::track_malloc; use crate::util::Address; use crate::vm::VMBinding; @@ -42,7 +43,10 @@ impl Allocator for LargeObjectAllocator { let cell: Address = self.alloc_slow(size, align, offset); // We may get a null ptr from alloc due to the VM being OOM if !cell.is_zero() { - allocator::align_allocation::(cell, align, offset) + let result = allocator::align_allocation::(cell, align, offset); + + track_malloc(result, size, true); + result } else { cell } diff --git a/src/util/heap/blockpageresource.rs b/src/util/heap/blockpageresource.rs index e1e8e23501..02b3888f33 100644 --- a/src/util/heap/blockpageresource.rs +++ b/src/util/heap/blockpageresource.rs @@ -9,6 +9,7 @@ use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::linear_scan::Region; use crate::util::opaque_pointer::*; use crate::util::rust_util::zeroed_alloc::new_zeroed_vec; +use 
crate::util::track::{track_mempool, track_mempool_alloc, track_mempool_free, untrack_mempool}; use crate::vm::*; use atomic::Ordering; use spin::RwLock; @@ -30,6 +31,10 @@ pub struct BlockPageResource { } impl PageResource for BlockPageResource { + fn track(&self) { + track_mempool(self, 0, false); + } + fn common(&self) -> &CommonPageResource { self.flpr.common() } @@ -58,6 +63,12 @@ impl PageResource for BlockPageResource { } } +impl Drop for BlockPageResource { + fn drop(&mut self) { + untrack_mempool(self); + } +} + impl BlockPageResource { /// Block granularity in pages const LOG_PAGES: usize = B::LOG_BYTES - LOG_BYTES_IN_PAGE as usize; @@ -136,6 +147,7 @@ impl BlockPageResource { self.block_queue.add_global_array(array); // Finish slow-allocation self.commit_pages(reserved_pages, required_pages, tls); + track_mempool_alloc(self, first_block, required_pages * BYTES_IN_PAGE); Result::Ok(PRAllocResult { start: first_block, pages: required_pages, @@ -156,6 +168,7 @@ impl BlockPageResource { // Fast allocate from the blocks list if let Some(block) = self.block_queue.pop() { self.commit_pages(reserved_pages, required_pages, tls); + track_mempool_alloc(self, block.start(), required_pages * BYTES_IN_PAGE); return Result::Ok(PRAllocResult { start: block.start(), pages: required_pages, @@ -170,6 +183,7 @@ impl BlockPageResource { let pages = 1 << Self::LOG_PAGES; debug_assert!(pages as usize <= self.common().accounting.get_committed_pages()); self.common().accounting.release(pages as _); + track_mempool_free(self, block.start()); self.block_queue.push(block) } @@ -413,3 +427,4 @@ impl BlockPool { } } } + diff --git a/src/util/heap/externalpageresource.rs b/src/util/heap/externalpageresource.rs index 613042b8ba..5be303d4c7 100644 --- a/src/util/heap/externalpageresource.rs +++ b/src/util/heap/externalpageresource.rs @@ -28,6 +28,10 @@ pub struct ExternalPages { } impl PageResource for ExternalPageResource { + fn track(&self) { + /* cannot track external pages reliably? 
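External pages are mapped and owned by the VM binding rather than allocated through an MMTk page resource, so there is no MMTk-managed pool to register with the checker.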
*/ + } + fn common(&self) -> &CommonPageResource { &self.common } diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index aa66fe232d..7e5a4fe420 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -17,6 +17,7 @@ use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::memory; use crate::util::opaque_pointer::*; use crate::util::raw_memory_freelist::RawMemoryFreeList; +use crate::util::track::{track_mempool, track_mempool_alloc, track_mempool_free, untrack_mempool}; use crate::vm::*; use std::marker::PhantomData; @@ -41,6 +42,10 @@ struct FreeListPageResourceSync { } impl PageResource for FreeListPageResource { + fn track(&self) { + track_mempool(self, 0, false); + } + fn common(&self) -> &CommonPageResource { &self.common } @@ -134,6 +139,9 @@ impl PageResource for FreeListPageResource { } } }; + + track_mempool_alloc(self, rtn, conversions::pages_to_bytes(required_pages)); + Result::Ok(PRAllocResult { start: rtn, pages: required_pages, @@ -346,6 +354,7 @@ impl FreeListPageResource { } self.common.accounting.release(pages as _); + track_mempool_free(self, first); let freed = sync.free_list.free(page_offset as _, true); sync.pages_currently_on_freelist += pages as usize; if !self.common.contiguous { @@ -391,3 +400,9 @@ impl FreeListPageResource { } } } + +impl Drop for FreeListPageResource { + fn drop(&mut self) { + untrack_mempool(self); + } +} \ No newline at end of file diff --git a/src/util/heap/monotonepageresource.rs b/src/util/heap/monotonepageresource.rs index 39bb076b24..5dee3d9377 100644 --- a/src/util/heap/monotonepageresource.rs +++ b/src/util/heap/monotonepageresource.rs @@ -2,7 +2,8 @@ use super::layout::vm_layout::{BYTES_IN_CHUNK, PAGES_IN_CHUNK}; use crate::policy::space::required_chunks; use crate::util::address::Address; use crate::util::constants::BYTES_IN_PAGE; -use crate::util::conversions::*; +use crate::util::conversions::{self, *}; +use crate::util::track::{track_mempool, track_mempool_alloc}; use std::ops::Range; use std::sync::{Mutex, MutexGuard}; @@ -45,6 +46,10 @@ pub enum MonotonePageResourceConditional { Discontiguous, } impl PageResource for MonotonePageResource { + fn track(&self) { + track_mempool(self,0, true); + } + fn common(&self) -> &CommonPageResource { &self.common } @@ -149,7 +154,7 @@ impl PageResource for MonotonePageResource { sync.current_chunk = chunk_align_down(sync.cursor); } self.commit_pages(reserved_pages, required_pages, tls); - + track_mempool_alloc(self, rtn, conversions::pages_to_bytes(required_pages)); Result::Ok(PRAllocResult { start: rtn, pages: required_pages, diff --git a/src/util/heap/pageresource.rs b/src/util/heap/pageresource.rs index 6f0e6a2453..756c2da132 100644 --- a/src/util/heap/pageresource.rs +++ b/src/util/heap/pageresource.rs @@ -11,6 +11,10 @@ use crate::util::heap::PageAccounting; use crate::vm::VMBinding; pub trait PageResource: 'static { + + /// Track this page resource for memory tools like Valgrind. + fn track(&self); + /// Allocate pages from this resource. /// Simply bump the cursor, and fail if we hit the sentinel. /// Return The start of the first page if successful, zero on failure. diff --git a/src/util/memory.rs b/src/util/memory.rs index d79905b759..475811e12c 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -353,6 +353,22 @@ pub fn handle_mmap_error( /// This function is currently left empty for non-linux, and should be implemented in the future. 
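/// When the `crabgrind` feature is enabled, the range is additionally queried through Valgrind's memcheck interface, on every platform.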
/// As the function is only used for assertions, MMTk will still run even if we never panic. pub(crate) fn panic_if_unmapped(_start: Address, _size: usize, _anno: &MmapAnnotation) { + #[cfg(feature = "crabgrind")] + { + + use crabgrind::memcheck::Error; + let result = crabgrind::memcheck::is_defined(_start.to_mut_ptr(), _size); + match result { + Ok(_) => panic!("{} of size {} is not mapped", _start, _size), + Err(err) => match err { + Error::NotAddressable(addr) => { + panic!("Address {addr:x} is not addressable, start={_start}"); + } + + _ => () + }, + } + } #[cfg(target_os = "linux")] { let flags = MMAP_FLAGS; diff --git a/src/util/metadata/vo_bit/helper.rs b/src/util/metadata/vo_bit/helper.rs index 4dd06459de..622bd1945f 100644 --- a/src/util/metadata/vo_bit/helper.rs +++ b/src/util/metadata/vo_bit/helper.rs @@ -29,6 +29,7 @@ use crate::{ util::{ linear_scan::Region, metadata::{vo_bit, MetadataSpec}, + track::{track_free, tracking_enabled}, ObjectReference, }, vm::{ObjectModel, VMBinding}, @@ -184,6 +185,18 @@ pub(crate) fn on_object_forwarded(new_object: ObjectReference) { } pub(crate) fn on_region_swept(region: &R, is_occupied: bool) { + if tracking_enabled() { + let mut cursor = region.start(); + while cursor < region.end() { + if let Some(object) = vo_bit::is_vo_bit_set_for_addr(cursor) { + if object.is_live() { + track_free(object.to_object_start::(), 0); + } + } + cursor += VM::MIN_ALIGNMENT; + } + } + match strategy::() { VOBitUpdateStrategy::ClearAndReconstruct => { // Do nothing. The VO bit metadata is already reconstructed. diff --git a/src/util/mod.rs b/src/util/mod.rs index d22c29a2e3..f6481794ca 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -70,6 +70,7 @@ pub(crate) mod slot_logger; pub(crate) mod statistics; /// A treadmill implementation. pub(crate) mod treadmill; +pub(crate) mod track; // These modules are private. They are only used by other util modules. diff --git a/src/util/sanity/mod.rs b/src/util/sanity/mod.rs index d45a1a9fa1..33b97b09bd 100644 --- a/src/util/sanity/mod.rs +++ b/src/util/sanity/mod.rs @@ -1 +1,2 @@ pub mod sanity_checker; +pub mod track; \ No newline at end of file diff --git a/src/util/track.rs b/src/util/track.rs new file mode 100644 index 0000000000..0c748d3c17 --- /dev/null +++ b/src/util/track.rs @@ -0,0 +1,151 @@ +//! Track memory ranges for tools like Valgrind address sanitizer, or other memory checkers. 
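+//!
+//! Every function in this module compiles to a no-op unless the `crabgrind` feature is
+//! enabled, so callers can invoke these hooks unconditionally, even on hot allocation paths.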
+ +use crate::util::Address; + +pub fn tracking_enabled() -> bool { + #[cfg(feature = "crabgrind")] + { + crabgrind::run_mode() != crabgrind::RunMode::Native + } + + #[cfg(not(feature = "crabgrind"))] + { + false + } +} + +pub fn track_malloc(p: Address, size: usize, zero: bool) { + #[cfg(feature = "crabgrind")] + { + crabgrind::memcheck::alloc::malloc(p.to_mut_ptr(), size, 0, zero) + } + + #[cfg(not(feature = "crabgrind"))] + { + let _ = p; + let _ = size; + let _ = zero; + } +} + +pub fn track_free(p: Address, size: usize) { + let _ = size; + #[cfg(feature = "crabgrind")] + { + crabgrind::memcheck::alloc::free(p.to_mut_ptr(), 0); + } + #[cfg(not(feature = "crabgrind"))] + { + let _ = p; + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum MemState { + NoAccess, + Undefined, + Defined, + DefinedIfAddressable, +} + +pub fn track_mem(p: Address, size: usize, state: MemState) { + #[cfg(feature = "crabgrind")] + { + let state = match state { + MemState::Defined => crabgrind::memcheck::MemState::Defined, + MemState::DefinedIfAddressable => crabgrind::memcheck::MemState::DefinedIfAddressable, + MemState::NoAccess => crabgrind::memcheck::MemState::NoAccess, + MemState::Undefined => crabgrind::memcheck::MemState::Undefined, + }; + + crabgrind::memcheck::mark_mem(p.to_mut_ptr(), size, state); + } + + #[cfg(not(feature = "crabgrind"))] + { + let _ = p; + let _ = size; + let _ = state; + } +} + +/// Track a memory pool. Read [Memory Pools](https://valgrind.org/docs/manual/mc-manual.html#mc-manual.mempools) +/// of valgrind for more information. +/// +/// # Parameters +/// +/// - `pool`: The memory pool to track. +/// - `redzone`: Redzone in between chunks. +/// - `is_zeroed`: Whether the memory pool is zeroed. +pub fn track_mempool(pool: &T, redzone: usize, is_zeroed: bool) { + #[cfg(feature = "crabgrind")] + { + crabgrind::memcheck::mempool::create( + Address::from_ref(pool).to_mut_ptr(), + redzone, + is_zeroed, + Some(crabgrind::memcheck::mempool::AUTO_FREE | crabgrind::memcheck::mempool::METAPOOL), + ); + } + + #[cfg(not(feature = "crabgrind"))] + { + let _ = pool; + let _ = redzone; + let _ = is_zeroed; + } +} + +/// Untrack a memory pool. This destroys the memory pool in the memory checker. +pub fn untrack_mempool(pool: &T) { + #[cfg(feature = "crabgrind")] + { + crabgrind::memcheck::mempool::destroy(Address::from_ref(pool).to_mut_ptr()); + } + #[cfg(not(feature = "crabgrind"))] + { + let _ = pool; + } +} + +/// Associate a piece of memory with a memory pool. +/// +/// # Parameters +/// - `pool`: The memory pool to associate with. +/// - `addr`: The address of the memory to associate. +/// - `size`: The size of the memory to associate. +pub fn track_mempool_alloc(pool: &T, addr: Address, size: usize) { + + #[cfg(feature = "crabgrind")] + { + crabgrind::memcheck::mempool::alloc( + Address::from_ptr(pool as *const T as *const u8).to_mut_ptr(), + addr.to_mut_ptr(), + size, + ); + } + + #[cfg(not(feature = "crabgrind"))] + { + let _ = pool; + let _ = addr; + let _ = size; + } +} + +/// Disassociate a piece of memory with a memory pool. +/// +/// # Parameters +/// - `pool`: The memory pool to disassociate with. +/// - `addr`: The address of the memory to disassociate. 
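+/// The `pool` reference must be the same one previously registered with `track_mempool`,
+/// and `addr` should have been associated with it via `track_mempool_alloc`; the memory
+/// checker identifies the tracked chunk by this (pool, address) pair.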
+pub fn track_mempool_free(pool: &T, addr: Address) { + #[cfg(feature = "crabgrind")] + { + crabgrind::memcheck::mempool::free(Address::from_ref(pool).to_mut_ptr(), addr.to_mut_ptr()); + } + #[cfg(not(feature = "crabgrind"))] + { + let _ = pool; + let _ = addr; + } +} From 2b84075738f268e63e4551232c4d2945afb0b269 Mon Sep 17 00:00:00 2001 From: playX18 Date: Fri, 14 Feb 2025 12:56:43 +0700 Subject: [PATCH 2/3] fmt --- src/plan/global.rs | 2 -- src/policy/marksweepspace/native_ms/block.rs | 10 +++++----- src/util/alloc/bumpallocator.rs | 3 +-- src/util/alloc/large_object_allocator.rs | 4 ++-- src/util/heap/blockpageresource.rs | 3 +-- src/util/heap/freelistpageresource.rs | 2 +- src/util/heap/monotonepageresource.rs | 2 +- src/util/heap/pageresource.rs | 1 - src/util/memory.rs | 3 +-- src/util/mod.rs | 2 +- src/util/sanity/mod.rs | 1 - src/util/track.rs | 1 - 12 files changed, 13 insertions(+), 21 deletions(-) diff --git a/src/plan/global.rs b/src/plan/global.rs index d0190c6f20..80e52bab99 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -98,14 +98,12 @@ pub fn create_plan( let sft_map: &mut dyn crate::policy::sft_map::SFTMap = unsafe { crate::mmtk::SFT_MAP.get_mut() }.as_mut(); plan.for_each_space(&mut |s| { - sft_map.notify_space_creation(s.as_sft()); s.initialize_sft(sft_map); // after SFT is initialized, we can also initialize mempool tracking s.get_page_resource().track(); }); - plan } diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index d81e40a6ae..6277fdd957 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -297,7 +297,7 @@ impl Block { if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC .is_marked::(potential_object, Ordering::SeqCst) { - #[cfg(feature="crabgrind")] + #[cfg(feature = "crabgrind")] { let vo_bit = crate::util::metadata::vo_bit::is_vo_bit_set(potential_object); if vo_bit { @@ -373,9 +373,10 @@ impl Block { "{:?} Free cell: {}, last cell in freelist is {}", self, cell, last ); - #[cfg(feature="crabgrind")] + #[cfg(feature = "crabgrind")] { - let vo_bit = crate::util::metadata::vo_bit::is_vo_bit_set(potential_object_ref); + let vo_bit = + crate::util::metadata::vo_bit::is_vo_bit_set(potential_object_ref); if vo_bit { println!("free"); crabgrind::memcheck::alloc::free(cell.to_mut_ptr(), 0); @@ -391,10 +392,9 @@ impl Block { cell.store::
(last); } last = cell; - + cell += cell_size; debug_assert_eq!(cursor, cell); - } } } diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index f124bf16bb..06be7fd43b 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -119,7 +119,6 @@ impl Allocator for BumpAllocator { ); track_malloc(result, size, false); result - } } @@ -127,7 +126,7 @@ impl Allocator for BumpAllocator { trace!("alloc_slow"); let block = self.acquire_block(size, align, offset, false); track_malloc(block, size, false); - block + block } /// Slow path for allocation if precise stress testing has been enabled. diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index c736175c34..9fffba90fe 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -44,9 +44,9 @@ impl Allocator for LargeObjectAllocator { // We may get a null ptr from alloc due to the VM being OOM if !cell.is_zero() { let result = allocator::align_allocation::(cell, align, offset); - + track_malloc(result, size, true); - result + result } else { cell } diff --git a/src/util/heap/blockpageresource.rs b/src/util/heap/blockpageresource.rs index 02b3888f33..475e579a8b 100644 --- a/src/util/heap/blockpageresource.rs +++ b/src/util/heap/blockpageresource.rs @@ -34,7 +34,7 @@ impl PageResource for BlockPageResource { fn track(&self) { track_mempool(self, 0, false); } - + fn common(&self) -> &CommonPageResource { self.flpr.common() } @@ -427,4 +427,3 @@ impl BlockPool { } } } - diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 7e5a4fe420..0c6dd9fcdd 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -405,4 +405,4 @@ impl Drop for FreeListPageResource { fn drop(&mut self) { untrack_mempool(self); } -} \ No newline at end of file +} diff --git a/src/util/heap/monotonepageresource.rs b/src/util/heap/monotonepageresource.rs index 5dee3d9377..58195440a5 100644 --- a/src/util/heap/monotonepageresource.rs +++ b/src/util/heap/monotonepageresource.rs @@ -47,7 +47,7 @@ pub enum MonotonePageResourceConditional { } impl PageResource for MonotonePageResource { fn track(&self) { - track_mempool(self,0, true); + track_mempool(self, 0, true); } fn common(&self) -> &CommonPageResource { diff --git a/src/util/heap/pageresource.rs b/src/util/heap/pageresource.rs index 756c2da132..75e5bc7119 100644 --- a/src/util/heap/pageresource.rs +++ b/src/util/heap/pageresource.rs @@ -11,7 +11,6 @@ use crate::util::heap::PageAccounting; use crate::vm::VMBinding; pub trait PageResource: 'static { - /// Track this page resource for memory tools like Valgrind. 
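    /// This hook runs once per space, right after the space's SFT entries are initialized
    /// (see `create_plan` in `plan/global.rs`). Page resources that cannot be tracked
    /// reliably, such as `ExternalPageResource`, may leave it as a no-op.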
fn track(&self); diff --git a/src/util/memory.rs b/src/util/memory.rs index 475811e12c..6cf5e68bf4 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -355,7 +355,6 @@ pub fn handle_mmap_error( pub(crate) fn panic_if_unmapped(_start: Address, _size: usize, _anno: &MmapAnnotation) { #[cfg(feature = "crabgrind")] { - use crabgrind::memcheck::Error; let result = crabgrind::memcheck::is_defined(_start.to_mut_ptr(), _size); match result { @@ -365,7 +364,7 @@ pub(crate) fn panic_if_unmapped(_start: Address, _size: usize, _anno: &MmapAnnot panic!("Address {addr:x} is not addressable, start={_start}"); } - _ => () + _ => (), }, } } diff --git a/src/util/mod.rs b/src/util/mod.rs index f6481794ca..1dce4dc1df 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -68,9 +68,9 @@ pub(crate) mod sanity; pub(crate) mod slot_logger; /// Utils for collecting statistics. pub(crate) mod statistics; +pub(crate) mod track; /// A treadmill implementation. pub(crate) mod treadmill; -pub(crate) mod track; // These modules are private. They are only used by other util modules. diff --git a/src/util/sanity/mod.rs b/src/util/sanity/mod.rs index 33b97b09bd..d45a1a9fa1 100644 --- a/src/util/sanity/mod.rs +++ b/src/util/sanity/mod.rs @@ -1,2 +1 @@ pub mod sanity_checker; -pub mod track; \ No newline at end of file diff --git a/src/util/track.rs b/src/util/track.rs index 0c748d3c17..8a204b8510 100644 --- a/src/util/track.rs +++ b/src/util/track.rs @@ -115,7 +115,6 @@ pub fn untrack_mempool(pool: &T) { /// - `addr`: The address of the memory to associate. /// - `size`: The size of the memory to associate. pub fn track_mempool_alloc(pool: &T, addr: Address, size: usize) { - #[cfg(feature = "crabgrind")] { crabgrind::memcheck::mempool::alloc( From baf42c4b4d9412e908dec40428a9d6ab9b97d906 Mon Sep 17 00:00:00 2001 From: playX18 Date: Fri, 14 Feb 2025 12:59:39 +0700 Subject: [PATCH 3/3] require vo_bit feature when crabgrind is enabled --- Cargo.toml | 2 +- src/policy/marksweepspace/native_ms/block.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 73ba0c7cd6..691f6eab13 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -222,5 +222,5 @@ eager_sweeping = [] malloc_mark_sweep = [] # Enable Valgrind support in MMTk. This will invoke Valgrind interfaces on allocation and deallocation. At the moment only MarkSweep # space is supported -crabgrind = ["dep:crabgrind"] +crabgrind = ["dep:crabgrind", "vo_bit"] # Group:end diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 6277fdd957..707d6944b3 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -378,7 +378,6 @@ impl Block { let vo_bit = crate::util::metadata::vo_bit::is_vo_bit_set(potential_object_ref); if vo_bit { - println!("free"); crabgrind::memcheck::alloc::free(cell.to_mut_ptr(), 0); } }
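Taken together, the `PageResource::track` hook and the helpers in `src/util/track.rs` map each page resource onto Valgrind's memory-pool model (create / alloc / free / destroy). The sketch below shows the intended call sequence in isolation. It is illustrative only: `DemoPool` and the page constants are made up for the example, and in the patch itself the calls are issued from `alloc_pages`, `release_pages`/`free`, and `Drop` of the page-resource implementations.

    use crate::util::track::{
        track_mempool, track_mempool_alloc, track_mempool_free, untrack_mempool,
    };
    use crate::util::Address;

    /// Stand-in for a page resource such as `FreeListPageResource`.
    struct DemoPool;

    fn demo(pool: &DemoPool, pages: Address) {
        // Register the pool once (no redzone, memory not assumed zeroed),
        // as `FreeListPageResource::track` does.
        track_mempool(pool, 0, false);

        // Associate a freshly committed 4 KiB region with the pool
        // (done in `alloc_pages` after `commit_pages`).
        track_mempool_alloc(pool, pages, 4096);

        // Disassociate the region when the pages are released.
        track_mempool_free(pool, pages);

        // Destroy the pool when the page resource is dropped.
        untrack_mempool(pool);
    }

Without the `crabgrind` feature, all four calls compile to no-ops, so the sequence is safe in a normal build.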