diff --git a/src/addr.rs b/src/addr.rs
index 512821a..7fac30f 100644
--- a/src/addr.rs
+++ b/src/addr.rs
@@ -12,6 +12,19 @@ def_usize_addr! {
     pub type GuestPhysAddr;
 }
 
+/// Converts a `PhysAddr` into a `GuestPhysAddr`.
+///
+/// Note: this is a purely numeric conversion and carries no semantic meaning.
+/// It exists because `GenericPTE`, as provided by `page_table_entry::x86_64`,
+/// only accepts `PhysAddr` as the physical address type, and introducing the
+/// `GuestPhysAddr` concept into `GenericPTE` would add a lot of complexity.
+/// This conversion is a pragmatic workaround to make the two types interoperate.
+impl From<PhysAddr> for GuestPhysAddr {
+    fn from(addr: PhysAddr) -> Self {
+        Self::from_usize(addr.into())
+    }
+}
+
 def_usize_addr_formatter! {
     GuestVirtAddr = "GVA:{}";
     GuestPhysAddr = "GPA:{}";
diff --git a/src/address_space/backend/alloc.rs b/src/address_space/backend/alloc.rs
index eb01e11..aa8fa8d 100644
--- a/src/address_space/backend/alloc.rs
+++ b/src/address_space/backend/alloc.rs
@@ -1,10 +1,14 @@
-use memory_addr::{PageIter4K, PhysAddr};
-use page_table_multiarch::{MappingFlags, PageSize, PagingHandler};
+use memory_addr::{
+    MemoryAddr, PAGE_SIZE_1G, PAGE_SIZE_2M, PAGE_SIZE_4K, PageIter1G, PageIter2M, PageIter4K,
+    PhysAddr,
+};
+use page_table_multiarch::{
+    GenericPTE, MappingFlags, PageSize, PageTable64, PagingHandler, PagingMetaData,
+};
 
 use super::Backend;
-use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};
 
-impl<H: PagingHandler> Backend<H> {
+impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Backend<M, PTE, H> {
     /// Creates a new allocation mapping backend.
     pub const fn new_alloc(populate: bool) -> Self {
         Self::Alloc {
@@ -15,22 +19,56 @@ impl<H: PagingHandler> Backend<H> {
 
     pub(crate) fn map_alloc(
         &self,
-        start: GuestPhysAddr,
+        start: M::VirtAddr,
         size: usize,
         flags: MappingFlags,
-        pt: &mut PageTable<H>,
+        pt: &mut PageTable64<M, PTE, H>,
         populate: bool,
     ) -> bool {
         debug!(
             "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
             start,
-            start + size,
+            start.add(size),
             flags,
             populate
         );
         if populate {
             // allocate all possible physical frames for populated mapping.
-            for addr in PageIter4K::new(start, start + size).unwrap() {
+
+            let mut start_addr = start;
+            let end_addr = start_addr.add(size);
+            // First try to allocate 1GB pages if the start address is aligned and
+            // the size is large enough.
+            if start_addr.is_aligned(PAGE_SIZE_1G) && size >= PAGE_SIZE_1G {
+                for addr in PageIter1G::new(start_addr, end_addr.align_down(PAGE_SIZE_1G)).unwrap()
+                {
+                    if H::alloc_frames(PAGE_SIZE_1G / PAGE_SIZE_4K, PAGE_SIZE_1G)
+                        .and_then(|frame| pt.map(addr, frame, PageSize::Size1G, flags).ok())
+                        .is_none()
+                    {
+                        return false;
+                    }
+                }
+                start_addr = end_addr.align_down(PAGE_SIZE_1G);
+            }
+
+            // Then try to allocate 2MB pages if the start address is aligned and
+            // the size is large enough.
+            if start_addr.is_aligned(PAGE_SIZE_2M) && size >= PAGE_SIZE_2M {
+                for addr in PageIter2M::new(start_addr, end_addr.align_down(PAGE_SIZE_2M)).unwrap()
+                {
+                    if H::alloc_frames(PAGE_SIZE_2M / PAGE_SIZE_4K, PAGE_SIZE_2M)
+                        .and_then(|frame| pt.map(addr, frame, PageSize::Size2M, flags).ok())
+                        .is_none()
+                    {
+                        return false;
+                    }
+                }
+                start_addr = end_addr.align_down(PAGE_SIZE_2M);
+            }
+
+            // Finally, map the rest with 4K pages.
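+            // Note: after the huge-page fast paths above, `start_addr` points at the
+            // first address not yet covered by a 1G/2M mapping, so this loop only has
+            // to fill in the remaining tail. It relies on the range being 4K-aligned,
+            // which `AddrSpace::map_alloc` checks before calling into this backend;
+            // otherwise the `unwrap` below would panic.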
+            for addr in PageIter4K::new(start_addr, end_addr).unwrap() {
                 if H::alloc_frame()
                     .and_then(|frame| pt.map(addr, frame, PageSize::Size4K, flags).ok())
                     .is_none()
@@ -55,13 +93,45 @@ impl<H: PagingHandler> Backend<H> {
 
     pub(crate) fn unmap_alloc(
         &self,
-        start: GuestPhysAddr,
+        start: M::VirtAddr,
         size: usize,
-        pt: &mut PageTable<H>,
+        pt: &mut PageTable64<M, PTE, H>,
         _populate: bool,
     ) -> bool {
-        debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
-        for addr in PageIter4K::new(start, start + size).unwrap() {
+        debug!("unmap_alloc: [{:#x}, {:#x})", start, start.add(size));
+
+        let mut addr = start;
+        while addr < start.add(size) {
+            if let Ok((frame, _flags, page_size)) = pt.query(addr) {
+                // Deallocate the physical frame if there is a mapping in the
+                // page table.
+                match page_size {
+                    PageSize::Size1G => {
+                        if !addr.is_aligned(PAGE_SIZE_1G) {
+                            return false;
+                        }
+                        H::dealloc_frames(frame, PAGE_SIZE_1G / PAGE_SIZE_4K);
+                    }
+                    PageSize::Size2M => {
+                        if !addr.is_aligned(PAGE_SIZE_2M) {
+                            return false;
+                        }
+                        H::dealloc_frames(frame, PAGE_SIZE_2M / PAGE_SIZE_4K);
+                    }
+                    PageSize::Size4K => {
+                        if !addr.is_aligned(PAGE_SIZE_4K) {
+                            return false;
+                        }
+                        H::dealloc_frame(frame);
+                    }
+                }
+                addr = addr.add(page_size as usize);
+            } else {
+                // It's fine if the page is not mapped, but we still have to step
+                // forward, otherwise this loop would never terminate.
+                addr = addr.add(PAGE_SIZE_4K);
+            }
+        }
+
+        for addr in PageIter4K::new(start, start.add(size)).unwrap() {
             if let Ok((frame, page_size, _)) = pt.unmap(addr) {
                 // Deallocate the physical frame if there is a mapping in the
                 // page table.
@@ -78,9 +148,9 @@ impl<H: PagingHandler> Backend<H> {
 
     pub(crate) fn handle_page_fault_alloc(
         &self,
-        vaddr: GuestPhysAddr,
+        vaddr: M::VirtAddr,
         orig_flags: MappingFlags,
-        pt: &mut PageTable<H>,
+        pt: &mut PageTable64<M, PTE, H>,
         populate: bool,
     ) -> bool {
         if populate {
diff --git a/src/address_space/backend/linear.rs b/src/address_space/backend/linear.rs
index 8a3e6af..c2e3099 100644
--- a/src/address_space/backend/linear.rs
+++ b/src/address_space/backend/linear.rs
@@ -1,38 +1,41 @@
-use memory_addr::PhysAddr;
-use page_table_multiarch::{MappingFlags, PagingHandler};
+use memory_addr::{MemoryAddr, PhysAddr};
+use page_table_multiarch::{GenericPTE, MappingFlags, PageTable64, PagingHandler, PagingMetaData};
 
 use super::Backend;
-use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};
 
-impl<H: PagingHandler> Backend<H> {
+impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Backend<M, PTE, H> {
     /// Creates a new linear mapping backend.
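+    ///
+    /// `allow_huge` controls whether the mapping may use huge pages (2M/1G) where
+    /// alignment and size permit; it is forwarded to `PageTable64::map_region`.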
- pub const fn new_linear(pa_va_offset: usize) -> Self { - Self::Linear { pa_va_offset } + pub const fn new_linear(pa_va_offset: usize, allow_huge: bool) -> Self { + Self::Linear { + pa_va_offset, + allow_huge, + } } pub(crate) fn map_linear( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, flags: MappingFlags, - pt: &mut PageTable, + pt: &mut PageTable64, + allow_huge: bool, pa_va_offset: usize, ) -> bool { - let pa_start = PhysAddr::from(start.as_usize() - pa_va_offset); + let pa_start = PhysAddr::from(start.into() - pa_va_offset); debug!( "map_linear: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}", start, - start + size, + start.add(size), pa_start, pa_start + size, flags ); pt.map_region( start, - |va| PhysAddr::from(va.as_usize() - pa_va_offset), + |va| PhysAddr::from(va.into() - pa_va_offset), size, flags, - false, + allow_huge, false, ) .is_ok() @@ -40,12 +43,12 @@ impl Backend { pub(crate) fn unmap_linear( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, - pt: &mut PageTable, + pt: &mut PageTable64, _pa_va_offset: usize, ) -> bool { - debug!("unmap_linear: [{:#x}, {:#x})", start, start + size); + debug!("unmap_linear: [{:#x}, {:#x})", start, start.add(size)); pt.unmap_region(start, size, true).is_ok() } } diff --git a/src/address_space/backend/mod.rs b/src/address_space/backend/mod.rs index 8955d91..af4baf4 100644 --- a/src/address_space/backend/mod.rs +++ b/src/address_space/backend/mod.rs @@ -1,9 +1,8 @@ //! Memory mapping backends. +use memory_addr::MemoryAddr; use memory_set::MappingBackend; -use page_table_multiarch::{MappingFlags, PagingHandler}; - -use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable}; +use page_table_multiarch::{GenericPTE, MappingFlags, PageTable64, PagingHandler, PagingMetaData}; mod alloc; mod linear; @@ -16,7 +15,7 @@ mod linear; /// contiguous and their addresses should be known when creating the mapping. /// - **Allocation**: used in general, or for lazy mappings. The target physical /// frames are obtained from the global allocator. -pub enum Backend { +pub enum Backend { /// Linear mapping backend. /// /// The offset between the virtual address and the physical address is @@ -25,6 +24,7 @@ pub enum Backend { Linear { /// `vaddr - paddr`. pa_va_offset: usize, + allow_huge: bool, }, /// Allocation mapping backend. /// @@ -36,14 +36,20 @@ pub enum Backend { /// Whether to populate the physical frames when creating the mapping. populate: bool, /// A phantom data for the paging handler. - _phantom: core::marker::PhantomData, + _phantom: core::marker::PhantomData<(M, PTE, H)>, }, } -impl Clone for Backend { +impl Clone for Backend { fn clone(&self) -> Self { match *self { - Self::Linear { pa_va_offset } => Self::Linear { pa_va_offset }, + Self::Linear { + pa_va_offset, + allow_huge, + } => Self::Linear { + pa_va_offset, + allow_huge, + }, Self::Alloc { populate, .. 
} => Self::Alloc { populate, _phantom: core::marker::PhantomData, @@ -52,49 +58,61 @@ impl Clone for Backend { } } -impl MappingBackend for Backend { - type Addr = GuestPhysAddr; +impl MappingBackend for Backend { + type Addr = M::VirtAddr; type Flags = MappingFlags; - type PageTable = PageTable; + type PageTable = PageTable64; fn map( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, flags: MappingFlags, - pt: &mut PageTable, + pt: &mut Self::PageTable, ) -> bool { match *self { - Self::Linear { pa_va_offset } => self.map_linear(start, size, flags, pt, pa_va_offset), + Self::Linear { + pa_va_offset, + allow_huge, + } => self.map_linear(start, size, flags, pt, allow_huge, pa_va_offset), Self::Alloc { populate, .. } => self.map_alloc(start, size, flags, pt, populate), } } - fn unmap(&self, start: GuestPhysAddr, size: usize, pt: &mut PageTable) -> bool { + fn unmap(&self, start: M::VirtAddr, size: usize, pt: &mut Self::PageTable) -> bool { match *self { - Self::Linear { pa_va_offset } => self.unmap_linear(start, size, pt, pa_va_offset), + Self::Linear { pa_va_offset, .. } => self.unmap_linear(start, size, pt, pa_va_offset), Self::Alloc { populate, .. } => self.unmap_alloc(start, size, pt, populate), } } fn protect( &self, - _start: GuestPhysAddr, - _size: usize, - _new_flags: MappingFlags, - _page_table: &mut PageTable, + start: M::VirtAddr, + size: usize, + new_flags: MappingFlags, + page_table: &mut Self::PageTable, ) -> bool { - // a stub here - true + debug!( + "protect_region({:#x}) [{:#x}, {:#x}) {:?}", + page_table.root_paddr(), + start, + start.add(size), + new_flags, + ); + page_table + .protect_region(start, size, new_flags, true) + .map(|tlb| tlb.ignore()) + .is_ok() } } -impl Backend { +impl Backend { pub(crate) fn handle_page_fault( &self, - vaddr: GuestPhysAddr, + vaddr: M::VirtAddr, orig_flags: MappingFlags, - page_table: &mut PageTable, + page_table: &mut PageTable64, ) -> bool { match *self { Self::Linear { .. } => false, // Linear mappings should not trigger page faults. diff --git a/src/address_space/mod.rs b/src/address_space/mod.rs index 993de01..86d17cd 100644 --- a/src/address_space/mod.rs +++ b/src/address_space/mod.rs @@ -2,12 +2,13 @@ use alloc::vec::Vec; use core::fmt; use axerrno::{AxError, AxResult, ax_err}; -use memory_addr::{MemoryAddr, PhysAddr, is_aligned_4k}; +use memory_addr::{AddrRange, MemoryAddr, PAGE_SIZE_4K, PhysAddr, is_aligned_4k}; use memory_set::{MemoryArea, MemorySet}; -use page_table_multiarch::PagingHandler; +use page_table_multiarch::{ + GenericPTE, PageSize, PageTable64, PagingError, PagingHandler, PagingMetaData, +}; -use crate::npt::NestedPageTable as PageTable; -use crate::{GuestPhysAddr, GuestPhysAddrRange, mapping_err_to_ax_err}; +use crate::mapping_err_to_ax_err; mod backend; @@ -15,20 +16,20 @@ pub use backend::Backend; pub use page_table_entry::MappingFlags; /// The virtual memory address space. -pub struct AddrSpace { - va_range: GuestPhysAddrRange, - areas: MemorySet>, - pt: PageTable, +pub struct AddrSpace { + va_range: AddrRange, + areas: MemorySet>, + pt: PageTable64, } -impl AddrSpace { +impl AddrSpace { /// Returns the address space base. - pub const fn base(&self) -> GuestPhysAddr { + pub const fn base(&self) -> M::VirtAddr { self.va_range.start } /// Returns the address space end. - pub const fn end(&self) -> GuestPhysAddr { + pub const fn end(&self) -> M::VirtAddr { self.va_range.end } @@ -38,7 +39,7 @@ impl AddrSpace { } /// Returns the reference to the inner page table. 
- pub const fn page_table(&self) -> &PageTable { + pub const fn page_table(&self) -> &PageTable64 { &self.pt } @@ -48,17 +49,17 @@ impl AddrSpace { } /// Checks if the address space contains the given address range. - pub fn contains_range(&self, start: GuestPhysAddr, size: usize) -> bool { + pub fn contains_range(&self, start: M::VirtAddr, size: usize) -> bool { self.va_range - .contains_range(GuestPhysAddrRange::from_start_size(start, size)) + .contains_range(AddrRange::from_start_size(start, size)) } /// Creates a new empty address space. - pub fn new_empty(base: GuestPhysAddr, size: usize) -> AxResult { + pub fn new_empty(base: M::VirtAddr, size: usize) -> AxResult { Ok(Self { - va_range: GuestPhysAddrRange::from_start_size(base, size), + va_range: AddrRange::from_start_size(base, size), areas: MemorySet::new(), - pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?, + pt: PageTable64::::try_new().map_err(|_| AxError::NoMemory)?, }) } @@ -69,22 +70,35 @@ impl AddrSpace { /// The `flags` parameter indicates the mapping permissions and attributes. pub fn map_linear( &mut self, - start_vaddr: GuestPhysAddr, + start_vaddr: M::VirtAddr, start_paddr: PhysAddr, size: usize, flags: MappingFlags, + allow_huge: bool, ) -> AxResult { if !self.contains_range(start_vaddr, size) { - return ax_err!(InvalidInput, "address out of range"); + return ax_err!( + InvalidInput, + alloc::format!( + "[{:?} {:?}]address out of range", + start_vaddr, + start_vaddr.add(size) + ) + ); } if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) { return ax_err!(InvalidInput, "address not aligned"); } - let offset = start_vaddr.as_usize() - start_paddr.as_usize(); - let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset)); + let offset = start_vaddr.into() - start_paddr.as_usize(); + let area = MemoryArea::new( + start_vaddr, + size, + flags, + Backend::new_linear(offset, allow_huge), + ); self.areas - .map(area, &mut self.pt, false) + .map(area, &mut self.pt, true) .map_err(mapping_err_to_ax_err)?; Ok(()) } @@ -96,7 +110,7 @@ impl AddrSpace { /// The `flags` parameter indicates the mapping permissions and attributes. pub fn map_alloc( &mut self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, flags: MappingFlags, populate: bool, @@ -104,7 +118,7 @@ impl AddrSpace { if !self.contains_range(start, size) { return ax_err!( InvalidInput, - alloc::format!("address [{:?}~{:?}] out of range", start, start + size).as_str() + alloc::format!("address [{:?}~{:?}] out of range", start, start.add(size)).as_str() ); } if !start.is_aligned_4k() || !is_aligned_4k(size) { @@ -118,8 +132,31 @@ impl AddrSpace { Ok(()) } + pub fn protect(&mut self, start: M::VirtAddr, size: usize, flags: MappingFlags) -> AxResult { + if !self.contains_range(start, size) { + return ax_err!(InvalidInput, "address out of range"); + } + if !start.is_aligned_4k() || !is_aligned_4k(size) { + return ax_err!(InvalidInput, "address not aligned"); + } + + let update_flags = |new_flags: MappingFlags| { + move |old_flags: MappingFlags| -> Option { + if old_flags == new_flags { + return None; + } + Some(new_flags) + } + }; + + self.areas + .protect(start, size, update_flags(flags), &mut self.pt) + .map_err(mapping_err_to_ax_err)?; + Ok(()) + } + /// Removes mappings within the specified virtual address range. 
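+    ///
+    /// Returns `InvalidInput` if the range does not lie within this address space.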
- pub fn unmap(&mut self, start: GuestPhysAddr, size: usize) -> AxResult { + pub fn unmap(&mut self, start: M::VirtAddr, size: usize) -> AxResult { if !self.contains_range(start, size) { return ax_err!(InvalidInput, "address out of range"); } @@ -144,7 +181,7 @@ impl AddrSpace { /// /// Returns `true` if the page fault is handled successfully (not a real /// fault). - pub fn handle_page_fault(&mut self, vaddr: GuestPhysAddr, access_flags: MappingFlags) -> bool { + pub fn handle_page_fault(&mut self, vaddr: M::VirtAddr, access_flags: MappingFlags) -> bool { if !self.va_range.contains(vaddr) { return false; } @@ -163,17 +200,11 @@ impl AddrSpace { /// Translates the given `VirtAddr` into `PhysAddr`. /// /// Returns `None` if the virtual address is out of range or not mapped. - pub fn translate(&self, vaddr: GuestPhysAddr) -> Option { + pub fn translate(&self, vaddr: M::VirtAddr) -> Option<(PhysAddr, MappingFlags, PageSize)> { if !self.va_range.contains(vaddr) { return None; } - self.pt - .query(vaddr) - .map(|(phys_addr, _, _)| { - debug!("vaddr {:?} translate to {:?}", vaddr, phys_addr); - phys_addr - }) - .ok() + self.pt.query(vaddr).ok() } /// Translate&Copy the given `VirtAddr` with LENGTH len to a mutable u8 Vec through page table. @@ -181,7 +212,7 @@ impl AddrSpace { /// Returns `None` if the virtual address is out of range or not mapped. pub fn translated_byte_buffer( &self, - vaddr: GuestPhysAddr, + vaddr: M::VirtAddr, len: usize, ) -> Option> { if !self.va_range.contains(vaddr) { @@ -198,7 +229,7 @@ impl AddrSpace { } let mut start = vaddr; - let end = start + len; + let end = start.add(len); debug!( "start {:?} end {:?} area size {:#x}", @@ -210,13 +241,13 @@ impl AddrSpace { let mut v = Vec::new(); while start < end { let (start_paddr, _, page_size) = self.page_table().query(start).unwrap(); - let mut end_va = start.align_down(page_size) + page_size.into(); + let mut end_va = start.align_down(page_size).add(page_size.into()); end_va = end_va.min(end); v.push(unsafe { core::slice::from_raw_parts_mut( H::phys_to_virt(start_paddr).as_mut_ptr(), - (end_va - start.as_usize()).into(), + (end_va.sub_addr(start)).into(), ) }); start = end_va; @@ -231,7 +262,7 @@ impl AddrSpace { /// and returns the size of the `MemoryArea` corresponding to the target vaddr. /// /// Returns `None` if the virtual address is out of range or not mapped. - pub fn translate_and_get_limit(&self, vaddr: GuestPhysAddr) -> Option<(PhysAddr, usize)> { + pub fn translate_and_get_limit(&self, vaddr: M::VirtAddr) -> Option<(PhysAddr, usize)> { if !self.va_range.contains(vaddr) { return None; } @@ -246,7 +277,122 @@ impl AddrSpace { } } -impl fmt::Debug for AddrSpace { +impl AddrSpace { + pub fn clone(&self) -> AxResult { + let mut cloned_aspace = Self::new_empty(self.base(), self.size())?; + + for area in self.areas.iter() { + let new_backend = area.backend().clone(); + let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), new_backend); + + cloned_aspace + .areas + .map(new_area, &mut cloned_aspace.pt, false) + .map_err(mapping_err_to_ax_err)?; + + match area.backend() { + Backend::Alloc { .. } => { + // Alloc mappings are cloned. + // They are created in the new address space. + // The physical frames are copied to the new address space. 
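+                    //
+                    // Walk the source page table mapping by mapping: for each mapped
+                    // page, ensure the same address is mapped in the clone (faulting it
+                    // in on demand), then copy the page contents. Unmapped holes are
+                    // skipped in 4K steps.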
+                    let mut addr = area.start();
+                    let end = addr.add(area.size());
+                    while addr < end {
+                        match self.pt.query(addr) {
+                            Ok((phys_addr, _, page_size)) => {
+                                if !addr.is_aligned(page_size as usize) {
+                                    warn!(
+                                        "AddrSpace clone: addr {:#x} is not aligned to page size {:?}",
+                                        addr, page_size
+                                    );
+                                }
+                                let mut end_va = addr.align_down(page_size).add(page_size.into());
+                                end_va = end_va.min(end);
+
+                                // Copy the physical frames to the new address space.
+                                let new_phys_addr = match cloned_aspace.pt.query(addr) {
+                                    Ok((new_phys_addr, _, new_pgsize)) => {
+                                        if page_size != new_pgsize {
+                                            warn!(
+                                                "AddrSpace clone: addr {:#x} page size mismatch {:?} != {:?}",
+                                                addr, page_size, new_pgsize
+                                            );
+                                        }
+                                        new_phys_addr
+                                    }
+                                    Err(PagingError::NotMapped) => {
+                                        // The address is not mapped in the new address space yet,
+                                        // take the page fault path to map it.
+                                        if !cloned_aspace.handle_page_fault(addr, area.flags()) {
+                                            warn!(
+                                                "AddrSpace clone: handle_page_fault failed for addr {:#x}",
+                                                addr
+                                            );
+                                        }
+
+                                        match cloned_aspace.pt.query(addr) {
+                                            Ok((new_phys_addr, _, new_pgsize)) => {
+                                                if page_size != new_pgsize {
+                                                    warn!(
+                                                        "AddrSpace clone: addr {:#x} page size mismatch {:?} != {:?}",
+                                                        addr, page_size, new_pgsize
+                                                    );
+                                                }
+                                                new_phys_addr
+                                            }
+                                            Err(_) => {
+                                                warn!(
+                                                    "AddrSpace clone: addr {:#x} is still not mapped",
+                                                    addr
+                                                );
+                                                // Skip this page to avoid looping on it forever.
+                                                addr = end_va;
+                                                continue;
+                                            }
+                                        }
+                                    }
+                                    Err(_) => {
+                                        warn!("AddrSpace clone: failed to query addr {:#x}", addr);
+                                        // Skip this page to avoid looping on it forever.
+                                        addr = end_va;
+                                        continue;
+                                    }
+                                };
+
+                                unsafe {
+                                    core::ptr::copy_nonoverlapping(
+                                        H::phys_to_virt(phys_addr).as_ptr(),
+                                        H::phys_to_virt(new_phys_addr).as_mut_ptr(),
+                                        page_size as usize,
+                                    )
+                                };
+
+                                addr = end_va;
+                            }
+                            Err(PagingError::NotMapped) => {
+                                // The address is not mapped in the original address space.
+                                // Step forward to the next 4K page.
+                                addr = addr.add(PAGE_SIZE_4K);
+                            }
+                            Err(_) => {
+                                warn!("AddrSpace clone: failed to query addr {:#x}", addr);
+                                addr = addr.add(PAGE_SIZE_4K);
+                            }
+                        }
+                    }
+                }
+                Backend::Linear { .. } => {
+                    // Linear mappings need no frame copies: mapping the area in the
+                    // new address space is enough, as it refers to the same physical
+                    // region.
+                }
+            }
+        }
+
+        Ok(cloned_aspace)
+    }
+
+    pub fn clone_cow(&mut self) -> AxResult<Self> {
+        unimplemented!()
+    }
+}
+
+#[allow(unused)]
+impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> AddrSpace<M, PTE, H> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_struct("AddrSpace")
             .field("va_range", &self.va_range)
@@ -256,7 +402,8 @@ impl<H: PagingHandler> fmt::Debug for AddrSpace<H> {
     }
 }
 
-impl<H: PagingHandler> Drop for AddrSpace<H> {
+#[allow(unused)]
+impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> AddrSpace<M, PTE, H> {
     fn drop(&mut self) {
         self.clear();
     }
diff --git a/src/lib.rs b/src/lib.rs
index b3c9b58..2e9e9fa 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,7 +9,9 @@ extern crate alloc;
 
 mod addr;
 mod address_space;
-mod npt;
+/// TODO: this should either be merged into `page_table_multiarch` behind a
+/// `nested_page_table` feature, or split into a new crate such as `nested_page_table_multiarch`.
+pub mod npt;
 
 pub use addr::*;
 pub use address_space::*;
@@ -34,3 +36,19 @@ fn mapping_err_to_ax_err(err: MappingError) -> AxError {
         MappingError::BadState => AxError::BadState,
     }
 }
+
+pub trait EPTTranslator {
+    /// Converts a guest physical address to a host physical address
+    /// through Nested Page Table (NPT) translation.
+    ///
+    /// # Parameters
+    ///
+    /// * `gpa` - The guest physical address to convert.
+    ///
+    /// # Returns
+    ///
+    /// * `HostPhysAddr` - The corresponding host physical address.
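+    ///
+    /// The mapping flags and page size of the final translation are returned
+    /// alongside the host physical address; implementations should return
+    /// `None` when `gpa` has no valid NPT mapping.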
+    fn guest_phys_to_host_phys(
+        gpa: GuestPhysAddr,
+    ) -> Option<(HostPhysAddr, MappingFlags, page_table_multiarch::PageSize)>;
+}
diff --git a/src/npt/arch/x86_64.rs b/src/npt/arch/x86_64.rs
index ae57a11..7dc8cbe 100644
--- a/src/npt/arch/x86_64.rs
+++ b/src/npt/arch/x86_64.rs
@@ -174,10 +174,41 @@ impl PagingMetaData for ExtendedPageTableMetadata {
 
     type VirtAddr = GuestPhysAddr;
 
-    fn flush_tlb(_vaddr: Option<Self::VirtAddr>) {
-        todo!()
+    fn flush_tlb(vaddr: Option<Self::VirtAddr>) {
+        debug!("flush_tlb {:?}", vaddr);
+        unsafe {
+            invept(InvEptType::Global);
+        }
     }
 }
 
 /// The VMX extended page table. (SDM Vol. 3C, Section 29.3)
 pub type ExtendedPageTable<H> = PageTable64<ExtendedPageTableMetadata, EPTEntry, H>;
+
+/// INVEPT type. (SDM Vol. 3C, Section 30.3)
+#[repr(u64)]
+#[derive(Debug)]
+#[allow(dead_code)]
+pub enum InvEptType {
+    /// The logical processor invalidates all mappings associated with bits
+    /// 51:12 of the EPT pointer (EPTP) specified in the INVEPT descriptor.
+    /// It may invalidate other mappings as well.
+    SingleContext = 1,
+    /// The logical processor invalidates mappings associated with all EPTPs.
+    Global = 2,
+}
+
+/// Invalidate Translations Derived from EPT. (SDM Vol. 3C, Section 30.3)
+///
+/// Invalidates mappings in the translation lookaside buffers (TLBs) and
+/// paging-structure caches that were derived from extended page tables (EPT).
+/// (See Chapter 28, “VMX Support for Address Translation”.) Invalidation is
+/// based on the INVEPT type specified in the register operand and the INVEPT
+/// descriptor specified in the memory operand.
+pub unsafe fn invept(inv_type: InvEptType) {
+    // 128-bit INVEPT descriptor: [EPTP, reserved].
+    let invept_desc: [u64; 2] = [0, 0];
+    unsafe {
+        core::arch::asm!("invept {0}, [{1}]", in(reg) inv_type as u64, in(reg) &invept_desc);
+    }
+}
diff --git a/src/npt/mod.rs b/src/npt/mod.rs
index 14d7d2d..9a201fa 100644
--- a/src/npt/mod.rs
+++ b/src/npt/mod.rs
@@ -2,6 +2,8 @@ cfg_if::cfg_if! {
     if #[cfg(target_arch = "x86_64")] {
         /// The architecture-specific nested page table for two-stage address translation.
        pub type NestedPageTable<H> = arch::ExtendedPageTable<H>;
+        pub type EPTEntry = arch::EPTEntry;
+        pub type EPTMetadata = arch::ExtendedPageTableMetadata;
     } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] {
         /// The architecture-specific page table.
         pub type NestedPageTable<H> = arch::NestedPageTable<H>;