diff --git a/arch/x86/kvm/pkvm/memory.h b/arch/x86/kvm/pkvm/memory.h
index 226f7955c4ce..771a866cdb58 100644
--- a/arch/x86/kvm/pkvm/memory.h
+++ b/arch/x86/kvm/pkvm/memory.h
@@ -44,6 +44,12 @@ extern u64 __pkvm_vmemmap;
 #define pkvm_page_to_virt(page) __pkvm_va(pkvm_page_to_phys(page))
 #define pkvm_page_to_pool(page) (((struct pkvm_page *)page)->pool)
 
+/* Caution: __st is evaluated twice. */
+#define for_each_pkvm_page(__p, __st, __sz) \
+	for (struct pkvm_page *__p = pkvm_phys_to_page(PAGE_ALIGN_DOWN(__st)), \
+	     *__e = pkvm_phys_to_page(PAGE_ALIGN((__st) + (__sz))); \
+	     __p < __e; __p++)
+
 /*
  * Refcounting for 'struct pkvm_page'.
  * pkvm_pool::lock must be held if atomic access to the refcount is required.
diff --git a/arch/x86/kvm/pkvm/mmu.c b/arch/x86/kvm/pkvm/mmu.c
index a9cbbeab62e9..93fcdfc2cb57 100644
--- a/arch/x86/kvm/pkvm/mmu.c
+++ b/arch/x86/kvm/pkvm/mmu.c
@@ -181,7 +181,7 @@ static int fix_hyp_mmu_page_refcnt(void)
 	 * Calculate the max address space, then walk the [0, size) address
 	 * range to fixup refcount of every page-table page.
	 */
-	size = hyp_mmu.pgt_ops->level_to_size(hyp_mmu.cap.level + 1);
+	size = pkvm_pgtable_max_size(&hyp_mmu);
 #endif
 
 	return pkvm_pgtable_walk(&hyp_mmu, 0, size, &walker);
@@ -190,26 +190,17 @@ static int fix_hyp_mmu_page_refcnt(void)
 static void set_host_mem_pgstate(unsigned long phys, unsigned long size,
				 enum pkvm_page_state pgstate)
 {
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
-
-	for (phys = PAGE_ALIGN_DOWN(phys); phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
+	for_each_pkvm_page(page, phys, size)
 		page->host_state = pgstate;
-	}
 }
 
 static int check_host_mem_pgstate(unsigned long phys, unsigned long size,
				  enum pkvm_page_state pgstate)
 {
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
-
 	if (!is_memory_range(phys, size))
		return -EINVAL;
 
-	for (phys = PAGE_ALIGN_DOWN(phys); phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
+	for_each_pkvm_page(page, phys, size) {
 		if (page->host_state != pgstate)
			return -EPERM;
 	}
@@ -278,6 +269,12 @@ static int check_page_owner_and_state(struct pkvm_pgtable *pgt, unsigned long va
 	return pkvm_pgtable_walk(pgt, vaddr, size, &walker);
 }
 
+static u64 host_mmu_pte_prot(bool mmio)
+{
+	return host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
+	       host_mmu.pgt_ops->calc_pte_memtype(mmio);
+}
+
 static int fix_host_mmu_pgstate_walker(struct pkvm_pgtable_visit_ctx *ctx,
				       unsigned long walk_flags,
				       void *const arg)
@@ -329,23 +326,21 @@ static int fix_host_mmu_pgstate_walker(struct pkvm_pgtable_visit_ctx *ctx,
 
 static int fix_host_mmu_pgstate(void)
 {
-	unsigned long size = host_mmu.pgt_ops->level_to_size(host_mmu.cap.level + 1);
 	struct pkvm_pgtable_walker walker = {
		.cb = fix_host_mmu_pgstate_walker,
		.arg = NULL,
		.walk_flags = PKVM_PGTABLE_WALK_LEAF,
	};
 
-	return pkvm_pgtable_walk(&host_mmu, 0, size, &walker);
+	return pkvm_pgtable_walk(&host_mmu, 0, pkvm_pgtable_max_size(&host_mmu),
+				 &walker);
 }
 
 static int host_mmu_map(unsigned long phys, unsigned long size, bool mmio)
 {
-	u64 prot = host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
-		   host_mmu.pgt_ops->calc_pte_memtype(mmio);
-
 	/* The vaddr == phys for the host MMU */
-	return pkvm_pgtable_map(&host_mmu, phys, phys, size, prot);
+	return pkvm_pgtable_map(&host_mmu, phys, phys, size,
+				host_mmu_pte_prot(mmio));
 }
 
 int pkvm_hyp_mmu_init(void *pool_base, unsigned long pool_pages)
@@ -541,8 +536,6 @@ int pkvm_host_donate_hyp(unsigned long phys, unsigned long size, bool clear)
  */
 void pkvm_hyp_donate_host(unsigned long phys, unsigned long size, bool clear)
 {
-	u64 prot = host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
-		   host_mmu.pgt_ops->calc_pte_memtype(false);
 	void *va = __pkvm_va(phys);
 	int ret;
 
@@ -573,7 +566,8 @@ void pkvm_hyp_donate_host(unsigned long phys, unsigned long size, bool clear)
	 * the page state and the mapping, which may lead to unexpected
	 * behavior. So panic if it fails.
	 */
-	BUG_ON(ret = pkvm_pgtable_map(&host_mmu, phys, phys, size, prot));
+	BUG_ON(ret = pkvm_pgtable_map(&host_mmu, phys, phys, size,
+				      host_mmu_pte_prot(false)));
 	set_host_mem_pgstate(phys, size, PKVM_PAGE_OWNED);
 
 unlock:
@@ -611,8 +605,7 @@ void pkvm_hyp_donate_host(unsigned long phys, unsigned long size, bool clear)
  */
 int pkvm_hyp_donate_host_mmio_locked(unsigned long phys, unsigned long size)
 {
-	u64 prot = host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
-		   host_mmu.pgt_ops->calc_pte_memtype(true) |
+	u64 prot = host_mmu_pte_prot(true) |
		   host_mmu.pgt_ops->pte_mk_pgstate(PKVM_PAGE_OWNED);
 	int ret;
 
@@ -647,9 +640,6 @@ int pkvm_hyp_donate_host_mmio_locked(unsigned long phys, unsigned long size)
  */
 int pkvm_host_share_hyp(unsigned long phys, unsigned long size)
 {
-	unsigned long start = PAGE_ALIGN_DOWN(phys);
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
 	int ret;
 
 	if (size == 0 || !is_memory_range(phys, size))
@@ -657,9 +647,7 @@ int pkvm_host_share_hyp(unsigned long phys, unsigned long size)
 
 	pkvm_host_mmu_lock();
 
-	for (phys = start; phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
-
+	for_each_pkvm_page(page, phys, size) {
 		switch (page->host_state) {
 		case PKVM_PAGE_OWNED:
			BUG_ON(page->host_share_hyp_count);
@@ -684,9 +672,7 @@ int pkvm_host_share_hyp(unsigned long phys, unsigned long size)
		}
 	}
 
-	for (phys = start; phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
-
+	for_each_pkvm_page(page, phys, size) {
		page->host_state = PKVM_PAGE_SHARED_OWNED;
		page->host_share_hyp_count++;
 	}
@@ -713,8 +699,6 @@ int pkvm_host_share_hyp(unsigned long phys, unsigned long size)
  */
 void pkvm_host_unshare_hyp(unsigned long phys, unsigned long size)
 {
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
 	int ret;
 
 	if (size == 0) {
@@ -728,9 +712,7 @@ void pkvm_host_unshare_hyp(unsigned long phys, unsigned long size)
 	if (ret)
		goto unlock;
 
-	for (phys = PAGE_ALIGN_DOWN(phys); phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
-
+	for_each_pkvm_page(page, phys, size) {
		/*
		 * Even if host_share_hyp_count is 0 because the page is
		 * shared with a guest, not with the hypervisor, it still
diff --git a/arch/x86/kvm/pkvm/pgtable.h b/arch/x86/kvm/pkvm/pgtable.h
index bac964989621..8b1cc6180c43 100644
--- a/arch/x86/kvm/pkvm/pgtable.h
+++ b/arch/x86/kvm/pkvm/pgtable.h
@@ -200,4 +200,13 @@ static inline enum pkvm_owner_id pkvm_pte_owner_id(struct pkvm_pgtable *pgt, voi
 	return FIELD_GET(PKVM_INVALID_PTE_OWNER_MASK, pgt->pgt_ops->pte_get(ptep));
 }
 
+/*
+ * Return the max size of the virtual address space that can be
+ * mapped by the page table @pgt.
+ */
+static inline unsigned long pkvm_pgtable_max_size(struct pkvm_pgtable *pgt)
+{
+	return pgt->pgt_ops->level_to_size(pgt->cap.level + 1);
+}
+
 #endif /* __PKVM_X86_PGTABLE_H */
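
A usage note on the new iterator: because __st feeds both the start page and the
end bound, for_each_pkvm_page() evaluates it twice, as the caution comment in
memory.h says. A sketch of what that means at a call site; pop_next_phys() is a
hypothetical helper invented here purely to illustrate the hazard:

	/* Fine: phys and size are plain locals, double evaluation is harmless. */
	for_each_pkvm_page(page, phys, size)
		page->host_state = pgstate;

	/*
	 * Broken: pop_next_phys() has side effects, so it would run once for
	 * the start page and again for the end bound, and the loop would
	 * cover a range that starts and ends in different places.
	 */
	for_each_pkvm_page(page, pop_next_phys(), size)
		page->host_state = pgstate;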
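
On pkvm_pgtable_max_size(): the helper presumes level_to_size() returns the span
mapped by a single entry at the given level, so asking for cap.level + 1 yields
the reach of the entire root table. A sketch of the arithmetic, assuming x86
4 KiB pages with 9 address bits decoded per level (example_level_to_size() is
illustrative only, not the actual pgt_ops callback):

	/* Span of one entry: 4 KiB at level 1, 2 MiB at level 2, 1 GiB at level 3... */
	static unsigned long example_level_to_size(int level)
	{
		return 1UL << (PAGE_SHIFT + 9 * (level - 1));
	}

Under that assumption, a 4-level table (cap.level == 4) gives level_to_size(5)
== 1UL << 48, i.e. the full 256 TiB of virtual address space the root table can
map, which is exactly the [0, size) range the two fixup walkers want to cover.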
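
On host_mmu_pte_prot(): the helper folds together the RWX permissions and the
memtype that were previously open-coded at three call sites (host_mmu_map(),
pkvm_hyp_donate_host() and pkvm_hyp_donate_host_mmio_locked()). Note the
asymmetry the patch preserves: for RAM the page state is tracked via
set_host_mem_pgstate(), while for MMIO — which presumably has no struct
pkvm_page backing, given the is_memory_range() checks above — the state is
stamped directly into the PTE. Hypothetical call sites mirroring the two
patterns in the diff:

	/* RAM: identity-mapped, state kept in the vmemmap, not the PTE. */
	ret = pkvm_pgtable_map(&host_mmu, phys, phys, size,
			       host_mmu_pte_prot(false));

	/* MMIO: ownership state carried by the PTE itself. */
	ret = pkvm_pgtable_map(&host_mmu, phys, phys, size,
			       host_mmu_pte_prot(true) |
			       host_mmu.pgt_ops->pte_mk_pgstate(PKVM_PAGE_OWNED));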