6 changes: 6 additions & 0 deletions arch/x86/kvm/pkvm/memory.h
@@ -44,6 +44,12 @@ extern u64 __pkvm_vmemmap;
 #define pkvm_page_to_virt(page)	__pkvm_va(pkvm_page_to_phys(page))
 #define pkvm_page_to_pool(page)	(((struct pkvm_page *)page)->pool)
 
+/* Caution: __st is evaluated twice. */
+#define for_each_pkvm_page(__p, __st, __sz)				\
+	for (struct pkvm_page *__p = pkvm_phys_to_page(PAGE_ALIGN_DOWN(__st)), \
+	     *__e = pkvm_phys_to_page(PAGE_ALIGN((__st) + (__sz)));	\
+	     __p < __e; __p++)
+
 /*
  * Refcounting for 'struct pkvm_page'.
  * pkvm_pool::lock must be held if atomic access to the refcount is required.
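The iterator takes the raw (phys, size) pair and performs the PAGE_ALIGN_DOWN/PAGE_ALIGN rounding itself, so callers no longer open-code the bounds. A usage sketch follows; mark_owned() and pop_next_phys() are hypothetical helpers invented for illustration, and the surrounding pKVM types are assumed:

	/*
	 * Safe: phys and size are plain locals, so evaluating __st
	 * twice is harmless.
	 */
	static void mark_owned(unsigned long phys, unsigned long size)
	{
		for_each_pkvm_page(page, phys, size)
			page->host_state = PKVM_PAGE_OWNED;
	}

	/*
	 * The hazard the "evaluated twice" caution warns about: an
	 * __st expression with side effects, e.g. a hypothetical
	 * pop_next_phys(), is expanded once into the start bound and
	 * once into the end bound, so the two calls may disagree.
	 * Hoist it into a local first:
	 *
	 *	unsigned long phys = pop_next_phys();
	 *
	 *	for_each_pkvm_page(page, phys, size)
	 *		page->host_state = PKVM_PAGE_OWNED;
	 */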
56 changes: 19 additions & 37 deletions arch/x86/kvm/pkvm/mmu.c
@@ -181,7 +181,7 @@ static int fix_hyp_mmu_page_refcnt(void)
 	 * Calculate the max address space, then walk the [0, size) address
 	 * range to fixup refcount of every page-table page.
 	 */
-	size = hyp_mmu.pgt_ops->level_to_size(hyp_mmu.cap.level + 1);
+	size = pkvm_pgtable_max_size(&hyp_mmu);
 #endif
 
 	return pkvm_pgtable_walk(&hyp_mmu, 0, size, &walker);
@@ -190,26 +190,17 @@ static int fix_hyp_mmu_page_refcnt(void)
 static void set_host_mem_pgstate(unsigned long phys, unsigned long size,
 				 enum pkvm_page_state pgstate)
 {
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
-
-	for (phys = PAGE_ALIGN_DOWN(phys); phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
+	for_each_pkvm_page(page, phys, size)
 		page->host_state = pgstate;
-	}
 }
 
 static int check_host_mem_pgstate(unsigned long phys, unsigned long size,
 				  enum pkvm_page_state pgstate)
 {
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
-
 	if (!is_memory_range(phys, size))
 		return -EINVAL;
 
-	for (phys = PAGE_ALIGN_DOWN(phys); phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
+	for_each_pkvm_page(page, phys, size) {
 		if (page->host_state != pgstate)
 			return -EPERM;
 	}
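For reference, substituting the macro into the new set_host_mem_pgstate() body gives roughly the open-coded loop it replaces, with the same rounded-down start and rounded-up end:

	for (struct pkvm_page *page = pkvm_phys_to_page(PAGE_ALIGN_DOWN(phys)),
	     *__e = pkvm_phys_to_page(PAGE_ALIGN((phys) + (size)));
	     page < __e; page++)
		page->host_state = pgstate;

The visible differences are that phys is no longer clobbered by the loop, and that the termination test compares 'struct pkvm_page *' cursors rather than physical addresses, which is equivalent as long as pkvm_phys_to_page() is a linear mapping into the vmemmap (the __pkvm_vmemmap declaration above suggests it is).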
@@ -278,6 +269,12 @@ static int check_page_owner_and_state(struct pkvm_pgtable *pgt, unsigned long va
 	return pkvm_pgtable_walk(pgt, vaddr, size, &walker);
 }
 
+static u64 host_mmu_pte_prot(bool mmio)
+{
+	return host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
+	       host_mmu.pgt_ops->calc_pte_memtype(mmio);
+}
+
 static int fix_host_mmu_pgstate_walker(struct pkvm_pgtable_visit_ctx *ctx,
 				       unsigned long walk_flags,
 				       void *const arg)
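host_mmu_pte_prot() centralizes the RWX-plus-memtype computation that was previously duplicated across host_mmu_map(), pkvm_hyp_donate_host(), and pkvm_hyp_donate_host_mmio_locked(). For illustration only (this is not the pKVM implementation), EPT-style backings for the two ops it calls might look like:

	/* Hypothetical EPT leaf-entry encodings, per the Intel SDM. */
	static u64 ept_calc_pte_perm(bool r, bool w, bool x)
	{
		return (r ? BIT_ULL(0) : 0) |	/* read */
		       (w ? BIT_ULL(1) : 0) |	/* write */
		       (x ? BIT_ULL(2) : 0);	/* execute */
	}

	static u64 ept_calc_pte_memtype(bool mmio)
	{
		/* Memory type in bits 5:3: UC (0) for MMIO, WB (6) for RAM. */
		return (mmio ? 0ULL : 6ULL) << 3;
	}

With such ops, host_mmu_pte_prot(false) would compose an RWX write-back entry for RAM, and host_mmu_pte_prot(true) an RWX uncacheable entry for MMIO.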
@@ -329,23 +326,21 @@ static int fix_host_mmu_pgstate_walker(struct pkvm_pgtable_visit_ctx *ctx,
 
 static int fix_host_mmu_pgstate(void)
 {
-	unsigned long size = host_mmu.pgt_ops->level_to_size(host_mmu.cap.level + 1);
 	struct pkvm_pgtable_walker walker = {
 		.cb = fix_host_mmu_pgstate_walker,
 		.arg = NULL,
 		.walk_flags = PKVM_PGTABLE_WALK_LEAF,
 	};
 
-	return pkvm_pgtable_walk(&host_mmu, 0, size, &walker);
+	return pkvm_pgtable_walk(&host_mmu, 0, pkvm_pgtable_max_size(&host_mmu),
+				 &walker);
 }
 
 static int host_mmu_map(unsigned long phys, unsigned long size, bool mmio)
 {
-	u64 prot = host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
-		   host_mmu.pgt_ops->calc_pte_memtype(mmio);
-
 	/* The vaddr == phys for the host MMU */
-	return pkvm_pgtable_map(&host_mmu, phys, phys, size, prot);
+	return pkvm_pgtable_map(&host_mmu, phys, phys, size,
+				host_mmu_pte_prot(mmio));
 }
 
 int pkvm_hyp_mmu_init(void *pool_base, unsigned long pool_pages)
@@ -541,8 +536,6 @@ int pkvm_host_donate_hyp(unsigned long phys, unsigned long size, bool clear)
  */
 void pkvm_hyp_donate_host(unsigned long phys, unsigned long size, bool clear)
 {
-	u64 prot = host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
-		   host_mmu.pgt_ops->calc_pte_memtype(false);
 	void *va = __pkvm_va(phys);
 	int ret;
 
@@ -573,7 +566,8 @@ void pkvm_hyp_donate_host(unsigned long phys, unsigned long size, bool clear)
 	 * the page state and the mapping, which may lead to unexpected
 	 * behavior. So panic if it fails.
 	 */
-	BUG_ON(ret = pkvm_pgtable_map(&host_mmu, phys, phys, size, prot));
+	BUG_ON(ret = pkvm_pgtable_map(&host_mmu, phys, phys, size,
+				      host_mmu_pte_prot(false)));
 
 	set_host_mem_pgstate(phys, size, PKVM_PAGE_OWNED);
 unlock:
@@ -611,8 +605,7 @@ void pkvm_hyp_donate_host(unsigned long phys, unsigned long size, bool clear)
  */
 int pkvm_hyp_donate_host_mmio_locked(unsigned long phys, unsigned long size)
 {
-	u64 prot = host_mmu.pgt_ops->calc_pte_perm(true, true, true) |
-		   host_mmu.pgt_ops->calc_pte_memtype(true) |
+	u64 prot = host_mmu_pte_prot(true) |
 		   host_mmu.pgt_ops->pte_mk_pgstate(PKVM_PAGE_OWNED);
 	int ret;
 
@@ -647,19 +640,14 @@ int pkvm_hyp_donate_host_mmio_locked(unsigned long phys, unsigned long size)
  */
 int pkvm_host_share_hyp(unsigned long phys, unsigned long size)
 {
-	unsigned long start = PAGE_ALIGN_DOWN(phys);
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
 	int ret;
 
 	if (size == 0 || !is_memory_range(phys, size))
 		return -EINVAL;
 
 	pkvm_host_mmu_lock();
 
-	for (phys = start; phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
-
+	for_each_pkvm_page(page, phys, size) {
 		switch (page->host_state) {
 		case PKVM_PAGE_OWNED:
 			BUG_ON(page->host_share_hyp_count);
@@ -684,9 +672,7 @@ int pkvm_host_share_hyp(unsigned long phys, unsigned long size)
 		}
 	}
 
-	for (phys = start; phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
-
+	for_each_pkvm_page(page, phys, size) {
 		page->host_state = PKVM_PAGE_SHARED_OWNED;
 		page->host_share_hyp_count++;
 	}
@@ -713,8 +699,6 @@ int pkvm_host_share_hyp(unsigned long phys, unsigned long size)
  */
 void pkvm_host_unshare_hyp(unsigned long phys, unsigned long size)
 {
-	unsigned long end = PAGE_ALIGN(phys + size);
-	struct pkvm_page *page;
 	int ret;
 
 	if (size == 0) {
@@ -728,9 +712,7 @@ void pkvm_host_unshare_hyp(unsigned long phys, unsigned long size)
 	if (ret)
 		goto unlock;
 
-	for (phys = PAGE_ALIGN_DOWN(phys); phys < end; phys += PAGE_SIZE) {
-		page = pkvm_phys_to_page(phys);
-
+	for_each_pkvm_page(page, phys, size) {
 		/*
 		 * Even if host_share_hyp_count is 0 because the page is
 		 * shared with a guest, not with the hypervisor, it still
9 changes: 9 additions & 0 deletions arch/x86/kvm/pkvm/pgtable.h
@@ -200,4 +200,13 @@ static inline enum pkvm_owner_id pkvm_pte_owner_id(struct pkvm_pgtable *pgt, voi
 	return FIELD_GET(PKVM_INVALID_PTE_OWNER_MASK, pgt->pgt_ops->pte_get(ptep));
 }
 
+/*
+ * Return the max size of the virtual address space that can be
+ * mapped by the page table @pgt.
+ */
+static inline unsigned long pkvm_pgtable_max_size(struct pkvm_pgtable *pgt)
+{
+	return pgt->pgt_ops->level_to_size(pgt->cap.level + 1);
+}
+
 #endif /* __PKVM_X86_PGTABLE_H */
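A worked example of what this returns, under assumed numbers that are not taken from the pKVM sources (4 KiB pages, 9 translation bits per level, levels counted from 1 at the leaf):

	/* Hypothetical level_to_size() backing, for illustration only. */
	static unsigned long example_level_to_size(int level)
	{
		return 1UL << (12 + 9 * (level - 1));	/* 4 KiB leaf pages */
	}

	/*
	 * For a 4-level table (pgt->cap.level == 4):
	 *
	 *	example_level_to_size(4 + 1) == 1UL << 48 == 256 TiB,
	 *
	 * the whole address space the table can map, which is exactly
	 * the [0, size) range the refcount and pgstate fixup walkers
	 * in mmu.c need to cover.
	 */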