Message ID | 20230408040020.868929-3-npiggin@gmail.com
---|---
State | New
Series | KVM: selftests: add powerpc support
On Sat, Apr 08, 2023, Nicholas Piggin wrote:
> powerpc will require this to allocate MMU tables in guest memory that
> are aligned and larger than the base page size.
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>  .../selftests/kvm/include/kvm_util_base.h |  2 +
>  tools/testing/selftests/kvm/lib/kvm_util.c | 44 ++++++++++++-------
>  2 files changed, 29 insertions(+), 17 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index 16425da16861..8a27bd4111ff 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -679,6 +679,8 @@ const char *exit_reason_str(unsigned int exit_reason);
>  
>  vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
>                               uint32_t memslot);
> +vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
> +                                    vm_paddr_t paddr_min, uint32_t memslot);
>  vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
>                                vm_paddr_t paddr_min, uint32_t memslot);
>  vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 8ec20ac33de0..4f15bbbb8f5e 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -1898,6 +1898,7 @@ const char *exit_reason_str(unsigned int exit_reason)
>   * Input Args:
>   *   vm - Virtual Machine
>   *   num - number of pages
> + *   align - pages alignment
>   *   paddr_min - Physical address minimum
>   *   memslot - Memory region to allocate page from
>   *
> @@ -1911,7 +1912,7 @@ const char *exit_reason_str(unsigned int exit_reason)
>   * and their base address is returned. A TEST_ASSERT failure occurs if
>   * not enough pages are available at or above paddr_min.
>   */
> -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> +vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,

I'd prefer to use double underscores, even though they are imperfect, because
appending a single specifier always seems to result in the name becoming stale
sooner or later, e.g. when another param with a default is added.

And IIUC, PPC requires the page tables to be naturally aligned, so rather than
expose the inner helper and copy+paste the rather odd
KVM_GUEST_PAGE_TABLE_MIN_PADDR and vm->memslots[MEM_REGION_PT] stuff, what if
we instead have vm_alloc_page_table() deal with the alignment?  And provide a
PPC-specific wrapper so that other architectures don't need to manually
specify '1' page?  E.g.
---
 .../selftests/kvm/include/kvm_util_base.h  | 18 +++++++++++++++---
 tools/testing/selftests/kvm/lib/kvm_util.c | 14 ++++++++------
 .../selftests/kvm/lib/powerpc/processor.c  |  8 ++------
 3 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index f14a059f58fb..e52405c9fa8b 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -700,11 +700,23 @@ const char *exit_reason_str(unsigned int exit_reason);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
                              uint32_t memslot);
-vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
-                                    vm_paddr_t paddr_min, uint32_t memslot);
 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                               vm_paddr_t paddr_min, uint32_t memslot);
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
+
+vm_paddr_t __vm_alloc_page_table(struct kvm_vm *vm, size_t nr_pages);
+
+/*
+ * PowerPC conditionally needs to allocate multiple pages for each page table,
+ * all other architectures consume exactly one page per table.
+ */
+#if defined(__powerpc64__)
+#define vm_alloc_page_table __vm_alloc_page_table
+#else
+static inline vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+{
+        return __vm_alloc_page_table(vm, 1);
+}
+#endif
 
 /*
  * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 70f792ba444c..ffd18afe9725 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1946,8 +1946,9 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
-                                    vm_paddr_t paddr_min, uint32_t memslot)
+static vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+                                       size_t align, vm_paddr_t paddr_min,
+                                       uint32_t memslot)
 {
         struct userspace_mem_region *region;
         sparsebit_idx_t pg, base;
@@ -1992,7 +1993,7 @@ vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                               vm_paddr_t paddr_min, uint32_t memslot)
 {
-        return vm_phy_pages_alloc_align(vm, num, 1, paddr_min, memslot);
+        return __vm_phy_pages_alloc(vm, num, 1, paddr_min, memslot);
 }
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
@@ -2001,10 +2002,11 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
         return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
 }
 
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+vm_paddr_t __vm_alloc_page_table(struct kvm_vm *vm, size_t nr_pages)
 {
-        return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
-                                 vm->memslots[MEM_REGION_PT]);
+        return __vm_phy_pages_alloc(vm, nr_pages, nr_pages,
+                                    KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+                                    vm->memslots[MEM_REGION_PT]);
 }
 
 /*
diff --git a/tools/testing/selftests/kvm/lib/powerpc/processor.c b/tools/testing/selftests/kvm/lib/powerpc/processor.c
index 7052ce9b5029..57d64d281467 100644
--- a/tools/testing/selftests/kvm/lib/powerpc/processor.c
+++ b/tools/testing/selftests/kvm/lib/powerpc/processor.c
@@ -44,9 +44,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
         pgd_pages = (1UL << (RADIX_PGD_INDEX_SIZE + 3)) >> vm->page_shift;
         if (!pgd_pages)
                 pgd_pages = 1;
-        pgtb = vm_phy_pages_alloc_align(vm, pgd_pages, pgd_pages,
-                                        KVM_GUEST_PAGE_TABLE_MIN_PADDR,
-                                        vm->memslots[MEM_REGION_PT]);
+        pgtb = vm_alloc_page_table(vm, pgd_pages);
         vm->pgd = pgtb;
 
         /* Set the base page directory in the proc table */
@@ -168,9 +166,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
         pt_pages = (1ULL << (nls + 3)) >> vm->page_shift;
         if (!pt_pages)
                 pt_pages = 1;
-        pt = vm_phy_pages_alloc_align(vm, pt_pages, pt_pages,
-                                      KVM_GUEST_PAGE_TABLE_MIN_PADDR,
-                                      vm->memslots[MEM_REGION_PT]);
+        pt = vm_alloc_page_table(vm, pt_pages);
         pde = PDE_VALID | nls | pt;
         *pdep = cpu_to_be64(pde);
 }

base-commit: 15a281f5c83f34d4d1808e5f790403b0770c5e78
--
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 16425da16861..8a27bd4111ff 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -679,6 +679,8 @@ const char *exit_reason_str(unsigned int exit_reason);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
                              uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
+                                    vm_paddr_t paddr_min, uint32_t memslot);
 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                               vm_paddr_t paddr_min, uint32_t memslot);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 8ec20ac33de0..4f15bbbb8f5e 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1898,6 +1898,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  * Input Args:
  *   vm - Virtual Machine
  *   num - number of pages
+ *   align - pages alignment
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
  *
@@ -1911,7 +1912,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
                               vm_paddr_t paddr_min, uint32_t memslot)
 {
         struct userspace_mem_region *region;
@@ -1925,24 +1926,27 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                 paddr_min, vm->page_size);
 
         region = memslot2region(vm, memslot);
-        base = pg = paddr_min >> vm->page_shift;
-
-        do {
-                for (; pg < base + num; ++pg) {
-                        if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
-                                base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
-                                break;
+        base = paddr_min >> vm->page_shift;
+
+again:
+        base = (base + align - 1) & ~(align - 1);
+        for (pg = base; pg < base + num; ++pg) {
+                if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+                        base = sparsebit_next_set(region->unused_phy_pages, pg);
+                        if (!base) {
+                                fprintf(stderr, "No guest physical pages "
+                                        "available, paddr_min: 0x%lx "
+                                        "page_size: 0x%x memslot: %u "
+                                        "num_pages: %lu align: %lu\n",
+                                        paddr_min, vm->page_size, memslot,
+                                        num, align);
+                                fputs("---- vm dump ----\n", stderr);
+                                vm_dump(stderr, vm, 2);
+                                TEST_ASSERT(false, "false");
+                                abort();
                         }
+                        goto again;
                 }
-        } while (pg && pg != base + num);
-
-        if (pg == 0) {
-                fprintf(stderr, "No guest physical page available, "
-                        "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-                        paddr_min, vm->page_size, memslot);
-                fputs("---- vm dump ----\n", stderr);
-                vm_dump(stderr, vm, 2);
-                abort();
         }
 
         for (pg = base; pg < base + num; ++pg)
@@ -1951,6 +1955,12 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
         return base * vm->page_size;
 }
 
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+                              vm_paddr_t paddr_min, uint32_t memslot)
+{
+        return vm_phy_pages_alloc_align(vm, num, 1, paddr_min, memslot);
+}
+
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
                              uint32_t memslot)
 {
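[Editor's note] For readers unfamiliar with the selftests' sparsebit allocator, below is a minimal, self-contained sketch of the search loop the patch introduces: round the candidate base up to the requested alignment, check that the next 'num' pages are free, and on a conflict restart the search further along. It is only an illustration under simplifying assumptions; the bool array and the find_aligned_run() helper are made-up stand-ins for region->unused_phy_pages and the sparsebit_* calls, and the retry here restarts at the next index rather than at sparsebit_next_set().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Minimal model of the allocator's search: find 'num' consecutive free
 * pages starting at a page index that is a multiple of 'align' (a power
 * of two).  A bool array stands in for the real sparsebit of unused
 * physical pages.  Returns the base index, or (size_t)-1 if nothing fits.
 */
static size_t find_aligned_run(const bool *free_page, size_t total_pages,
                               size_t base, size_t num, size_t align)
{
        size_t pg;

again:
        /* Round base up to the requested alignment. */
        base = (base + align - 1) & ~(align - 1);
        if (base + num > total_pages)
                return (size_t)-1;

        for (pg = base; pg < base + num; ++pg) {
                if (!free_page[pg]) {
                        /* Restart the search just past the conflicting page. */
                        base = pg + 1;
                        goto again;
                }
        }
        return base;
}

int main(void)
{
        /* Pages 0-2 and 5 are taken; 3-4 and 6-15 are free. */
        bool free_page[16] = { 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };

        /* Ask for 4 pages aligned to a 4-page boundary: expect base 8. */
        printf("base = %zu\n", find_aligned_run(free_page, 16, 0, 4, 4));
        return 0;
}

Compiled with any C compiler, the example prints "base = 8": pages 3-4 and 4-7 are rejected because they are either too short or not free, so the first naturally aligned run of four free pages starts at page 8.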
powerpc will require this to allocate MMU tables in guest memory that
are aligned and larger than the base page size.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 .../selftests/kvm/include/kvm_util_base.h |  2 +
 tools/testing/selftests/kvm/lib/kvm_util.c | 44 ++++++++++++-------
 2 files changed, 29 insertions(+), 17 deletions(-)
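[Editor's note] To see why the alignment parameter matters, the powerpc radix code later in the series sizes each page-table level as (1UL << (index_size + 3)) bytes (2^index_size entries of 8 bytes), converts that to base pages, and allocates with num == align so the table is naturally aligned. The sketch below only reproduces that arithmetic; the index_size value of 13 for the PGD and the table_pages() helper name are assumptions for illustration, not part of the patch.

#include <stdio.h>

/*
 * Number of base pages needed for a radix table level whose index is
 * 'index_size' bits wide: the level holds 2^index_size 8-byte entries,
 * i.e. (1UL << (index_size + 3)) bytes, rounded up to at least one page.
 */
static unsigned long table_pages(unsigned int index_size, unsigned int page_shift)
{
        unsigned long pages = (1UL << (index_size + 3)) >> page_shift;

        return pages ? pages : 1;
}

int main(void)
{
        unsigned int page_shift = 12;   /* 4K base pages */
        unsigned long pgd_pages = table_pages(13, page_shift);

        /*
         * With this patch the caller can then do:
         *   vm_phy_pages_alloc_align(vm, pgd_pages, pgd_pages,
         *                            KVM_GUEST_PAGE_TABLE_MIN_PADDR,
         *                            vm->memslots[MEM_REGION_PT]);
         * passing num == align so the table is naturally aligned.
         */
        printf("PGD needs %lu naturally aligned base pages\n", pgd_pages);
        return 0;
}

With 64K base pages the same math yields a single page, which is why the existing one-page allocator was sufficient everywhere else.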