
[v10,16/16] KVM: selftests: guest_memfd mmap() test when mapping is allowed

Message ID 20250527180245.1413463-17-tabba@google.com
State New
Series KVM: Mapping guest_memfd backed memory at the host for software protected VMs

Commit Message

Fuad Tabba May 27, 2025, 6:02 p.m. UTC
Expand the guest_memfd selftests to include testing mapping guest
memory for VM types that support it.

Also, build the guest_memfd selftest for arm64.

Co-developed-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 tools/testing/selftests/kvm/Makefile.kvm      |   1 +
 .../testing/selftests/kvm/guest_memfd_test.c  | 162 +++++++++++++++---
 2 files changed, 142 insertions(+), 21 deletions(-)

Comments

Gavin Shan June 4, 2025, 9:19 a.m. UTC | #1
Hi Fuad,

On 5/28/25 4:02 AM, Fuad Tabba wrote:
> Expand the guest_memfd selftests to include testing mapping guest
> memory for VM types that support it.
> 
> Also, build the guest_memfd selftest for arm64.
> 
> Co-developed-by: Ackerley Tng <ackerleytng@google.com>
> Signed-off-by: Ackerley Tng <ackerleytng@google.com>
> Signed-off-by: Fuad Tabba <tabba@google.com>
> ---
>   tools/testing/selftests/kvm/Makefile.kvm      |   1 +
>   .../testing/selftests/kvm/guest_memfd_test.c  | 162 +++++++++++++++---
>   2 files changed, 142 insertions(+), 21 deletions(-)
> 

The test case fails on a 64KB host: the file sizes in test_create_guest_memfd_multiple()
should be page_size and (2 * page_size), but the fixed sizes 4096 and 8192 aren't aligned to 64KB.

# ./guest_memfd_test
Random seed: 0x6b8b4567
==== Test Assertion Failure ====
   guest_memfd_test.c:178: fd1 != -1
   pid=7565 tid=7565 errno=22 - Invalid argument
      1	0x000000000040252f: test_create_guest_memfd_multiple at guest_memfd_test.c:178
      2	 (inlined by) test_with_type at guest_memfd_test.c:231
      3	0x00000000004020c7: main at guest_memfd_test.c:306
      4	0x0000ffff8cec733f: ?? ??:0
      5	0x0000ffff8cec7417: ?? ??:0
      6	0x00000000004021ef: _start at ??:?
   memfd creation should succeed
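
One way to avoid this would be to derive the sizes from getpagesize() instead of
hardcoding 4096 and 8192. Just a sketch (the helper's real body isn't quoted in
this thread, so the surrounding assertions are illustrative):

static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
{
	size_t page_size = getpagesize();
	int fd1, fd2;

	fd1 = __vm_create_guest_memfd(vm, page_size, 0);
	TEST_ASSERT(fd1 != -1, "memfd creation should succeed");

	fd2 = __vm_create_guest_memfd(vm, page_size * 2, 0);
	TEST_ASSERT(fd2 != -1, "memfd creation should succeed");

	close(fd2);
	close(fd1);
}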

> diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
> index f62b0a5aba35..ccf95ed037c3 100644
> --- a/tools/testing/selftests/kvm/Makefile.kvm
> +++ b/tools/testing/selftests/kvm/Makefile.kvm
> @@ -163,6 +163,7 @@ TEST_GEN_PROGS_arm64 += access_tracking_perf_test
>   TEST_GEN_PROGS_arm64 += arch_timer
>   TEST_GEN_PROGS_arm64 += coalesced_io_test
>   TEST_GEN_PROGS_arm64 += dirty_log_perf_test
> +TEST_GEN_PROGS_arm64 += guest_memfd_test
>   TEST_GEN_PROGS_arm64 += get-reg-list
>   TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
>   TEST_GEN_PROGS_arm64 += memslot_perf_test
> diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
> index ce687f8d248f..3d6765bc1f28 100644
> --- a/tools/testing/selftests/kvm/guest_memfd_test.c
> +++ b/tools/testing/selftests/kvm/guest_memfd_test.c
> @@ -34,12 +34,46 @@ static void test_file_read_write(int fd)
>   		    "pwrite on a guest_mem fd should fail");
>   }
>   
> -static void test_mmap(int fd, size_t page_size)
> +static void test_mmap_allowed(int fd, size_t page_size, size_t total_size)
> +{
> +	const char val = 0xaa;
> +	char *mem;
> +	size_t i;
> +	int ret;
> +
> +	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> +	TEST_ASSERT(mem != MAP_FAILED, "mmaping() guest memory should pass.");
> +

If you agree, I think it would be nice to ensure guest-memfd doesn't support
copy-on-write, more details are provided below.

> +	memset(mem, val, total_size);
> +	for (i = 0; i < total_size; i++)
> +		TEST_ASSERT_EQ(mem[i], val);
> +
> +	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
> +			page_size);
> +	TEST_ASSERT(!ret, "fallocate the first page should succeed");
> +
> +	for (i = 0; i < page_size; i++)
> +		TEST_ASSERT_EQ(mem[i], 0x00);
> +	for (; i < total_size; i++)
> +		TEST_ASSERT_EQ(mem[i], val);
> +
> +	memset(mem, val, page_size);
> +	for (i = 0; i < total_size; i++)
> +		TEST_ASSERT_EQ(mem[i], val);
> +
> +	ret = munmap(mem, total_size);
> +	TEST_ASSERT(!ret, "munmap should succeed");
> +}
> +
> +static void test_mmap_denied(int fd, size_t page_size, size_t total_size)
>   {
>   	char *mem;
>   
>   	mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
>   	TEST_ASSERT_EQ(mem, MAP_FAILED);
> +
> +	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> +	TEST_ASSERT_EQ(mem, MAP_FAILED);
>   }

Add one more argument to test_mmap_denied as the flags passed to mmap().

static void test_mmap_denied(int fd, size_t page_size, size_t total_size, int mmap_flags)
{
	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, mmap_flags, fd, 0);
}
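
Filled out (just a sketch, keeping the existing assertion and adding nothing
beyond the new mmap_flags parameter), that would be:

static void test_mmap_denied(int fd, size_t page_size, size_t total_size,
			     int mmap_flags)
{
	char *mem;

	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, mmap_flags, fd, 0);
	TEST_ASSERT_EQ(mem, MAP_FAILED);
}

test_with_type() could then probe copy-on-write by calling it with MAP_PRIVATE
when mmap() is expected to work, and with MAP_SHARED otherwise (see the snippet
further below).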

>   
>   static void test_file_size(int fd, size_t page_size, size_t total_size)
> @@ -120,26 +154,19 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
>   	}
>   }
>   
> -static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
> +static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
> +						  uint64_t guest_memfd_flags,
> +						  size_t page_size)
>   {
> -	size_t page_size = getpagesize();
> -	uint64_t flag;
>   	size_t size;
>   	int fd;
>   
>   	for (size = 1; size < page_size; size++) {
> -		fd = __vm_create_guest_memfd(vm, size, 0);
> -		TEST_ASSERT(fd == -1 && errno == EINVAL,
> +		fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
> +		TEST_ASSERT(fd < 0 && errno == EINVAL,
>   			    "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
>   			    size);
>   	}
> -
> -	for (flag = BIT(0); flag; flag <<= 1) {
> -		fd = __vm_create_guest_memfd(vm, page_size, flag);
> -		TEST_ASSERT(fd == -1 && errno == EINVAL,
> -			    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> -			    flag);
> -	}
>   }
>   
>   static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> @@ -170,30 +197,123 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
>   	close(fd1);
>   }
>   
> -int main(int argc, char *argv[])
> +#define GUEST_MEMFD_TEST_SLOT 10
> +#define GUEST_MEMFD_TEST_GPA 0x100000000
> +
> +static bool check_vm_type(unsigned long vm_type)
>   {
> -	size_t page_size;
> +	/*
> +	 * Not all architectures support KVM_CAP_VM_TYPES. However, those that
> +	 * support guest_memfd have that support for the default VM type.
> +	 */
> +	if (vm_type == VM_TYPE_DEFAULT)
> +		return true;
> +
> +	return kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type);
> +}
> +
> +static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags,
> +			   bool expect_mmap_allowed)
> +{
> +	struct kvm_vm *vm;
>   	size_t total_size;
> +	size_t page_size;
>   	int fd;
> -	struct kvm_vm *vm;
>   
> -	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> +	if (!check_vm_type(vm_type))
> +		return;
>   
>   	page_size = getpagesize();
>   	total_size = page_size * 4;
>   
> -	vm = vm_create_barebones();
> +	vm = vm_create_barebones_type(vm_type);
>   
> -	test_create_guest_memfd_invalid(vm);
>   	test_create_guest_memfd_multiple(vm);
> +	test_create_guest_memfd_invalid_sizes(vm, guest_memfd_flags, page_size);
>   
> -	fd = vm_create_guest_memfd(vm, total_size, 0);
> +	fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags);
>   
>   	test_file_read_write(fd);
> -	test_mmap(fd, page_size);
> +
> +	if (expect_mmap_allowed)
> +		test_mmap_allowed(fd, page_size, total_size);
> +	else
> +		test_mmap_denied(fd, page_size, total_size);
> +

	if (expect_mmap_allowed) {
		test_mmap_denied(fd, page_size, total_size, MAP_PRIVATE);
		test_mmap_allowed(fd, page_size, total_size);
	} else {
		test_mmap_denied(fd, page_size, total_size, MAP_SHARED);
	}

>   	test_file_size(fd, page_size, total_size);
>   	test_fallocate(fd, page_size, total_size);
>   	test_invalid_punch_hole(fd, page_size, total_size);
>   
>   	close(fd);
> +	kvm_vm_release(vm);
> +}
> +
> +static void test_vm_type_gmem_flag_validity(unsigned long vm_type,
> +					    uint64_t expected_valid_flags)
> +{
> +	size_t page_size = getpagesize();
> +	struct kvm_vm *vm;
> +	uint64_t flag = 0;
> +	int fd;
> +
> +	if (!check_vm_type(vm_type))
> +		return;
> +
> +	vm = vm_create_barebones_type(vm_type);
> +
> +	for (flag = BIT(0); flag; flag <<= 1) {
> +		fd = __vm_create_guest_memfd(vm, page_size, flag);
> +
> +		if (flag & expected_valid_flags) {
> +			TEST_ASSERT(fd >= 0,
> +				    "guest_memfd() with flag '0x%lx' should be valid",
> +				    flag);
> +			close(fd);
> +		} else {
> +			TEST_ASSERT(fd < 0 && errno == EINVAL,
> +				    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> +				    flag);
> +		}
> +	}
> +
> +	kvm_vm_release(vm);
> +}
> +
> +static void test_gmem_flag_validity(void)
> +{
> +	uint64_t non_coco_vm_valid_flags = 0;
> +
> +	if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM))
> +		non_coco_vm_valid_flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED;
> +
> +	test_vm_type_gmem_flag_validity(VM_TYPE_DEFAULT, non_coco_vm_valid_flags);
> +
> +#ifdef __x86_64__
> +	test_vm_type_gmem_flag_validity(KVM_X86_SW_PROTECTED_VM, non_coco_vm_valid_flags);
> +	test_vm_type_gmem_flag_validity(KVM_X86_SEV_VM, 0);
> +	test_vm_type_gmem_flag_validity(KVM_X86_SEV_ES_VM, 0);
> +	test_vm_type_gmem_flag_validity(KVM_X86_SNP_VM, 0);
> +	test_vm_type_gmem_flag_validity(KVM_X86_TDX_VM, 0);
> +#endif
> +}
> +
> +int main(int argc, char *argv[])
> +{
> +	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> +
> +	test_gmem_flag_validity();
> +
> +	test_with_type(VM_TYPE_DEFAULT, 0, false);
> +	if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> +		test_with_type(VM_TYPE_DEFAULT, GUEST_MEMFD_FLAG_SUPPORT_SHARED,
> +			       true);
> +	}
> +
> +#ifdef __x86_64__
> +	test_with_type(KVM_X86_SW_PROTECTED_VM, 0, false);
> +	if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> +		test_with_type(KVM_X86_SW_PROTECTED_VM,
> +			       GUEST_MEMFD_FLAG_SUPPORT_SHARED, true);
> +	}
> +#endif
>   }

Thanks,
Gavin
Fuad Tabba June 4, 2025, 9:48 a.m. UTC | #2
Hi Gavin,

On Wed, 4 Jun 2025 at 10:20, Gavin Shan <gshan@redhat.com> wrote:
>
> Hi Fuad,
>
> On 5/28/25 4:02 AM, Fuad Tabba wrote:
> > Expand the guest_memfd selftests to include testing mapping guest
> > memory for VM types that support it.
> >
> > Also, build the guest_memfd selftest for arm64.
> >
> > Co-developed-by: Ackerley Tng <ackerleytng@google.com>
> > Signed-off-by: Ackerley Tng <ackerleytng@google.com>
> > Signed-off-by: Fuad Tabba <tabba@google.com>
> > ---
> >   tools/testing/selftests/kvm/Makefile.kvm      |   1 +
> >   .../testing/selftests/kvm/guest_memfd_test.c  | 162 +++++++++++++++---
> >   2 files changed, 142 insertions(+), 21 deletions(-)
> >
>
> The test case fails on a 64KB host: the file sizes in test_create_guest_memfd_multiple()
> should be page_size and (2 * page_size), but the fixed sizes 4096 and 8192 aren't aligned to 64KB.

Yes, however, this patch didn't introduce or modify this test. I think
it's better to fix it in a separate patch independent of this series.

> # ./guest_memfd_test
> Random seed: 0x6b8b4567
> ==== Test Assertion Failure ====
>    guest_memfd_test.c:178: fd1 != -1
>    pid=7565 tid=7565 errno=22 - Invalid argument
>       1 0x000000000040252f: test_create_guest_memfd_multiple at guest_memfd_test.c:178
>       2  (inlined by) test_with_type at guest_memfd_test.c:231
>       3 0x00000000004020c7: main at guest_memfd_test.c:306
>       4 0x0000ffff8cec733f: ?? ??:0
>       5 0x0000ffff8cec7417: ?? ??:0
>       6 0x00000000004021ef: _start at ??:?
>    memfd creation should succeed
>
> > diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
> > index f62b0a5aba35..ccf95ed037c3 100644
> > --- a/tools/testing/selftests/kvm/Makefile.kvm
> > +++ b/tools/testing/selftests/kvm/Makefile.kvm
> > @@ -163,6 +163,7 @@ TEST_GEN_PROGS_arm64 += access_tracking_perf_test
> >   TEST_GEN_PROGS_arm64 += arch_timer
> >   TEST_GEN_PROGS_arm64 += coalesced_io_test
> >   TEST_GEN_PROGS_arm64 += dirty_log_perf_test
> > +TEST_GEN_PROGS_arm64 += guest_memfd_test
> >   TEST_GEN_PROGS_arm64 += get-reg-list
> >   TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
> >   TEST_GEN_PROGS_arm64 += memslot_perf_test
> > diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
> > index ce687f8d248f..3d6765bc1f28 100644
> > --- a/tools/testing/selftests/kvm/guest_memfd_test.c
> > +++ b/tools/testing/selftests/kvm/guest_memfd_test.c
> > @@ -34,12 +34,46 @@ static void test_file_read_write(int fd)
> >                   "pwrite on a guest_mem fd should fail");
> >   }
> >
> > -static void test_mmap(int fd, size_t page_size)
> > +static void test_mmap_allowed(int fd, size_t page_size, size_t total_size)
> > +{
> > +     const char val = 0xaa;
> > +     char *mem;
> > +     size_t i;
> > +     int ret;
> > +
> > +     mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> > +     TEST_ASSERT(mem != MAP_FAILED, "mmaping() guest memory should pass.");
> > +
>
> If you agree, I think it would be nice to ensure guest-memfd doesn't support
> copy-on-write, more details are provided below.

Good idea. I think we can do this without adding much more code. I'll
add a check in test_mmap_allowed(), since the idea is, even if mmap()
is supported, we still can't COW. I'll rename the functions to make
this a bit clearer (i.e., supported instead of allowed).
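
Roughly what I have in mind (a sketch only, not the final code — naming and
exact placement might still change):

static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
{
	const char val = 0xaa;
	char *mem;
	size_t i;

	/* Even when mmap() is supported, copy-on-write mappings must fail. */
	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	TEST_ASSERT_EQ(mem, MAP_FAILED);

	/* The existing shared-mapping checks stay as they are. */
	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() guest memory should succeed");

	memset(mem, val, total_size);
	for (i = 0; i < total_size; i++)
		TEST_ASSERT_EQ(mem[i], val);

	TEST_ASSERT(!fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
			       0, page_size),
		    "fallocate the first page should succeed");

	for (i = 0; i < page_size; i++)
		TEST_ASSERT_EQ(mem[i], 0x00);
	for (; i < total_size; i++)
		TEST_ASSERT_EQ(mem[i], val);

	TEST_ASSERT(!munmap(mem, total_size), "munmap should succeed");
}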

Thank you for this and thank you for the reviews!
/fuad

> > +     memset(mem, val, total_size);
> > +     for (i = 0; i < total_size; i++)
> > +             TEST_ASSERT_EQ(mem[i], val);
> > +
> > +     ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
> > +                     page_size);
> > +     TEST_ASSERT(!ret, "fallocate the first page should succeed");
> > +
> > +     for (i = 0; i < page_size; i++)
> > +             TEST_ASSERT_EQ(mem[i], 0x00);
> > +     for (; i < total_size; i++)
> > +             TEST_ASSERT_EQ(mem[i], val);
> > +
> > +     memset(mem, val, page_size);
> > +     for (i = 0; i < total_size; i++)
> > +             TEST_ASSERT_EQ(mem[i], val);
> > +
> > +     ret = munmap(mem, total_size);
> > +     TEST_ASSERT(!ret, "munmap should succeed");
> > +}
> > +
> > +static void test_mmap_denied(int fd, size_t page_size, size_t total_size)
> >   {
> >       char *mem;
> >
> >       mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> >       TEST_ASSERT_EQ(mem, MAP_FAILED);
> > +
> > +     mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> > +     TEST_ASSERT_EQ(mem, MAP_FAILED);
> >   }
>
> Add one more argument to test_mmap_denied as the flags passed to mmap().
>
> static void test_mmap_denied(int fd, size_t page_size, size_t total_size, int mmap_flags)
> {
>         mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, mmap_flags, fd, 0);
> }
>
> >
> >   static void test_file_size(int fd, size_t page_size, size_t total_size)
> > @@ -120,26 +154,19 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
> >       }
> >   }
> >
> > -static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
> > +static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
> > +                                               uint64_t guest_memfd_flags,
> > +                                               size_t page_size)
> >   {
> > -     size_t page_size = getpagesize();
> > -     uint64_t flag;
> >       size_t size;
> >       int fd;
> >
> >       for (size = 1; size < page_size; size++) {
> > -             fd = __vm_create_guest_memfd(vm, size, 0);
> > -             TEST_ASSERT(fd == -1 && errno == EINVAL,
> > +             fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
> > +             TEST_ASSERT(fd < 0 && errno == EINVAL,
> >                           "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
> >                           size);
> >       }
> > -
> > -     for (flag = BIT(0); flag; flag <<= 1) {
> > -             fd = __vm_create_guest_memfd(vm, page_size, flag);
> > -             TEST_ASSERT(fd == -1 && errno == EINVAL,
> > -                         "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> > -                         flag);
> > -     }
> >   }
> >
> >   static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> > @@ -170,30 +197,123 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> >       close(fd1);
> >   }
> >
> > -int main(int argc, char *argv[])
> > +#define GUEST_MEMFD_TEST_SLOT 10
> > +#define GUEST_MEMFD_TEST_GPA 0x100000000
> > +
> > +static bool check_vm_type(unsigned long vm_type)
> >   {
> > -     size_t page_size;
> > +     /*
> > +      * Not all architectures support KVM_CAP_VM_TYPES. However, those that
> > +      * support guest_memfd have that support for the default VM type.
> > +      */
> > +     if (vm_type == VM_TYPE_DEFAULT)
> > +             return true;
> > +
> > +     return kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type);
> > +}
> > +
> > +static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags,
> > +                        bool expect_mmap_allowed)
> > +{
> > +     struct kvm_vm *vm;
> >       size_t total_size;
> > +     size_t page_size;
> >       int fd;
> > -     struct kvm_vm *vm;
> >
> > -     TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> > +     if (!check_vm_type(vm_type))
> > +             return;
> >
> >       page_size = getpagesize();
> >       total_size = page_size * 4;
> >
> > -     vm = vm_create_barebones();
> > +     vm = vm_create_barebones_type(vm_type);
> >
> > -     test_create_guest_memfd_invalid(vm);
> >       test_create_guest_memfd_multiple(vm);
> > +     test_create_guest_memfd_invalid_sizes(vm, guest_memfd_flags, page_size);
> >
> > -     fd = vm_create_guest_memfd(vm, total_size, 0);
> > +     fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags);
> >
> >       test_file_read_write(fd);
> > -     test_mmap(fd, page_size);
> > +
> > +     if (expect_mmap_allowed)
> > +             test_mmap_allowed(fd, page_size, total_size);
> > +     else
> > +             test_mmap_denied(fd, page_size, total_size);
> > +
>
>         if (expect_mmap_allowed) {
>                 test_mmap_denied(fd, page_size, total_size, MAP_PRIVATE);
>                 test_mmap_allowed(fd, page_size, total_size);
>         } else {
>                 test_mmap_denied(fd, page_size, total_size, MAP_SHARED);
>         }
>
> >       test_file_size(fd, page_size, total_size);
> >       test_fallocate(fd, page_size, total_size);
> >       test_invalid_punch_hole(fd, page_size, total_size);
> >
> >       close(fd);
> > +     kvm_vm_release(vm);
> > +}
> > +
> > +static void test_vm_type_gmem_flag_validity(unsigned long vm_type,
> > +                                         uint64_t expected_valid_flags)
> > +{
> > +     size_t page_size = getpagesize();
> > +     struct kvm_vm *vm;
> > +     uint64_t flag = 0;
> > +     int fd;
> > +
> > +     if (!check_vm_type(vm_type))
> > +             return;
> > +
> > +     vm = vm_create_barebones_type(vm_type);
> > +
> > +     for (flag = BIT(0); flag; flag <<= 1) {
> > +             fd = __vm_create_guest_memfd(vm, page_size, flag);
> > +
> > +             if (flag & expected_valid_flags) {
> > +                     TEST_ASSERT(fd >= 0,
> > +                                 "guest_memfd() with flag '0x%lx' should be valid",
> > +                                 flag);
> > +                     close(fd);
> > +             } else {
> > +                     TEST_ASSERT(fd < 0 && errno == EINVAL,
> > +                                 "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> > +                                 flag);
> > +             }
> > +     }
> > +
> > +     kvm_vm_release(vm);
> > +}
> > +
> > +static void test_gmem_flag_validity(void)
> > +{
> > +     uint64_t non_coco_vm_valid_flags = 0;
> > +
> > +     if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM))
> > +             non_coco_vm_valid_flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED;
> > +
> > +     test_vm_type_gmem_flag_validity(VM_TYPE_DEFAULT, non_coco_vm_valid_flags);
> > +
> > +#ifdef __x86_64__
> > +     test_vm_type_gmem_flag_validity(KVM_X86_SW_PROTECTED_VM, non_coco_vm_valid_flags);
> > +     test_vm_type_gmem_flag_validity(KVM_X86_SEV_VM, 0);
> > +     test_vm_type_gmem_flag_validity(KVM_X86_SEV_ES_VM, 0);
> > +     test_vm_type_gmem_flag_validity(KVM_X86_SNP_VM, 0);
> > +     test_vm_type_gmem_flag_validity(KVM_X86_TDX_VM, 0);
> > +#endif
> > +}
> > +
> > +int main(int argc, char *argv[])
> > +{
> > +     TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> > +
> > +     test_gmem_flag_validity();
> > +
> > +     test_with_type(VM_TYPE_DEFAULT, 0, false);
> > +     if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> > +             test_with_type(VM_TYPE_DEFAULT, GUEST_MEMFD_FLAG_SUPPORT_SHARED,
> > +                            true);
> > +     }
> > +
> > +#ifdef __x86_64__
> > +     test_with_type(KVM_X86_SW_PROTECTED_VM, 0, false);
> > +     if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> > +             test_with_type(KVM_X86_SW_PROTECTED_VM,
> > +                            GUEST_MEMFD_FLAG_SUPPORT_SHARED, true);
> > +     }
> > +#endif
> >   }
>
> Thanks,
> Gavin
>
Fuad Tabba June 4, 2025, 10:25 a.m. UTC | #3
Hi Gavin,

On Wed, 4 Jun 2025 at 11:05, Gavin Shan <gshan@redhat.com> wrote:
>
> Hi Fuad,
>
> On 6/4/25 7:48 PM, Fuad Tabba wrote:
> > On Wed, 4 Jun 2025 at 10:20, Gavin Shan <gshan@redhat.com> wrote:
> >>
> >> On 5/28/25 4:02 AM, Fuad Tabba wrote:
> >>> Expand the guest_memfd selftests to include testing mapping guest
> >>> memory for VM types that support it.
> >>>
> >>> Also, build the guest_memfd selftest for arm64.
> >>>
> >>> Co-developed-by: Ackerley Tng <ackerleytng@google.com>
> >>> Signed-off-by: Ackerley Tng <ackerleytng@google.com>
> >>> Signed-off-by: Fuad Tabba <tabba@google.com>
> >>> ---
> >>>    tools/testing/selftests/kvm/Makefile.kvm      |   1 +
> >>>    .../testing/selftests/kvm/guest_memfd_test.c  | 162 +++++++++++++++---
> >>>    2 files changed, 142 insertions(+), 21 deletions(-)
> >>>
> >>
> >> The test case fails on a 64KB host: the file sizes in test_create_guest_memfd_multiple()
> >> should be page_size and (2 * page_size), but the fixed sizes 4096 and 8192 aren't aligned to 64KB.
> >
> > Yes, however, this patch didn't introduce or modify this test. I think
> > it's better to fix it in a separate patch independent of this series.
> >
>
> Yeah, it can be a separate patch or a preparatory patch before PATCH[16/16]
> of this series, because x86 doesn't have a 64KB page size: the currently fixed
> sizes (4096 and 8192) are aligned to the page size on x86, and 'guest_memfd_test'
> is enabled on arm64 by this series.

You're right. This patch enables support for arm64, so it should be
fixed in conjunction with that. As you suggested, I'll add a separate
patch before this one that fixes this and enables support for arm64.

Thanks again!
/fuad

> >> # ./guest_memfd_test
> >> Random seed: 0x6b8b4567
> >> ==== Test Assertion Failure ====
> >>     guest_memfd_test.c:178: fd1 != -1
> >>     pid=7565 tid=7565 errno=22 - Invalid argument
> >>        1 0x000000000040252f: test_create_guest_memfd_multiple at guest_memfd_test.c:178
> >>        2  (inlined by) test_with_type at guest_memfd_test.c:231
> >>        3 0x00000000004020c7: main at guest_memfd_test.c:306
> >>        4 0x0000ffff8cec733f: ?? ??:0
> >>        5 0x0000ffff8cec7417: ?? ??:0
> >>        6 0x00000000004021ef: _start at ??:?
> >>     memfd creation should succeed
> >>
> >>> diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
> >>> index f62b0a5aba35..ccf95ed037c3 100644
> >>> --- a/tools/testing/selftests/kvm/Makefile.kvm
> >>> +++ b/tools/testing/selftests/kvm/Makefile.kvm
> >>> @@ -163,6 +163,7 @@ TEST_GEN_PROGS_arm64 += access_tracking_perf_test
> >>>    TEST_GEN_PROGS_arm64 += arch_timer
> >>>    TEST_GEN_PROGS_arm64 += coalesced_io_test
> >>>    TEST_GEN_PROGS_arm64 += dirty_log_perf_test
> >>> +TEST_GEN_PROGS_arm64 += guest_memfd_test
> >>>    TEST_GEN_PROGS_arm64 += get-reg-list
> >>>    TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
> >>>    TEST_GEN_PROGS_arm64 += memslot_perf_test
> >>> diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
> >>> index ce687f8d248f..3d6765bc1f28 100644
> >>> --- a/tools/testing/selftests/kvm/guest_memfd_test.c
> >>> +++ b/tools/testing/selftests/kvm/guest_memfd_test.c
> >>> @@ -34,12 +34,46 @@ static void test_file_read_write(int fd)
> >>>                    "pwrite on a guest_mem fd should fail");
> >>>    }
> >>>
> >>> -static void test_mmap(int fd, size_t page_size)
> >>> +static void test_mmap_allowed(int fd, size_t page_size, size_t total_size)
> >>> +{
> >>> +     const char val = 0xaa;
> >>> +     char *mem;
> >>> +     size_t i;
> >>> +     int ret;
> >>> +
> >>> +     mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> >>> +     TEST_ASSERT(mem != MAP_FAILED, "mmaping() guest memory should pass.");
> >>> +
> >>
> >> If you agree, I think it would be nice to ensure guest-memfd doesn't support
> >> copy-on-write, more details are provided below.
> >
> > Good idea. I think we can do this without adding much more code. I'll
> > add a check in test_mmap_allowed(), since the idea is, even if mmap()
> > is supported, we still can't COW. I'll rename the functions to make
> > this a bit clearer (i.e., supported instead of allowed).
> >
> > Thank you for this and thank you for the reviews!
> >
>
> Sounds good to me  :)
>
> >
> >>> +     memset(mem, val, total_size);
> >>> +     for (i = 0; i < total_size; i++)
> >>> +             TEST_ASSERT_EQ(mem[i], val);
> >>> +
> >>> +     ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
> >>> +                     page_size);
> >>> +     TEST_ASSERT(!ret, "fallocate the first page should succeed");
> >>> +
> >>> +     for (i = 0; i < page_size; i++)
> >>> +             TEST_ASSERT_EQ(mem[i], 0x00);
> >>> +     for (; i < total_size; i++)
> >>> +             TEST_ASSERT_EQ(mem[i], val);
> >>> +
> >>> +     memset(mem, val, page_size);
> >>> +     for (i = 0; i < total_size; i++)
> >>> +             TEST_ASSERT_EQ(mem[i], val);
> >>> +
> >>> +     ret = munmap(mem, total_size);
> >>> +     TEST_ASSERT(!ret, "munmap should succeed");
> >>> +}
> >>> +
> >>> +static void test_mmap_denied(int fd, size_t page_size, size_t total_size)
> >>>    {
> >>>        char *mem;
> >>>
> >>>        mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> >>>        TEST_ASSERT_EQ(mem, MAP_FAILED);
> >>> +
> >>> +     mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> >>> +     TEST_ASSERT_EQ(mem, MAP_FAILED);
> >>>    }
> >>
> >> Add one more argument to test_mmap_denied as the flags passed to mmap().
> >>
> >> static void test_mmap_denied(int fd, size_t page_size, size_t total_size, int mmap_flags)
> >> {
> >>          mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, mmap_flags, fd, 0);
> >> }
> >>
> >>>
> >>>    static void test_file_size(int fd, size_t page_size, size_t total_size)
> >>> @@ -120,26 +154,19 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
> >>>        }
> >>>    }
> >>>
> >>> -static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
> >>> +static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
> >>> +                                               uint64_t guest_memfd_flags,
> >>> +                                               size_t page_size)
> >>>    {
> >>> -     size_t page_size = getpagesize();
> >>> -     uint64_t flag;
> >>>        size_t size;
> >>>        int fd;
> >>>
> >>>        for (size = 1; size < page_size; size++) {
> >>> -             fd = __vm_create_guest_memfd(vm, size, 0);
> >>> -             TEST_ASSERT(fd == -1 && errno == EINVAL,
> >>> +             fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
> >>> +             TEST_ASSERT(fd < 0 && errno == EINVAL,
> >>>                            "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
> >>>                            size);
> >>>        }
> >>> -
> >>> -     for (flag = BIT(0); flag; flag <<= 1) {
> >>> -             fd = __vm_create_guest_memfd(vm, page_size, flag);
> >>> -             TEST_ASSERT(fd == -1 && errno == EINVAL,
> >>> -                         "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> >>> -                         flag);
> >>> -     }
> >>>    }
> >>>
> >>>    static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> >>> @@ -170,30 +197,123 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
> >>>        close(fd1);
> >>>    }
> >>>
> >>> -int main(int argc, char *argv[])
> >>> +#define GUEST_MEMFD_TEST_SLOT 10
> >>> +#define GUEST_MEMFD_TEST_GPA 0x100000000
> >>> +
> >>> +static bool check_vm_type(unsigned long vm_type)
> >>>    {
> >>> -     size_t page_size;
> >>> +     /*
> >>> +      * Not all architectures support KVM_CAP_VM_TYPES. However, those that
> >>> +      * support guest_memfd have that support for the default VM type.
> >>> +      */
> >>> +     if (vm_type == VM_TYPE_DEFAULT)
> >>> +             return true;
> >>> +
> >>> +     return kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type);
> >>> +}
> >>> +
> >>> +static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags,
> >>> +                        bool expect_mmap_allowed)
> >>> +{
> >>> +     struct kvm_vm *vm;
> >>>        size_t total_size;
> >>> +     size_t page_size;
> >>>        int fd;
> >>> -     struct kvm_vm *vm;
> >>>
> >>> -     TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> >>> +     if (!check_vm_type(vm_type))
> >>> +             return;
> >>>
> >>>        page_size = getpagesize();
> >>>        total_size = page_size * 4;
> >>>
> >>> -     vm = vm_create_barebones();
> >>> +     vm = vm_create_barebones_type(vm_type);
> >>>
> >>> -     test_create_guest_memfd_invalid(vm);
> >>>        test_create_guest_memfd_multiple(vm);
> >>> +     test_create_guest_memfd_invalid_sizes(vm, guest_memfd_flags, page_size);
> >>>
> >>> -     fd = vm_create_guest_memfd(vm, total_size, 0);
> >>> +     fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags);
> >>>
> >>>        test_file_read_write(fd);
> >>> -     test_mmap(fd, page_size);
> >>> +
> >>> +     if (expect_mmap_allowed)
> >>> +             test_mmap_allowed(fd, page_size, total_size);
> >>> +     else
> >>> +             test_mmap_denied(fd, page_size, total_size);
> >>> +
> >>
> >>          if (expect_mmap_allowed) {
> >>                  test_mmap_denied(fd, page_size, total_size, MAP_PRIVATE);
> >>                  test_mmap_allowed(fd, page_size, total_size);
> >>          } else {
> >>                  test_mmap_denied(fd, page_size, total_size, MAP_SHARED);
> >>          }
> >>
> >>>        test_file_size(fd, page_size, total_size);
> >>>        test_fallocate(fd, page_size, total_size);
> >>>        test_invalid_punch_hole(fd, page_size, total_size);
> >>>
> >>>        close(fd);
> >>> +     kvm_vm_release(vm);
> >>> +}
> >>> +
> >>> +static void test_vm_type_gmem_flag_validity(unsigned long vm_type,
> >>> +                                         uint64_t expected_valid_flags)
> >>> +{
> >>> +     size_t page_size = getpagesize();
> >>> +     struct kvm_vm *vm;
> >>> +     uint64_t flag = 0;
> >>> +     int fd;
> >>> +
> >>> +     if (!check_vm_type(vm_type))
> >>> +             return;
> >>> +
> >>> +     vm = vm_create_barebones_type(vm_type);
> >>> +
> >>> +     for (flag = BIT(0); flag; flag <<= 1) {
> >>> +             fd = __vm_create_guest_memfd(vm, page_size, flag);
> >>> +
> >>> +             if (flag & expected_valid_flags) {
> >>> +                     TEST_ASSERT(fd >= 0,
> >>> +                                 "guest_memfd() with flag '0x%lx' should be valid",
> >>> +                                 flag);
> >>> +                     close(fd);
> >>> +             } else {
> >>> +                     TEST_ASSERT(fd < 0 && errno == EINVAL,
> >>> +                                 "guest_memfd() with flag '0x%lx' should fail with EINVAL",
> >>> +                                 flag);
> >>> +             }
> >>> +     }
> >>> +
> >>> +     kvm_vm_release(vm);
> >>> +}
> >>> +
> >>> +static void test_gmem_flag_validity(void)
> >>> +{
> >>> +     uint64_t non_coco_vm_valid_flags = 0;
> >>> +
> >>> +     if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM))
> >>> +             non_coco_vm_valid_flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED;
> >>> +
> >>> +     test_vm_type_gmem_flag_validity(VM_TYPE_DEFAULT, non_coco_vm_valid_flags);
> >>> +
> >>> +#ifdef __x86_64__
> >>> +     test_vm_type_gmem_flag_validity(KVM_X86_SW_PROTECTED_VM, non_coco_vm_valid_flags);
> >>> +     test_vm_type_gmem_flag_validity(KVM_X86_SEV_VM, 0);
> >>> +     test_vm_type_gmem_flag_validity(KVM_X86_SEV_ES_VM, 0);
> >>> +     test_vm_type_gmem_flag_validity(KVM_X86_SNP_VM, 0);
> >>> +     test_vm_type_gmem_flag_validity(KVM_X86_TDX_VM, 0);
> >>> +#endif
> >>> +}
> >>> +
> >>> +int main(int argc, char *argv[])
> >>> +{
> >>> +     TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
> >>> +
> >>> +     test_gmem_flag_validity();
> >>> +
> >>> +     test_with_type(VM_TYPE_DEFAULT, 0, false);
> >>> +     if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> >>> +             test_with_type(VM_TYPE_DEFAULT, GUEST_MEMFD_FLAG_SUPPORT_SHARED,
> >>> +                            true);
> >>> +     }
> >>> +
> >>> +#ifdef __x86_64__
> >>> +     test_with_type(KVM_X86_SW_PROTECTED_VM, 0, false);
> >>> +     if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
> >>> +             test_with_type(KVM_X86_SW_PROTECTED_VM,
> >>> +                            GUEST_MEMFD_FLAG_SUPPORT_SHARED, true);
> >>> +     }
> >>> +#endif
> >>>    }
> >>
>
> Thanks,
> Gavin
>

Patch

diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index f62b0a5aba35..ccf95ed037c3 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -163,6 +163,7 @@  TEST_GEN_PROGS_arm64 += access_tracking_perf_test
 TEST_GEN_PROGS_arm64 += arch_timer
 TEST_GEN_PROGS_arm64 += coalesced_io_test
 TEST_GEN_PROGS_arm64 += dirty_log_perf_test
+TEST_GEN_PROGS_arm64 += guest_memfd_test
 TEST_GEN_PROGS_arm64 += get-reg-list
 TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
 TEST_GEN_PROGS_arm64 += memslot_perf_test
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index ce687f8d248f..3d6765bc1f28 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -34,12 +34,46 @@  static void test_file_read_write(int fd)
 		    "pwrite on a guest_mem fd should fail");
 }
 
-static void test_mmap(int fd, size_t page_size)
+static void test_mmap_allowed(int fd, size_t page_size, size_t total_size)
+{
+	const char val = 0xaa;
+	char *mem;
+	size_t i;
+	int ret;
+
+	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "mmaping() guest memory should pass.");
+
+	memset(mem, val, total_size);
+	for (i = 0; i < total_size; i++)
+		TEST_ASSERT_EQ(mem[i], val);
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
+			page_size);
+	TEST_ASSERT(!ret, "fallocate the first page should succeed");
+
+	for (i = 0; i < page_size; i++)
+		TEST_ASSERT_EQ(mem[i], 0x00);
+	for (; i < total_size; i++)
+		TEST_ASSERT_EQ(mem[i], val);
+
+	memset(mem, val, page_size);
+	for (i = 0; i < total_size; i++)
+		TEST_ASSERT_EQ(mem[i], val);
+
+	ret = munmap(mem, total_size);
+	TEST_ASSERT(!ret, "munmap should succeed");
+}
+
+static void test_mmap_denied(int fd, size_t page_size, size_t total_size)
 {
 	char *mem;
 
 	mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 	TEST_ASSERT_EQ(mem, MAP_FAILED);
+
+	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT_EQ(mem, MAP_FAILED);
 }
 
 static void test_file_size(int fd, size_t page_size, size_t total_size)
@@ -120,26 +154,19 @@  static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
 	}
 }
 
-static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
+static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
+						  uint64_t guest_memfd_flags,
+						  size_t page_size)
 {
-	size_t page_size = getpagesize();
-	uint64_t flag;
 	size_t size;
 	int fd;
 
 	for (size = 1; size < page_size; size++) {
-		fd = __vm_create_guest_memfd(vm, size, 0);
-		TEST_ASSERT(fd == -1 && errno == EINVAL,
+		fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
+		TEST_ASSERT(fd < 0 && errno == EINVAL,
 			    "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
 			    size);
 	}
-
-	for (flag = BIT(0); flag; flag <<= 1) {
-		fd = __vm_create_guest_memfd(vm, page_size, flag);
-		TEST_ASSERT(fd == -1 && errno == EINVAL,
-			    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
-			    flag);
-	}
 }
 
 static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
@@ -170,30 +197,123 @@  static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
 	close(fd1);
 }
 
-int main(int argc, char *argv[])
+#define GUEST_MEMFD_TEST_SLOT 10
+#define GUEST_MEMFD_TEST_GPA 0x100000000
+
+static bool check_vm_type(unsigned long vm_type)
 {
-	size_t page_size;
+	/*
+	 * Not all architectures support KVM_CAP_VM_TYPES. However, those that
+	 * support guest_memfd have that support for the default VM type.
+	 */
+	if (vm_type == VM_TYPE_DEFAULT)
+		return true;
+
+	return kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type);
+}
+
+static void test_with_type(unsigned long vm_type, uint64_t guest_memfd_flags,
+			   bool expect_mmap_allowed)
+{
+	struct kvm_vm *vm;
 	size_t total_size;
+	size_t page_size;
 	int fd;
-	struct kvm_vm *vm;
 
-	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
+	if (!check_vm_type(vm_type))
+		return;
 
 	page_size = getpagesize();
 	total_size = page_size * 4;
 
-	vm = vm_create_barebones();
+	vm = vm_create_barebones_type(vm_type);
 
-	test_create_guest_memfd_invalid(vm);
 	test_create_guest_memfd_multiple(vm);
+	test_create_guest_memfd_invalid_sizes(vm, guest_memfd_flags, page_size);
 
-	fd = vm_create_guest_memfd(vm, total_size, 0);
+	fd = vm_create_guest_memfd(vm, total_size, guest_memfd_flags);
 
 	test_file_read_write(fd);
-	test_mmap(fd, page_size);
+
+	if (expect_mmap_allowed)
+		test_mmap_allowed(fd, page_size, total_size);
+	else
+		test_mmap_denied(fd, page_size, total_size);
+
 	test_file_size(fd, page_size, total_size);
 	test_fallocate(fd, page_size, total_size);
 	test_invalid_punch_hole(fd, page_size, total_size);
 
 	close(fd);
+	kvm_vm_release(vm);
+}
+
+static void test_vm_type_gmem_flag_validity(unsigned long vm_type,
+					    uint64_t expected_valid_flags)
+{
+	size_t page_size = getpagesize();
+	struct kvm_vm *vm;
+	uint64_t flag = 0;
+	int fd;
+
+	if (!check_vm_type(vm_type))
+		return;
+
+	vm = vm_create_barebones_type(vm_type);
+
+	for (flag = BIT(0); flag; flag <<= 1) {
+		fd = __vm_create_guest_memfd(vm, page_size, flag);
+
+		if (flag & expected_valid_flags) {
+			TEST_ASSERT(fd >= 0,
+				    "guest_memfd() with flag '0x%lx' should be valid",
+				    flag);
+			close(fd);
+		} else {
+			TEST_ASSERT(fd < 0 && errno == EINVAL,
+				    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
+				    flag);
+		}
+	}
+
+	kvm_vm_release(vm);
+}
+
+static void test_gmem_flag_validity(void)
+{
+	uint64_t non_coco_vm_valid_flags = 0;
+
+	if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM))
+		non_coco_vm_valid_flags = GUEST_MEMFD_FLAG_SUPPORT_SHARED;
+
+	test_vm_type_gmem_flag_validity(VM_TYPE_DEFAULT, non_coco_vm_valid_flags);
+
+#ifdef __x86_64__
+	test_vm_type_gmem_flag_validity(KVM_X86_SW_PROTECTED_VM, non_coco_vm_valid_flags);
+	test_vm_type_gmem_flag_validity(KVM_X86_SEV_VM, 0);
+	test_vm_type_gmem_flag_validity(KVM_X86_SEV_ES_VM, 0);
+	test_vm_type_gmem_flag_validity(KVM_X86_SNP_VM, 0);
+	test_vm_type_gmem_flag_validity(KVM_X86_TDX_VM, 0);
+#endif
+}
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
+
+	test_gmem_flag_validity();
+
+	test_with_type(VM_TYPE_DEFAULT, 0, false);
+	if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
+		test_with_type(VM_TYPE_DEFAULT, GUEST_MEMFD_FLAG_SUPPORT_SHARED,
+			       true);
+	}
+
+#ifdef __x86_64__
+	test_with_type(KVM_X86_SW_PROTECTED_VM, 0, false);
+	if (kvm_has_cap(KVM_CAP_GMEM_SHARED_MEM)) {
+		test_with_type(KVM_X86_SW_PROTECTED_VM,
+			       GUEST_MEMFD_FLAG_SUPPORT_SHARED, true);
+	}
+#endif
 }