diff mbox series

[v5,5/5] KVM: selftests: Add bus lock exit test

Message ID 20250502050346.14274-6-manali.shukla@amd.com
State New
Headers show
Series Add support for the Bus Lock Threshold | expand

Commit Message

Manali Shukla May 2, 2025, 5:03 a.m. UTC
From: Nikunj A Dadhania <nikunj@amd.com>

Add a test case to verify the Bus Lock exit feature.

The main thing that the selftest verifies is that when a Buslock is
generated in the guest context, the KVM_EXIT_X86_BUS_LOCK is triggered
for SVM or VMX when the KVM capability KVM_CAP_X86_BUS_LOCK_EXIT is
enabled.

This test case also verifies the Bus Lock exit in a nested scenario.

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
Co-developed-by: Manali Shukla <manali.shukla@amd.com>
Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 tools/testing/selftests/kvm/Makefile.kvm      |   1 +
 .../selftests/kvm/x86/kvm_buslock_test.c      | 135 ++++++++++++++++++
 2 files changed, 136 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86/kvm_buslock_test.c

Comments

Sean Christopherson May 16, 2025, 7:28 p.m. UTC | #1
On Fri, May 02, 2025, Manali Shukla wrote:
> diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
> new file mode 100644
> index 000000000000..9c081525ac2a
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
> @@ -0,0 +1,135 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2024 Advanced Micro Devices, Inc.
> + */
> +
> +#include "test_util.h"
> +#include "kvm_util.h"
> +#include "processor.h"
> +#include "svm_util.h"
> +#include "vmx.h"
> +
> +#define NR_ITERATIONS 100
> +#define L2_GUEST_STACK_SIZE 64
> +
> +#pragma GCC diagnostic push
> +#pragma GCC diagnostic ignored "-Waddress-of-packed-member"

Eww.

> +
> +struct buslock_test {
> +	unsigned char pad[PAGE_SIZE - 2];
> +	atomic_long_t val;
> +} __packed;

You don't need an entire page to generate a bus lock, two cache lines will do
nicely.  And there's certainly no need for __packed.

> +struct buslock_test test __aligned(PAGE_SIZE);
> +
> +static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
> +{
> +	asm volatile(LOCK_PREFIX "addl %1,%0"
> +		     : "+m" (v->counter)
> +		     : "ir" (i) : "memory");
> +}

If only there were utilities for atomics...

> +static void buslock_add(void)

guest_generate_buslocks()

> +{
> +	/*
> +	 * Increment a page unaligned variable atomically.
> +	 * This should generate a bus lock exit.

Not should, will.

> +	 */
> +	for (int i = 0; i < NR_ITERATIONS; i++)
> +		buslock_atomic_add(2, &test.val);

Don't do weird and completely arbitrary things like adding '2' instead of '1',
it makes readers look for intent and purpose that doesn't exist.

> +}

...

> +int main(int argc, char *argv[])
> +{
> +	struct kvm_vcpu *vcpu;
> +	struct kvm_run *run;
> +	struct kvm_vm *vm;
> +	vm_vaddr_t nested_test_data_gva;
> +
> +	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

There's no reason to make nested support a hard dependency, it's just as easy to
make it conditional.

> +	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
> +
> +	vm = vm_create(1);
> +	vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
> +	vcpu = vm_vcpu_add(vm, 0, guest_code);
> +
> +	if (kvm_cpu_has(X86_FEATURE_SVM))
> +		vcpu_alloc_svm(vm, &nested_test_data_gva);
> +	else
> +		vcpu_alloc_vmx(vm, &nested_test_data_gva);
> +
> +	vcpu_args_set(vcpu, 1, nested_test_data_gva);
> +
> +	run = vcpu->run;
> +
> +	for (;;) {
> +		struct ucall uc;
> +
> +		vcpu_run(vcpu);
> +
> +		if (run->exit_reason == KVM_EXIT_IO) {
> +			switch (get_ucall(vcpu, &uc)) {
> +			case UCALL_ABORT:
> +				REPORT_GUEST_ASSERT(uc);
> +				/* NOT REACHED */
> +			case UCALL_SYNC:
> +				continue;
> +			case UCALL_DONE:
> +				goto done;
> +			default:
> +				TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
> +			}
> +		}
> +
> +		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);
> +	}

*sigh*

This doesn't actually ****VERIFY**** that the expected number of bus lock exits
were generated.  KVM could literally do nothing and the test will pass.  E.g. the
test passes if I do this:

diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
index 9c081525ac2a..aa65d6be0f13 100644
--- a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
+++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
@@ -93,10 +93,10 @@ int main(int argc, char *argv[])
        vm_vaddr_t nested_test_data_gva;
 
        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
-       TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
+//     TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
 
        vm = vm_create(1);
-       vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
+//     vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
        vcpu = vm_vcpu_add(vm, 0, guest_code);
 
        if (kvm_cpu_has(X86_FEATURE_SVM))
--

The test would also fail to detect if KVM completely skipped the instruction.

This is not rocket science.  If you can't make your test fail by introducing bugs
in what you're testing, then your test is worthless.

No need for a v6, I'm going to do surgery when I apply, this series has dragged
on for far too long.
Manali Shukla May 19, 2025, 11:45 a.m. UTC | #2
On 5/17/2025 12:58 AM, Sean Christopherson wrote:
> On Fri, May 02, 2025, Manali Shukla wrote:
>> diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
>> new file mode 100644
>> index 000000000000..9c081525ac2a
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
>> @@ -0,0 +1,135 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/*
>> + * Copyright (C) 2024 Advanced Micro Devices, Inc.
>> + */
>> +
>> +#include "test_util.h"
>> +#include "kvm_util.h"
>> +#include "processor.h"
>> +#include "svm_util.h"
>> +#include "vmx.h"
>> +
>> +#define NR_ITERATIONS 100
>> +#define L2_GUEST_STACK_SIZE 64
>> +
>> +#pragma GCC diagnostic push
>> +#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
> 
> Eww.
> 
>> +
>> +struct buslock_test {
>> +	unsigned char pad[PAGE_SIZE - 2];
>> +	atomic_long_t val;
>> +} __packed;
> 
> You don't need an entire page to generate a bus lock, two cache lines will do
> nicely.  And there's certainly no need for __packed.
> 
>> +struct buslock_test test __aligned(PAGE_SIZE);
>> +
>> +static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
>> +{
>> +	asm volatile(LOCK_PREFIX "addl %1,%0"
>> +		     : "+m" (v->counter)
>> +		     : "ir" (i) : "memory");
>> +}
> 
> If only there were utilities for atomics...
> 
>> +static void buslock_add(void)
> 
> guest_generate_buslocks()
> 
>> +{
>> +	/*
>> +	 * Increment a page unaligned variable atomically.
>> +	 * This should generate a bus lock exit.
> 
> Not should, will.
> 
>> +	 */
>> +	for (int i = 0; i < NR_ITERATIONS; i++)
>> +		buslock_atomic_add(2, &test.val);
> 
> Don't do weird and completely arbitrary things like adding '2' instead of '1',
> it makes readers look for intent and purpose that doesn't exist.
> 

Got it. Thanks for the feedback.

>> +}
> 
> ...
> 
>> +int main(int argc, char *argv[])
>> +{
>> +	struct kvm_vcpu *vcpu;
>> +	struct kvm_run *run;
>> +	struct kvm_vm *vm;
>> +	vm_vaddr_t nested_test_data_gva;
>> +
>> +	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
> 
> There's no reason to make nested support a hard dependency, it's just as easy to
> make it conditional.
> 
>> +	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
>> +
>> +	vm = vm_create(1);
>> +	vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
>> +	vcpu = vm_vcpu_add(vm, 0, guest_code);
>> +
>> +	if (kvm_cpu_has(X86_FEATURE_SVM))
>> +		vcpu_alloc_svm(vm, &nested_test_data_gva);
>> +	else
>> +		vcpu_alloc_vmx(vm, &nested_test_data_gva);
>> +
>> +	vcpu_args_set(vcpu, 1, nested_test_data_gva);
>> +
>> +	run = vcpu->run;
>> +
>> +	for (;;) {
>> +		struct ucall uc;
>> +
>> +		vcpu_run(vcpu);
>> +
>> +		if (run->exit_reason == KVM_EXIT_IO) {
>> +			switch (get_ucall(vcpu, &uc)) {
>> +			case UCALL_ABORT:
>> +				REPORT_GUEST_ASSERT(uc);
>> +				/* NOT REACHED */
>> +			case UCALL_SYNC:
>> +				continue;
>> +			case UCALL_DONE:
>> +				goto done;
>> +			default:
>> +				TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
>> +			}
>> +		}
>> +
>> +		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);
>> +	}
> 
> *sigh*
> 
> This doesn't actually ****VERIFY**** that the expected number of bus lock exits
> were generated.  KVM could literally do nothing and the test will pass.  E.g. the
> test passes if I do this:
> 
> diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
> index 9c081525ac2a..aa65d6be0f13 100644
> --- a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
> +++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
> @@ -93,10 +93,10 @@ int main(int argc, char *argv[])
>         vm_vaddr_t nested_test_data_gva;
>  
>         TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
> -       TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
> +//     TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
>  
>         vm = vm_create(1);
> -       vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
> +//     vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
>         vcpu = vm_vcpu_add(vm, 0, guest_code);
>  
>         if (kvm_cpu_has(X86_FEATURE_SVM))
> --
> 
> The test would also fail to detect if KVM completely skipped the instruction.
> 
> This is not rocket science.  If you can't make your test fail by introducing bugs
> in what you're testing, then your test is worthless.
> 
> No need for a v6, I'm going to do surgery when I apply, this series has dragged
> on for far too long.

Understood. I agree the test should catch such failures — I’ll take that into account in future work.
Thanks for the review and for pushing this forward.

-Manali
diff mbox series

Patch

diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index f62b0a5aba35..fa65b82c1f53 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -78,6 +78,7 @@  TEST_GEN_PROGS_x86 += x86/hyperv_svm_test
 TEST_GEN_PROGS_x86 += x86/hyperv_tlb_flush
 TEST_GEN_PROGS_x86 += x86/kvm_clock_test
 TEST_GEN_PROGS_x86 += x86/kvm_pv_test
+TEST_GEN_PROGS_x86 += x86/kvm_buslock_test
 TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
 TEST_GEN_PROGS_x86 += x86/nested_emulation_test
 TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
new file mode 100644
index 000000000000..9c081525ac2a
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c
@@ -0,0 +1,135 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "vmx.h"
+
+#define NR_ITERATIONS 100
+#define L2_GUEST_STACK_SIZE 64
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
+/*
+ * Layout that forces 'val' to straddle a page boundary: 'pad' fills all
+ * but the last two bytes of a page, so the multi-byte 'val' crosses into
+ * the next page.  A LOCK-prefixed access to such a split operand is what
+ * generates a bus lock.  NOTE(review): per Sean's feedback, spanning two
+ * cache lines would suffice; a full page and __packed are not needed.
+ */
+struct buslock_test {
+	unsigned char pad[PAGE_SIZE - 2];
+	atomic_long_t val;
+} __packed;
+
+/* Page-aligned instance so 'val' is guaranteed to straddle the boundary. */
+struct buslock_test test __aligned(PAGE_SIZE);
+
+/*
+ * Atomically add @i to v->counter using an explicit LOCK prefix.  With a
+ * split (page-straddling) operand, the locked access generates a bus
+ * lock.  NOTE(review): the selftests' atomics utilities could be used
+ * instead of open-coded asm.
+ */
+static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
+{
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i) : "memory");
+}
+
+/* Generate NR_ITERATIONS bus locks via locked adds to the split 'test.val'. */
+static void buslock_add(void)
+{
+	/*
+	 * Increment a page unaligned variable atomically.
+	 * This will generate a bus lock exit.
+	 */
+	for (int i = 0; i < NR_ITERATIONS; i++)
+		buslock_atomic_add(2, &test.val);
+}
+
+#pragma GCC diagnostic pop
+
+/* L2 (nested) guest: generate bus locks, then signal completion. */
+static void l2_guest_code(void)
+{
+	buslock_add();
+	GUEST_DONE();
+}
+
+/*
+ * L1 guest (SVM): set up and enter L2, then assert on the exit code and
+ * report done.  NOTE(review): assumes L2's completion surfaces to L1 as
+ * SVM_EXIT_VMMCALL -- confirm against the ucall/exit-intercept setup in
+ * generic_svm_setup().
+ */
+static void l1_svm_code(struct svm_test_data *svm)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	generic_svm_setup(svm, l2_guest_code,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+	run_guest(vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	GUEST_DONE();
+}
+
+/*
+ * L1 guest (VMX): prepare VMX operation, launch L2, then assert on the
+ * exit reason and report done.  NOTE(review): assumes L2's completion
+ * surfaces to L1 as EXIT_REASON_VMCALL -- confirm against the ucall
+ * implementation and prepare_vmcs() defaults.
+ */
+static void l1_vmx_code(struct vmx_pages *vmx)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+	GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);
+	GUEST_ASSERT_EQ(load_vmcs(vmx), true);
+
+	prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_guest_code));
+	GUEST_ASSERT(!vmlaunch());
+
+	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_VMCALL);
+	GUEST_DONE();
+}
+
+/*
+ * L1 entry point: generate bus locks directly in L1, then repeat the
+ * test from a nested (L2) guest using whichever of SVM/VMX the vCPU has.
+ */
+static void guest_code(void *test_data)
+{
+	buslock_add();
+
+	if (this_cpu_has(X86_FEATURE_SVM))
+		l1_svm_code(test_data);
+	else
+		l1_vmx_code(test_data);
+}
+
+int main(int argc, char *argv[])
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_run *run;
+	struct kvm_vm *vm;
+	vm_vaddr_t nested_test_data_gva;
+
+	/*
+	 * NOTE(review): nested (SVM/VMX) support need not be a hard
+	 * requirement; the nested half of the test could be conditional.
+	 */
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
+
+	/* Create a one-vCPU VM with bus lock detection exits enabled. */
+	vm = vm_create(1);
+	vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
+	vcpu = vm_vcpu_add(vm, 0, guest_code);
+
+	/* Allocate nested state for whichever of SVM/VMX is available. */
+	if (kvm_cpu_has(X86_FEATURE_SVM))
+		vcpu_alloc_svm(vm, &nested_test_data_gva);
+	else
+		vcpu_alloc_vmx(vm, &nested_test_data_gva);
+
+	vcpu_args_set(vcpu, 1, nested_test_data_gva);
+
+	run = vcpu->run;
+
+	/*
+	 * Run the guest until it signals completion; any non-ucall exit
+	 * must be KVM_EXIT_X86_BUS_LOCK.
+	 *
+	 * NOTE(review): the loop does not count bus lock exits, so it
+	 * cannot detect KVM failing to generate them (or skipping the
+	 * locked instruction) -- the test passes even if zero such exits
+	 * occur.  See Sean's review comments on this thread.
+	 */
+	for (;;) {
+		struct ucall uc;
+
+		vcpu_run(vcpu);
+
+		if (run->exit_reason == KVM_EXIT_IO) {
+			switch (get_ucall(vcpu, &uc)) {
+			case UCALL_ABORT:
+				REPORT_GUEST_ASSERT(uc);
+				/* NOT REACHED */
+			case UCALL_SYNC:
+				continue;
+			case UCALL_DONE:
+				goto done;
+			default:
+				TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+			}
+		}
+
+		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);
+	}
+done:
+	kvm_vm_free(vm);
+	return 0;
+}