
[RFC,v2,02/10] KVM: x86: Adjust locking order in move_enc_context_from

Message ID ee22d844512a828dc5285a93676699d1aca0e0ed.1749672978.git.afranji@google.com
State New
Series Add TDX intra-host migration support

Commit Message

Ryan Afranji June 11, 2025, 9:16 p.m. UTC
Previously, the migration function move_enc_context_from() acquired its
locks in the order: 1) memslot lock, 2) vCPU lock. This can trigger a
deadlock warning because a vCPU IOCTL that modifies memslots acquires the
same locks in the reverse order: 1) vCPU lock, 2) memslot lock.
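
For illustration only, a minimal userspace sketch of the ordering rule at
stake (vcpu_lock and memslot_lock below are stand-in pthread mutexes, not
the kernel's locks): both paths must take the locks in one global order.
If the migration path instead took the memslot lock first while a vCPU
IOCTL takes the vCPU lock first, the two paths form the classic ABBA
inversion that produces the warning.

/* Illustration, not kernel code: two stand-in mutexes for the vCPU and
 * memslot locks. Both threads take them in the same global order
 * (vCPU, then memslot), which is the ordering this patch enforces for
 * the migration path as well. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vcpu_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t memslot_lock = PTHREAD_MUTEX_INITIALIZER;

static void *vcpu_ioctl_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&vcpu_lock);		/* 1) vCPU lock */
	pthread_mutex_lock(&memslot_lock);	/* 2) memslot lock */
	puts("vCPU IOCTL path: vcpu -> memslot");
	pthread_mutex_unlock(&memslot_lock);
	pthread_mutex_unlock(&vcpu_lock);
	return NULL;
}

static void *migration_path(void *arg)
{
	(void)arg;
	/* Same global order as above; swapping these two acquisitions
	 * recreates the ABBA inversion that triggers the warning. */
	pthread_mutex_lock(&vcpu_lock);
	pthread_mutex_lock(&memslot_lock);
	puts("migration path: vcpu -> memslot");
	pthread_mutex_unlock(&memslot_lock);
	pthread_mutex_unlock(&vcpu_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, vcpu_ioctl_path, NULL);
	pthread_create(&b, NULL, migration_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}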

This patch adjusts move_enc_context_from() to match the vCPU IOCTL's
locking order, preventing the deadlock warning.
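
The resulting acquisition/release order is easier to see consolidated.
Below is a condensed sketch of kvm_vm_move_enc_context_from() after this
change, with the KVM helpers replaced by trivial stubs so it stands alone;
it shows only the lock order and the reverse-order unwind, not the real
error handling or migration work.

/* Stand-alone sketch: the kvm_* functions are trivial stubs standing in
 * for the real KVM helpers named in the diff, kept only to show the lock
 * order (dst vCPUs, src vCPUs, memslots) and the reverse-order unwind. */
#include <stdio.h>

struct kvm { const char *name; };

static int kvm_lock_all_vcpus(struct kvm *kvm)
{ printf("lock vcpus: %s\n", kvm->name); return 0; }
static void kvm_unlock_all_vcpus(struct kvm *kvm)
{ printf("unlock vcpus: %s\n", kvm->name); }
static int kvm_lock_vm_memslots(struct kvm *dst, struct kvm *src)
{ printf("lock memslots: %s, %s\n", dst->name, src->name); return 0; }
static void kvm_unlock_vm_memslots(struct kvm *dst, struct kvm *src)
{ printf("unlock memslots: %s, %s\n", dst->name, src->name); }
static int kvm_move_memory_ctxt_from(struct kvm *dst, struct kvm *src)
{ printf("migrate: %s <- %s\n", dst->name, src->name); return 0; }

static int move_enc_context_from_sketch(struct kvm *kvm, struct kvm *source_kvm)
{
	int r;

	r = kvm_lock_all_vcpus(kvm);		/* 1) destination vCPU locks */
	if (r)
		return r;
	r = kvm_lock_all_vcpus(source_kvm);	/* 2) source vCPU locks */
	if (r)
		goto out_unlock_vcpus;
	r = kvm_lock_vm_memslots(kvm, source_kvm);	/* 3) memslot locks */
	if (r)
		goto out_unlock_source_vcpus;

	r = kvm_move_memory_ctxt_from(kvm, source_kvm);

	kvm_unlock_vm_memslots(kvm, source_kvm);	/* release in reverse order */
out_unlock_source_vcpus:
	kvm_unlock_all_vcpus(source_kvm);
out_unlock_vcpus:
	kvm_unlock_all_vcpus(kvm);
	return r;
}

int main(void)
{
	struct kvm dst = { "dst" }, src = { "src" };

	return move_enc_context_from_sketch(&dst, &src);
}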

Signed-off-by: Ryan Afranji <afranji@google.com>
---
 arch/x86/kvm/svm/sev.c | 13 +------------
 arch/x86/kvm/x86.c     | 14 +++++++++++++-
 2 files changed, 14 insertions(+), 13 deletions(-)

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 402543994b0b..380d5951f8dd 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1961,26 +1961,15 @@  int sev_vm_move_enc_context_from(struct kvm *kvm, struct kvm *source_kvm)
 		charged = true;
 	}
 
-	ret = kvm_lock_all_vcpus(kvm);
-	if (ret)
-		goto out_dst_cgroup;
-	ret = kvm_lock_all_vcpus(source_kvm);
-	if (ret)
-		goto out_dst_vcpu;
-
 	ret = sev_check_source_vcpus(kvm, source_kvm);
 	if (ret)
-		goto out_source_vcpu;
+		goto out_dst_cgroup;
 
 	sev_migrate_from(kvm, source_kvm);
 	kvm_vm_dead(source_kvm);
 	cg_cleanup_sev = src_sev;
 	ret = 0;
 
-out_source_vcpu:
-	kvm_unlock_all_vcpus(source_kvm);
-out_dst_vcpu:
-	kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
 	/* Operates on the source on success, on the destination on failure.  */
 	if (charged)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b1672379a16b..c28fa28a8e42 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6743,10 +6743,18 @@  static int kvm_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 	if (r)
 		goto out_mark_migration_done;
 
-	r = kvm_lock_vm_memslots(kvm, source_kvm);
+	r = kvm_lock_all_vcpus(kvm);
 	if (r)
 		goto out_unlock;
 
+	r = kvm_lock_all_vcpus(source_kvm);
+	if (r)
+		goto out_unlock_vcpus;
+
+	r = kvm_lock_vm_memslots(kvm, source_kvm);
+	if (r)
+		goto out_unlock_source_vcpus;
+
 	r = kvm_move_memory_ctxt_from(kvm, source_kvm);
 	if (r)
 		goto out_unlock_memslots;
@@ -6762,6 +6770,10 @@  static int kvm_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 
 out_unlock_memslots:
 	kvm_unlock_vm_memslots(kvm, source_kvm);
+out_unlock_source_vcpus:
+	kvm_unlock_all_vcpus(source_kvm);
+out_unlock_vcpus:
+	kvm_unlock_all_vcpus(kvm);
 out_unlock:
 	kvm_unlock_two_vms(kvm, source_kvm);
 out_mark_migration_done: