
[v8,4/5] KVM: arm/arm64: remove coarse grain dist locking at kvm_vgic_sync_hwstate

Message ID: 1421685793-3547-5-git-send-email-eric.auger@linaro.org
State: New

Commit Message

Auger Eric Jan. 19, 2015, 4:43 p.m. UTC
To prepare for the irqfd addition, the coarse-grained locking is removed at
the kvm_vgic_sync_hwstate level and finer-grained locking is introduced in
vgic_process_maintenance only.

Signed-off-by: Eric Auger <eric.auger@linaro.org>
---
 virt/kvm/arm/vgic.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
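
As an aside for readers less familiar with these code paths, the following is
a minimal, standalone userspace sketch of the lock-scope change, not the
actual vgic code: a pthread mutex stands in for the kernel spinlock, and the
names sync_hwstate, process_maintenance, dist_lock and irq_queued are
hypothetical stand-ins for kvm_vgic_sync_hwstate, vgic_process_maintenance,
dist->lock and the shared per-IRQ distributor state. Before the patch the
caller held the distributor lock around the whole sync path; after it, only
the short section that updates shared distributor state takes the lock.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dist_lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_queued[4];          /* stands in for shared distributor state */

/* After the patch: only the shared-state update runs under the lock. */
static void process_maintenance(int irq)
{
	pthread_mutex_lock(&dist_lock);
	irq_queued[irq] = 0;       /* e.g. clearing the "queued" flag */
	pthread_mutex_unlock(&dist_lock);
	/* per-vcpu list register bookkeeping continues without the lock */
}

/*
 * Before the patch the caller took dist_lock around this whole loop,
 * i.e. coarse-grained locking at the sync_hwstate level.
 */
static void sync_hwstate(void)
{
	for (int irq = 0; irq < 4; irq++)
		process_maintenance(irq);
}

int main(void)
{
	sync_hwstate();
	printf("irq 0 queued: %d\n", irq_queued[0]);
	return 0;
}

The point visible in the sketch matches the diff below: code that only
touches per-vcpu state no longer serializes on the distributor lock, which is
now confined to the shared distributor state updates.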

Comments

Christoffer Dall Feb. 23, 2015, 8:38 p.m. UTC | #1
On Mon, Jan 19, 2015 at 05:43:12PM +0100, Eric Auger wrote:
> To prepare for the irqfd addition, the coarse-grained locking is removed at
> the kvm_vgic_sync_hwstate level and finer-grained locking is introduced in
> vgic_process_maintenance only.
> 
> Signed-off-by: Eric Auger <eric.auger@linaro.org>

Acked-by: Christoffer Dall <christoffer.dall@linaro.org>

Patch

diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c84f53df..18060ea 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1447,6 +1447,7 @@  epilog:
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 {
 	u32 status = vgic_get_interrupt_status(vcpu);
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	bool level_pending = false;
 
 	kvm_debug("STATUS = %08x\n", status);
@@ -1464,6 +1465,7 @@  static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 
+			spin_lock(&dist->lock);
 			vgic_irq_clear_queued(vcpu, vlr.irq);
 			WARN_ON(vlr.state & LR_STATE_MASK);
 			vlr.state = 0;
@@ -1491,6 +1493,8 @@  static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 				vgic_cpu_irq_clear(vcpu, vlr.irq);
 			}
 
+			spin_unlock(&dist->lock);
+
 			/*
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
@@ -1505,10 +1509,7 @@  static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	return level_pending;
 }
 
-/*
- * Sync back the VGIC state after a guest run. The distributor lock is
- * needed so we don't get preempted in the middle of the state processing.
- */
+/* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1555,14 +1556,10 @@  void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
-	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
-	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)