@@ -460,17 +460,34 @@ static void lateeoi_list_add(struct irq_info *info)
 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 }
 
-static void xen_irq_lateeoi_locked(struct irq_info *info)
+static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 {
 	evtchn_port_t evtchn;
 	unsigned int cpu;
+	unsigned int delay = 0;
 
 	evtchn = info->evtchn;
 	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
 		return;
 
+	if (spurious) {
+		if ((1 << info->spurious_cnt) < (HZ << 2))
+			info->spurious_cnt++;
+		if (info->spurious_cnt > 1) {
+			delay = 1 << (info->spurious_cnt - 2);
+			if (delay > HZ)
+				delay = HZ;
+			if (!info->eoi_time)
+				info->eoi_cpu = smp_processor_id();
+			info->eoi_time = get_jiffies_64() + delay;
+		}
+	} else {
+		info->spurious_cnt = 0;
+	}
+
 	cpu = info->eoi_cpu;
-	if (info->eoi_time && info->irq_epoch == per_cpu(irq_epoch, cpu)) {
+	if (info->eoi_time &&
+	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
 		lateeoi_list_add(info);
 		return;
 	}
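
The hunk above adds an exponential back-off for events flagged as spurious: starting with the second consecutive spurious event the EOI is deferred by 1 << (spurious_cnt - 2) jiffies, capped at HZ (one second), and spurious_cnt stops growing once 1 << spurious_cnt reaches 4 * HZ. The "|| delay" added to the epoch check then keeps such an event on the delayed-EOI list even when the CPU's interrupt epoch has already moved on. Below is a minimal user-space sketch of just the delay calculation, with HZ hard-coded to 250 purely for illustration (the real value is a kernel configuration choice):

#include <stdio.h>

#define HZ 250

static short spurious_cnt;

static unsigned int next_delay(int spurious)
{
        unsigned int delay = 0;

        if (!spurious) {
                spurious_cnt = 0;       /* well-behaved event: reset the back-off */
                return 0;
        }

        /* Let the counter grow only while 1 << spurious_cnt is below 4 * HZ. */
        if ((1 << spurious_cnt) < (HZ << 2))
                spurious_cnt++;

        /*
         * The first spurious event is tolerated; from the second one on the
         * EOI is delayed by 1, 2, 4, ... jiffies, capped at HZ (one second).
         */
        if (spurious_cnt > 1) {
                delay = 1 << (spurious_cnt - 2);
                if (delay > HZ)
                        delay = HZ;
        }

        return delay;
}

int main(void)
{
        for (int i = 0; i < 12; i++)
                printf("spurious event %2d -> delay %u jiffies\n",
                       i + 1, next_delay(1));
        return 0;
}

With these numbers the delays come out as 0, 1, 2, 4, ... jiffies, saturating at HZ after roughly ten spurious events in a row; a single well-behaved event resets the counter.
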
@@ -507,7 +524,7 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
 
 		info->eoi_time = 0;
 
-		xen_irq_lateeoi_locked(info);
+		xen_irq_lateeoi_locked(info, false);
 	}
 
 	if (info)
@@ -536,7 +553,7 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
 	info = info_for_irq(irq);
 
 	if (info)
-		xen_irq_lateeoi_locked(info);
+		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
 	read_unlock_irqrestore(&evtchn_rwlock, flags);
 }
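
Callers of xen_irq_lateeoi() report an unproductive event by passing XEN_EOI_FLAG_SPURIOUS, which feeds the back-off above. The sketch below shows how a backend interrupt handler might use it; the handler name and process_pending_requests() are invented for illustration, only xen_irq_lateeoi() and XEN_EOI_FLAG_SPURIOUS come from the patched code:

#include <linux/interrupt.h>
#include <xen/events.h>

/* Hypothetical per-driver work function, stubbed out for the sketch. */
static bool process_pending_requests(void *dev_id)
{
        return false;   /* pretend the ring was empty, i.e. a spurious wakeup */
}

static irqreturn_t backend_evtchn_handler(int irq, void *dev_id)
{
        bool did_work = process_pending_requests(dev_id);

        /* Unproductive wakeups feed the spurious back-off shown above. */
        xen_irq_lateeoi(irq, did_work ? 0 : XEN_EOI_FLAG_SPURIOUS);

        return IRQ_HANDLED;
}
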
@@ -1439,7 +1456,7 @@ int evtchn_get(unsigned int evtchn)
 		goto done;
 
 	err = -EINVAL;
-	if (info->refcnt <= 0)
+	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
 		goto done;
 
 	info->refcnt++;
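
Because refcnt is narrowed to a 16-bit field in the header change below, evtchn_get() now also refuses to take a reference once the counter has reached SHRT_MAX, so repeated gets can no longer wrap it into negative values. A stand-alone sketch of that saturation check (take_ref() is an illustrative stand-in, not a kernel interface):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool take_ref(short *refcnt)
{
        /* Mirror the patched check: reject dead entries and a saturated count. */
        if (*refcnt <= 0 || *refcnt == SHRT_MAX)
                return false;

        (*refcnt)++;
        return true;
}

int main(void)
{
        short refcnt = SHRT_MAX - 1;

        printf("%d\n", take_ref(&refcnt));      /* 1: bumps refcnt to SHRT_MAX */
        printf("%d\n", take_ref(&refcnt));      /* 0: refused, would overflow  */
        return 0;
}
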
@@ -31,7 +31,8 @@ enum xen_irq_type {
 struct irq_info {
 	struct list_head list;
 	struct list_head eoi_list;
-	int refcnt;
+	short refcnt;
+	short spurious_cnt;
 	enum xen_irq_type type;		/* type */
 	unsigned irq;
 	unsigned int evtchn;		/* event channel */
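
Splitting the old int refcnt into two shorts keeps both counters within the four bytes the original field occupied, so struct irq_info should not grow on common ABIs. A quick stand-alone check of that packing assumption (field subset only, not the real struct):

#include <stdio.h>

struct before { int refcnt; };
struct after  { short refcnt; short spurious_cnt; };

int main(void)
{
        /* Both typically report 4 bytes on 32- and 64-bit targets. */
        printf("before: %zu bytes, after: %zu bytes\n",
               sizeof(struct before), sizeof(struct after));
        return 0;
}
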