[V12,2/2] irq: Compute the periodic interval for interrupts

Message ID 1498227072-5980-2-git-send-email-daniel.lezcano@linaro.org
State New
Headers show
Series
  • [V12,1/2] irq: Track the interrupt timings
Related show

Commit Message

Daniel Lezcano June 23, 2017, 2:11 p.m.
An interrupt typically behaves as a burst of activity at a periodic time interval,
followed by one or two peaks of longer interval.

As the time intervals are periodic, statistically speaking they follow a normal
distribution and each interrupt can be tracked individually.

This patch does statistics on all interrupts, except the timers which are
deterministic by essence. The goal is to extract the periodicity for each
interrupt, add it to the last timestamp, and thereby predict the next event.

Taking the earliest prediction gives the expected wakeup on the system (assuming
a timer won't expire before).

As stated in the previous patch, this code is not enabled in the kernel by
default.

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
---
Changelog:

   V12:
	- Folded unsigned int -> int change into the first patch
	- Made struct irqt_stat fields names more explicit and tabulate spaced
	- Removed comment warning for irq_disabled in irqs_update()
	- Moved function warning for irq_disabled with a check and comment
	  in irq_timings_next_event()
	- Inverted irq_timings_alloc() variable declaration order
   V11:
	- No changes
   V10:
	- Simplified index/count computation
   V9:
	- Deal with 48+16 bits encoded values
	- Changed irq_stat => irqt_stat to prevent name collision on s390
	- Changed div64 by constant IRQ_TIMINGS_SHIFT bit shift for average
	- Changed div64 by constant IRQ_TIMINGS_SHIFT bit shift for variance

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>

---
 include/linux/interrupt.h |   1 +
 kernel/irq/internals.h    |  19 +++
 kernel/irq/timings.c      | 339 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 359 insertions(+)

-- 
2.7.4

Comments

Peter Zijlstra July 19, 2017, 11:38 a.m. | #1
On Sat, Jun 24, 2017 at 03:02:30AM -0700, tip-bot for Daniel Lezcano wrote:
> +	/*

> +	 * Look in the list of interrupts' statistics, the earliest

> +	 * next event.

> +	 */

> +	idr_for_each_entry(&irqt_stats, s, i) {

> +

> +		irqs = this_cpu_ptr(s);

> +

> +		if (!irqs->valid)

> +			continue;

> +

> +		if (irqs->next_evt <= now) {

> +			irq = i;

> +			next_evt = now;

> +

> +			/*

> +			 * This interrupt mustn't use in the future

> +			 * until new events occur and update the

> +			 * statistics.

> +			 */

> +			irqs->valid = 0;

> +			break;

> +		}

> +

> +		if (irqs->next_evt < next_evt) {

> +			irq = i;

> +			next_evt = irqs->next_evt;

> +		}

> +	}


That is a fairly expensive thing to do.. sure the 'continue' avoids the
very worst of it, but its still painful.

How about something like so on top? (compile tested only)

---
 kernel/irq/internals.h |  4 +++-
 kernel/irq/irqdesc.c   |  2 ++
 kernel/irq/timings.c   | 35 ++++++++++++++++++++++-------------
 3 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index dbfba9933ed2..b96511ca7cd3 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -275,6 +275,7 @@ struct irq_timings {
 
 DECLARE_PER_CPU(struct irq_timings, irq_timings);
 
+extern void irq_timings_init(void);
 extern void irq_timings_free(int irq);
 extern int irq_timings_alloc(int irq);
 
@@ -357,9 +358,10 @@ static __always_inline void record_irq_time(struct irq_desc *desc)
 	}
 }
 #else
+static inline void irq_timings_init(void) {}
 static inline void irq_remove_timings(struct irq_desc *desc) {}
 static inline void irq_setup_timings(struct irq_desc *desc,
-				     struct irqaction *act) {};
+				     struct irqaction *act) {}
 static inline void record_irq_time(struct irq_desc *desc) {}
 #endif /* CONFIG_IRQ_TIMINGS */
 
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 73be2b3909bd..17e95592f291 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -493,6 +493,7 @@ int __init early_irq_init(void)
 	struct irq_desc *desc;
 
 	init_irq_default_affinity();
+	irq_timings_init();
 
 	/* Let arch update nr_irqs and return the nr of preallocated irqs */
 	initcnt = arch_probe_nr_irqs();
@@ -532,6 +533,7 @@ int __init early_irq_init(void)
 	struct irq_desc *desc;
 
 	init_irq_default_affinity();
+	irq_timings_init();
 
 	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index c8c1d073fbf1..6bddfbb8d419 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -24,15 +24,17 @@
 DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
 
 DEFINE_PER_CPU(struct irq_timings, irq_timings);
+static DEFINE_PER_CPU(struct list_head, irqt_list);
 
 struct irqt_stat {
-	u64	next_evt;
-	u64	last_ts;
-	u64	variance;
-	u32	avg;
-	u32	nr_samples;
-	int	anomalies;
-	int	valid;
+	u64			next_evt;
+	u64			last_ts;
+	u64			variance;
+	u32			avg;
+	u32			nr_samples;
+	int			anomalies;
+	int			valid;
+	struct list_head	list;
 };
 
 static DEFINE_IDR(irqt_stats);
@@ -197,6 +199,7 @@ static void irqs_update(struct irqt_stat *irqs, u64 ts)
 	 * the next event on it.
 	 */
 	irqs->valid = 1;
+	list_add(&irqs->list, this_cpu_ptr(&irqt_list));
 
 	/*
 	 * Online average algorithm:
@@ -254,8 +257,8 @@ static void irqs_update(struct irqt_stat *irqs, u64 ts)
 u64 irq_timings_next_event(u64 now)
 {
 	struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
-	struct irqt_stat *irqs;
 	struct irqt_stat __percpu *s;
+	struct irqt_stat *irqs, *tmp;
 	u64 ts, next_evt = U64_MAX;
 	int i, irq = 0;
 
@@ -297,11 +300,8 @@ u64 irq_timings_next_event(u64 now)
 	 * Look in the list of interrupts' statistics, the earliest
 	 * next event.
 	 */
-	idr_for_each_entry(&irqt_stats, s, i) {
-
-		irqs = this_cpu_ptr(s);
-
-		if (!irqs->valid)
+	list_for_each_entry_safe(irqs, tmp, this_cpu_ptr(&irqt_list), list) {
+		if (WARN_ON_ONCE(!irqs->valid))
 			continue;
 
 		if (irqs->next_evt <= now) {
@@ -314,6 +314,7 @@ u64 irq_timings_next_event(u64 now)
 			 * statistics.
 			 */
 			irqs->valid = 0;
+			list_del(&irqs->list);
 			break;
 		}
 
@@ -367,3 +368,11 @@ int irq_timings_alloc(int irq)
 
 	return 0;
 }
+
+void __init irq_timings_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(irqt_list, cpu));
+}

Peter Zijlstra July 19, 2017, 1:39 p.m. | #2
On Sat, Jun 24, 2017 at 03:02:30AM -0700, tip-bot for Daniel Lezcano wrote:

> +	/*

> +	 * The interrupt is considered stable enough to try to predict

> +	 * the next event on it.

> +	 */

> +	irqs->valid = 1;

> +

> +	/*

> +	 * Online average algorithm:

> +	 *

> +	 *  new_average = average + ((value - average) / count)

> +	 *

> +	 * The variance computation depends on the new average

> +	 * to be computed here first.

> +	 *

> +	 */

> +	irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT);

> +

> +	/*

> +	 * Online variance algorithm:

> +	 *

> +	 *  new_variance = variance + (value - average) x (value - new_average)

> +	 *

> +	 * Warning: irqs->avg is updated with the line above, hence

> +	 * 'interval - irqs->avg' is no longer equal to 'diff'

> +	 */

> +	irqs->variance = irqs->variance + (diff * (interval - irqs->avg));

> +

> +	/*

> +	 * Update the next event

> +	 */

> +	irqs->next_evt = ts + irqs->avg;

> +}


This implements the CDF bias I spoke of in that other thread. Very much
RFC, has not in fact been near a compiler.

---
Subject: irq/timings: Add estimation bias
From: Peter Zijlstra <peterz@infradead.org>

Date: Wed Jul 19 14:59:17 CEST 2017

When estimating a normal random variable the average is, per
definition, too long 50% of the time.

When we use this variable to select idle states, picking too deep an
idle state results in increased exit latency, which, if the estimate
was wrong, results in decreased performance.

Hence we'd like to be able to bias our estimate to be short a specific
percent of the time. We can readily compute this using the Cumulative
Distribution Function, since the CDF(x) gives P(X <= x).

Implement this using an inverse Z table for the normal CDF.

======8<====[ cdf.cc ]====>8======

#include <boost/math/special_functions/erf.hpp>
#include <math.h>
#include <stdio.h>

#define CDF_BITS        16

int main(void)
{
        double z;
        int i;

        printf("#define CDF_BITS\t%d\n\n", CDF_BITS);
        printf("static const int normal_inv_cdf[] = {\n");

        for (i=50; i<100; i++) {
                z = sqrt(2.0) * boost::math::erf_inv( (2*(double)i)/100.0 - 1.0 );

                printf("\t/* %02d%% */ 0x%05x,\n", i, (int)round((1 << CDF_BITS) * z));
        }

        printf("};\n");

        return 0;
}

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

---
 include/linux/normal-cdf.h |   74 +++++++++++++++++++++++++++++++++++++++++++++
 kernel/irq/timings.c       |   23 +++++++++++++
 2 files changed, 97 insertions(+)

--- /dev/null
+++ b/include/linux/normal-cdf.h
@@ -0,0 +1,74 @@
+#ifndef _LINUX_NORMAL_CDF_H
+#define _LINUX_NORMAL_CDF_H
+
+#define NORMAL_CDF_BITS	16
+
+/*
+ *     x - mu
+ * Z = ------
+ *     sigma
+ *
+ *
+ *          1              Z
+ * CDF(x) = - [1 + erf( ------ )] = p
+ *          2           sqrt(2)
+ *
+ *                  -1
+ * Z = sqrt(2) * erf  (2p - 1)
+ */
+
+static const int normal_inv_cdf_Z[] = {
+	/* 50% */ 0x00000,
+	/* 51% */ 0x0066b,
+	/* 52% */ 0x00cd7,
+	/* 53% */ 0x01345,
+	/* 54% */ 0x019b6,
+	/* 55% */ 0x0202b,
+	/* 56% */ 0x026a6,
+	/* 57% */ 0x02d27,
+	/* 58% */ 0x033af,
+	/* 59% */ 0x03a40,
+	/* 60% */ 0x040db,
+	/* 61% */ 0x04781,
+	/* 62% */ 0x04e34,
+	/* 63% */ 0x054f4,
+	/* 64% */ 0x05bc4,
+	/* 65% */ 0x062a4,
+	/* 66% */ 0x06997,
+	/* 67% */ 0x0709e,
+	/* 68% */ 0x077bb,
+	/* 69% */ 0x07ef0,
+	/* 70% */ 0x0863f,
+	/* 71% */ 0x08dab,
+	/* 72% */ 0x09535,
+	/* 73% */ 0x09ce1,
+	/* 74% */ 0x0a4b2,
+	/* 75% */ 0x0acab,
+	/* 76% */ 0x0b4d0,
+	/* 77% */ 0x0bd25,
+	/* 78% */ 0x0c5ae,
+	/* 79% */ 0x0ce72,
+	/* 80% */ 0x0d774,
+	/* 81% */ 0x0e0be,
+	/* 82% */ 0x0ea55,
+	/* 83% */ 0x0f444,
+	/* 84% */ 0x0fe95,
+	/* 85% */ 0x10954,
+	/* 86% */ 0x11490,
+	/* 87% */ 0x1205b,
+	/* 88% */ 0x12ccc,
+	/* 89% */ 0x139fe,
+	/* 90% */ 0x14814,
+	/* 91% */ 0x1573c,
+	/* 92% */ 0x167b3,
+	/* 93% */ 0x179cd,
+	/* 94% */ 0x18e06,
+	/* 95% */ 0x1a515,
+	/* 96% */ 0x1c02d,
+	/* 97% */ 0x1e17c,
+	/* 98% */ 0x20dc2,
+	/* 99% */ 0x2538c,
+};
+
+#endif /* _LINUX_NORMAL_CDF_H */
+
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -16,6 +16,7 @@
 #include <linux/idr.h>
 #include <linux/irq.h>
 #include <linux/math64.h>
+#include <linux/normal-cdf.h>
 
 #include <trace/events/irq.h>
 
@@ -26,6 +27,21 @@ DEFINE_STATIC_KEY_FALSE(irq_timing_enabl
 DEFINE_PER_CPU(struct irq_timings, irq_timings);
 static DEFINE_PER_CPU(struct list_head, irqt_list);
 
+/*
+ * Used to reduce estimation error; where the average of the distribution
+ * is too long 50% of the time, we can compute the value for which we're no
+ * more than @pct too long using the CDF.
+ *
+ * This is important for performance reasons, picking too deep a C state
+ * results in increased exit latencies when we were wrong, and hence impacts
+ * performance. By changing the estimate to be shorter, we alleviate this.
+ *
+ * Default to 50% for a decent power / performance ratio.
+ *
+ * 1 <= irq_timings_pct <= 50
+ */
+int irq_timings_pct = 50;
+
 struct irqt_stat {
 	u64			next_evt;
 	u64			last_ts;
@@ -226,6 +242,13 @@ static void irqs_update(struct irqt_stat
 	 * Update the next event
 	 */
 	irqs->next_evt = ts + irqs->avg;
+
+	if (irq_timings_pct != 50) {
+		int Z = normal_inv_cdf_Z[50 - irq_timings_pct];
+		u64 stdev = int_sqrt(irqs->variance);
+
+		irqs->next_evt -= (Z * stdev) >> NORMAL_CDF_BITS;
+	}
 }
 
 /**

Patch

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9f61723..37f8e35 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -706,6 +706,7 @@  static inline void init_irq_proc(void)
 #ifdef CONFIG_IRQ_TIMINGS
 void irq_timings_enable(void);
 void irq_timings_disable(void);
+u64 irq_timings_next_event(u64 now);
 #endif
 
 struct seq_file;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index ec86e0e..3d989a0 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -247,13 +247,21 @@  struct irq_timings {
 
 DECLARE_PER_CPU(struct irq_timings, irq_timings);
 
+extern void irq_timings_free(int irq);
+extern int irq_timings_alloc(int irq);
+
 static inline void irq_remove_timings(struct irq_desc *desc)
 {
 	desc->istate &= ~IRQS_TIMINGS;
+
+	irq_timings_free(irq_desc_get_irq(desc));
 }
 
 static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
 {
+	int irq = irq_desc_get_irq(desc);
+	int ret;
+
 	/*
 	 * We don't need the measurement because the idle code already
 	 * knows the next expiry event.
@@ -261,6 +269,17 @@  static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *ac
 	if (act->flags & __IRQF_TIMER)
 		return;
 
+	/*
+	 * In case the timing allocation fails, we just want to warn,
+	 * not fail, so letting the system boot anyway.
+	 */
+	ret = irq_timings_alloc(irq);
+	if (ret) {
+		pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
+			irq, ret);
+		return;
+	}
+
 	desc->istate |= IRQS_TIMINGS;
 }
 
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index 56cf687..c8c1d07 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -8,10 +8,16 @@ 
  * published by the Free Software Foundation.
  *
  */
+#include <linux/kernel.h>
 #include <linux/percpu.h>
+#include <linux/slab.h>
 #include <linux/static_key.h>
 #include <linux/interrupt.h>
+#include <linux/idr.h>
 #include <linux/irq.h>
+#include <linux/math64.h>
+
+#include <trace/events/irq.h>
 
 #include "internals.h"
 
@@ -19,6 +25,18 @@  DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
 
 DEFINE_PER_CPU(struct irq_timings, irq_timings);
 
+struct irqt_stat {
+	u64	next_evt;
+	u64	last_ts;
+	u64	variance;
+	u32	avg;
+	u32	nr_samples;
+	int	anomalies;
+	int	valid;
+};
+
+static DEFINE_IDR(irqt_stats);
+
 void irq_timings_enable(void)
 {
 	static_branch_enable(&irq_timing_enabled);
@@ -28,3 +46,324 @@  void irq_timings_disable(void)
 {
 	static_branch_disable(&irq_timing_enabled);
 }
+
+/**
+ * irqs_update - update the irq timing statistics with a new timestamp
+ *
+ * @irqs: an irqt_stat struct pointer
+ * @ts: the new timestamp
+ *
+ * The statistics are computed online, in other words, the code is
+ * designed to compute the statistics on a stream of values rather
+ * than doing multiple passes on the values to compute the average,
+ * then the variance. The integer division introduces a loss of
+ * precision but with an acceptable error margin regarding the results
+ * we would have with the double floating precision: we are dealing
+ * with nanosec, so big numbers, consequently the mantisse is
+ * negligeable, especially when converting the time in usec
+ * afterwards.
+ *
+ * The computation happens at idle time. When the CPU is not idle, the
+ * interrupts' timestamps are stored in the circular buffer, when the
+ * CPU goes idle and this routine is called, all the buffer's values
+ * are injected in the statistical model continuing to extend the
+ * statistics from the previous busy-idle cycle.
+ *
+ * The observations showed a device will trigger a burst of periodic
+ * interrupts followed by one or two peaks of longer time, for
+ * instance when a SD card device flushes its cache, then the periodic
+ * intervals occur again. A one second inactivity period resets the
+ * stats, that gives us the certitude the statistical values won't
+ * exceed 1x10^9, thus the computation won't overflow.
+ *
+ * Basically, the purpose of the algorithm is to watch the periodic
+ * interrupts and eliminate the peaks.
+ *
+ * An interrupt is considered periodically stable if the interval of
+ * its occurrences follow the normal distribution, thus the values
+ * comply with:
+ *
+ *      avg - 3 x stddev < value < avg + 3 x stddev
+ *
+ * Which can be simplified to:
+ *
+ *      -3 x stddev < value - avg < 3 x stddev
+ *
+ *      abs(value - avg) < 3 x stddev
+ *
+ * In order to save a costly square root computation, we use the
+ * variance. For the record, stddev = sqrt(variance). The equation
+ * above becomes:
+ *
+ *      abs(value - avg) < 3 x sqrt(variance)
+ *
+ * And finally we square it:
+ *
+ *      (value - avg) ^ 2 < (3 x sqrt(variance)) ^ 2
+ *
+ *      (value - avg) x (value - avg) < 9 x variance
+ *
+ * Statistically speaking, any values out of this interval is
+ * considered as an anomaly and is discarded. However, a normal
+ * distribution appears when the number of samples is 30 (it is the
+ * rule of thumb in statistics, cf. "30 samples" on Internet). When
+ * there are three consecutive anomalies, the statistics are reset.
+ *
+ */
+static void irqs_update(struct irqt_stat *irqs, u64 ts)
+{
+	u64 old_ts = irqs->last_ts;
+	u64 variance = 0;
+	u64 interval;
+	s64 diff;
+
+	/*
+	 * The timestamps are absolute time values, we need to compute
+	 * the timing interval between two interrupts.
+	 */
+	irqs->last_ts = ts;
+
+	/*
+	 * The interval type is u64 in order to deal with the same
+	 * type in our computation, which prevents tricky issues with
+	 * overflow, sign and division.
+	 */
+	interval = ts - old_ts;
+
+	/*
+	 * The interrupt triggered more than one second apart, that
+	 * ends the sequence as predictable for our purpose. In this
+	 * case, assume we have the beginning of a sequence and the
+	 * timestamp is the first value. As it is impossible to
+	 * predict anything at this point, return.
+	 *
+	 * Note the first timestamp of the sequence will always fall
+	 * in this test because the old_ts is zero. That is what we
+	 * want as we need another timestamp to compute an interval.
+	 */
+	if (interval >= NSEC_PER_SEC) {
+		memset(irqs, 0, sizeof(*irqs));
+		irqs->last_ts = ts;
+		return;
+	}
+
+	/*
+	 * Pre-compute the delta with the average as the result is
+	 * used several times in this function.
+	 */
+	diff = interval - irqs->avg;
+
+	/*
+	 * Increment the number of samples.
+	 */
+	irqs->nr_samples++;
+
+	/*
+	 * Online variance divided by the number of elements if there
+	 * is more than one sample.  Normally the formula is division
+	 * by nr_samples - 1 but we assume the number of elements will be
+	 * more than 32 and dividing by 32 instead of 31 is precise
+	 * enough.
+	 */
+	if (likely(irqs->nr_samples > 1))
+		variance = irqs->variance >> IRQ_TIMINGS_SHIFT;
+
+	/*
+	 * The rule of thumb in statistics for the normal distribution
+	 * is having at least 30 samples in order to have the model to
+	 * apply. Values outside the interval are considered as an
+	 * anomaly.
+	 */
+	if ((irqs->nr_samples >= 30) && ((diff * diff) > (9 * variance))) {
+		/*
+		 * After three consecutive anomalies, we reset the
+		 * stats as it is no longer stable enough.
+		 */
+		if (irqs->anomalies++ >= 3) {
+			memset(irqs, 0, sizeof(*irqs));
+			irqs->last_ts = ts;
+			return;
+		}
+	} else {
+		/*
+	 * The anomalies must be consecutive, so at this
+		 * point, we reset the anomalies counter.
+		 */
+		irqs->anomalies = 0;
+	}
+
+	/*
+	 * The interrupt is considered stable enough to try to predict
+	 * the next event on it.
+	 */
+	irqs->valid = 1;
+
+	/*
+	 * Online average algorithm:
+	 *
+	 *  new_average = average + ((value - average) / count)
+	 *
+	 * The variance computation depends on the new average
+	 * to be computed here first.
+	 *
+	 */
+	irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT);
+
+	/*
+	 * Online variance algorithm:
+	 *
+	 *  new_variance = variance + (value - average) x (value - new_average)
+	 *
+	 * Warning: irqs->avg is updated with the line above, hence
+	 * 'interval - irqs->avg' is no longer equal to 'diff'
+	 */
+	irqs->variance = irqs->variance + (diff * (interval - irqs->avg));
+
+	/*
+	 * Update the next event
+	 */
+	irqs->next_evt = ts + irqs->avg;
+}
+
+/**
+ * irq_timings_next_event - Return when the next event is supposed to arrive
+ *
+ * During the last busy cycle, the number of interrupts is incremented
+ * and stored in the irq_timings structure. This information is
+ * necessary to:
+ *
+ * - know if the index in the table wrapped up:
+ *
+ *      If more than the array size interrupts happened during the
+ *      last busy/idle cycle, the index wrapped up and we have to
+ *      begin with the next element in the array which is the last one
+ *      in the sequence, otherwise it is at index 0.
+ *
+ * - have an indication of the interrupts activity on this CPU
+ *   (eg. irq/sec)
+ *
+ * The values are 'consumed' after inserting in the statistical model,
+ * thus the count is reinitialized.
+ *
+ * The array of values **must** be browsed in the time direction, the
+ * timestamp must increase between an element and the next one.
+ *
+ * Returns a nanosec time based estimation of the earliest interrupt,
+ * U64_MAX otherwise.
+ */
+u64 irq_timings_next_event(u64 now)
+{
+	struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
+	struct irqt_stat *irqs;
+	struct irqt_stat __percpu *s;
+	u64 ts, next_evt = U64_MAX;
+	int i, irq = 0;
+
+	/*
+	 * This function must be called with the local irq disabled in
+	 * order to prevent the timings circular buffer to be updated
+	 * while we are reading it.
+	 */
+	WARN_ON_ONCE(!irqs_disabled());
+
+	/*
+	 * Number of elements in the circular buffer: If it happens it
+	 * was flushed before, then the number of elements could be
+	 * smaller than IRQ_TIMINGS_SIZE, so the count is used,
+	 * otherwise the array size is used as we wrapped. The index
+	 * begins from zero when we did not wrap. That could be done
+	 * in a nicer way with the proper circular array structure
+	 * type but with the cost of extra computation in the
+	 * interrupt handler hot path. We choose efficiency.
+	 *
+	 * Inject measured irq/timestamp to the statistical model
+	 * while decrementing the counter because we consume the data
+	 * from our circular buffer.
+	 */
+	for (i = irqts->count & IRQ_TIMINGS_MASK,
+		     irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count);
+	     irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) {
+
+		irq = irq_timing_decode(irqts->values[i], &ts);
+
+		s = idr_find(&irqt_stats, irq);
+		if (s) {
+			irqs = this_cpu_ptr(s);
+			irqs_update(irqs, ts);
+		}
+	}
+
+	/*
+	 * Look in the list of interrupts' statistics, the earliest
+	 * next event.
+	 */
+	idr_for_each_entry(&irqt_stats, s, i) {
+
+		irqs = this_cpu_ptr(s);
+
+		if (!irqs->valid)
+			continue;
+
+		if (irqs->next_evt <= now) {
+			irq = i;
+			next_evt = now;
+
+			/*
+			 * This interrupt mustn't be used in the future
+			 * until new events occur and update the
+			 * statistics.
+			 */
+			irqs->valid = 0;
+			break;
+		}
+
+		if (irqs->next_evt < next_evt) {
+			irq = i;
+			next_evt = irqs->next_evt;
+		}
+	}
+
+	return next_evt;
+}
+
+void irq_timings_free(int irq)
+{
+	struct irqt_stat __percpu *s;
+
+	s = idr_find(&irqt_stats, irq);
+	if (s) {
+		free_percpu(s);
+		idr_remove(&irqt_stats, irq);
+	}
+}
+
+int irq_timings_alloc(int irq)
+{
+	struct irqt_stat __percpu *s;
+	int id;
+
+	/*
+	 * Some platforms can have the same private interrupt per cpu,
+	 * so this function may be called several times with the
+	 * same interrupt number. Just bail out in case the per cpu
+	 * stat structure is already allocated.
+	 */
+	s = idr_find(&irqt_stats, irq);
+	if (s)
+		return 0;
+
+	s = alloc_percpu(*s);
+	if (!s)
+		return -ENOMEM;
+
+	idr_preload(GFP_KERNEL);
+	id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
+	idr_preload_end();
+
+	if (id < 0) {
+		free_percpu(s);
+		return id;
+	}
+
+	return 0;
+}