This enables using the usual get_jiffies_64() but taking into account
time spent sleeping, giving the high performance characteristics of
querying jiffies without the drawback of losing the time the system
spent suspended. We accomplish this by precomputing the boottime
jiffies offset whenever it is updated, rather than doing the
expensive-ish div_u64 on each query.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
---
 include/linux/timekeeper_internal.h |  2 ++
 include/linux/timekeeping.h         |  2 ++
 kernel/time/timekeeping.c           | 12 ++++++++++++
 3 files changed, 16 insertions(+)

--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -51,6 +51,7 @@ struct tk_read_base {
* @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
* @offs_real: Offset clock monotonic -> clock realtime
* @offs_boot: Offset clock monotonic -> clock boottime
+ * @offs_boot_jiffies64:	Offset clock monotonic -> clock boottime in jiffies64
* @offs_tai: Offset clock monotonic -> clock tai
* @tai_offset: The current UTC to TAI offset in seconds
* @clock_was_set_seq: The sequence number of clock was set events
@@ -93,6 +94,7 @@ struct timekeeper {
struct timespec64 wall_to_monotonic;
ktime_t offs_real;
ktime_t offs_boot;
+ u64 offs_boot_jiffies64;
ktime_t offs_tai;
s32 tai_offset;
unsigned int clock_was_set_seq;
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -151,6 +151,8 @@ extern u64 ktime_get_raw_fast_ns(void);
extern u64 ktime_get_boot_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);
+extern u64 get_jiffies_boot_64(void);
+
/*
* timespec64/time64_t interfaces utilizing the ktime based ones
* for API completeness, these could be implemented more efficiently
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -146,6 +146,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
tk->offs_boot = ktime_add(tk->offs_boot, delta);
+ tk->offs_boot_jiffies64 = nsecs_to_jiffies64(ktime_to_ns(tk->offs_boot));
}

/*
@@ -539,6 +540,17 @@ u64 ktime_get_real_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);

+/**
+ * get_jiffies_boot_64 - The normal get_jiffies_64(), but taking into
+ * account the time spent sleeping. This does not do any sort of locking
+ * on the time spent sleeping.
+ */
+u64 get_jiffies_boot_64(void)
+{
+ return get_jiffies_64() + tk_core.timekeeper.offs_boot_jiffies64;
+}
+EXPORT_SYMBOL(get_jiffies_boot_64);
+
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
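
For illustration (this example is not part of the patch, and the names
in it are made up): a caller that wants an expiry check to keep
counting across suspend could stamp events with the new helper and
compare later, paying only a 64-bit addition per query.

#include <linux/jiffies.h>
#include <linux/timekeeping.h>
#include <linux/types.h>

/* Hypothetical policy constant, for the sake of the example. */
#define EXAMPLE_REKEY_AFTER_JIFFIES (120 * HZ)

struct example_peer {
	u64 last_rekey; /* stamped with get_jiffies_boot_64() */
};

static bool example_needs_rekey(const struct example_peer *peer)
{
	/*
	 * Unlike a plain get_jiffies_64() comparison, this also
	 * advances across time spent in suspend, since the helper
	 * folds in the precomputed offs_boot_jiffies64.
	 */
	return get_jiffies_boot_64() - peer->last_rekey >=
	       EXAMPLE_REKEY_AFTER_JIFFIES;
}

Without the precomputed offset, each such query would pay for the
division inside nsecs_to_jiffies64(); with it, that conversion happens
only when tk_update_sleep_time() runs.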
--
2.21.0