
[1/3] timekeeping: add missing non-_ns functions for fast accessors

Message ID 20190620141159.15965-1-Jason@zx2c4.com
State Superseded
Series [1/3] timekeeping: add missing non-_ns functions for fast accessors

Commit Message

Jason A. Donenfeld June 20, 2019, 2:11 p.m. UTC
Previously there was no analogue to get proper ktime_t versions of the
fast variety of ktime invocations. This commit makes the interface
uniform with the other accessors.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
---
 Documentation/core-api/timekeeping.rst |  7 +++-
 include/linux/timekeeping.h            | 28 ++++++++++++--
 kernel/time/timekeeping.c              | 52 +++++++++++++-------------
 3 files changed, 55 insertions(+), 32 deletions(-)

-- 
2.21.0
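
For readers unfamiliar with the accessor pairs, here is a minimal sketch of the "uniform
interface" the commit message refers to. The caller is hypothetical; the accessor names are
taken from the patch below.

```c
/* Hypothetical caller showing the ktime_t / _ns pairing this series adds
 * for the NMI-safe fast accessors, mirroring ktime_get()/ktime_get_ns().
 */
#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/timekeeping.h>

static void example_show_pairing(void)
{
	ktime_t k  = ktime_get();               /* existing ktime_t accessor  */
	u64     ns = ktime_get_ns();            /* existing nanosecond flavor */

	ktime_t kf  = ktime_get_mono_fast();    /* added by this patch        */
	u64     nsf = ktime_get_mono_fast_ns(); /* now wraps the ktime_t one  */

	pr_debug("slow: %lld ns, fast: %llu ns\n",
		 ktime_to_ns(k), (unsigned long long)nsf);
	(void)ns;
	(void)kf;
}
```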

Comments

Arnd Bergmann June 21, 2019, 2:29 p.m. UTC | #1
On Thu, Jun 20, 2019 at 4:12 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> Previously there was no analogue to get proper ktime_t versions of the
> fast variety of ktime invocations. This commit makes the interface
> uniform with the other accessors.
>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
> Cc: Arnd Bergmann <arnd@arndb.de>
> Cc: Thomas Gleixner <tglx@linutronix.de>

Consistency is good; I'm not sure there is a real use for the *_fast()
functions returning a ktime_t, but I don't mind adding them.

> +.. c:function:: ktime_t ktime_get_mono_fast_ns( void )
> +               ktime_t ktime_get_raw_fast_ns( void )
> +               ktime_t ktime_get_boottime_fast_ns( void )
> +               ktime_t ktime_get_real_fast_ns( void )
> +
>  .. c:function:: u64 ktime_get_mono_fast_ns( void )
>                 u64 ktime_get_raw_fast_ns( void )
>                 u64 ktime_get_boot_fast_ns( void )

Typo: you have the same function names listed twice here,
one of them should be ktime_get_mono_fast() instead of
ktime_get_mono_fast_ns().

Also, we might want to rename ktime_get_boot_fast_ns()
to ktime_get_boottime_fast_ns in the process. It seems there
is only a single caller.

      Arnd
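
One case where a ktime_t return value is arguably handier than a raw u64 is when the result
feeds straight into the ktime_* helpers. A minimal, hypothetical sketch, not from this series;
only ktime_get_mono_fast() comes from the patch, the surrounding names are made up:

```c
/* Hypothetical deadline check in a context that wants the NMI-safe fast
 * accessor, using ktime_* helpers directly instead of raw nanosecond math.
 */
#include <linux/ktime.h>
#include <linux/timekeeping.h>

static ktime_t example_deadline;

static void example_arm(unsigned int timeout_ms)
{
	example_deadline = ktime_add_ms(ktime_get_mono_fast(), timeout_ms);
}

static bool example_deadline_expired(void)
{
	return ktime_after(ktime_get_mono_fast(), example_deadline);
}
```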
Jason A. Donenfeld June 21, 2019, 2:33 p.m. UTC | #2
On Fri, Jun 21, 2019 at 4:29 PM Arnd Bergmann <arnd@arndb.de> wrote:
> Typo: you have the same function names listed twice here,
> one of them should be ktime_get_mono_fast() instead of
> ktime_get_mono_fast_ns().

Nice catch. Vim twitches gone crazy.

> Also, we might want to rename ktime_get_boot_fast_ns()
> to ktime_get_boottime_fast_ns in the process. It seems there
> is only a single caller.

And tai -> clocktai on the others. I can send a followup patch to
unify all those after this set.
Arnd Bergmann June 21, 2019, 2:40 p.m. UTC | #3
On Fri, Jun 21, 2019 at 4:33 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> On Fri, Jun 21, 2019 at 4:29 PM Arnd Bergmann <arnd@arndb.de> wrote:
> > Typo: you have the same function names listed twice here,
> > one of them should be ktime_get_mono_fast() instead of
> > ktime_get_mono_fast_ns().
>
> Nice catch. Vim twitches gone crazy.
>
> > Also, we might want to rename ktime_get_boot_fast_ns()
> > to ktime_get_boottime_fast_ns in the process. It seems there
> > is only a single caller.
>
> And tai -> clocktai on the others. I can send a followup patch to
> unify all those after this set.

Yes, that's probably best.

       Arnd

Patch

diff --git a/Documentation/core-api/timekeeping.rst b/Documentation/core-api/timekeeping.rst
index 93cbeb9daec0..ad32085174f8 100644
--- a/Documentation/core-api/timekeeping.rst
+++ b/Documentation/core-api/timekeeping.rst
@@ -94,7 +94,7 @@  different format depending on what is required by the user:
 	down the seconds to the full seconds of the last timer tick
 	using the respective reference.
 
-Coarse and fast_ns access
+Coarse and fast access
 -------------------------
 
 Some additional variants exist for more specialized cases:
@@ -125,6 +125,11 @@  Some additional variants exist for more specialized cases:
 	up to several microseconds on older hardware with an external
 	clocksource.
 
+.. c:function:: ktime_t ktime_get_mono_fast_ns( void )
+		ktime_t ktime_get_raw_fast_ns( void )
+		ktime_t ktime_get_boottime_fast_ns( void )
+		ktime_t ktime_get_real_fast_ns( void )
+
 .. c:function:: u64 ktime_get_mono_fast_ns( void )
 		u64 ktime_get_raw_fast_ns( void )
 		u64 ktime_get_boot_fast_ns( void )
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index a8ab0f143ac4..c5d360779fab 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -146,10 +146,30 @@  static inline u64 ktime_get_raw_ns(void)
 	return ktime_to_ns(ktime_get_raw());
 }
 
-extern u64 ktime_get_mono_fast_ns(void);
-extern u64 ktime_get_raw_fast_ns(void);
-extern u64 ktime_get_boot_fast_ns(void);
-extern u64 ktime_get_real_fast_ns(void);
+extern ktime_t ktime_get_mono_fast(void);
+extern ktime_t ktime_get_raw_fast(void);
+extern ktime_t ktime_get_boottime_fast(void);
+extern ktime_t ktime_get_real_fast(void);
+
+static inline u64 ktime_get_mono_fast_ns(void)
+{
+	return ktime_to_ns(ktime_get_mono_fast());
+}
+
+static inline u64 ktime_get_raw_fast_ns(void)
+{
+	return ktime_to_ns(ktime_get_raw_fast());
+}
+
+static inline u64 ktime_get_boot_fast_ns(void)
+{
+	return ktime_to_ns(ktime_get_boottime_fast());
+}
+
+static inline u64 ktime_get_real_fast_ns(void)
+{
+	return ktime_to_ns(ktime_get_real_fast());
+}
 
 /*
  * timespec64/time64_t interfaces utilizing the ktime based ones
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 44b726bab4bd..4c97c9c8c217 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -443,41 +443,40 @@  static void update_fast_timekeeper(const struct tk_read_base *tkr,
  * of the following timestamps. Callers need to be aware of that and
  * deal with it.
  */
-static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+static __always_inline ktime_t __ktime_get_fast(struct tk_fast *tkf)
 {
 	struct tk_read_base *tkr;
 	unsigned int seq;
-	u64 now;
+	ktime_t now;
 
 	do {
 		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
-		now = ktime_to_ns(tkr->base);
-
-		now += timekeeping_delta_to_ns(tkr,
+		now = ktime_add_ns(tkr->base,
+			timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
 					tk_clock_read(tkr),
 					tkr->cycle_last,
-					tkr->mask));
+					tkr->mask)));
 	} while (read_seqcount_retry(&tkf->seq, seq));
 
 	return now;
 }
 
-u64 ktime_get_mono_fast_ns(void)
+ktime_t ktime_get_mono_fast(void)
 {
-	return __ktime_get_fast_ns(&tk_fast_mono);
+	return __ktime_get_fast(&tk_fast_mono);
 }
-EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_mono_fast);
 
-u64 ktime_get_raw_fast_ns(void)
+ktime_t ktime_get_raw_fast(void)
 {
-	return __ktime_get_fast_ns(&tk_fast_raw);
+	return __ktime_get_fast(&tk_fast_raw);
 }
-EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_raw_fast);
 
 /**
- * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
+ * ktime_get_boottime_fast - NMI safe and fast access to boot clock.
  *
  * To keep it NMI safe since we're accessing from tracing, we're not using a
  * separate timekeeper with updates to monotonic clock and boot offset
@@ -497,47 +496,46 @@  EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
  * partially updated.  Since the tk->offs_boot update is a rare event, this
  * should be a rare occurrence which postprocessing should be able to handle.
  */
-u64 notrace ktime_get_boot_fast_ns(void)
+ktime_t notrace ktime_get_boottime_fast(void)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 
-	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
+	return ktime_add(ktime_get_mono_fast(), tk->offs_boot);
 }
-EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_boottime_fast);
 
 
 /*
- * See comment for __ktime_get_fast_ns() vs. timestamp ordering
+ * See comment for __ktime_get_fast() vs. timestamp ordering
  */
-static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
+static __always_inline ktime_t __ktime_get_real_fast(struct tk_fast *tkf)
 {
 	struct tk_read_base *tkr;
 	unsigned int seq;
-	u64 now;
+	ktime_t now;
 
 	do {
 		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
-		now = ktime_to_ns(tkr->base_real);
-
-		now += timekeeping_delta_to_ns(tkr,
+		now = ktime_add_ns(tkr->base_real,
+			timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
 					tk_clock_read(tkr),
 					tkr->cycle_last,
-					tkr->mask));
+					tkr->mask)));
 	} while (read_seqcount_retry(&tkf->seq, seq));
 
 	return now;
 }
 
 /**
- * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+ * ktime_get_real_fast: - NMI safe and fast access to clock realtime.
  */
-u64 ktime_get_real_fast_ns(void)
+ktime_t ktime_get_real_fast(void)
 {
-	return __ktime_get_real_fast_ns(&tk_fast_mono);
+	return __ktime_get_real_fast(&tk_fast_mono);
 }
-EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_real_fast);
 
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.