--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkct
isb();
}

-/*
- * Ensure that reads of the counter are treated the same as memory reads
- * for the purposes of ordering by subsequent memory barriers.
- *
- * This insanity brought to you by speculative system register reads,
- * out-of-order memory accesses, sequence locks and Thomas Gleixner.
- *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
- */
-#define arch_counter_enforce_ordering(val) do { \
- u64 tmp, _val = (val); \
- \
- asm volatile( \
- " eor %0, %1, %1\n" \
- " add %0, sp, %0\n" \
- " ldr xzr, [%0]" \
- : "=r" (tmp) : "r" (_val)); \
-} while (0)
-
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
u64 cnt;
@@ -224,8 +205,6 @@ static __always_inline u64 __arch_counte
return cnt;
}

-#undef arch_counter_enforce_ordering
-
static inline int arch_timer_arch_init(void)
{
return 0;
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -57,6 +57,25 @@ static inline unsigned long array_index_
return mask;
}

+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do { \
+ u64 tmp, _val = (val); \
+ \
+ asm volatile( \
+ " eor %0, %1, %1\n" \
+ " add %0, sp, %0\n" \
+ " ldr xzr, [%0]" \
+ : "=r" (tmp) : "r" (_val)); \
+} while (0)
+
#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
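
A note on how the relocated macro works, since barrier.h is where readers will now find it: `eor %0, %1, %1` always produces zero, but the CPU cannot compute that result, and therefore the address `sp + 0` of the following load, until the counter value in %1 is available. The dummy load from the stack is an ordinary memory read, so any later memory barrier (such as the smp_rmb() in a seqcount retry loop) now orders the counter read too. Below is a minimal standalone user-space sketch of the same trick; it is not part of the patch, it assumes a GNU AArch64 toolchain and that EL0 access to CNTVCT_EL0 is enabled (Linux normally enables it), and all names in it are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Same idea as arch_counter_enforce_ordering(), user-space spelling. */
#define counter_enforce_ordering(val) do {				\
	uint64_t tmp, _val = (val);					\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"	/* tmp = _val ^ _val == 0 */	\
	"	add	%0, sp, %0\n"	/* tmp = sp + 0 */		\
	"	ldr	xzr, [%0]"	/* dummy load, value discarded */ \
	: "=r" (tmp) : "r" (_val));					\
} while (0)

int main(void)
{
	uint64_t cnt;

	/* Leading ISB keeps the counter read from being hoisted. */
	asm volatile("isb; mrs %0, cntvct_el0" : "=r" (cnt) :: "memory");

	/* Give later barriers a memory access that depends on cnt. */
	counter_enforce_ordering(cnt);

	printf("CNTVCT_EL0: %llu\n", (unsigned long long)cnt);
	return 0;
}
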
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -85,11 +85,7 @@ static __always_inline u64 __arch_get_hw
*/
isb();
asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
- /*
- * This isb() is required to prevent that the seq lock is
- * speculated.#
- */
- isb();
+	arch_counter_enforce_ordering(res);

	return res;
}
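
The reason ordering matters here: __arch_get_hw_counter() runs inside the vDSO's seqcount retry loop, and smp_rmb() only orders memory reads; an MRS of CNTVCT_EL0 is not a memory access, so without the trailing isb() removed above (or the address dependency replacing it) the counter read could be satisfied after the retry check, pairing a counter value from one update window with timekeeping data from another. The following self-contained sketch shows the shape of that loop; it is illustrative only, the struct layout, field names and arithmetic are invented rather than the kernel's actual vDSO code, and it assumes GNU C on AArch64.

#include <stdint.h>
#include <stdio.h>

#define smp_rmb()	asm volatile("dmb ishld" ::: "memory")
#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))

/* Hypothetical timekeeping snapshot published by an updater. */
struct vdso_data {
	uint32_t seq;		/* odd while an update is in flight */
	uint64_t cycle_last;
	uint32_t mult;
	uint64_t nsec_base;
};

static inline uint64_t get_hw_counter(void)
{
	uint64_t res;

	asm volatile("isb; mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/*
	 * The patched kernel calls arch_counter_enforce_ordering(res)
	 * here, so that the smp_rmb() in the caller also orders the MRS.
	 */
	return res;
}

static uint64_t read_ns(const struct vdso_data *vd)
{
	uint32_t seq;
	uint64_t cycles, ns;

	do {
		seq = READ_ONCE(vd->seq);
		smp_rmb();	/* order seq read before data reads */
		cycles = get_hw_counter();
		ns = vd->nsec_base + (cycles - vd->cycle_last) * vd->mult;
		smp_rmb();	/* order data reads before the re-check */
	} while ((seq & 1) || seq != READ_ONCE(vd->seq));

	return ns;
}

int main(void)
{
	struct vdso_data vd = {
		.seq = 0, .cycle_last = 0, .mult = 1, .nsec_base = 0,
	};

	printf("ns: %llu\n", (unsigned long long)read_ns(&vd));
	return 0;
}

The payoff, presumably, is that the dummy load is considerably cheaper than a second full isb() on every clock_gettime() call, while giving the retry loop the same guarantee.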