[RESEND,v2,3/9] asm-generic: Move some macros from linux/bitops.h to a new bits.h file

Message ID: 1529412794-17720-4-git-send-email-will.deacon@arm.com
State: New
Series: Rewrite asm-generic/bitops/{atomic,lock}.h and use on arm64

Commit Message

Will Deacon June 19, 2018, 12:53 p.m. UTC
In preparation for implementing the asm-generic atomic bitops in terms
of atomic_long_*, we need to prevent asm/atomic.h implementations from
pulling in linux/bitops.h. A common reason for this include is for the
BITS_PER_BYTE definition, so move this and some other BIT() and masking
macros into a new header file, linux/bits.h

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>

---
 include/linux/bitops.h | 22 +---------------------
 include/linux/bits.h   | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+), 21 deletions(-)
 create mode 100644 include/linux/bits.h

-- 
2.1.4
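
For illustration only (not part of the patch): a minimal sketch of how the macros that move into linux/bits.h are typically used. The MY_* names and my_test_bit() helper are made up for this example.

#include <linux/bits.h>

#define MY_ENABLE	BIT(0)		/* 0x1: a single-bit flag */
#define MY_IRQ_FIELD	GENMASK(7, 4)	/* 0xf0: bits 4..7 inclusive */

/* BIT_WORD()/BIT_MASK() locate bit 'nr' within an array of unsigned long */
static inline int my_test_bit(unsigned int nr, const unsigned long *addr)
{
	return !!(addr[BIT_WORD(nr)] & BIT_MASK(nr));
}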

Comments

Andrew Morton July 7, 2018, 12:30 a.m. UTC | #1
On Tue, 19 Jun 2018 13:53:08 +0100 Will Deacon <will.deacon@arm.com> wrote:

> In preparation for implementing the asm-generic atomic bitops in terms
> of atomic_long_*, we need to prevent asm/atomic.h implementations from
> pulling in linux/bitops.h. A common reason for this include is for the
> BITS_PER_BYTE definition, so move this and some other BIT() and masking
> macros into a new header file, linux/bits.h
> 
> --- a/include/linux/bitops.h
> +++ b/include/linux/bitops.h
> @@ -2,29 +2,9 @@
>  #ifndef _LINUX_BITOPS_H
>  #define _LINUX_BITOPS_H
>  #include <asm/types.h>
> +#include <linux/bits.h>
>  
> -#ifdef	__KERNEL__
> -#define BIT(nr)			(1UL << (nr))
> -#define BIT_ULL(nr)		(1ULL << (nr))
> -#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
> -#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
> -#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
> -#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
> -#define BITS_PER_BYTE		8
>  #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
> -#endif

Why does it leave BITS_TO_LONGS() in place?

That becomes unfortunate with Chris's patch, so I'm moving
BITS_TO_LONGS() into bits.h.


From: Chris Wilson <chris@chris-wilson.co.uk>
Subject: include/linux/bitops.h: introduce BITS_PER_TYPE

net_dim.h has a rather useful extension to BITS_PER_BYTE to compute the
number of bits in a type (BITS_PER_BYTE * sizeof(T)), so promote the macro
to bitops.h, alongside BITS_PER_BYTE, for wider usage.

Link: http://lkml.kernel.org/r/20180706094458.14116-1-chris@chris-wilson.co.uk
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andy Gospodarek <gospo@broadcom.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/bitops.h  |    3 ++-
 include/linux/net_dim.h |    1 -
 2 files changed, 2 insertions(+), 2 deletions(-)

diff -puN include/linux/bitops.h~bitops-introduce-bits_per_type include/linux/bitops.h
--- a/include/linux/bitops.h~bitops-introduce-bits_per_type
+++ a/include/linux/bitops.h
@@ -11,7 +11,8 @@
 #define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
 #define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
 #define BITS_PER_BYTE		8
-#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
 #endif
 
 /*
diff -puN include/linux/net_dim.h~bitops-introduce-bits_per_type include/linux/net_dim.h
--- a/include/linux/net_dim.h~bitops-introduce-bits_per_type
+++ a/include/linux/net_dim.h
@@ -363,7 +363,6 @@ static inline void net_dim_sample(u16 ev
 }
 
 #define NET_DIM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
 #define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
 
 static inline void net_dim_calc_stats(struct net_dim_sample *start,
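
For illustration (not part of either patch): what BITS_PER_TYPE() and BITS_TO_LONGS() evaluate to, assuming an LP64 build where sizeof(long) == 8. The MY_* names are made up for this example.

#include <linux/bitops.h>	/* provides BITS_PER_TYPE() and BITS_TO_LONGS() after the change above */

/*
 * BITS_PER_TYPE(u8) == 8, BITS_PER_TYPE(u64) == 64, BITS_PER_TYPE(long) == 64,
 * so BITS_TO_LONGS(100) == DIV_ROUND_UP(100, 64) == 2.
 */
#define MY_NR_FLAGS	100
static unsigned long my_flags[BITS_TO_LONGS(MY_NR_FLAGS)];	/* two longs */
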
Will Deacon July 9, 2018, 11:32 a.m. UTC | #2
Hi Andrew,

On Fri, Jul 06, 2018 at 05:30:49PM -0700, Andrew Morton wrote:
> On Tue, 19 Jun 2018 13:53:08 +0100 Will Deacon <will.deacon@arm.com> wrote:
> 
> > In preparation for implementing the asm-generic atomic bitops in terms
> > of atomic_long_*, we need to prevent asm/atomic.h implementations from
> > pulling in linux/bitops.h. A common reason for this include is for the
> > BITS_PER_BYTE definition, so move this and some other BIT() and masking
> > macros into a new header file, linux/bits.h
> > 
> > --- a/include/linux/bitops.h
> > +++ b/include/linux/bitops.h
> > @@ -2,29 +2,9 @@
> >  #ifndef _LINUX_BITOPS_H
> >  #define _LINUX_BITOPS_H
> >  #include <asm/types.h>
> > +#include <linux/bits.h>
> >  
> > -#ifdef	__KERNEL__
> > -#define BIT(nr)			(1UL << (nr))
> > -#define BIT_ULL(nr)		(1ULL << (nr))
> > -#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
> > -#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
> > -#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
> > -#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
> > -#define BITS_PER_BYTE		8
> >  #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
> > -#endif
> 
> Why does it leave BITS_TO_LONGS() in place?
> 
> That becomes unfortunate with Chris's patch, so I'm moving
> BITS_TO_LONGS() into bits.h.

The reason I avoided that was because it would pull in the dreaded
kernel.h for DIV_ROUND_UP, and then we're back to circular include hell :(
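
(For context, a sketch rather than anything from the mail: DIV_ROUND_UP() comes from linux/kernel.h and expands to (((n) + (d) - 1) / (d)), so BITS_TO_LONGS() is just a rounded-up division. With 64-bit longs:

	BITS_TO_LONGS(1)  -> DIV_ROUND_UP(1, 64)  -> (1  + 63) / 64 -> 1
	BITS_TO_LONGS(64) -> DIV_ROUND_UP(64, 64) -> (64 + 63) / 64 -> 1
	BITS_TO_LONGS(65) -> DIV_ROUND_UP(65, 64) -> (65 + 63) / 64 -> 2

Hosting it in bits.h would therefore mean including kernel.h from bits.h.)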

Will


> From: Chris Wilson <chris@chris-wilson.co.uk>
> Subject: include/linux/bitops.h: introduce BITS_PER_TYPE
> 
> net_dim.h has a rather useful extension to BITS_PER_BYTE to compute the
> number of bits in a type (BITS_PER_BYTE * sizeof(T)), so promote the macro
> to bitops.h, alongside BITS_PER_BYTE, for wider usage.
> 
> Link: http://lkml.kernel.org/r/20180706094458.14116-1-chris@chris-wilson.co.uk
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Reviewed-by: Jani Nikula <jani.nikula@intel.com>
> Cc: Randy Dunlap <rdunlap@infradead.org>
> Cc: Andy Gospodarek <gospo@broadcom.com>
> Cc: David S. Miller <davem@davemloft.net>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@kernel.org>
> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
> ---
> 
>  include/linux/bitops.h  |    3 ++-
>  include/linux/net_dim.h |    1 -
>  2 files changed, 2 insertions(+), 2 deletions(-)
> 
> diff -puN include/linux/bitops.h~bitops-introduce-bits_per_type include/linux/bitops.h
> --- a/include/linux/bitops.h~bitops-introduce-bits_per_type
> +++ a/include/linux/bitops.h
> @@ -11,7 +11,8 @@
>  #define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
>  #define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
>  #define BITS_PER_BYTE		8
> -#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
> +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
> +#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
>  #endif
>  
>  /*
> diff -puN include/linux/net_dim.h~bitops-introduce-bits_per_type include/linux/net_dim.h
> --- a/include/linux/net_dim.h~bitops-introduce-bits_per_type
> +++ a/include/linux/net_dim.h
> @@ -363,7 +363,6 @@ static inline void net_dim_sample(u16 ev
>  }
>  
>  #define NET_DIM_NEVENTS 64
> -#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
>  #define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
>  
>  static inline void net_dim_calc_stats(struct net_dim_sample *start,
> _
>
Andrew Morton July 9, 2018, 10:10 p.m. UTC | #3
On Mon, 9 Jul 2018 12:32:51 +0100 Will Deacon <will.deacon@arm.com> wrote:

> Hi Andrew,
> 
> On Fri, Jul 06, 2018 at 05:30:49PM -0700, Andrew Morton wrote:
> > On Tue, 19 Jun 2018 13:53:08 +0100 Will Deacon <will.deacon@arm.com> wrote:
> > 
> > > In preparation for implementing the asm-generic atomic bitops in terms
> > > of atomic_long_*, we need to prevent asm/atomic.h implementations from
> > > pulling in linux/bitops.h. A common reason for this include is for the
> > > BITS_PER_BYTE definition, so move this and some other BIT() and masking
> > > macros into a new header file, linux/bits.h
> > > 
> > > --- a/include/linux/bitops.h
> > > +++ b/include/linux/bitops.h
> > > @@ -2,29 +2,9 @@
> > >  #ifndef _LINUX_BITOPS_H
> > >  #define _LINUX_BITOPS_H
> > >  #include <asm/types.h>
> > > +#include <linux/bits.h>
> > >  
> > > -#ifdef	__KERNEL__
> > > -#define BIT(nr)			(1UL << (nr))
> > > -#define BIT_ULL(nr)		(1ULL << (nr))
> > > -#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
> > > -#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
> > > -#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
> > > -#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
> > > -#define BITS_PER_BYTE		8
> > >  #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
> > > -#endif
> > 
> > Why does it leave BITS_TO_LONGS() in place?
> > 
> > That becomes unfortunate with Chris's patch, so I'm moving
> > BITS_TO_LONGS() into bits.h.
> 
> The reason I avoided that was because it would pull in the dreaded
> kernel.h for DIV_ROUND_UP, and then we're back to circular include hell :(
> 

Well we should comment that so every reader doesn't wonder what I
wondered.

Refactoring works well.  I suppose DIV_ROUND_UP and friends await a new
<arithmacros.h>.
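
(No arithmacros.h exists; purely as a hypothetical sketch, the kind of split being suggested might look like this, letting a dependency-free header carry the arithmetic helpers:

/* hypothetical include/linux/arithmacros.h -- sketch only */
#ifndef _LINUX_ARITHMACROS_H
#define _LINUX_ARITHMACROS_H

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#endif /* _LINUX_ARITHMACROS_H */

with linux/bits.h then free to define BITS_TO_LONGS(nr) as DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) without pulling in kernel.h.)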

Also, all these macros no longer have `#ifdef __KERNEL__' protection, which
wasn't mentioned in the changelog.  Deliberate?
Will Deacon July 11, 2018, 9:43 a.m. UTC | #4
Hi again, Andrew,

On Mon, Jul 09, 2018 at 03:10:06PM -0700, Andrew Morton wrote:
> On Mon, 9 Jul 2018 12:32:51 +0100 Will Deacon <will.deacon@arm.com> wrote:
> > On Fri, Jul 06, 2018 at 05:30:49PM -0700, Andrew Morton wrote:
> > > On Tue, 19 Jun 2018 13:53:08 +0100 Will Deacon <will.deacon@arm.com> wrote:
> > > 
> > > > In preparation for implementing the asm-generic atomic bitops in terms
> > > > of atomic_long_*, we need to prevent asm/atomic.h implementations from
> > > > pulling in linux/bitops.h. A common reason for this include is for the
> > > > BITS_PER_BYTE definition, so move this and some other BIT() and masking
> > > > macros into a new header file, linux/bits.h
> > > > 
> > > > --- a/include/linux/bitops.h
> > > > +++ b/include/linux/bitops.h
> > > > @@ -2,29 +2,9 @@
> > > >  #ifndef _LINUX_BITOPS_H
> > > >  #define _LINUX_BITOPS_H
> > > >  #include <asm/types.h>
> > > > +#include <linux/bits.h>
> > > >  
> > > > -#ifdef	__KERNEL__
> > > > -#define BIT(nr)			(1UL << (nr))
> > > > -#define BIT_ULL(nr)		(1ULL << (nr))
> > > > -#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
> > > > -#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
> > > > -#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
> > > > -#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
> > > > -#define BITS_PER_BYTE		8
> > > >  #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
> > > > -#endif
> > > 
> > > Why does it leave BITS_TO_LONGS() in place?
> > > 
> > > That becomes unfortunate with Chris's patch, so I'm moving
> > > BITS_TO_LONGS() into bits.h.
> > 
> > The reason I avoided that was because it would pull in the dreaded
> > kernel.h for DIV_ROUND_UP, and then we're back to circular include hell :(
> > 
> 
> Well we should comment that so every reader doesn't wonder what I
> wondered.
> 
> Refactoring works well.  I suppose DIV_ROUND_UP and friends await a new
> <arithmacros.h>.

Yes; pulling apart kernel.h would certainly help solve some of the fragility
here.

> Also, all these macros no longer have `#ifdef __KERNEL__' protection, which
> wasn't mentioned in the changelog.  Deliberate?


Yes, these aren't under uapi/ so I dropped the guards now that they're not
needed (my understanding is that they're only there because of the way the
uapi split was originally scripted). I can also confirm that the generated
user headers match exactly with and without my changes.

Will

Patch

diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..af419012d77d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -2,29 +2,9 @@ 
 #ifndef _LINUX_BITOPS_H
 #define _LINUX_BITOPS_H
 #include <asm/types.h>
+#include <linux/bits.h>
 
-#ifdef	__KERNEL__
-#define BIT(nr)			(1UL << (nr))
-#define BIT_ULL(nr)		(1ULL << (nr))
-#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE		8
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
-	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
-	(((~0ULL) - (1ULL << (l)) + 1) & \
-	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr)			(1UL << (nr))
+#define BIT_ULL(nr)		(1ULL << (nr))
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE		8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) - (1ULL << (l)) + 1) & \
+	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif	/* __LINUX_BITS_H */
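
For reference (not part of the patch), a few worked expansions of the macros above, assuming BITS_PER_LONG == 64:

	BIT(5)              == 0x20
	BIT_WORD(70)        == 1, BIT_MASK(70) == 1UL << 6	/* bit 70 sits in word 1 */
	GENMASK(7, 0)       == 0x00000000000000ff
	GENMASK(31, 16)     == 0x00000000ffff0000
	GENMASK_ULL(39, 21) == 0x000000ffffe00000	/* the example from the comment */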