
[5/5] x86/kernel: jump_table: use relative references

Message ID: 20180627160604.8154-6-ard.biesheuvel@linaro.org
State: New
Series: add support for relative references in jump tables

Commit Message

Ard Biesheuvel June 27, 2018, 4:06 p.m. UTC
Similar to the arm64 case, 64-bit x86 can benefit from using 32-bit
relative references rather than 64-bit absolute ones when emitting
struct jump_entry instances. Not only does this reduce the memory
footprint of the entries themselves by 50%, it also removes the need
for carrying relocation metadata on relocatable builds (i.e., for KASLR)
which saves a fair chunk of .init space as well (although the savings
are not as dramatic as on arm64)
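
To illustrate the layout this patch emits (purely a sketch, not code from this
series): each entry becomes three 32-bit fields, each holding the distance from
the field's own location to the object it refers to, so the absolute address is
recovered by adding the field's address back in. The struct and accessors below
are illustrative only; the generic definitions introduced elsewhere in the
series may differ in detail.

struct jump_entry {
	s32 code;	/* patch site, relative to &code */
	s32 target;	/* jump target, relative to &target */
	s32 key;	/* static_key, relative to &key */
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	/* place-relative: the field's own address plus the stored offset */
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}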

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>

---
 arch/x86/Kconfig                  |  1 +
 arch/x86/include/asm/jump_label.h | 28 ++++++--------------
 tools/objtool/special.c           |  4 +--
 3 files changed, 11 insertions(+), 22 deletions(-)

-- 
2.11.0

Comments

Peter Zijlstra June 28, 2018, 8:31 a.m. UTC | #1
On Wed, Jun 27, 2018 at 06:06:04PM +0200, Ard Biesheuvel wrote:
> Similar to the arm64 case, 64-bit x86 can benefit from using 32-bit
> relative references rather than 64-bit absolute ones when emitting
> struct jump_entry instances. Not only does this reduce the memory
> footprint of the entries themselves by 50%, it also removes the need
> for carrying relocation metadata on relocatable builds (i.e., for KASLR)
> which saves a fair chunk of .init space as well (although the savings
> are not as dramatic as on arm64)

This will conflict with:

  https://lkml.kernel.org/r/20180622172212.199633-10-namit@vmware.com
Ard Biesheuvel June 28, 2018, 8:34 a.m. UTC | #2
On 28 June 2018 at 10:31, Peter Zijlstra <peterz@infradead.org> wrote:
> On Wed, Jun 27, 2018 at 06:06:04PM +0200, Ard Biesheuvel wrote:
>> Similar to the arm64 case, 64-bit x86 can benefit from using 32-bit
>> relative references rather than 64-bit absolute ones when emitting
>> struct jump_entry instances. Not only does this reduce the memory
>> footprint of the entries themselves by 50%, it also removes the need
>> for carrying relocation metadata on relocatable builds (i.e., for KASLR)
>> which saves a fair chunk of .init space as well (although the savings
>> are not as dramatic as on arm64)
>
> This will conflict with:
>
>   https://lkml.kernel.org/r/20180622172212.199633-10-namit@vmware.com

Thanks for the heads-up. Fortunately, it does not conflict
fundamentally, so it should be a straightforward rebase after that
code is merged.

Do you think this is likely to get merged for v4.19?
Peter Zijlstra June 28, 2018, 9:28 a.m. UTC | #3
On Thu, Jun 28, 2018 at 10:34:54AM +0200, Ard Biesheuvel wrote:
> On 28 June 2018 at 10:31, Peter Zijlstra <peterz@infradead.org> wrote:
> > On Wed, Jun 27, 2018 at 06:06:04PM +0200, Ard Biesheuvel wrote:
> >> Similar to the arm64 case, 64-bit x86 can benefit from using 32-bit
> >> relative references rather than 64-bit absolute ones when emitting
> >> struct jump_entry instances. Not only does this reduce the memory
> >> footprint of the entries themselves by 50%, it also removes the need
> >> for carrying relocation metadata on relocatable builds (i.e., for KASLR)
> >> which saves a fair chunk of .init space as well (although the savings
> >> are not as dramatic as on arm64)
> >
> > This will conflict with:
> >
> >   https://lkml.kernel.org/r/20180622172212.199633-10-namit@vmware.com
>
> Thanks for the heads-up. Fortunately, it does not conflict
> fundamentally, so it should be a straightforward rebase after that
> code is merged.

Yeah, shouldn't be hard to cure.

There's another patch set that might have a little conflict, but that's
not near ready I think, so that'll have to just cope with things
shifting underneath (and there too, the fixup shouldn't be hard).

> Do you think this is likely to get merged for v4.19?

I'm thinking it is near ready so it might, but I'm not in charge of
those bits :-)

Patch

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e10a3542db7e..dd71258ec1cc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -118,6 +118,7 @@  config X86
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN			if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 8c0de4282659..d0f1f25b41d5 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -36,8 +36,8 @@  static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 	asm_volatile_goto("1:"
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table,  \"aw\" \n\t"
-		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".balign 4\n\t"
+		".long 1b - ., %l[l_yes] - ., %c0 + %c1 - .\n\t"
 		".popsection \n\t"
 		: :  "i" (key), "i" (branch) : : l_yes);
 
@@ -52,8 +52,8 @@  static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
 		"2:\n\t"
 		".pushsection __jump_table,  \"aw\" \n\t"
-		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".balign 4\n\t"
+		".long 1b - ., %l[l_yes] - ., %c0 + %c1 - .\n\t"
 		".popsection \n\t"
 		: :  "i" (key), "i" (branch) : : l_yes);
 
@@ -62,18 +62,6 @@  static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 	return true;
 }
 
-#ifdef CONFIG_X86_64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
-	jump_label_t code;
-	jump_label_t target;
-	jump_label_t key;
-};
-
 #else	/* __ASSEMBLY__ */
 
 .macro STATIC_JUMP_IF_TRUE target, key, def
@@ -87,8 +75,8 @@  struct jump_entry {
 	.byte		STATIC_KEY_INIT_NOP
 	.endif
 	.pushsection __jump_table, "aw"
-	_ASM_ALIGN
-	_ASM_PTR	.Lstatic_jump_\@, \target, \key
+	.balign		4
+	.long		.Lstatic_jump_\@ - ., \target - ., \key - .
 	.popsection
 .endm
 
@@ -103,8 +91,8 @@  struct jump_entry {
 .Lstatic_jump_after_\@:
 	.endif
 	.pushsection __jump_table, "aw"
-	_ASM_ALIGN
-	_ASM_PTR	.Lstatic_jump_\@, \target, \key + 1
+	.balign		4
+	.long		.Lstatic_jump_\@ - ., \target - ., \key + 1 - .
 	.popsection
 .endm
 
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 84f001d52322..98ae55b39037 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -30,9 +30,9 @@ 
 #define EX_ORIG_OFFSET		0
 #define EX_NEW_OFFSET		4
 
-#define JUMP_ENTRY_SIZE		24
+#define JUMP_ENTRY_SIZE		12
 #define JUMP_ORIG_OFFSET	0
-#define JUMP_NEW_OFFSET		8
+#define JUMP_NEW_OFFSET		4
 
 #define ALT_ENTRY_SIZE		13
 #define ALT_ORIG_OFFSET		0
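
A side note on the objtool constants above (illustrative only, not objtool
code): three 4-byte place-relative fields give the 12-byte entry size, with the
original (patch-site) and new (target) references at byte offsets 0 and 4. A
hypothetical stand-alone decoder for such an entry could look like the sketch
below; resolve_field() and its parameters are invented names for illustration.

#include <stdint.h>
#include <string.h>

#define JUMP_ENTRY_SIZE		12
#define JUMP_ORIG_OFFSET	0
#define JUMP_NEW_OFFSET		4

/*
 * Resolve one place-relative field of a __jump_table entry: data points
 * at the raw section contents, sec_addr is the section's runtime address,
 * entry_off is the entry's byte offset within the section.
 */
static uint64_t resolve_field(const uint8_t *data, uint64_t sec_addr,
			      uint64_t entry_off, uint64_t field_off)
{
	int32_t rel;

	memcpy(&rel, data + entry_off + field_off, sizeof(rel));
	/* the field's own address plus the stored signed 32-bit offset */
	return sec_addr + entry_off + field_off + (int64_t)rel;
}

/*
 * For entry i:
 *   patch site  = resolve_field(data, sec_addr, i * JUMP_ENTRY_SIZE, JUMP_ORIG_OFFSET);
 *   jump target = resolve_field(data, sec_addr, i * JUMP_ENTRY_SIZE, JUMP_NEW_OFFSET);
 */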