@@ -30,8 +30,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
{
asm goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- ".align 3\n\t"
- ".quad 1b, %l[l_yes], %c0\n\t"
+ ".align 2\n\t"
+ ".long 1b - ., %l[l_yes] - ., %c0 - .\n\t"
".popsection\n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
@@ -44,8 +44,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
{
asm goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- ".align 3\n\t"
- ".quad 1b, %l[l_yes], %c0\n\t"
+ ".align 2\n\t"
+ ".long 1b - ., %l[l_yes] - ., %c0 - .\n\t"
".popsection\n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
@@ -57,19 +57,26 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
typedef u64 jump_label_t;
struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
+ s32 code;
+ s32 target;
+ s32 key;
};
static inline jump_label_t jump_entry_code(const struct jump_entry *entry)
{
- return entry->code;
+ return (jump_label_t)&entry->code + entry->code;
+}
+
+static inline jump_label_t jump_entry_target(const struct jump_entry *entry)
+{
+ return (jump_label_t)&entry->target + entry->target;
}
static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
- return (struct static_key *)((unsigned long)entry->key & ~1UL);
+ unsigned long key = (unsigned long)&entry->key + entry->key;
+
+ return (struct static_key *)(key & ~1UL);
}
static inline bool jump_entry_is_branch(const struct jump_entry *entry)
@@ -87,7 +94,7 @@ static inline void jump_entry_set_module_init(struct jump_entry *entry)
entry->code = 0;
}
-#define jump_label_swap NULL
+void jump_label_swap(void *a, void *b, int size);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_JUMP_LABEL_H */
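With the fields now relative, each accessor above adds the address of
the field itself back in before using the value. The branch-polarity
trick still works: both &entry->key and the static_key it refers to are
at least 4-byte aligned, so bit 0 of the stored difference
"(key_address | branch) - &entry->key" is still the branch flag, which
is why jump_entry_key() can mask it off after resolving. Assuming the
stock form of the predicate (its body is untouched by this patch, hence
not shown in the hunk), it keeps testing bit 0 directly:

    static inline bool jump_entry_is_branch(const struct jump_entry *entry)
    {
            /* bit 0 of the relative offset equals bit 0 of the pointer */
            return (unsigned long)entry->key & 1UL;
    }

The remaining hunks are against arch/arm64/kernel/jump_label.c, per the
diffstat at the end.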
@@ -25,12 +25,12 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- void *addr = (void *)entry->code;
+ void *addr = (void *)jump_entry_code(entry);
u32 insn;
if (type == JUMP_LABEL_JMP) {
- insn = aarch64_insn_gen_branch_imm(entry->code,
- entry->target,
+ insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+ jump_entry_target(entry),
AARCH64_INSN_BRANCH_NOLINK);
} else {
insn = aarch64_insn_gen_nop();
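arch_jump_label_transform() now resolves the code and target addresses
through the accessors before handing them to
aarch64_insn_gen_branch_imm(), which encodes an unconditional B with a
PC-relative imm26. Roughly what that encoding boils down to, as a
simplified sketch with range checks omitted (not the kernel's actual
implementation):

    #include <stdint.h>

    /* B <label>: opcode 0b000101 in bits [31:26], imm26 = offset / 4;
     * the byte offset must fit in +/-128 MiB */
    static uint32_t gen_b(uint64_t pc, uint64_t target)
    {
            int64_t offset = (int64_t)(target - pc);

            return 0x14000000u | ((uint64_t)(offset >> 2) & 0x03ffffffu);
    }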
@@ -50,4 +50,20 @@ void arch_jump_label_transform_static(struct jump_entry *entry,
*/
}
+void jump_label_swap(void *a, void *b, int size)
+{
+ long delta = (unsigned long)a - (unsigned long)b;
+ struct jump_entry *jea = a;
+ struct jump_entry *jeb = b;
+ struct jump_entry tmp = *jea;
+
+ jea->code = jeb->code - delta;
+ jea->target = jeb->target - delta;
+ jea->key = jeb->key - delta;
+
+ jeb->code = tmp.code + delta;
+ jeb->target = tmp.target + delta;
+ jeb->key = tmp.key + delta;
+}
+
#endif /* HAVE_JUMP_LABEL */
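The swap callback compensates for the fact that moving an entry changes
the base its offsets are relative to: an entry travelling from b to a
sees its field addresses change by delta = a - b, so each stored offset
must change by -delta for the resolved absolute address to stay put,
and vice versa for the entry going the other way. A self-contained
userspace check of that invariant (hypothetical harness, not part of
the patch):

    #include <assert.h>
    #include <stdint.h>

    struct jump_entry { int32_t code, target, key; };

    static uintptr_t resolve(const int32_t *field)
    {
            return (uintptr_t)field + *field;
    }

    static void jump_label_swap(void *a, void *b, int size)
    {
            long delta = (unsigned long)a - (unsigned long)b;
            struct jump_entry *jea = a, *jeb = b;
            struct jump_entry tmp = *jea;

            (void)size;     /* entries are fixed size */

            jea->code = jeb->code - delta;
            jea->target = jeb->target - delta;
            jea->key = jeb->key - delta;

            jeb->code = tmp.code + delta;
            jeb->target = tmp.target + delta;
            jeb->key = tmp.key + delta;
    }

    int main(void)
    {
            static int key_a, key_b;
            static struct jump_entry ents[2];

            ents[0].key = (int32_t)((intptr_t)&key_a - (intptr_t)&ents[0].key);
            ents[1].key = (int32_t)((intptr_t)&key_b - (intptr_t)&ents[1].key);

            jump_label_swap(&ents[0], &ents[1], sizeof(ents[0]));

            /* absolute targets must be preserved, just in swapped slots */
            assert(resolve(&ents[0].key) == (uintptr_t)&key_b);
            assert(resolve(&ents[1].key) == (uintptr_t)&key_a);
            return 0;
    }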
On a randomly chosen distro kernel build for arm64, vmlinux.o shows the
following sections, containing jump label entries, and the associated
RELA relocation records, respectively:

  ...
  [38088] __jump_table      PROGBITS         0000000000000000  00e19f30
          000000000002ea10  0000000000000000  WA       0     0     8
  [38089] .rela__jump_table RELA             0000000000000000  01fd8bb0
          000000000008be30  0000000000000018   I   38178 38088     8
  ...

In other words, we have 190 KB worth of 'struct jump_entry' instances
(0x2ea10 bytes, i.e., 7,958 entries of 24 bytes each), and 573 KB worth
of RELA entries to relocate each entry's code, target and key members
(0x8be30 bytes, i.e., three 24-byte relocations per entry). This means
the RELA section occupies 10% of the .init segment, and the two
sections combined represent 5% of vmlinux's entire memory footprint.

So let's switch from 64-bit absolute references to 32-bit relative
references: this shrinks each entry from 24 to 12 bytes, reducing the
size of the __jump_table by 50%, and gets rid of the RELA section
entirely.

Note that this requires some extra care in the sorting routine, given
that the offsets change when entries are moved around in the
jump_entry table.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/include/asm/jump_label.h | 27 ++++++++++++--------
 arch/arm64/kernel/jump_label.c      | 22 +++++++++++++---
 2 files changed, 36 insertions(+), 13 deletions(-)

--
2.11.0