@@ -174,6 +174,8 @@ infrastructure:
+------------------------------+---------+---------+
| SSBS | [7-4] | y |
+------------------------------+---------+---------+
+ | BT | [3-0] | y |
+ +------------------------------+---------+---------+
4) MIDR_EL1 - Main ID Register
@@ -204,6 +204,10 @@ HWCAP2_FRINT
Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
+HWCAP2_BTI
+
+ Functionality implied by ID_AA64PFR1_EL1.BT == 0b0001.
+
4. Unused AT_HWCAP bits
-----------------------
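
As an aside (not part of the patch): userspace would normally probe this capability through the auxiliary vector. A minimal sketch, assuming glibc's getauxval(3) and defining HWCAP2_BTI locally in case the installed uapi headers predate this change:

/* Hypothetical userspace probe, not part of this patch. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_BTI
#define HWCAP2_BTI	(1 << 9)	/* matches the uapi value added below */
#endif

int main(void)
{
	/* AT_HWCAP2 carries the HWCAP2_* bits the kernel advertises. */
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & HWCAP2_BTI)
		printf("BTI available: PROT_BTI mappings will be enforced\n");
	else
		printf("BTI not reported by the kernel\n");

	return 0;
}
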
@@ -1484,6 +1484,32 @@ config ARM64_PTR_AUTH
endmenu
+menu "ARMv8.5 architectural features"
+
+config ARM64_BTI
+ bool "Branch Target Identification support"
+ default y
+ help
+ Branch Target Identification (part of the ARMv8.5 Extensions)
+ provides a mechanism to limit the set of locations to which computed
+ branch instructions such as BR or BLR can jump.
+
+ To make use of BTI on CPUs that support it, say Y.
+
+ BTI is intended to provide complementary protection to other control
+ flow integrity protection mechanisms, such as the Pointer
+ authentication mechanism provided as part of the ARMv8.3 Extensions.
+ For this reason, it does not make sense to enable this option without
+ also enabling support for pointer authentication. Thus, when
+ enabling this option you should also select ARM64_PTR_AUTH=y.
+
+ Userspace binaries must also be specifically compiled to make use of
+ this mechanism. If you say N here or the hardware does not support
+ BTI, such binaries can still run, but you get no additional
+ enforcement of branch destinations.
+
+endmenu
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
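
The help text above notes that userspace must be "specifically compiled" to benefit: in practice that means building with the AArch64 branch-protection option (assumed here to be -mbranch-protection=bti or =standard for GCC/Clang; a toolchain detail, not something this patch defines) so that indirect-branch targets begin with a landing pad. A rough, hypothetical illustration:

/* Illustrative only, not part of this patch.
 * Assumed build command:  cc -O2 -mbranch-protection=standard indirect.c
 */
#include <stdio.h>

static void hello(void)
{
	/*
	 * With branch protection enabled, the compiler places a
	 * "BTI C" (or PACIASP) landing pad at this entry point, so
	 * reaching it via the indirect call below remains legal once
	 * the text is mapped with PROT_BTI.
	 */
	puts("called indirectly");
}

int main(void)
{
	/* volatile forces a genuine indirect call (BLR). */
	void (* volatile fp)(void) = hello;

	fp();		/* BLR sets PSTATE.BTYPE; the target must be a
			 * landing pad when its page is guarded. */
	return 0;
}
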
@@ -56,7 +56,8 @@
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
#define ARM64_WORKAROUND_1542419 47
#define ARM64_WORKAROUND_1319367 48
+#define ARM64_BTI 49
-#define ARM64_NCAPS 49
+#define ARM64_NCAPS 50
#endif /* __ASM_CPUCAPS_H */
@@ -613,6 +613,12 @@ static inline bool system_has_prio_mask_debugging(void)
system_uses_irq_prio_masking();
}
+static inline bool system_supports_bti(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_BTI) &&
+ cpus_have_const_cap(ARM64_BTI);
+}
+
#define ARM64_BP_HARDEN_UNKNOWN -1
#define ARM64_BP_HARDEN_WA_NEEDED 0
#define ARM64_BP_HARDEN_NOT_REQUIRED 1
@@ -22,7 +22,7 @@
#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
/* Unallocated EC: 0x0A - 0x0B */
#define ESR_ELx_EC_CP14_64 (0x0C)
-/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_BTI (0x0D)
#define ESR_ELx_EC_ILL (0x0E)
/* Unallocated EC: 0x0F - 0x10 */
#define ESR_ELx_EC_SVC32 (0x11)
@@ -35,6 +35,7 @@ asmlinkage void enter_from_user_mode(void);
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
+void do_bti(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
struct pt_regs *regs);
@@ -86,6 +86,7 @@
#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
#define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2)
#define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT)
+#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
/*
* This yields a mask that user programs can use to figure out what
new file mode 100644
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <uapi/asm/mman.h>
+
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+ unsigned long pkey __always_unused)
+{
+ if (system_supports_bti() && (prot & PROT_BTI))
+ return VM_ARM64_BTI;
+
+ return 0;
+}
+#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+
+static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+{
+ return (vm_flags & VM_ARM64_BTI) ? __pgprot(PTE_GP) : __pgprot(0);
+}
+#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
+
+static inline bool arch_validate_prot(unsigned long prot,
+ unsigned long addr __always_unused)
+{
+ unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;
+
+ if (system_supports_bti())
+ supported |= PROT_BTI;
+
+ return (prot & ~supported) == 0;
+}
+#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
+
+#endif /* ! __ASM_MMAN_H__ */
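
Not part of the patch, but to make the effect of these hooks concrete: a mapping only becomes guarded (VM_ARM64_BTI, hence PTE_GP) when userspace asks for PROT_BTI on a BTI-capable system. A hedged userspace sketch, defining PROT_BTI locally in case libc headers predate the new uapi value:

/* Hypothetical sketch, not part of this patch. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_BTI
#define PROT_BTI 0x10	/* matches the uapi/asm/mman.h value added below */
#endif

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *buf = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* ... copy code that begins with a BTI landing pad into buf ... */

	/*
	 * arch_validate_prot() accepts PROT_BTI only on BTI-capable
	 * systems; arch_calc_vm_prot_bits()/arch_vm_get_page_prot()
	 * then translate it into VM_ARM64_BTI and the PTE_GP bit.
	 */
	if (mprotect(buf, psz, PROT_READ | PROT_EXEC | PROT_BTI) != 0)
		perror("mprotect(PROT_BTI)");	/* EINVAL if BTI is absent */

	munmap(buf, psz);
	return 0;
}
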
@@ -150,6 +150,7 @@
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_GP (_AT(pteval_t, 1) << 50) /* BTI guarded */
#define PTE_DBM (_AT(pteval_t, 1) << 51) /* Dirty Bit Management */
#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
@@ -662,7 +662,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
@@ -35,8 +35,16 @@
#define GIC_PRIO_PSR_I_SET (1 << 4)
/* Additional SPSR bits not exposed in the UABI */
+#define PSR_BTYPE_SHIFT 10
+
#define PSR_IL_BIT (1 << 20)
+/* Convenience names for the values of PSTATE.BTYPE */
+#define PSR_BTYPE_NONE (0b00 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_JC (0b01 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_C (0b10 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_J (0b11 << PSR_BTYPE_SHIFT)
+
/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS 12
#define COMPAT_PTRACE_SETREGS 13
@@ -510,6 +510,8 @@
#endif
/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_BT1 (BIT(36))
+#define SCTLR_EL1_BT0 (BIT(35))
#define SCTLR_EL1_UCI (BIT(26))
#define SCTLR_EL1_E0E (BIT(24))
#define SCTLR_EL1_SPAN (BIT(23))
@@ -599,10 +601,12 @@
/* id_aa64pfr1 */
#define ID_AA64PFR1_SSBS_SHIFT 4
+#define ID_AA64PFR1_BT_SHIFT 0
#define ID_AA64PFR1_SSBS_PSTATE_NI 0
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
+#define ID_AA64PFR1_BT_BTI 0x1
/* id_aa64zfr0 */
#define ID_AA64ZFR0_SM4_SHIFT 40
@@ -65,5 +65,6 @@
#define HWCAP2_SVESM4 (1 << 6)
#define HWCAP2_FLAGM2 (1 << 7)
#define HWCAP2_FRINT (1 << 8)
+#define HWCAP2_BTI (1 << 9)
#endif /* _UAPI__ASM_HWCAP_H */
new file mode 100644
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__ASM_MMAN_H
+#define _UAPI__ASM_MMAN_H
+
+#include <asm-generic/mman.h>
+
+#define PROT_BTI 0x10 /* BTI guarded page */
+
+#endif /* ! _UAPI__ASM_MMAN_H */
@@ -46,6 +46,7 @@
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
+#define PSR_BTYPE_MASK 0x00000c00
#define PSR_SSBS_BIT 0x00001000
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
@@ -172,6 +172,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1267,6 +1269,21 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
}
#endif
+#ifdef CONFIG_ARM64_BTI
+static void bti_enable(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * Use of X16/X17 for tail-calls and trampolines that jump to
+ * function entry points using BR is a requirement for
+ * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
+ * So, be strict and forbid other BRs using other registers to
+ * jump onto a PACIxSP instruction:
+ */
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
+ isb();
+}
+#endif /* CONFIG_ARM64_BTI */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1566,6 +1583,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED,
.min_field_value = 1,
},
+#endif
+#ifdef CONFIG_ARM64_BTI
+ {
+ .desc = "Branch Target Identification",
+ .capability = ARM64_BTI,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .cpu_enable = bti_enable,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_BT_SHIFT,
+ .min_field_value = ID_AA64PFR1_BT_BTI,
+ .sign = FTR_UNSIGNED,
+ },
#endif
{},
};
@@ -1662,6 +1692,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
#endif
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+#ifdef CONFIG_ARM64_BTI
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
+#endif
#ifdef CONFIG_ARM64_PTR_AUTH
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
@@ -84,6 +84,7 @@ static const char *const hwcap_str[] = {
"svesm4",
"flagm2",
"frint",
+ "bti",
NULL
};
@@ -188,6 +188,14 @@ static void notrace el0_undef(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(el0_undef);
+static void notrace el0_bti(struct pt_regs *regs)
+{
+ user_exit_irqoff();
+ local_daif_restore(DAIF_PROCCTX);
+ do_bti(regs);
+}
+NOKPROBE_SYMBOL(el0_bti);
+
static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
@@ -255,6 +263,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_UNKNOWN:
el0_undef(regs);
break;
+ case ESR_ELx_EC_BTI:
+ el0_bti(regs);
+ break;
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_WATCHPT_LOW:
@@ -1853,7 +1853,7 @@ void syscall_trace_exit(struct pt_regs *regs)
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
- GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
+ GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
@@ -730,6 +730,22 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
regs->regs[29] = (unsigned long)&user->next_frame->fp;
regs->pc = (unsigned long)ka->sa.sa_handler;
+ /*
+ * Signal delivery is a (wacky) indirect function call in
+ * userspace, so simulate the same setting of BTYPE as a BLR
+ * <register containing the signal handler entry point>.
+ * Signal delivery to a location in a PROT_BTI guarded page
+ * that is not a function entry point will now trigger a
+ * SIGILL in userspace.
+ *
+ * If the signal handler entry point is not in a PROT_BTI
+ * guarded page, this is harmless.
+ */
+ if (system_supports_bti()) {
+ regs->pstate &= ~PSR_BTYPE_MASK;
+ regs->pstate |= PSR_BTYPE_C;
+ }
+
if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer;
else
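
A user-visible consequence of the above, sketched for illustration (not part of the patch): if the handler's code page is PROT_BTI, its entry point must be a landing pad, since the kernel now enters it with BTYPE set as if it had been reached by BLR. This assumes the binary is built with branch protection (toolchain flag assumed, as in the earlier sketch):

/* Illustrative only, not part of this patch. */
#include <signal.h>
#include <unistd.h>

static void handler(int sig)
{
	/*
	 * Entered with PSTATE.BTYPE == PSR_BTYPE_C. If this page is
	 * guarded (PROT_BTI) and the entry is not a landing pad, the
	 * first instruction traps and do_bti() raises SIGILL instead.
	 */
	(void)sig;
	write(STDOUT_FILENO, "handled\n", 8);
}

int main(void)
{
	struct sigaction sa = { .sa_handler = handler };

	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);		/* delivery simulates an indirect call */
	return 0;
}
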
@@ -98,6 +98,24 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
+ /*
+ * BTI note:
+ * The architecture does not guarantee that SPSR.BTYPE is zero
+ * on taking an SVC, so we could return to userspace with a
+ * non-zero BTYPE after the syscall.
+ *
+ * This shouldn't matter except when userspace is explicitly
+ * doing something stupid, such as setting PROT_BTI on a page
+ * that lacks conforming BTI/PACIxSP instructions, falling
+ * through from one executable page to another with differing
+ * PROT_BTI, or messing with BTYPE via ptrace: in such cases,
+ * userspace should not be surprised if a SIGILL occurs on
+ * syscall return.
+ *
+ * So, don't touch regs->pstate & PSR_BTYPE_MASK here.
+ * (Similarly for HVC and SMC elsewhere.)
+ */
+
cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
user_exit();
@@ -408,6 +408,13 @@ void do_undefinstr(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_undefinstr);
+void do_bti(struct pt_regs *regs)
+{
+ BUG_ON(!user_mode(regs));
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+}
+NOKPROBE_SYMBOL(do_bti);
+
#define __user_cache_maint(insn, address, res) \
if (address >= user_addr_max()) { \
res = -EFAULT; \
@@ -750,6 +757,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
[ESR_ELx_EC_PAC] = "PAC",
[ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
+ [ESR_ELx_EC_BTI] = "BTI",
[ESR_ELx_EC_ILL] = "PSTATE.IL",
[ESR_ELx_EC_SVC32] = "SVC (AArch32)",
[ESR_ELx_EC_HVC32] = "HVC (AArch32)",
@@ -329,6 +329,9 @@ extern unsigned int kobjsize(const void *objp);
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
# define VM_ARCH_CLEAR VM_SPARC_ADI
+#elif defined(CONFIG_ARM64)
+# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */
+# define VM_ARCH_CLEAR VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif