@@ -1303,7 +1303,11 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
-    /* indirect jump method */
+    /* Direct branch will be patched by tb_target_set_jmp_target. */
+    set_jmp_insn_offset(s, which);
+    tcg_out32(s, OPC_NOP);
+
+    /* When branch is out of range, fall through to indirect. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                get_jmp_target_addr(s, which));
     tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
@@ -1313,7 +1317,18 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
-    /* Always indirect, nothing to do */
+    uintptr_t addr = tb->jmp_target_addr[n];
+    ptrdiff_t offset = addr - jmp_rx;
+    tcg_insn_unit insn;
+
+    /* Either directly branch, or fall through to indirect branch. */
+    if (offset == sextreg(offset, 0, 20)) {
+        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
+    } else {
+        insn = OPC_NOP;
+    }
+    qatomic_set((uint32_t *)jmp_rw, insn);
+    flush_idcache_range(jmp_rx, jmp_rw, 4);
 }
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,

Now that tcg can handle direct and indirect goto_tb simultaneously,
we can optimistically leave space for a direct branch and fall back
to loading the pointer from the TB for an indirect branch.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)
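
For readers who want to poke at the range check in isolation, below is a
minimal, self-contained sketch, not QEMU code: sextreg is reimplemented
locally (in the riscv backend it is an alias for sextract64), and the
displacement values are made up. It exercises the decision made in
tb_target_set_jmp_target above: patch in a JAL when the displacement
passes the sextreg(offset, 0, 20) test, otherwise leave the NOP so
execution falls through to the ld + jalr indirect branch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Local stand-in for QEMU's sextreg (an alias for sextract64 in the
 * riscv backend): sign-extend the len-bit field of value that begins
 * at bit start.  Shift through uint64_t to avoid UB on negative input;
 * the final right shift is assumed to be arithmetic on int64_t.
 */
static int64_t sextreg(int64_t value, int start, int len)
{
    return (int64_t)((uint64_t)value << (64 - len - start)) >> (64 - len);
}

int main(void)
{
    /* Hypothetical displacements from the goto_tb nop slot. */
    ptrdiff_t near_off = 0x40000;    /* passes the check: patch in JAL */
    ptrdiff_t far_off  = 0x200000;   /* fails the check: leave the NOP,
                                        fall through to ld + jalr      */

    printf("near: %s\n",
           near_off == sextreg(near_off, 0, 20) ? "JAL" : "NOP");
    printf("far:  %s\n",
           far_off == sextreg(far_off, 0, 20) ? "JAL" : "NOP");
    return 0;
}

Note also why the patch writes the chosen encoding with qatomic_set and
then calls flush_idcache_range: the 4-byte slot may be rewritten while
another thread is executing the translated code, so the update must be
a single atomic word store, made visible to instruction fetch by the
cache flush.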