@@ -1013,36 +1013,41 @@ static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
return addr;
}
-static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
- TCGReg rk, MemOp opc, TCGType type)
+typedef struct {
+ TCGReg base;
+ TCGReg index;
+} HostAddress;
+
+static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
+ TCGReg rd, HostAddress h)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);

switch (opc & MO_SSIZE) {
case MO_UB:
- tcg_out_opc_ldx_bu(s, rd, rj, rk);
+ tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
break;
case MO_SB:
- tcg_out_opc_ldx_b(s, rd, rj, rk);
+ tcg_out_opc_ldx_b(s, rd, h.base, h.index);
break;
case MO_UW:
- tcg_out_opc_ldx_hu(s, rd, rj, rk);
+ tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
break;
case MO_SW:
- tcg_out_opc_ldx_h(s, rd, rj, rk);
+ tcg_out_opc_ldx_h(s, rd, h.base, h.index);
break;
case MO_UL:
if (type == TCG_TYPE_I64) {
- tcg_out_opc_ldx_wu(s, rd, rj, rk);
+ tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
break;
}
/* fallthrough */
case MO_SL:
- tcg_out_opc_ldx_w(s, rd, rj, rk);
+ tcg_out_opc_ldx_w(s, rd, h.base, h.index);
break;
case MO_UQ:
- tcg_out_opc_ldx_d(s, rd, rj, rk);
+ tcg_out_opc_ldx_d(s, rd, h.base, h.index);
break;
default:
g_assert_not_reached();
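
With the new signature, a call site packs the two address registers into a
HostAddress and passes the MemOp and TCGType ahead of the data register. A
minimal sketch with hypothetical register picks (the A-registers here are
illustrative stand-ins, not values taken from this patch):

    HostAddress h = { .base = TCG_REG_A0, .index = TCG_REG_A1 };
    /* Emits "ldx.d a2, a0, a1": 64-bit load from A0 + A1 into A2. */
    tcg_out_qemu_ld_indexed(s, MO_UQ, TCG_TYPE_I64, TCG_REG_A2, h);
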
@@ -1053,23 +1058,23 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
- TCGReg base, index;
+ HostAddress h;

#ifdef CONFIG_SOFTMMU
tcg_insn_unit *label_ptr[1];

tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
- index = TCG_REG_TMP2;
+ h.index = TCG_REG_TMP2;
#else
unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
- index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+ h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
#endif

- base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
- tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type);
+ h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+ tcg_out_qemu_ld_indexed(s, opc, data_type, data_reg, h);

#ifdef CONFIG_SOFTMMU
add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
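
Both this load path and the store path below fill the HostAddress the same way,
so the packing reads as one standalone sketch (pack_host_address is a
hypothetical helper name for illustration; the patch keeps the logic inline):

    /* Sketch: build a HostAddress exactly as tcg_out_qemu_ld/st do. */
    static HostAddress pack_host_address(TCGContext *s, TCGReg addr_reg)
    {
        HostAddress h;
    #ifdef CONFIG_SOFTMMU
        /* TCG_REG_TMP2 holds the TLB addend produced by tcg_out_tlb_load(). */
        h.index = TCG_REG_TMP2;
    #else
        h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    #endif
        /* A 32-bit guest address is zero-extended into TCG_REG_TMP0 first. */
        h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
        return h;
    }
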
@@ -1077,24 +1082,24 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
#endif
}

-static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
- TCGReg rj, TCGReg rk, MemOp opc)
+static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
+ TCGReg rd, HostAddress h)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);

switch (opc & MO_SIZE) {
case MO_8:
- tcg_out_opc_stx_b(s, data, rj, rk);
+ tcg_out_opc_stx_b(s, rd, h.base, h.index);
break;
case MO_16:
- tcg_out_opc_stx_h(s, data, rj, rk);
+ tcg_out_opc_stx_h(s, rd, h.base, h.index);
break;
case MO_32:
- tcg_out_opc_stx_w(s, data, rj, rk);
+ tcg_out_opc_stx_w(s, rd, h.base, h.index);
break;
case MO_64:
- tcg_out_opc_stx_d(s, data, rj, rk);
+ tcg_out_opc_stx_d(s, rd, h.base, h.index);
break;
default:
g_assert_not_reached();
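
A matching store call site mirrors the load, minus the TCGType argument: the
store switches on MO_SIZE only, since signedness does not matter when writing.
Again with hypothetical registers:

    HostAddress h = { .base = TCG_REG_A0, .index = TCG_REG_ZERO };
    /* Emits "stx.w a2, a0, zero": store the low 32 bits of A2 at A0 + 0. */
    tcg_out_qemu_st_indexed(s, MO_32, TCG_REG_A2, h);
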
@@ -1105,23 +1110,23 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
- TCGReg base, index;
+ HostAddress h;

#ifdef CONFIG_SOFTMMU
tcg_insn_unit *label_ptr[1];

tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
- index = TCG_REG_TMP2;
+ h.index = TCG_REG_TMP2;
#else
unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, false, addr_reg, a_bits);
}
- index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+ h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
#endif

- base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
- tcg_out_qemu_st_indexed(s, data_reg, base, index, opc);
+ h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+ tcg_out_qemu_st_indexed(s, opc, data_reg, h);

#ifdef CONFIG_SOFTMMU
add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,