@@ -1178,6 +1178,19 @@ static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
     }
 }
 
+static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
+{
+    long off = neon_element_offset(reg, ele, memop);
+
+    switch (memop) {
+    case MO_Q:
+        tcg_gen_ld_i64(dest, cpu_env, off);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
 {
     long off = neon_element_offset(reg, ele, memop);
@@ -1197,6 +1210,19 @@ static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
     }
 }
 
+static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
+{
+    long off = neon_element_offset(reg, ele, memop);
+
+    switch (memop) {
+    case MO_64:
+        tcg_gen_st_i64(src, cpu_env, off);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
 {
     TCGv_ptr ret = tcg_temp_new_ptr();
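
Not part of the patch, for orientation: both helpers defer the address
arithmetic to the existing neon_element_offset() (not shown here), which maps a
(register, element, size) triple to a byte offset into cpu_env. Note that
read_neon_element64() matches on MO_Q while write_neon_element64() matches on
MO_64; in this tree MO_Q is an alias for MO_64, so the MO_64 that every
converted caller below passes selects the load case as well. As a minimal,
self-contained sketch of the indexing convention being assumed (for the
quad-register paths below the register number is even, so 64-bit element 1 of
a register names the same storage the old "reg + 1" addressing did):

/*
 * Illustrative sketch only -- not the QEMU helper.  Model the element
 * indexing: treat the D registers as consecutive 8-byte units, so element
 * "ele" of a (1 << size_log2)-byte access on register "reg" lives
 * ele * (1 << size_log2) bytes past the start of "reg".  Host-endianness
 * fixups for sub-64-bit elements are ignored here.
 */
#include <assert.h>
#include <stdio.h>

static long sketch_element_offset(int reg, int ele, int size_log2)
{
    return (long)reg * 8 + ((long)ele << size_log2);
}

int main(void)
{
    /*
     * Old style: neon_load_reg64(tmp, vd + pass) addressed D[vd + pass].
     * New style: read_neon_element64(tmp, vd, pass, MO_64) names element
     * "pass" of the same even-numbered register, i.e. the same storage.
     */
    assert(sketch_element_offset(2 + 1, 0, 3) == sketch_element_offset(2, 1, 3));
    printf("D3 and 64-bit element 1 of D2/D3: offset %ld in both schemes\n",
           sketch_element_offset(2, 1, 3));
    return 0;
}
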
@@ -1265,9 +1265,9 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
     for (pass = 0; pass < a->q + 1; pass++) {
         TCGv_i64 tmp = tcg_temp_new_i64();
 
-        neon_load_reg64(tmp, a->vm + pass);
+        read_neon_element64(tmp, a->vm, pass, MO_64);
         fn(tmp, cpu_env, tmp, constimm);
-        neon_store_reg64(tmp, a->vd + pass);
+        write_neon_element64(tmp, a->vd, pass, MO_64);
         tcg_temp_free_i64(tmp);
     }
     tcg_temp_free_i64(constimm);
@@ -1375,8 +1375,8 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
     rd = tcg_temp_new_i32();
 
     /* Load both inputs first to avoid potential overwrite if rm == rd */
-    neon_load_reg64(rm1, a->vm);
-    neon_load_reg64(rm2, a->vm + 1);
+    read_neon_element64(rm1, a->vm, 0, MO_64);
+    read_neon_element64(rm2, a->vm, 1, MO_64);
 
     shiftfn(rm1, rm1, constimm);
     narrowfn(rd, cpu_env, rm1);
@@ -1579,7 +1579,7 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
         tcg_gen_shli_i64(tmp, tmp, a->shift);
         tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
     }
-    neon_store_reg64(tmp, a->vd);
+    write_neon_element64(tmp, a->vd, 0, MO_64);
 
     widenfn(tmp, rm1);
     tcg_temp_free_i32(rm1);
@@ -1587,7 +1587,7 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
         tcg_gen_shli_i64(tmp, tmp, a->shift);
         tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
     }
-    neon_store_reg64(tmp, a->vd + 1);
+    write_neon_element64(tmp, a->vd, 1, MO_64);
     tcg_temp_free_i64(tmp);
     return true;
 }
@@ -1822,7 +1822,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
     rm_64 = tcg_temp_new_i64();
 
     if (src1_wide) {
-        neon_load_reg64(rn0_64, a->vn);
+        read_neon_element64(rn0_64, a->vn, 0, MO_64);
     } else {
         TCGv_i32 tmp = tcg_temp_new_i32();
         read_neon_element32(tmp, a->vn, 0, MO_32);
@@ -1841,7 +1841,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
      * avoid incorrect results if a narrow input overlaps with the result.
      */
     if (src1_wide) {
-        neon_load_reg64(rn1_64, a->vn + 1);
+        read_neon_element64(rn1_64, a->vn, 1, MO_64);
     } else {
         TCGv_i32 tmp = tcg_temp_new_i32();
         read_neon_element32(tmp, a->vn, 1, MO_32);
@@ -1851,12 +1851,12 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
     rm = tcg_temp_new_i32();
     read_neon_element32(rm, a->vm, 1, MO_32);
 
-    neon_store_reg64(rn0_64, a->vd);
+    write_neon_element64(rn0_64, a->vd, 0, MO_64);
 
     widenfn(rm_64, rm);
     tcg_temp_free_i32(rm);
     opfn(rn1_64, rn1_64, rm_64);
-    neon_store_reg64(rn1_64, a->vd + 1);
+    write_neon_element64(rn1_64, a->vd, 1, MO_64);
 
     tcg_temp_free_i64(rn0_64);
     tcg_temp_free_i64(rn1_64);
@@ -1928,15 +1928,15 @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
     rd0 = tcg_temp_new_i32();
     rd1 = tcg_temp_new_i32();
 
-    neon_load_reg64(rn_64, a->vn);
-    neon_load_reg64(rm_64, a->vm);
+    read_neon_element64(rn_64, a->vn, 0, MO_64);
+    read_neon_element64(rm_64, a->vm, 0, MO_64);
 
     opfn(rn_64, rn_64, rm_64);
 
     narrowfn(rd0, rn_64);
 
-    neon_load_reg64(rn_64, a->vn + 1);
-    neon_load_reg64(rm_64, a->vm + 1);
+    read_neon_element64(rn_64, a->vn, 1, MO_64);
+    read_neon_element64(rm_64, a->vm, 1, MO_64);
 
     opfn(rn_64, rn_64, rm_64);
 
@@ -2036,16 +2036,16 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a,
     /* Don't store results until after all loads: they might overlap */
     if (accfn) {
         tmp = tcg_temp_new_i64();
-        neon_load_reg64(tmp, a->vd);
+        read_neon_element64(tmp, a->vd, 0, MO_64);
         accfn(tmp, tmp, rd0);
-        neon_store_reg64(tmp, a->vd);
-        neon_load_reg64(tmp, a->vd + 1);
+        write_neon_element64(tmp, a->vd, 0, MO_64);
+        read_neon_element64(tmp, a->vd, 1, MO_64);
         accfn(tmp, tmp, rd1);
-        neon_store_reg64(tmp, a->vd + 1);
+        write_neon_element64(tmp, a->vd, 1, MO_64);
         tcg_temp_free_i64(tmp);
     } else {
-        neon_store_reg64(rd0, a->vd);
-        neon_store_reg64(rd1, a->vd + 1);
+        write_neon_element64(rd0, a->vd, 0, MO_64);
+        write_neon_element64(rd1, a->vd, 1, MO_64);
     }
 
     tcg_temp_free_i64(rd0);
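
Not part of the patch: the "Don't store results until after all loads" comment
kept in the hunk above (and the similar comments in do_2shift_narrow_64 and
do_prewiden_3d) is why the converted code still reads every input into a
temporary before issuing any of the new write_neon_element64() calls -- the
destination register may alias a source. A toy model of that hazard, with a
plain array standing in for the register file:

/*
 * Toy illustration (not QEMU code): why all reads are buffered before
 * any write when source and destination registers can overlap.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t regfile[2];

/* Write-before-read: if dst aliases src, the second read is corrupted. */
static void widen_broken(uint64_t *dst, const uint64_t *src)
{
    dst[0] = src[0] * 100;      /* clobbers src[0] when dst == src */
    dst[1] = src[0] * 100 + 1;  /* now reads the clobbered value */
}

/* Patch-style ordering: read everything first, then store. */
static void widen_ok(uint64_t *dst, const uint64_t *src)
{
    uint64_t in = src[0];
    dst[0] = in * 100;
    dst[1] = in * 100 + 1;
}

int main(void)
{
    regfile[0] = 7;
    widen_broken(regfile, regfile);
    printf("broken: %" PRIu64 " %" PRIu64 "\n", regfile[0], regfile[1]);

    regfile[0] = 7;
    widen_ok(regfile, regfile);
    printf("ok:     %" PRIu64 " %" PRIu64 "\n", regfile[0], regfile[1]);
    return 0;
}
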
@@ -2669,16 +2669,16 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
 
     if (accfn) {
         TCGv_i64 t64 = tcg_temp_new_i64();
-        neon_load_reg64(t64, a->vd);
+        read_neon_element64(t64, a->vd, 0, MO_64);
         accfn(t64, t64, rn0_64);
-        neon_store_reg64(t64, a->vd);
-        neon_load_reg64(t64, a->vd + 1);
+        write_neon_element64(t64, a->vd, 0, MO_64);
+        read_neon_element64(t64, a->vd, 1, MO_64);
         accfn(t64, t64, rn1_64);
-        neon_store_reg64(t64, a->vd + 1);
+        write_neon_element64(t64, a->vd, 1, MO_64);
         tcg_temp_free_i64(t64);
     } else {
-        neon_store_reg64(rn0_64, a->vd);
-        neon_store_reg64(rn1_64, a->vd + 1);
+        write_neon_element64(rn0_64, a->vd, 0, MO_64);
+        write_neon_element64(rn1_64, a->vd, 1, MO_64);
     }
     tcg_temp_free_i64(rn0_64);
     tcg_temp_free_i64(rn1_64);
@@ -2812,10 +2812,10 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
         right = tcg_temp_new_i64();
         dest = tcg_temp_new_i64();
 
-        neon_load_reg64(right, a->vn);
-        neon_load_reg64(left, a->vm);
+        read_neon_element64(right, a->vn, 0, MO_64);
+        read_neon_element64(left, a->vm, 0, MO_64);
         tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
-        neon_store_reg64(dest, a->vd);
+        write_neon_element64(dest, a->vd, 0, MO_64);
 
         tcg_temp_free_i64(left);
         tcg_temp_free_i64(right);
@@ -2831,21 +2831,21 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
         destright = tcg_temp_new_i64();
 
         if (a->imm < 8) {
-            neon_load_reg64(right, a->vn);
-            neon_load_reg64(middle, a->vn + 1);
+            read_neon_element64(right, a->vn, 0, MO_64);
+            read_neon_element64(middle, a->vn, 1, MO_64);
             tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
-            neon_load_reg64(left, a->vm);
+            read_neon_element64(left, a->vm, 0, MO_64);
             tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
         } else {
-            neon_load_reg64(right, a->vn + 1);
-            neon_load_reg64(middle, a->vm);
+            read_neon_element64(right, a->vn, 1, MO_64);
+            read_neon_element64(middle, a->vm, 0, MO_64);
             tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
-            neon_load_reg64(left, a->vm + 1);
+            read_neon_element64(left, a->vm, 1, MO_64);
             tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
         }
 
-        neon_store_reg64(destright, a->vd);
-        neon_store_reg64(destleft, a->vd + 1);
+        write_neon_element64(destright, a->vd, 0, MO_64);
+        write_neon_element64(destleft, a->vd, 1, MO_64);
 
         tcg_temp_free_i64(destright);
         tcg_temp_free_i64(destleft);
@@ -3052,11 +3052,11 @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
         if (accfn) {
             TCGv_i64 tmp64 = tcg_temp_new_i64();
 
-            neon_load_reg64(tmp64, a->vd + pass);
+            read_neon_element64(tmp64, a->vd, pass, MO_64);
             accfn(rd_64, tmp64, rd_64);
             tcg_temp_free_i64(tmp64);
         }
-        neon_store_reg64(rd_64, a->vd + pass);
+        write_neon_element64(rd_64, a->vd, pass, MO_64);
         tcg_temp_free_i64(rd_64);
     }
     return true;
@@ -3254,9 +3254,9 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a,
     rd0 = tcg_temp_new_i32();
     rd1 = tcg_temp_new_i32();
 
-    neon_load_reg64(rm, a->vm);
+    read_neon_element64(rm, a->vm, 0, MO_64);
     narrowfn(rd0, cpu_env, rm);
-    neon_load_reg64(rm, a->vm + 1);
+    read_neon_element64(rm, a->vm, 1, MO_64);
     narrowfn(rd1, cpu_env, rm);
     write_neon_element32(rd0, a->vd, 0, MO_32);
     write_neon_element32(rd1, a->vd, 1, MO_32);
@@ -3326,10 +3326,10 @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
 
     widenfn(rd, rm0);
     tcg_gen_shli_i64(rd, rd, 8 << a->size);
-    neon_store_reg64(rd, a->vd);
+    write_neon_element64(rd, a->vd, 0, MO_64);
     widenfn(rd, rm1);
     tcg_gen_shli_i64(rd, rd, 8 << a->size);
-    neon_store_reg64(rd, a->vd + 1);
+    write_neon_element64(rd, a->vd, 1, MO_64);
 
     tcg_temp_free_i64(rd);
     tcg_temp_free_i32(rm0);
@@ -3847,10 +3847,10 @@ static bool trans_VSWP(DisasContext *s, arg_2misc *a)
     rm = tcg_temp_new_i64();
     rd = tcg_temp_new_i64();
     for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
-        neon_load_reg64(rm, a->vm + pass);
-        neon_load_reg64(rd, a->vd + pass);
-        neon_store_reg64(rm, a->vd + pass);
-        neon_store_reg64(rd, a->vm + pass);
+        read_neon_element64(rm, a->vm, pass, MO_64);
+        read_neon_element64(rd, a->vd, pass, MO_64);
+        write_neon_element64(rm, a->vd, pass, MO_64);
+        write_neon_element64(rd, a->vm, pass, MO_64);
     }
     tcg_temp_free_i64(rm);
     tcg_temp_free_i64(rd);