@@ -6566,9 +6566,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
      */
     new_len = sve_vqm1_for_el(env, cur_el);
     if (new_len < old_len) {
-#ifdef TARGET_AARCH64
         aarch64_sve_narrow_vq(env, new_len + 1);
-#endif
     }
 }
@@ -10645,9 +10643,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
          * Note that new_el can never be 0. If cur_el is 0, then
          * el0_a64 is is_a64(), else el0_a64 is ignored.
          */
-#ifdef TARGET_AARCH64
         aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
-#endif
     }

     if (cur_el < new_el) {
@@ -11552,7 +11548,6 @@ void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
     *cs_base = flags.flags2;
 }

-#ifdef TARGET_AARCH64
 /*
  * The manual says that when SVE is enabled and VQ is widened the
  * implementation is allowed to zero the previously inaccessible
@@ -11664,12 +11659,9 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
     /* When changing vector length, clear inaccessible state. */
     if (new_len < old_len) {
-#ifdef TARGET_AARCH64
         aarch64_sve_narrow_vq(env, new_len + 1);
-#endif
     }
 }
-#endif

 #ifndef CONFIG_USER_ONLY
 ARMSecuritySpace arm_security_space(CPUARMState *env)
These #ifdef TARGET_AARCH64 guards were hiding aarch64_sve_narrow_vq
and aarch64_sve_change_el, which we can expose safely.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
 target/arm/helper.c | 8 --------
 1 file changed, 8 deletions(-)
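
For the "expose safely" part to hold, the two prototypes also need to
be visible outside TARGET_AARCH64 builds. A minimal sketch of the
unconditional declarations follows; placing them in target/arm/cpu.h
is an assumption of this sketch, not something shown in this patch:

/* Sketch: unconditional prototypes for the two helpers, matching the
 * call sites in the hunks above. Assumed to live in target/arm/cpu.h
 * (hypothetical placement). */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64);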
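
The hunks above all converge on aarch64_sve_narrow_vq, which clears
the SVE state that becomes inaccessible when the vector length
shrinks (vq counts 128-bit quadwords). Below is a rough sketch of
that clearing, written against QEMU's CPUARMState layout
(vfp.zregs / vfp.pregs); treat it as an illustration of the
technique, not a verbatim copy of the helper:

/* Illustrative sketch only: narrow live SVE state to vq quadwords.
 * Assumes the usual QEMU includes (qemu/osdep.h for memset, cpu.h
 * for CPUARMState and ARM_MAX_VQ); details may differ from the real
 * aarch64_sve_narrow_vq. */
static void sve_narrow_vq_sketch(CPUARMState *env, unsigned vq)
{
    uint64_t pmask;

    /* Zero the Z-register lanes above the new length: one quadword
     * is two uint64_t elements, so stale lanes start at d[2 * vq]. */
    for (int i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Predicate registers (16 of them, plus FFR) hold 16 bits per
     * quadword, four quadwords per uint64_t word. Mask the partial
     * word at the boundary, then zero the remaining whole words. */
    pmask = (vq & 3) ? ~(-1ULL << (16 * (vq & 3))) : 0;
    for (int j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (int i = 0; i < 17; i++) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}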