@@ -21,9 +21,9 @@ uint32_t tcg_sve_disable_lens(unsigned long *sve_vq_map,
bool tcg_sve_validate_lens(unsigned long *sve_vq_map, uint32_t max_vq,
Error **errp);
-void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
+void tcg_sve_narrow_vq(CPUARMState *env, unsigned vq);
-void aarch64_sve_change_el(CPUARMState *env, int old_el,
-                           int new_el, bool el0_a64);
+void tcg_sve_change_el(CPUARMState *env, int old_el,
+                       int new_el, bool el0_a64);
#endif /* TCG_SVE_H */
@@ -10877,7 +10877,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
vq = MIN(vq, cpu->sve_max_vq);
if (vq < old_vq) {
- aarch64_sve_narrow_vq(env, vq);
+ tcg_sve_narrow_vq(env, vq);
}
env->vfp.zcr_el[1] = vq - 1;
arm_rebuild_hflags(env);
@@ -119,7 +119,7 @@ void arm_cpu_do_interrupt_aarch64(CPUState *cs)
* Note that new_el can never be 0. If cur_el is 0, then
* el0_a64 is is_a64(), else el0_a64 is ignored.
*/
- aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
+ tcg_sve_change_el(env, cur_el, new_el, is_a64(env));
}
if (cur_el < new_el) {
@@ -5814,7 +5814,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
new_len = sve_zcr_len_for_el(env, cur_el);
if (new_len < old_len) {
- aarch64_sve_narrow_vq(env, new_len + 1);
+ tcg_sve_narrow_vq(env, new_len + 1);
}
}
@@ -1042,7 +1042,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
* Note that cur_el can never be 0. If new_el is 0, then
* el0_a64 is return_to_aa64, else el0_a64 is ignored.
*/
- aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
+ tcg_sve_change_el(env, cur_el, new_el, return_to_aa64);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(env_archcpu(env));
@@ -95,7 +95,7 @@ bool tcg_sve_validate_lens(unsigned long *sve_vq_map, uint32_t max_vq,
* may well be cheaper than conditionals to restrict the operation
* to the relevant portion of a uint16_t[16].
*/
-void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
+void tcg_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
int i, j;
uint64_t pmask;
@@ -124,7 +124,7 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
/*
* Notice a change in SVE vector size when changing EL.
*/
-void aarch64_sve_change_el(CPUARMState *env, int old_el,
+void tcg_sve_change_el(CPUARMState *env, int old_el,
int new_el, bool el0_a64)
{
ARMCPU *cpu = env_archcpu(env);
@@ -162,6 +162,6 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
/* When changing vector length, clear inaccessible state. */
if (new_len < old_len) {
- aarch64_sve_narrow_vq(env, new_len + 1);
+ tcg_sve_narrow_vq(env, new_len + 1);
}
}
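
For readers following the rename, a minimal, self-contained sketch of the narrowing logic that the helper (formerly aarch64_sve_narrow_vq, now tcg_sve_narrow_vq) performs is shown below. It is not QEMU code: the types and names (toy_sve_state, TOY_MAX_VQ, toy_sve_narrow_vq) are illustrative stand-ins, and the sketch only mirrors the effect visible in the hunks above, clearing the Z-register bytes and predicate bits that become architecturally inaccessible when the vector length shrinks to vq quadwords.

/*
 * Illustrative sketch only -- not QEMU code.  A toy SVE register file
 * sized for the architectural maximum (vq = 16, i.e. 2048-bit vectors).
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define TOY_MAX_VQ 16                      /* 16 quadwords = 2048 bits  */

struct toy_sve_state {
    uint64_t zregs[32][TOY_MAX_VQ * 2];    /* 32 vector registers       */
    uint64_t pregs[17][TOY_MAX_VQ / 4];    /* 16 predicates plus FFR    */
};

/* Clear the state that becomes inaccessible when shrinking to 'vq'. */
static void toy_sve_narrow_vq(struct toy_sve_state *s, unsigned vq)
{
    uint64_t pmask;
    unsigned i, j;

    assert(vq >= 1 && vq <= TOY_MAX_VQ);

    /* Zero the Z-register bytes beyond the new length (vq * 16 bytes). */
    for (i = 0; i < 32; i++) {
        memset(&s->zregs[i][2 * vq], 0, 16 * (TOY_MAX_VQ - vq));
    }

    /*
     * Zero the predicate bits beyond the new length.  Each quadword of
     * vector length contributes 16 predicate bits, so a partially used
     * uint64_t keeps only its low 16 * (vq & 3) bits, and every later
     * word is cleared entirely.
     */
    pmask = (vq & 3) ? ~(-1ULL << (16 * (vq & 3))) : 0;
    for (j = vq / 4; j < TOY_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; i++) {
            s->pregs[i][j] &= pmask;
        }
        pmask = 0;
    }
}

The patch itself only renames the entry points; as the hunks show, the clearing logic is unchanged and continues to be invoked from the ZCR write hook, the EL-change paths, and the TARGET_PR_SVE_SET_VL handler whenever the effective vector length shrinks.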