@@ -1056,9 +1056,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
-void aarch64_sve_change_el(CPUARMState *env, int old_el,
- int new_el, bool el0_a64);
static inline bool is_a64(CPUARMState *env)
{
@@ -1090,10 +1087,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
}
#else
-static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
-static inline void aarch64_sve_change_el(CPUARMState *env, int o,
- int n, bool a)
-{ }
#define is_a64(env) ((void)env, false)
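
Note that with these #else stubs gone, non-AArch64 builds no longer see even no-op versions of aarch64_sve_narrow_vq() and aarch64_sve_change_el(). Any caller in code shared across targets now has to guard both the include and the call site with TARGET_AARCH64, as the linux-user hunk below does.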
@@ -21,4 +21,9 @@ uint32_t tcg_sve_disable_lens(unsigned long *sve_vq_map,
bool tcg_sve_validate_lens(unsigned long *sve_vq_map, uint32_t max_vq,
Error **errp);
+void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
+
+void aarch64_sve_change_el(CPUARMState *env, int old_el,
+ int new_el, bool el0_a64);
+
#endif /* TCG_SVE_H */
@@ -134,6 +134,10 @@
#include "fd-trans.h"
#include "tcg/tcg.h"
+#ifdef TARGET_AARCH64
+#include "tcg/tcg-sve.h"
+#endif /* TARGET_AARCH64 */
+
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
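
For context, the reason linux-user needs this header: the TARGET_PR_SVE_SET_VL prctl handler in syscall.c calls aarch64_sve_narrow_vq() when the guest shrinks its vector length. A simplified sketch of that call site follows (abbreviated, not the verbatim handler; env, cpu and arg2 come from the surrounding prctl code, and MAX/MIN are QEMU's macros):

    uint32_t vq, old_vq;

    old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;  /* current VQ from ZCR_EL1.LEN */
    vq = MAX(arg2 / 16, 1);                   /* requested VL in bytes -> VQ */
    vq = MIN(vq, cpu->sve_max_vq);

    if (vq < old_vq) {
        aarch64_sve_narrow_vq(env, vq);       /* zero the now-hidden state */
    }
    env->vfp.zcr_el[1] = vq - 1;              /* ZCR_ELx.LEN holds vq - 1 */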
@@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
+#include "tcg/tcg-sve.h"
#include "internals.h"
#include "sysemu/tcg.h"
@@ -16,6 +16,10 @@
#include "cpu-mmu.h"
#include "cpregs.h"
+#ifdef TARGET_AARCH64
+#include "tcg/tcg-sve.h"
+#endif /* TARGET_AARCH64 */
+
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
+#include "tcg/tcg-sve.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
@@ -1294,90 +1294,3 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
*pflags = flags.flags;
*cs_base = flags.flags2;
}
-
-#ifdef TARGET_AARCH64
-/*
- * The manual says that when SVE is enabled and VQ is widened the
- * implementation is allowed to zero the previously inaccessible
- * portion of the registers. The corollary to that is that when
- * SVE is enabled and VQ is narrowed we are also allowed to zero
- * the now inaccessible portion of the registers.
- *
- * The intent of this is that no predicate bit beyond VQ is ever set.
- * Which means that some operations on predicate registers themselves
- * may operate on full uint64_t or even unrolled across the maximum
- * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
- * may well be cheaper than conditionals to restrict the operation
- * to the relevant portion of a uint16_t[16].
- */
-void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
-{
- int i, j;
- uint64_t pmask;
-
- assert(vq >= 1 && vq <= ARM_MAX_VQ);
- assert(vq <= env_archcpu(env)->sve_max_vq);
-
- /* Zap the high bits of the zregs. */
- for (i = 0; i < 32; i++) {
- memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
- }
-
- /* Zap the high bits of the pregs and ffr. */
- pmask = 0;
- if (vq & 3) {
- pmask = ~(-1ULL << (16 * (vq & 3)));
- }
- for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
- for (i = 0; i < 17; ++i) {
- env->vfp.pregs[i].p[j] &= pmask;
- }
- pmask = 0;
- }
-}
-
-/*
- * Notice a change in SVE vector size when changing EL.
- */
-void aarch64_sve_change_el(CPUARMState *env, int old_el,
- int new_el, bool el0_a64)
-{
- ARMCPU *cpu = env_archcpu(env);
- int old_len, new_len;
- bool old_a64, new_a64;
-
- /* Nothing to do if no SVE. */
- if (!cpu_isar_feature(aa64_sve, cpu)) {
- return;
- }
-
- /* Nothing to do if FP is disabled in either EL. */
- if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
- return;
- }
-
- /*
- * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
- * at ELx, or not available because the EL is in AArch32 state, then
- * for all purposes other than a direct read, the ZCR_ELx.LEN field
- * has an effective value of 0".
- *
- * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
- * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
- * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
- * we already have the correct register contents when encountering the
- * vq0->vq0 transition between EL0->EL1.
- */
- old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
- old_len = (old_a64 && !sve_exception_el(env, old_el)
- ? sve_zcr_len_for_el(env, old_el) : 0);
- new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
- new_len = (new_a64 && !sve_exception_el(env, new_el)
- ? sve_zcr_len_for_el(env, new_el) : 0);
-
- /* When changing vector length, clear inaccessible state. */
- if (new_len < old_len) {
- aarch64_sve_narrow_vq(env, new_len + 1);
- }
-}
-#endif
@@ -24,6 +24,7 @@
#include "sysemu/tcg.h"
#include "cpu-sve.h"
#include "tcg-sve.h"
+#include "cpu-exceptions-aa64.h"
void tcg_sve_enable_lens(unsigned long *sve_vq_map,
unsigned long *sve_vq_init, uint32_t max_vq)
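
The new include is presumably what provides fp_exception_el() and sve_exception_el() after the split; both are used by aarch64_sve_change_el() as moved in below.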
@@ -79,3 +80,88 @@ bool tcg_sve_validate_lens(unsigned long *sve_vq_map, uint32_t max_vq,
}
return true;
}
+
+/*
+ * The manual says that when SVE is enabled and VQ is widened the
+ * implementation is allowed to zero the previously inaccessible
+ * portion of the registers. The corollary to that is that when
+ * SVE is enabled and VQ is narrowed we are also allowed to zero
+ * the now inaccessible portion of the registers.
+ *
+ * The intent of this is that no predicate bit beyond VQ is ever set,
+ * which means that some operations on predicate registers themselves
+ * may operate on a full uint64_t or even be unrolled across the maximum
+ * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
+ * may well be cheaper than conditionals to restrict the operation
+ * to the relevant portion of a uint16_t[16].
+ */
+void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
+{
+ int i, j;
+ uint64_t pmask;
+
+ assert(vq >= 1 && vq <= ARM_MAX_VQ);
+ assert(vq <= env_archcpu(env)->sve_max_vq);
+
+ /* Zap the high bits of the zregs. */
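+    /* Each VQ unit is 128 bits, i.e. two uint64_t elements of d[]. */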
+ for (i = 0; i < 32; i++) {
+ memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
+ }
+
+ /* Zap the high bits of the pregs and ffr. */
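+    /* Each VQ unit is 16 predicate bits; p[] packs four such chunks per
+     * uint64_t, hence the vq / 4 word index and 16 * (vq & 3) bit count. */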
+ pmask = 0;
+ if (vq & 3) {
+ pmask = ~(-1ULL << (16 * (vq & 3)));
+ }
+ for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
+ for (i = 0; i < 17; ++i) {
+ env->vfp.pregs[i].p[j] &= pmask;
+ }
+ pmask = 0;
+ }
+}
+
+/*
+ * Notice a change in SVE vector size when changing EL.
+ */
+void aarch64_sve_change_el(CPUARMState *env, int old_el,
+ int new_el, bool el0_a64)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int old_len, new_len;
+ bool old_a64, new_a64;
+
+ /* Nothing to do if no SVE. */
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
+ return;
+ }
+
+ /* Nothing to do if FP is disabled in either EL. */
+ if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
+ return;
+ }
+
+ /*
+ * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
+ * at ELx, or not available because the EL is in AArch32 state, then
+ * for all purposes other than a direct read, the ZCR_ELx.LEN field
+ * has an effective value of 0".
+ *
+ * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
+ * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
+ * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
+ * we already have the correct register contents when encountering the
+ * vq0->vq0 transition between EL0->EL1.
+ */
+ old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
+ old_len = (old_a64 && !sve_exception_el(env, old_el)
+ ? sve_zcr_len_for_el(env, old_el) : 0);
+ new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
+ new_len = (new_a64 && !sve_exception_el(env, new_el)
+ ? sve_zcr_len_for_el(env, new_el) : 0);
+
+ /* When changing vector length, clear inaccessible state. */
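+    /* old_len/new_len are ZCR_ELx.LEN-style values, i.e. vq - 1, hence
+     * the + 1 to convert back to a VQ for aarch64_sve_narrow_vq(). */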
+ if (new_len < old_len) {
+ aarch64_sve_narrow_vq(env, new_len + 1);
+ }
+}
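
To make the predicate masking concrete, here is a standalone worked example of the pmask computation for vq = 5 (plain C, compilable outside QEMU; ARM_MAX_VQ is mirrored only for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define ARM_MAX_VQ 16                /* mirrors QEMU's value */

    int main(void)
    {
        unsigned vq = 5;                 /* 5 * 16 = 80 live predicate bits */
        uint64_t pmask = 0;

        if (vq & 3) {
            pmask = ~(-1ULL << (16 * (vq & 3)));
        }

        /* The word loop starts at j = vq / 4 = 1, so p[0] (the first four
         * 16-bit chunks) is never touched. */
        assert(vq / 4 == 1);

        /* p[1] keeps only its low 16 bits (the fifth chunk); p[2] and p[3]
         * are then cleared outright once pmask drops to 0. */
        assert(pmask == 0xffffULL);
        return 0;
    }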