@@ -195,6 +195,9 @@ extern const struct VMStateDescription vmstate_arm_cpu;
void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);
+bool arm_cpu_do_hvc(CPUState *cs);
+bool arm_cpu_do_smc(CPUState *cs);
+
void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
@@ -51,6 +51,8 @@
#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
#define EXCP_STREX 10
+#define EXCP_HVC 11
+#define EXCP_SMC 12
#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
@@ -466,14 +466,11 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
env->exception.syndrome);
}
- env->cp15.esr_el1 = env->exception.syndrome;
- env->cp15.far_el1 = env->exception.vaddress;
-
switch (cs->exception_index) {
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
- env->cp15.far_el1);
+ env->exception.vaddress);
break;
case EXCP_BKPT:
case EXCP_UDEF:
@@ -485,10 +482,37 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
case EXCP_FIQ:
addr += 0x100;
break;
+ case EXCP_HVC:
+ if (arm_cpu_do_hvc(cs)) {
+ return;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "Unhandled HVC exception\n");
+ env->exception.syndrome = syn_uncategorized();
+ if (is_a64(env)) {
+ env->pc -= 4;
+ } else {
+ env->regs[15] -= 4;
+ }
+ break;
+ case EXCP_SMC:
+ if (arm_cpu_do_smc(cs)) {
+ return;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "Unhandled SMC exception\n");
+ env->exception.syndrome = syn_uncategorized();
+ if (is_a64(env)) {
+ env->pc -= 4;
+ } else {
+ env->regs[15] -= 4;
+ }
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
}
+ env->cp15.esr_el1 = env->exception.syndrome;
+ env->cp15.far_el1 = env->exception.vaddress;
+
if (is_a64(env)) {
env->banked_spsr[0] = pstate_read(env);
env->sp_el[arm_current_pl(env)] = env->xregs[31];
@@ -3255,6 +3255,16 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
env->thumb = addr & 1;
}
+bool arm_cpu_do_hvc(CPUState *cs)
+{
+ return false;
+}
+
+bool arm_cpu_do_smc(CPUState *cs)
+{
+ return false;
+}
+
/* Handle a CPU exception. */
void arm_cpu_do_interrupt(CPUState *cs)
{
@@ -3357,6 +3367,26 @@ void arm_cpu_do_interrupt(CPUState *cs)
mask = CPSR_A | CPSR_I | CPSR_F;
offset = 4;
break;
+ case EXCP_HVC:
+ if (arm_cpu_do_hvc(cs)) {
+ return;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "Unhandled HVC exception\n");
+ new_mode = ARM_CPU_MODE_UND;
+ addr = 0x04;
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ break;
+ case EXCP_SMC:
+ if (arm_cpu_do_smc(cs)) {
+ return;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "Unhandled SMC exception\n");
+ new_mode = ARM_CPU_MODE_UND;
+ addr = 0x04;
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
@@ -184,6 +184,26 @@ static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb)
| (is_thumb ? 0 : ARM_EL_IL);
}
+static inline uint32_t syn_aa64_hvc(uint32_t imm16)
+{
+ return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_hvc(uint32_t imm16)
+{
+ return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa64_smc(uint32_t imm16)
+{
+ return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_smc(void)
+{
+ return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
@@ -1449,11 +1449,18 @@ static void disas_exc(DisasContext *s, uint32_t insn)
-        /* SVC, HVC, SMC; since we don't support the Virtualization
-         * or TrustZone extensions these all UNDEF except SVC.
-         */
+        /* SVC, HVC, SMC; HVC and SMC are raised as exceptions here and
+         * either handled or rejected at exception time.
+         */
- if (op2_ll != 1) {
- unallocated_encoding(s);
+ switch (op2_ll) {
+ case 1:
+ gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
+ break;
+ case 2:
+ gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
+ break;
+ case 3:
+ gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16));
break;
}
- gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
+ unallocated_encoding(s);
break;
case 1:
if (op2_ll != 0) {
@@ -7727,9 +7727,14 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
case 7:
{
int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
- /* SMC instruction (op1 == 3)
- and undefined instructions (op1 == 0 || op1 == 2)
- will trap */
+ /* HVC and SMC instructions */
+ if (op1 == 2) {
+ gen_exception_insn(s, 0, EXCP_HVC, syn_aa32_hvc(imm16));
+ break;
+ } else if (op1 == 3) {
+ gen_exception_insn(s, 0, EXCP_SMC, syn_aa32_smc());
+ break;
+ }
if (op1 != 1) {
goto illegal_op;
}
@@ -9555,10 +9560,15 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
goto illegal_op;
if (insn & (1 << 26)) {
- /* Secure monitor call (v6Z) */
- qemu_log_mask(LOG_UNIMP,
- "arm: unimplemented secure monitor call\n");
- goto illegal_op; /* not implemented. */
+ if (!(insn & (1 << 20))) {
+ /* Hypervisor call (v7) */
+ uint32_t imm16 = extract32(insn, 0, 12);
+ imm16 |= extract32(insn, 16, 4) << 12;
+ gen_exception_insn(s, 0, EXCP_HVC, syn_aa32_hvc(imm16));
+ } else {
+ /* Secure monitor call (v6+) */
+ gen_exception_insn(s, 0, EXCP_SMC, syn_aa32_smc());
+ }
} else {
op = (insn >> 20) & 7;
switch (op) {