@@ -509,7 +509,7 @@ uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
#ifndef CONFIG_USER_ONLY
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
- load_psw(env, mask, addr);
+ s390_cpu_set_psw(env, mask, addr);
cpu_loop_exit(env_cpu(env));
}
@@ -845,6 +845,9 @@ int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf,
int s390_cpu_restart(S390CPU *cpu);
void s390_init_sigp(void);
+/* helper.c */
+void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
+uint64_t s390_cpu_get_psw_mask(CPUS390XState *env);
/* outside of target/s390x/ */
S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
@@ -252,7 +252,7 @@ static void do_program_interrupt(CPUS390XState *env)
lowcore->pgm_ilen = cpu_to_be16(ilen);
lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
- lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->program_new_psw.mask);
addr = be64_to_cpu(lowcore->program_new_psw.addr);
@@ -260,7 +260,7 @@ static void do_program_interrupt(CPUS390XState *env)
cpu_unmap_lowcore(lowcore);
- load_psw(env, mask, addr);
+ s390_cpu_set_psw(env, mask, addr);
}
static void do_svc_interrupt(CPUS390XState *env)
@@ -272,14 +272,14 @@ static void do_svc_interrupt(CPUS390XState *env)
lowcore->svc_code = cpu_to_be16(env->int_svc_code);
lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
- lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
mask = be64_to_cpu(lowcore->svc_new_psw.mask);
addr = be64_to_cpu(lowcore->svc_new_psw.addr);
cpu_unmap_lowcore(lowcore);
- load_psw(env, mask, addr);
+ s390_cpu_set_psw(env, mask, addr);
/* When a PER event is pending, the PER exception has to happen
immediately after the SERVICE CALL one. */
@@ -348,12 +348,12 @@ static void do_ext_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->external_new_psw.mask);
addr = be64_to_cpu(lowcore->external_new_psw.addr);
- lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
cpu_unmap_lowcore(lowcore);
- load_psw(env, mask, addr);
+ s390_cpu_set_psw(env, mask, addr);
}
static void do_io_interrupt(CPUS390XState *env)
@@ -373,7 +373,7 @@ static void do_io_interrupt(CPUS390XState *env)
lowcore->subchannel_nr = cpu_to_be16(io->nr);
lowcore->io_int_parm = cpu_to_be32(io->parm);
lowcore->io_int_word = cpu_to_be32(io->word);
- lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->io_new_psw.mask);
addr = be64_to_cpu(lowcore->io_new_psw.addr);
@@ -381,7 +381,7 @@ static void do_io_interrupt(CPUS390XState *env)
cpu_unmap_lowcore(lowcore);
g_free(io);
- load_psw(env, mask, addr);
+ s390_cpu_set_psw(env, mask, addr);
}
typedef struct MchkExtSaveArea {
@@ -457,14 +457,14 @@ static void do_mchk_interrupt(CPUS390XState *env)
lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);
lowcore->mcic = cpu_to_be64(mcic);
- lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
addr = be64_to_cpu(lowcore->mcck_new_psw.addr);
cpu_unmap_lowcore(lowcore);
- load_psw(env, mask, addr);
+ s390_cpu_set_psw(env, mask, addr);
}
void s390_cpu_do_interrupt(CPUState *cs)
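
Every handler converted above follows the same lowcore exchange: fold the condition code into the current mask with s390_cpu_get_psw_mask(), store that PSW into the class-specific *_old_psw slot, read the matching *_new_psw slot, and install it with s390_cpu_set_psw(). The following is a condensed standalone sketch of that shape; the Toy* types and toy_* helpers are illustrative stand-ins rather than QEMU's, and the sketch omits the cpu_to_be64/be64_to_cpu byte swapping and the lowcore map/unmap that the real handlers perform.

#include <stdint.h>

/* Illustrative stand-ins for QEMU's CPUS390XState and LowCore fields. */
typedef struct { uint64_t mask, addr; } ToyPSW;
typedef struct { ToyPSW psw; } ToyCPUState;
typedef struct { ToyPSW old_psw, new_psw; } ToyLowCore;

/* Stand-ins for the two helpers this patch renames and moves. */
static uint64_t toy_get_psw_mask(ToyCPUState *env) { return env->psw.mask; }
static void toy_set_psw(ToyCPUState *env, uint64_t mask, uint64_t addr)
{
    env->psw.mask = mask;
    env->psw.addr = addr;
}

/* The shared shape of do_program/svc/ext/io/mchk_interrupt(). */
static void toy_deliver_interrupt(ToyCPUState *env, ToyLowCore *lowcore)
{
    /* 1. Save the old PSW where the guest OS expects to find it. */
    lowcore->old_psw.mask = toy_get_psw_mask(env);
    lowcore->old_psw.addr = env->psw.addr;

    /* 2. Install the new PSW the guest OS set up for this interrupt class. */
    toy_set_psw(env, lowcore->new_psw.mask, lowcore->new_psw.addr);
}

int main(void)
{
    ToyCPUState cpu = { .psw = { .mask = 0x1, .addr = 0x1000 } };
    ToyLowCore lc = { .new_psw = { .mask = 0x2, .addr = 0x2000 } };

    toy_deliver_interrupt(&cpu, &lc);
    return cpu.psw.addr == 0x2000 ? 0 : 1;
}
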
@@ -592,9 +592,11 @@ void s390x_cpu_debug_excp_handler(CPUState *cs)
and MVCS instructions are not used. */
env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;
- /* Remove all watchpoints to re-execute the code. A PER exception
- will be triggered, it will call load_psw which will recompute
- the watchpoints. */
+ /*
+ * Remove all watchpoints to re-execute the code. A PER exception
+ * will be triggered; it will call s390_cpu_set_psw, which will
+ * recompute the watchpoints.
+ */
cpu_watchpoint_remove_all(cs, BP_CPU);
cpu_loop_exit_noexc(cs);
}
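
The recompute referred to in this comment works because s390_cpu_set_psw() (moved into common code later in this patch) re-derives the watchpoints whenever the PER bit of the mask flips. Below is a tiny standalone illustration of that flip test; the PSW_MASK_PER value is copied here as an assumption so the snippet builds without QEMU headers, and the example masks are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Assumed copy of QEMU's PSW_MASK_PER (PSW bit 1). */
#define PSW_MASK_PER 0x4000000000000000ULL

int main(void)
{
    uint64_t old_mask = 0x0000000180000000ULL;   /* PER disabled */
    uint64_t new_mask = old_mask | PSW_MASK_PER; /* PER enabled  */

    /* Same test as in s390_cpu_set_psw(): act only when the bit flips. */
    if ((old_mask ^ new_mask) & PSW_MASK_PER) {
        printf("PER mask changed -> watchpoints recomputed\n");
    }
    return 0;
}
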
@@ -104,44 +104,6 @@ void s390_handle_wait(S390CPU *cpu)
}
}
-void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
-{
- uint64_t old_mask = env->psw.mask;
-
- env->psw.addr = addr;
- env->psw.mask = mask;
-
- /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
- if (!tcg_enabled()) {
- return;
- }
- env->cc_op = (mask >> 44) & 3;
-
- if ((old_mask ^ mask) & PSW_MASK_PER) {
- s390_cpu_recompute_watchpoints(env_cpu(env));
- }
-
- if (mask & PSW_MASK_WAIT) {
- s390_handle_wait(env_archcpu(env));
- }
-}
-
-uint64_t get_psw_mask(CPUS390XState *env)
-{
- uint64_t r = env->psw.mask;
-
- if (tcg_enabled()) {
- env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
- env->cc_vr);
-
- r &= ~PSW_MASK_CC;
- assert(!(env->cc_op & ~3));
- r |= (uint64_t)env->cc_op << 44;
- }
-
- return r;
-}
-
LowCore *cpu_map_lowcore(CPUS390XState *env)
{
LowCore *lowcore;
@@ -168,7 +130,7 @@ void do_restart_interrupt(CPUS390XState *env)
lowcore = cpu_map_lowcore(env);
- lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->restart_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->restart_new_psw.mask);
addr = be64_to_cpu(lowcore->restart_new_psw.addr);
@@ -176,7 +138,7 @@ void do_restart_interrupt(CPUS390XState *env)
cpu_unmap_lowcore(lowcore);
env->pending_int &= ~INTERRUPT_RESTART;
- load_psw(env, mask, addr);
+ s390_cpu_set_psw(env, mask, addr);
}
void s390_cpu_recompute_watchpoints(CPUState *cs)
@@ -266,7 +228,7 @@ int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
}
sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
- sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
+ sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env));
sa->prefix = cpu_to_be32(cpu->env.psa);
sa->fpc = cpu_to_be32(cpu->env.fpc);
sa->todpr = cpu_to_be32(cpu->env.todpr);
@@ -323,8 +285,53 @@ int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
cpu_physical_memory_unmap(sa, len, 1, len);
return 0;
}
+#else
+/* For user-only, tcg is always enabled. */
+#define tcg_enabled() true
#endif /* CONFIG_USER_ONLY */
+void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+#ifndef CONFIG_USER_ONLY
+ uint64_t old_mask = env->psw.mask;
+#endif
+
+ env->psw.addr = addr;
+ env->psw.mask = mask;
+
+ /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
+ if (!tcg_enabled()) {
+ return;
+ }
+ env->cc_op = (mask >> 44) & 3;
+
+#ifndef CONFIG_USER_ONLY
+ if ((old_mask ^ mask) & PSW_MASK_PER) {
+ s390_cpu_recompute_watchpoints(env_cpu(env));
+ }
+
+ if (mask & PSW_MASK_WAIT) {
+ s390_handle_wait(env_archcpu(env));
+ }
+#endif
+}
+
+uint64_t s390_cpu_get_psw_mask(CPUS390XState *env)
+{
+ uint64_t r = env->psw.mask;
+
+ if (tcg_enabled()) {
+ env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
+ env->cc_vr);
+
+ r &= ~PSW_MASK_CC;
+ assert(!(env->cc_op & ~3));
+ r |= (uint64_t)env->cc_op << 44;
+ }
+
+ return r;
+}
+
void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
S390CPU *cpu = S390_CPU(cs);
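
The only arithmetic the relocated helpers carry is folding the condition code into PSW bits 18-19 (bits 45:44 of the 64-bit mask, hence the shifts by 44) and pulling it back out. Here is a minimal standalone round-trip of that fold/extract; PSW_MASK_CC is copied from QEMU's definition as an assumption, and the example mask value is arbitrary, chosen only for illustration.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed copy of QEMU's PSW_MASK_CC: PSW bits 18-19, i.e. bits 45:44. */
#define PSW_MASK_CC 0x0000300000000000ULL

/* Fold a condition code into a mask, as s390_cpu_get_psw_mask() does. */
static uint64_t fold_cc(uint64_t mask, uint64_t cc)
{
    assert(cc <= 3);
    return (mask & ~PSW_MASK_CC) | (cc << 44);
}

/* Pull the condition code back out, as s390_cpu_set_psw() does. */
static uint64_t extract_cc(uint64_t mask)
{
    return (mask >> 44) & 3;
}

int main(void)
{
    uint64_t mask = 0x0705000180000000ULL;  /* arbitrary example mask */

    for (uint64_t cc = 0; cc < 4; cc++) {
        uint64_t m = fold_cc(mask, cc);
        assert(extract_cc(m) == cc);
        printf("cc=%" PRIu64 " -> mask=%016" PRIx64 "\n", cc, m);
    }
    return 0;
}
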
@@ -235,10 +235,6 @@ int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
const char *cc_name(enum cc_op cc_op);
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
uint64_t vr);
-#ifndef CONFIG_USER_ONLY
-void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
-#endif /* CONFIG_USER_ONLY */
-
/* cpu.c */
#ifndef CONFIG_USER_ONLY
@@ -312,7 +308,6 @@ void s390_cpu_gdb_init(CPUState *cs);
void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
void do_restart_interrupt(CPUS390XState *env);
#ifndef CONFIG_USER_ONLY
-uint64_t get_psw_mask(CPUS390XState *env);
void s390_cpu_recompute_watchpoints(CPUState *cs);
void s390x_tod_timer(void *opaque);
void s390x_cpu_timer(void *opaque);
@@ -235,7 +235,8 @@ static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
cpu_synchronize_state(cs);
/*
* Set OPERATING (and unhalting) before loading the restart PSW.
- * load_psw() will then properly halt the CPU again if necessary (TCG).
+ * s390_cpu_set_psw() will then properly halt the CPU again if
+ * necessary (TCG).
*/
s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
do_restart_interrupt(&cpu->env);