@@ -98,6 +98,7 @@ DEF_HELPER_FLAGS_3(itlbp_pa11, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_FLAGS_3(idtlbt_pa20, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_FLAGS_3(iitlbt_pa20, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_FLAGS_2(ptlb, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(ptlb_l, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tl, env, tl)
DEF_HELPER_FLAGS_1(change_prot_id, TCG_CALL_NO_RWG, void, env)
@@ -161,9 +161,23 @@ ixtlbxf 000001 00000 r:5 00 0 data:1 01000 addr:1 0 00000
# pa2.0 tlb insert idtlbt and iitlbt instructions
ixtlbt          000001 r2:5 r1:5 000 data:1 100000 0 00000      # idtlbt

-pxtlbx          000001 b:5 x:5 sp:2 0100100 local:1 m:1 ----- data=1
-pxtlbx          000001 b:5 x:5 ...   000100  local:1 m:1 ----- \
-                sp=%assemble_sr3x data=0
+# pdtlb, pitlb
+pxtlb           000001 b:5 x:5 sp:2 01001000 m:1 -----   \
+                &ldst disp=0 scale=0 size=0 t=0
+pxtlb           000001 b:5 x:5 ...   0001000 m:1 -----   \
+                &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x
+
+# ... pa20 local
+pxtlb_l         000001 b:5 x:5 sp:2 01011000 m:1 -----   \
+                &ldst disp=0 scale=0 size=0 t=0
+pxtlb_l         000001 b:5 x:5 ...   0011000 m:1 -----   \
+                &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x
+
+# pdtlbe, pitlbe
+pxtlbe          000001 b:5 x:5 sp:2 01001001 m:1 -----   \
+                &ldst disp=0 scale=0 size=0 t=0
+pxtlbe          000001 b:5 x:5 ...   0001001 m:1 -----   \
+                &ldst disp=0 scale=0 size=0 t=0 sp=%assemble_sr3x

lpa             000001 b:5 x:5 sp:2 01001101 m:1 t:5 \
                &ldst disp=0 scale=0 size=0
@@ -448,16 +448,35 @@ void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
    itlbt_pa20(env, r1, r2, va_b);
}

-/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
-   synchronous across all processors.  */
+/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
-    target_ulong addr = (target_ulong) data.target_ptr;
+    vaddr start = data.target_ptr;
+    vaddr end;

-    hppa_flush_tlb_range(env, addr, addr);
+    /*
+     * PA2.0 allows a range of pages encoded into GR[b], which we have
+     * copied into the bottom bits of the otherwise page-aligned address.
+     * PA1.x will always provide zero here, for a single page flush.
+     */
+    end = start & 0xf;
+    start &= TARGET_PAGE_MASK;
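+    /* An encoded count of N selects a range of 4**N pages. */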
+    end = TARGET_PAGE_SIZE << (2 * end);
+    end = start + end - 1;
+
+    hppa_flush_tlb_range(env, start, end);
}

+/* This is local to the current cpu. */
+void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
+{
+    trace_hppa_tlb_ptlb_local(env);
+    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
+}
+
+/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
@@ -2320,7 +2320,7 @@ static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
#endif
}

-static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
+static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
@@ -2330,15 +2330,56 @@ static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
-    if (a->m) {
-        save_gpr(ctx, a->b, ofs);
+
+    /*
+     * Page align now, rather than later, so that we can add in the
+     * pa2.0 page_size field from the low 4 bits of GR[b].
+     */
+    tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
+    if (ctx->is_pa20) {
+        tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
    }
-    if (a->local) {
-        gen_helper_ptlbe(tcg_env);
+
+    if (local) {
+        gen_helper_ptlb_l(tcg_env, addr);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }
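+    /* Update GR[b] for ,m only after its low bits were consumed above. */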
+    if (a->m) {
+        save_gpr(ctx, a->b, ofs);
+    }
+
+    /* Exit TB for TLB change if mmu is enabled. */
+    if (ctx->tb_flags & PSW_C) {
+        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+    }
+    return nullify_end(ctx);
+#endif
+}
+
+static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
+{
+    return do_pxtlb(ctx, a, false);
+}
+
+static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
+{
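+    /* The local form exists only on pa2.0; reject it as illegal elsewhere. */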
+    return ctx->is_pa20 && do_pxtlb(ctx, a, true);
+}
+
+static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
+{
+    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+    nullify_over(ctx);
+
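+    /* The whole TLB is flushed; the address serves only the ,m base update. */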
+    trans_nop_addrx(ctx, a);
+    gen_helper_ptlbe(tcg_env);
+

    /* Exit TB for TLB change if mmu is enabled. */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
@@ -10,6 +10,7 @@ disable hppa_tlb_fill_success(void *env, uint64_t addr, uint64_t phys, int size,
disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
disable hppa_tlb_itlbp(void *env, void *ent, int access_id, int u, int pl2, int pl1, int type, int b, int d, int t) "env=%p ent=%p access_id=%x u=%d pl2=%d pl1=%d type=%d b=%d d=%d t=%d"
disable hppa_tlb_ptlb(void *env) "env=%p"
+disable hppa_tlb_ptlb_local(void *env) "env=%p"
disable hppa_tlb_ptlbe(void *env) "env=%p"
disable hppa_tlb_lpa_success(void *env, uint64_t addr, uint64_t phys) "env=%p addr=0x%lx phys=0x%lx"
disable hppa_tlb_lpa_failed(void *env, uint64_t addr) "env=%p addr=0x%lx"