@@ -707,8 +707,9 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 }

-static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
-                                       target_ulong page, unsigned bits)
+static void tlb_flush_range_locked(CPUArchState *env, int midx,
+                                   target_ulong addr, target_ulong len,
+                                   unsigned bits)
 {
     CPUTLBDesc *d = &env_tlb(env)->d[midx];
     CPUTLBDescFast *f = &env_tlb(env)->f[midx];
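Note on the new parameter: @len is a byte count. The per-page loop added below steps through the range in TARGET_PAGE_SIZE increments, and the wrappers at the end of this patch pass exactly TARGET_PAGE_SIZE for the existing single-page calls, so @len is assumed to be page-aligned and non-zero.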
@@ -718,20 +719,26 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
      * If @bits is smaller than the tlb size, there may be multiple entries
      * within the TLB; otherwise all addresses that match under @mask hit
      * the same TLB entry.
-     *
      * TODO: Perhaps allow bits to be a few bits less than the size.
      * For now, just flush the entire TLB.
+     *
+     * If @len is larger than the tlb size, then it will take longer to
+     * test all of the entries in the TLB than it will to flush it all.
      */
-    if (mask < f->mask) {
+    if (mask < f->mask || len > f->mask) {
         tlb_debug("forcing full flush midx %d ("
-                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
-                  midx, page, mask);
+                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+                  midx, addr, mask, len);
         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
         return;
     }
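To make the fall-back condition concrete, here is a minimal standalone sketch (toy constants and names, not QEMU code): table_mask stands in for f->mask, and addr_mask for the mask that the surrounding function derives from @bits. The decision mirrors mask < f->mask || len > f->mask above.

    /* Toy sketch of the full-flush heuristic above; not QEMU code. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool full_flush(uint64_t addr_mask, uint64_t table_mask,
                           uint64_t len)
    {
        /* Mirrors: mask < f->mask || len > f->mask */
        return addr_mask < table_mask || len > table_mask;
    }

    int main(void)
    {
        uint64_t table_mask = 0x1fff;   /* toy stand-in for f->mask */

        /* One 4 KiB page, all address bits significant: walk entries. */
        printf("%d\n", full_flush(UINT64_MAX, table_mask, 0x1000));   /* 0 */
        /* A 1 MiB range: cheaper to reset the whole table. */
        printf("%d\n", full_flush(UINT64_MAX, table_mask, 0x100000)); /* 1 */
        /* Too few significant bits: many pages alias, flush it all. */
        printf("%d\n", full_flush(0xfff, table_mask, 0x1000));        /* 1 */
        return 0;
    }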

-    /* Check if we need to flush due to large pages. */
-    if ((page & d->large_page_mask) == d->large_page_addr) {
+    /*
+     * Check if we need to flush due to large pages.
+     * Because large_page_mask contains all 1's from the msb,
+     * we only need to test the end of the range.
+     */
+    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
         tlb_debug("forcing full flush midx %d ("
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   midx, d->large_page_addr, d->large_page_mask);
@@ -739,14 +746,20 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
         return;
     }
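A worked example of the end-of-range test (toy numbers, not taken from the patch): because large_page_mask has the form 1...10...0, masking the last byte of the range detects a range whose end falls inside the recorded large-page region.

    /* Toy check of the large-page range test above; not QEMU code. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* A recorded 2 MiB large-page region at 0x00400000. */
        uint64_t large_page_mask = ~(uint64_t)0x1fffff; /* 1's from msb */
        uint64_t large_page_addr = 0x00400000;

        /* A two-page range whose last byte lands in the region: match. */
        uint64_t addr = 0x003ff000, len = 0x2000;
        assert(((addr + len - 1) & large_page_mask) == large_page_addr);

        /* One page from the same start stops short of it: no match. */
        assert(((addr + 0x1000 - 1) & large_page_mask) != large_page_addr);
        return 0;
    }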

-    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
-        tlb_n_used_entries_dec(env, midx);
+    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
+        target_ulong page = addr + i;
+        CPUTLBEntry *entry = tlb_entry(env, midx, page);
+
+        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
+            tlb_n_used_entries_dec(env, midx);
+        }
+        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
     }
-    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
 }

 typedef struct {
     target_ulong addr;
+    target_ulong len;
     uint16_t idxmap;
     uint16_t bits;
 } TLBFlushPageBitsByMMUIdxData;
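With the new field, a single struct now describes a whole range. As a sketch only (hypothetical fragment, assuming the cputlb.c context with addr, idxmap and bits in scope; every wrapper touched below still sets a single page):

    /* Hypothetical: a 16-page request for the async helper below. */
    TLBFlushPageBitsByMMUIdxData d = {
        .addr = addr & TARGET_PAGE_MASK,
        .len = 16 * TARGET_PAGE_SIZE,
        .idxmap = idxmap,
        .bits = bits,
    };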
@@ -760,18 +773,20 @@ tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,

     assert_cpu_is_self(cpu);

-    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
-              d.addr, d.bits, d.idxmap);
+    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+              d.addr, d.bits, d.len, d.idxmap);

     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if ((d.idxmap >> mmu_idx) & 1) {
-            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
+            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
         }
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);

-    tb_flush_jmp_cache(cpu, d.addr);
+    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
+        tb_flush_jmp_cache(cpu, d.addr + i);
+    }
 }
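For the single-page case the new loop is equivalent to the line it replaces: with d.len == TARGET_PAGE_SIZE it runs exactly one iteration and calls tb_flush_jmp_cache(cpu, d.addr) as before.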

 static bool encode_pbm_to_runon(run_on_cpu_data *out,
@@ -829,6 +844,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,

     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;

@@ -865,6 +881,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,

     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;

@@ -908,6 +925,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,

     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
