@@ -678,7 +678,7 @@ void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
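
With this change the public prototype takes the inclusive address of the last
byte in the range instead of a one-past-the-end address. A minimal caller
sketch of the conversion (base and len are hypothetical names, not from this
patch):

    /* Before: exclusive upper bound, one past the last byte touched. */
    tb_invalidate_phys_range(base, base + len);
    /* After: inclusive upper bound.  len is assumed non-zero here,
     * since len == 0 would wrap base + len - 1 below base. */
    tb_invalidate_phys_range(base, base + len - 1);

The inclusive form also removes the overflow hazard for a range that ends at
the very top of the address space, where an exclusive bound would wrap to 0.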
/* GETPC is the true target of the return instruction that we'll execute. */
@@ -991,11 +991,10 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
TranslationBlock *tb;
PageForEachNext n;
- tb_page_addr_t last = end - 1;
assert_memory_lock();
@@ -1011,11 +1010,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
*/
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
- tb_page_addr_t start, end;
+ tb_page_addr_t start, last;
start = addr & TARGET_PAGE_MASK;
- end = start + TARGET_PAGE_SIZE;
- tb_invalidate_phys_range(start, end);
+ last = addr | ~TARGET_PAGE_MASK;
+ tb_invalidate_phys_range(start, last);
}
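
The last byte of the page containing addr is derived with the same mask that
yields its first byte. A standalone sketch of the arithmetic, assuming a
hypothetical 4 KiB page size so that TARGET_PAGE_MASK is ~0xfff:

    /* Assuming 4 KiB pages: TARGET_PAGE_MASK == ~(tb_page_addr_t)0xfff. */
    tb_page_addr_t addr  = 0x12345;
    tb_page_addr_t start = addr & TARGET_PAGE_MASK;   /* 0x12000, first byte */
    tb_page_addr_t last  = addr | ~TARGET_PAGE_MASK;  /* 0x12fff, last byte  */

Unlike start + TARGET_PAGE_SIZE, the OR form cannot overflow on the final
page of the address space.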
/*
@@ -1169,28 +1168,30 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
/*
* Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * [start;last]. NOTE: start and last may refer to *different* physical pages.
* 'is_cpu_write_access' should be true if called from a real cpu write
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
{
struct page_collection *pages;
- tb_page_addr_t next;
+ tb_page_addr_t index, index_last;
- pages = page_collection_lock(start, end - 1);
- for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
- start < end;
- start = next, next += TARGET_PAGE_SIZE) {
- PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
- tb_page_addr_t bound = MIN(next, end);
+ pages = page_collection_lock(start, last);
+
+ index_last = last >> TARGET_PAGE_BITS;
+ for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
+ PageDesc *pd = page_find(index);
+ tb_page_addr_t bound;
if (pd == NULL) {
continue;
}
assert_page_locked(pd);
- tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
+ bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
+ bound = MIN(bound, last);
+ tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
}
page_collection_unlock(pages);
}
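
To make the new loop shape concrete, here is a self-contained sketch (not
QEMU code; PAGE_BITS and the helper name are illustrative) of how an
inclusive byte range [start, last] decomposes into per-page inclusive
subranges, mirroring the index/bound computation above:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS 12                                  /* assume 4 KiB pages */
    #define PAGE_MASK (~(uint64_t)((1u << PAGE_BITS) - 1))

    static void split_range(uint64_t start, uint64_t last)
    {
        uint64_t index_last = last >> PAGE_BITS;
        for (uint64_t index = start >> PAGE_BITS; index <= index_last; index++) {
            uint64_t first = index << PAGE_BITS;          /* first byte of page */
            uint64_t bound = first | ~PAGE_MASK;          /* last byte of page  */
            bound = bound < last ? bound : last;          /* clamp final page   */
            first = first > start ? first : start;        /* clamp first page   */
            printf("page %" PRIu64 ": [%#" PRIx64 ", %#" PRIx64 "]\n",
                   index, first, bound);
        }
    }

For example, split_range(0x12345, 0x14000) reports three subranges:
[0x12345, 0x12fff], [0x13000, 0x13fff] and [0x14000, 0x14000].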
@@ -572,7 +572,7 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
if (addr != -1) {
- tb_invalidate_phys_range(addr, addr + 1);
+ tb_invalidate_phys_range(addr, addr);
}
}
}
@@ -516,7 +516,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
- tb_invalidate_phys_range(start, last + 1);
+ tb_invalidate_phys_range(start, last);
}
}
@@ -2527,7 +2527,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
- tb_invalidate_phys_range(addr, addr + length);
+ tb_invalidate_phys_range(addr, addr + length - 1);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
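
Callers that hold a (base, length) pair now convert with length - 1, as
above. One caveat worth noting for such conversions in general (an
observation, not a claim about this call site's invariants): length must be
non-zero, or addr + length - 1 wraps below addr and the inclusive range
becomes meaningless.

    /* Hedged sketch: guard a (addr, length) -> inclusive-last conversion. */
    if (length != 0) {
        tb_invalidate_phys_range(addr, addr + length - 1);
    }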