While we previously assumed an existing memory lock protected the page
look-up, in the MTTCG SoftMMU case the memory lock is provided by the
tb_lock. As a result we push the taking of this lock up the call tree.
This requires slightly different entry points into
tb_invalidate_phys_range for the SoftMMU and user-mode cases.

This also means user-mode breakpoint insertion needs to take two locks,
but as it previously took none this is an improvement.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 exec.c          | 16 ++++++++++++++++
 translate-all.c | 37 +++++++++++++++++++++++++++++--------
 2 files changed, 45 insertions(+), 8 deletions(-)

--- a/exec.c
+++ b/exec.c
@@ -734,7 +734,11 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
+ mmap_lock();
+ tb_lock();
tb_invalidate_phys_page_range(pc, pc + 1, 0);
+ tb_unlock();
+ mmap_unlock();
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
@@ -743,6 +747,7 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
int asidx = cpu_asidx_from_attrs(cpu, attrs);
if (phys != -1) {
+ /* Locks grabbed by tb_invalidate_phys_addr */
tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
phys | (pc & ~TARGET_PAGE_MASK));
}
@@ -2072,7 +2077,11 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
uint64_t val, unsigned size)
{
+ bool locked = false;
+
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+ locked = true;
+ tb_lock();
tb_invalidate_phys_page_fast(ram_addr, size);
}
switch (size) {
@@ -2088,6 +2097,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
default:
abort();
}
+
+ if (locked) {
+ tb_unlock();
+ }
+
/* Set both VGA and migration bits for simplicity and to remove
* the notdirty callback faster.
*/
@@ -2566,7 +2580,9 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
+ tb_lock();
tb_invalidate_phys_range(addr, addr + length);
+ tb_unlock();
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
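
The notdirty_mem_write() hunk above follows a conditional locking
pattern: tb_lock is taken only when the written page may still contain
translated code, and is released only once the guest write itself has
landed. Below is a minimal standalone sketch of that shape; it is not
part of the patch, and the *_stub names are hypothetical stand-ins for
the real QEMU primitives.

/*
 * Sketch of the conditional locking in notdirty_mem_write().
 * Not part of the patch; *_stub names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_mutex_t tb_lock_stub = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for cpu_physical_memory_get_dirty_flag(..., DIRTY_MEMORY_CODE) */
static bool page_may_hold_code_stub(uint64_t ram_addr)
{
    return true;
}

/* Stand-in for the stb_p/stw_p/stl_p/stq_p switch on 'size' */
static void guest_write_stub(uint64_t ram_addr, uint64_t val, unsigned size)
{
}

static void notdirty_write_sketch(uint64_t ram_addr, uint64_t val,
                                  unsigned size)
{
    bool locked = false;

    /* Take tb_lock only when translations may need invalidating */
    if (page_may_hold_code_stub(ram_addr)) {
        locked = true;
        pthread_mutex_lock(&tb_lock_stub);
        /* tb_invalidate_phys_page_fast(ram_addr, size) in the patch */
    }

    guest_write_stub(ram_addr, val, size);

    /* The patch unlocks only after the write itself has completed */
    if (locked) {
        pthread_mutex_unlock(&tb_lock_stub);
    }
}
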
--- a/translate-all.c
+++ b/translate-all.c
@@ -1355,12 +1355,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
- * Called with mmap_lock held for user-mode emulation
+ * Called with mmap_lock held for user-mode emulation, grabs tb_lock
+ * Called with tb_lock held for system-mode emulation
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
- assert_memory_lock();
-
while (start < end) {
tb_invalidate_phys_page_range(start, end, 0);
start &= TARGET_PAGE_MASK;
@@ -1368,6 +1367,21 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
}
}
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+ assert_tb_lock();
+ tb_invalidate_phys_range_1(start, end);
+}
+#else
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+ assert_memory_lock();
+ tb_lock();
+ tb_invalidate_phys_range_1(start, end);
+ tb_unlock();
+}
+#endif
/*
* Invalidate all TBs which intersect with the target physical address range
* [start;end[. NOTE: start and end must refer to the *same* physical page.
@@ -1375,7 +1389,8 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*
- * Called with mmap_lock held for user-mode emulation
+ * Called with tb_lock/mmap_lock held for user-mode emulation
+ * Called with tb_lock held for system-mode emulation
*/
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
@@ -1398,6 +1413,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
#endif /* TARGET_HAS_PRECISE_SMC */
assert_memory_lock();
+ assert_tb_lock();
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
@@ -1412,7 +1428,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all
the code */
- tb_lock();
tb = p->first_tb;
while (tb != NULL) {
n = (uintptr_t)tb & 3;
@@ -1472,12 +1487,12 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
cpu_resume_from_signal(cpu, NULL);
}
#endif
- tb_unlock();
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
- * Called via softmmu_template.h, with iothread mutex not held.
+ * Called via softmmu_template.h when code areas are written to with
+ * tb_lock held.
*/
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
@@ -1492,6 +1507,8 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
(intptr_t)cpu_single_env->segs[R_CS].base);
}
#endif
+ assert_memory_lock();
+
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
@@ -1536,6 +1553,8 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
uint32_t current_flags = 0;
#endif
+ assert_memory_lock();
+
addr &= TARGET_PAGE_MASK;
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
@@ -1641,7 +1660,9 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
+ tb_lock();
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+ tb_unlock();
rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
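
Taken together, the two tb_invalidate_phys_range() entry points above
encode the lock discipline for the series: SoftMMU callers must arrive
with tb_lock already held, while user-mode callers hold mmap_lock and
have tb_lock taken for them. Below is a minimal standalone sketch of
that split; it is not part of the patch, and the *_stub names are
hypothetical stand-ins for the real primitives.

/*
 * Sketch of the split entry points and lock ordering.
 * Not part of the patch; *_stub names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_lock_stub = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tb_lock_stub = PTHREAD_MUTEX_INITIALIZER;

/* Shared worker, mirroring tb_invalidate_phys_range_1():
 * the caller must already hold tb_lock. */
static void invalidate_range_locked(unsigned long start, unsigned long end)
{
    printf("invalidate [%#lx;%#lx[ with tb_lock held\n", start, end);
}

#ifdef CONFIG_SOFTMMU
/* System emulation: callers such as notdirty_mem_write() already hold
 * tb_lock, so the entry point just runs the worker
 * (assert_tb_lock() in the real code). */
static void invalidate_range(unsigned long start, unsigned long end)
{
    invalidate_range_locked(start, end);
}
#else
/* User-mode: the caller holds mmap_lock (assert_memory_lock() in the
 * real code); tb_lock is acquired and released here. */
static void invalidate_range(unsigned long start, unsigned long end)
{
    pthread_mutex_lock(&tb_lock_stub);
    invalidate_range_locked(start, end);
    pthread_mutex_unlock(&tb_lock_stub);
}
#endif

int main(void)
{
    /* The user-mode path nests the locks in mmap_lock -> tb_lock
     * order, the same ordering breakpoint_invalidate() now uses. */
    pthread_mutex_lock(&mmap_lock_stub);
    invalidate_range(0x1000, 0x1001);
    pthread_mutex_unlock(&mmap_lock_stub);
    return 0;
}
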
--
2.7.4