@@ -185,7 +185,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
cc->synchronize_from_tb(cpu, last_tb);
} else {
assert(cc->set_pc);
- cc->set_pc(cpu, last_tb->pc);
+ cc->set_pc(cpu, atomic_read(&last_tb->pc));
}
}
if (tb_exit == TB_EXIT_REQUESTED) {
@@ -235,13 +235,13 @@ static bool tb_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
- if (tb->pc == desc->pc &&
- tb->page_addr[0] == desc->phys_page1 &&
- tb->cs_base == desc->cs_base &&
- tb->flags == desc->flags &&
+ if (atomic_read(&tb->pc) == desc->pc &&
+ atomic_read(&tb->page_addr[0]) == desc->phys_page1 &&
+ atomic_read(&tb->cs_base) == desc->cs_base &&
+ atomic_read(&tb->flags) == desc->flags &&
!atomic_read(&tb->invalid)) {
/* check next page if needed */
- if (tb->page_addr[1] == -1) {
+ if (atomic_read(&tb->page_addr[1]) == -1) {
return true;
} else {
tb_page_addr_t phys_page2;
@@ -249,7 +249,7 @@ static bool tb_cmp(const void *p, const void *d)
virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
phys_page2 = get_page_addr_code(desc->env, virt_page2);
- if (tb->page_addr[1] == phys_page2) {
+ if (atomic_read(&tb->page_addr[1]) == phys_page2) {
return true;
}
}
@@ -507,7 +507,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
return;
}
- trace_exec_tb(tb, tb->pc);
+ trace_exec_tb(tb, atomic_read(&tb->pc));
ret = cpu_tb_exec(cpu, tb);
*last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
*tb_exit = ret & TB_EXIT_MASK;
@@ -200,6 +200,17 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
#define USE_DIRECT_JUMP
#endif
+/*
+ * TranslationBlock
+ *
+ * This structure represents a single translated block of code. The
+ * actual code is referenced via tc_ptr. This structure is accessed
+ * across multiple QEMU threads so for C11 compliance all fields
 * should be accessed with at least relaxed atomic primitives. Fields
+ * that are updated after initial generation, mainly those involved
+ * with patching jumps and chaining TBs, need stronger guarantees to
+ * prevent corruption.
+ */
struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
@@ -773,9 +773,10 @@ static TranslationBlock *tb_alloc(target_ulong pc)
return NULL;
}
tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
- tb->pc = pc;
- tb->cflags = 0;
- tb->invalid = false;
+
+ atomic_set(&tb->pc, pc);
+ atomic_set(&tb->cflags, 0);
+ atomic_set(&tb->invalid, false);
return tb;
}
@@ -1095,7 +1096,7 @@ static inline void tb_alloc_page(TranslationBlock *tb,
bool page_already_protected;
#endif
- tb->page_addr[n] = page_addr;
+ atomic_set(&tb->page_addr[n], page_addr);
p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
@@ -1156,7 +1157,7 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
if (phys_page2 != -1) {
tb_alloc_page(tb, 1, phys_page2);
} else {
- tb->page_addr[1] = -1;
+ atomic_set(&tb->page_addr[1], -1);
}
/* add in the hash table */
The TranslationBlock structure is one of those heavily accessed across threads. To meet defined C11 behaviour across threads we update the accesses to use the relaxed atomic helpers. Care is still taken with locking and barriers for when flags are updated and when newly generated blocks are made visible to the rest of the system. Signed-off-by: Alex Bennée <alex.bennee@linaro.org> --- cpu-exec.c | 16 ++++++++-------- include/exec/exec-all.h | 11 +++++++++++ translate-all.c | 11 ++++++----- 3 files changed, 25 insertions(+), 13 deletions(-) -- 2.9.3