[RFC,1/2] softmmu_template: add smmu_helper, convert VICTIM_TLB_HIT

Message ID 1452268394-31252-2-git-send-email-alex.bennee@linaro.org
State: New

Commit Message

Alex Bennée Jan. 8, 2016, 3:53 p.m. UTC
This lays the groundwork for a refactoring of the softmmu template
code. The patch introduces inline "smmu_helper" functions in which
common (or almost common) code can be placed. Arguments that the
compiler sees are constant then let it eliminate the dead legs of
code in the inline fragments.
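
To illustrate the mechanism (a minimal sketch with made-up names, not
code from this patch): a constant bool flag passed to an inline
function lets the compiler fold the test and discard the unused leg:

    #include <stdbool.h>

    /* pick() stands in for an smmu_helper-style fragment; the names
     * here are invented for illustration only. */
    static inline int pick(const bool is_read, int rval, int wval)
    {
        if (is_read) {       /* constant at each call site below */
            return rval;
        }
        return wval;
    }

    /* With optimisation on, each wrapper folds to a plain return. */
    int read_path(int r, int w)  { return pick(true, r, w); }
    int write_path(int r, int w) { return pick(false, r, w); }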

One minor wrinkle is that each inline fragment needs a unique name,
because the template is included multiple times. The smmu_helper
macro does the appropriate glue magic for this.
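
For example, assuming QEMU's usual glue()/xglue() token-pasting
macros and illustrative values for SUFFIX and MMUSUFFIX (the real
ones come from the file that includes softmmu_template.h):

    #define xglue(x, y) x##y
    #define glue(x, y) xglue(x, y)

    #define SUFFIX q          /* assumed for illustration */
    #define MMUSUFFIX _mmu    /* assumed for illustration */
    #define smmu_helper(name) \
        glue(glue(glue(_smmu_helper_, SUFFIX), MMUSUFFIX), name)

    /* smmu_helper(victim_tlb_hit) pastes together to
     * _smmu_helper_q_mmuvictim_tlb_hit, so each inclusion of the
     * template defines its own distinct symbol. */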

I've tested the result and observed no change in functionality.
Comparing the objdump of cputlb.o shows minimal changes in
probe_write; everything else is identical.

TODO: explain probe_write changes

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

---
 softmmu_template.h | 75 +++++++++++++++++++++++++++++++++---------------------
 1 file changed, 46 insertions(+), 29 deletions(-)

-- 
2.6.4

Patch

diff --git a/softmmu_template.h b/softmmu_template.h
index 6803890..0074bd7 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -116,30 +116,47 @@ 
 # define helper_te_st_name  helper_le_st_name
 #endif
 
-/* macro to check the victim tlb */
-#define VICTIM_TLB_HIT(ty)                                                    \
-({                                                                            \
-    /* we are about to do a page table walk. our last hope is the             \
-     * victim tlb. try to refill from the victim tlb before walking the       \
-     * page table. */                                                         \
-    int vidx;                                                                 \
-    CPUIOTLBEntry tmpiotlb;                                                   \
-    CPUTLBEntry tmptlb;                                                       \
-    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                         \
-        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
-            /* found entry in victim tlb, swap tlb and iotlb */               \
-            tmptlb = env->tlb_table[mmu_idx][index];                          \
-            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
-            env->tlb_v_table[mmu_idx][vidx] = tmptlb;                         \
-            tmpiotlb = env->iotlb[mmu_idx][index];                            \
-            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];         \
-            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;                           \
-            break;                                                            \
-        }                                                                     \
-    }                                                                         \
-    /* return true when there is a vtlb hit, i.e. vidx >=0 */                 \
-    vidx >= 0;                                                                \
-})
+/* Inline helper functions for SoftMMU
+ *
+ * These functions help reduce code duplication in the various main
+ * helper functions. Constant arguments (like endian state) will allow
+ * the compiler to skip code which is never called in a given inline.
+ */
+
+#define smmu_helper(name) glue(glue(glue(_smmu_helper_, SUFFIX), MMUSUFFIX),name)
+
+static inline int smmu_helper(victim_tlb_hit)(const bool is_read, CPUArchState *env,
+                                              unsigned mmu_idx, int index,
+                                              target_ulong addr)
+{
+    /* we are about to do a page table walk. our last hope is the
+     * victim tlb. try to refill from the victim tlb before walking the
+     * page table. */
+    int vidx;
+    CPUIOTLBEntry tmpiotlb;
+    CPUTLBEntry tmptlb;
+    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {
+        bool match;
+        if (is_read) {
+            match = env->tlb_v_table[mmu_idx][vidx].ADDR_READ == (addr & TARGET_PAGE_MASK);
+        } else {
+            match = env->tlb_v_table[mmu_idx][vidx].addr_write == (addr & TARGET_PAGE_MASK);
+        }
+
+        if (match) {
+            /* found entry in victim tlb, swap tlb and iotlb */
+            tmptlb = env->tlb_table[mmu_idx][index];
+            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx];
+            env->tlb_v_table[mmu_idx][vidx] = tmptlb;
+            tmpiotlb = env->iotlb[mmu_idx][index];
+            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];
+            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;
+            break;
+        }
+    }
+    /* return true when there is a vtlb hit, i.e. vidx >=0 */
+    return vidx >= 0;
+}
 
 #ifndef SOFTMMU_CODE_ACCESS
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
@@ -185,7 +202,7 @@  WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!smmu_helper(victim_tlb_hit)(true, env, mmu_idx, index, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -269,7 +286,7 @@  WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!smmu_helper(victim_tlb_hit)(true, env, mmu_idx, index, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -389,7 +406,7 @@  void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!smmu_helper(victim_tlb_hit)(false, env, mmu_idx, index, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -469,7 +486,7 @@  void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!smmu_helper(victim_tlb_hit)(false, env, mmu_idx, index, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -542,7 +559,7 @@  void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         /* TLB entry is for a different page */
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!smmu_helper(victim_tlb_hit)(false, env, mmu_idx, index, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
     }