
[064/147] accel/tcg: Pass CPUTLBEntryFull to tlb_reset_dirty_range_locked

Message ID 20250422192819.302784-65-richard.henderson@linaro.org
State New
Series single-binary patch queue

Commit Message

Richard Henderson April 22, 2025, 7:26 p.m. UTC
While we're renaming things, don't modify addr; save it for
reuse in the qatomic_set.  Compute the host address into a
new local variable.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

Comments

Pierrick Bouvier April 22, 2025, 8:51 p.m. UTC | #1
On 4/22/25 12:26, Richard Henderson wrote:
> While we're renaming things, don't modify addr; save it for
> reuse in the qatomic_set.  Compute the host address into a
> new local variable.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/cputlb.c | 20 ++++++++++----------
>   1 file changed, 10 insertions(+), 10 deletions(-)
> 

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Philippe Mathieu-Daudé April 23, 2025, 10:03 a.m. UTC | #2
Hi Richard,

On 22/4/25 21:26, Richard Henderson wrote:
> While we're renaming things, don't modify addr; save it for
> reuse in the qatomic_set.  Compute the host address into a
> new local variable.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/cputlb.c | 20 ++++++++++----------
>   1 file changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 10090067f7..5df98d93d0 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -882,18 +882,16 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
>    *
>    * Called with tlb_c.lock held.
>    */
> -static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
> +static void tlb_reset_dirty_range_locked(CPUTLBEntryFull *full, CPUTLBEntry *ent,
>                                            uintptr_t start, uintptr_t length)
>   {
> -    uintptr_t addr = tlb_entry->addr_write;
> +    const uintptr_t addr = ent->addr_write;

Can we introduce 'int flags' here, and add the CPUTLBEntryFull
argument in the following patch?

>   
>       if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
>                    TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
> -        addr &= TARGET_PAGE_MASK;
> -        addr += tlb_entry->addend;
> -        if ((addr - start) < length) {
> -            qatomic_set(&tlb_entry->addr_write,
> -                        tlb_entry->addr_write | TLB_NOTDIRTY);
> +        uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend;
> +        if ((host - start) < length) {
> +            qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY);
>           }
>       }
>   }
> @@ -918,16 +916,18 @@ void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
>   
>       qemu_spin_lock(&cpu->neg.tlb.c.lock);
>       for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
> +        CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
> +        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
> +        unsigned int n = tlb_n_entries(fast);
>           unsigned int i;
> -        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
>   
>           for (i = 0; i < n; i++) {
> -            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
> +            tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i],
>                                            start, length);
>           }
>   
>           for (i = 0; i < CPU_VTLB_SIZE; i++) {
> -            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
> +            tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i],
>                                            start, length);
>           }
>       }
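
For context, in this patch the new 'full' parameter is added to the signature but
not yet referenced in the body, which is presumably what prompts the question
above.  As a purely illustrative sketch (not code from this series), one reading
of the suggestion is to keep the single CPUTLBEntry parameter here, introduce an
'int flags' local for the bits being tested, and only add the CPUTLBEntryFull
argument in the following patch:

    static void tlb_reset_dirty_range_locked(CPUTLBEntry *ent,
                                             uintptr_t start, uintptr_t length)
    {
        const uintptr_t addr = ent->addr_write;
        /* Hypothetical intermediate step: collect the flag bits under test. */
        int flags = addr & (TLB_INVALID_MASK | TLB_MMIO |
                            TLB_DISCARD_WRITE | TLB_NOTDIRTY);

        if (flags == 0) {
            uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend;
            if ((host - start) < length) {
                qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY);
            }
        }
    }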

Patch

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 10090067f7..5df98d93d0 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -882,18 +882,16 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
  *
  * Called with tlb_c.lock held.
  */
-static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
+static void tlb_reset_dirty_range_locked(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                          uintptr_t start, uintptr_t length)
 {
-    uintptr_t addr = tlb_entry->addr_write;
+    const uintptr_t addr = ent->addr_write;
 
     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
-        addr &= TARGET_PAGE_MASK;
-        addr += tlb_entry->addend;
-        if ((addr - start) < length) {
-            qatomic_set(&tlb_entry->addr_write,
-                        tlb_entry->addr_write | TLB_NOTDIRTY);
+        uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend;
+        if ((host - start) < length) {
+            qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY);
         }
     }
 }
@@ -918,16 +916,18 @@ void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
 
     qemu_spin_lock(&cpu->neg.tlb.c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+        unsigned int n = tlb_n_entries(fast);
         unsigned int i;
-        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
 
         for (i = 0; i < n; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
+            tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i],
                                          start, length);
         }
 
         for (i = 0; i < CPU_VTLB_SIZE; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
+            tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i],
                                          start, length);
         }
     }