Message ID | 20241114011310.3615-20-philmd@linaro.org
---|---
State | New
Series | exec: Build up 'cputlb.h' and 'ram_addr.h' headers
On 11/13/24 17:13, Philippe Mathieu-Daudé wrote:
> Move CPU TLB related methods to "exec/cputlb.h".
>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>  include/exec/cpu-all.h | 23 -----------------------
>  accel/tcg/cputlb.c     | 23 +++++++++++++++++++++++
>  2 files changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 1c8e0446d0..ccaa650b19 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -323,29 +323,6 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
>  /* The two sets of flags must not overlap. */
>  QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
>
> -/**
> - * tlb_hit_page: return true if page aligned @addr is a hit against the
> - * TLB entry @tlb_addr
> - *
> - * @addr: virtual address to test (must be page aligned)
> - * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
> - */
> -static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
> -{
> -    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
> -}
> -
> -/**
> - * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
> - *
> - * @addr: virtual address to test (need not be page aligned)
> - * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
> - */
> -static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
> -{
> -    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
> -}
> -
>  #endif /* !CONFIG_USER_ONLY */
>
>  /* Validate correct placement of CPUArchState. */
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 080cbcb34d..dba4831cd1 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1220,6 +1220,29 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
>                              prot, mmu_idx, size);
>  }
>
> +/**
> + * tlb_hit_page: return true if page aligned @addr is a hit against the
> + * TLB entry @tlb_addr
> + *
> + * @addr: virtual address to test (must be page aligned)
> + * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
> + */
> +static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
> +{
> +    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
> +}
> +
> +/**
> + * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
> + *
> + * @addr: virtual address to test (need not be page aligned)
> + * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
> + */
> +static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
> +{
> +    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
> +}
> +
>  /*
>   * Note: tlb_fill_align() can trigger a resize of the TLB.
>   * This means that all of the caller's prior references to the TLB table

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 1c8e0446d0..ccaa650b19 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -323,29 +323,6 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
 /* The two sets of flags must not overlap. */
 QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
 
-/**
- * tlb_hit_page: return true if page aligned @addr is a hit against the
- * TLB entry @tlb_addr
- *
- * @addr: virtual address to test (must be page aligned)
- * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
- */
-static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
-{
-    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
-}
-
-/**
- * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
- *
- * @addr: virtual address to test (need not be page aligned)
- * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
- */
-static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
-{
-    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
-}
-
 #endif /* !CONFIG_USER_ONLY */
 
 /* Validate correct placement of CPUArchState. */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 080cbcb34d..dba4831cd1 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1220,6 +1220,29 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
                             prot, mmu_idx, size);
 }
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 /*
  * Note: tlb_fill_align() can trigger a resize of the TLB.
  * This means that all of the caller's prior references to the TLB table
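As a quick illustration of what the moved helpers check, here is a small, self-contained sketch (not part of the patch): the EX_* mask values, the ex_* names, and the 4 KiB page size are invented for the example; only the masking arithmetic mirrors tlb_hit_page() and tlb_hit() in the hunk above.

/*
 * Standalone sketch of the hit check.  The EX_* constants below are
 * illustrative values, not QEMU's real TARGET_PAGE_MASK/TLB_INVALID_MASK,
 * which are target-dependent.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;

#define EX_TARGET_PAGE_BITS 12                               /* assume 4 KiB pages */
#define EX_TARGET_PAGE_MASK (~(((uint64_t)1 << EX_TARGET_PAGE_BITS) - 1))
#define EX_TLB_INVALID_MASK ((uint64_t)1 << 0)               /* assumed flag bit */

/* Same shape as tlb_hit_page(): compare a page-aligned address against the
 * entry, keeping the invalid bit so an invalid entry can never match. */
static bool ex_tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
    return addr == (tlb_addr & (EX_TARGET_PAGE_MASK | EX_TLB_INVALID_MASK));
}

/* Same shape as tlb_hit(): align the address first, then compare. */
static bool ex_tlb_hit(uint64_t tlb_addr, vaddr addr)
{
    return ex_tlb_hit_page(tlb_addr, addr & EX_TARGET_PAGE_MASK);
}

int main(void)
{
    uint64_t entry = 0x40000000;   /* entry covering page 0x40000000 */

    printf("%d\n", ex_tlb_hit(entry, 0x40000123));                        /* 1: same page   */
    printf("%d\n", ex_tlb_hit(entry, 0x40001123));                        /* 0: other page  */
    printf("%d\n", ex_tlb_hit(entry | EX_TLB_INVALID_MASK, 0x40000123));  /* 0: invalid set */
    return 0;
}

Masking the entry with the page mask plus the invalid bit is what lets a single comparison reject both a different page and an entry whose invalid flag is set.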
Move CPU TLB related methods to "exec/cputlb.h".

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 include/exec/cpu-all.h | 23 -----------------------
 accel/tcg/cputlb.c     | 23 +++++++++++++++++++++++
 2 files changed, 23 insertions(+), 23 deletions(-)