@@ -166,7 +166,6 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
 struct io_pgtable {
 	enum io_pgtable_fmt	fmt;
 	void			*cookie;
-	bool			tlb_sync_pending;
 	struct io_pgtable_cfg	cfg;
 	struct io_pgtable_ops	ops;
 };
@@ -176,22 +175,17 @@ struct io_pgtable {
 static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
 {
 	iop->cfg.tlb->tlb_flush_all(iop->cookie);
-	iop->tlb_sync_pending = true;
 }
 
 static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
 		unsigned long iova, size_t size, size_t granule, bool leaf)
 {
 	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
-	iop->tlb_sync_pending = true;
 }
 
 static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
 {
-	if (iop->tlb_sync_pending) {
-		iop->cfg.tlb->tlb_sync(iop->cookie);
-		iop->tlb_sync_pending = false;
-	}
+	iop->cfg.tlb->tlb_sync(iop->cookie);
 }
 
 /**
This member is now unused, because the previous patches ensured that every
unmap is followed by a tlb sync operation. Note also that ->tlb_flush_all
performs a tlb_sync by itself.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
 drivers/iommu/io-pgtable.h | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

-- 
2.5.0
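
[Editor's illustration, not part of the patch] The calling convention the
message relies on can be sketched with a hypothetical set of iommu_gather_ops
callbacks. Everything prefixed my_ below, including the register offsets, is
invented for illustration; real drivers such as arm-smmu differ in detail. The
point is only that ->tlb_flush_all waits for completion itself, while
->tlb_add_flush leaves the wait to the io_pgtable_tlb_sync() that the core now
always issues after an unmap, so the helpers can call ->tlb_sync
unconditionally:

#include <linux/io.h>

#include "io-pgtable.h"	/* struct iommu_gather_ops */

/* Hypothetical per-device state; a real driver keeps far more here. */
struct my_iommu {
	void __iomem		*base;
};

/* Invented register offsets, purely illustrative. */
#define MY_TLBIALL	0x00	/* write: invalidate all entries */
#define MY_TLBIVA	0x04	/* write: invalidate one IOVA */
#define MY_TLBSTATUS	0x08	/* read: non-zero while invalidation runs */

static void my_tlb_sync(void *cookie)
{
	struct my_iommu *iommu = cookie;

	/* Wait for any outstanding invalidation to complete. */
	while (readl_relaxed(iommu->base + MY_TLBSTATUS))
		cpu_relax();
}

static void my_tlb_flush_all(void *cookie)
{
	struct my_iommu *iommu = cookie;

	writel_relaxed(0, iommu->base + MY_TLBIALL);
	/* Flush-all is already synchronous: it waits here itself. */
	my_tlb_sync(cookie);
}

static void my_tlb_add_flush(unsigned long iova, size_t size,
			     size_t granule, bool leaf, void *cookie)
{
	struct my_iommu *iommu = cookie;

	while (size) {
		writel_relaxed(iova, iommu->base + MY_TLBIVA);
		iova += granule;
		size -= granule;
	}
	/*
	 * No wait here: the io-pgtable core always calls
	 * io_pgtable_tlb_sync() after the unmap, so a per-pgtable
	 * "sync pending" flag is unnecessary.
	 */
}

static const struct iommu_gather_ops my_gather_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_add_flush	= my_tlb_add_flush,
	.tlb_sync	= my_tlb_sync,
};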