Add a function to reconfigure the IOMMU group for a device, if
necessary.

IOVAs are cached in power-of-2 granules, so there is no point in
allocating a new IOMMU domain if the current range is suitable.

Signed-off-by: John Garry <john.garry@huawei.com>
---
 drivers/iommu/dma-iommu.c | 25 +++++++++++++++++++++++++
 include/linux/dma-iommu.h |  4 ++++
 2 files changed, 29 insertions(+)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -315,6 +315,31 @@ static bool dev_is_untrusted(struct device *dev)
 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
+void iommu_reconfig_dev_group_dma(struct device *dev)
+{
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_dma_cookie *cookie = domain ? domain->iova_cookie : NULL;
+	unsigned long shift, iova_len;
+	struct iova_domain *iovad;
+	size_t max_opt_dma_size;
+
+	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+		return;
+
+	max_opt_dma_size = iommu_group_get_max_opt_dma_size(dev->iommu_group);
+	if (!max_opt_dma_size)
+		return;
+
+	iovad = &cookie->iovad;
+	shift = iova_shift(iovad);
+	iova_len = max_opt_dma_size >> shift;
+
+	if (iova_domain_len_is_cached(iovad, iova_len))
+		return;
+
+	iommu_realloc_dev_group(dev);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -20,6 +20,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
 void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
+void iommu_reconfig_dev_group_dma(struct device *dev);
 
 /* The DMA API isn't _quite_ the whole story, though... */
 /*
@@ -53,6 +54,9 @@ static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
 		u64 size)
 {
 }
+static inline void iommu_reconfig_dev_group_dma(struct device *dev)
+{
+}
 
 static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
-- 
2.26.2
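
As a reference for the length check above: iova_domain_len_is_cached()
and iommu_realloc_dev_group() are introduced earlier in this series, so
the sketch below only illustrates the power-of-2 granule reasoning the
commit message relies on. It is a minimal standalone program, not kernel
code; the names IOVA_SHIFT, cached_max_order and len_is_cached() are
illustrative assumptions, not the actual helpers.

#include <stdbool.h>
#include <stdio.h>

#define IOVA_SHIFT	12	/* assume a 4K IOVA granule */

/* Order of the largest IOVA length held in the rcache (assumption). */
static unsigned int cached_max_order = 5;	/* 32 pages = 128K */

/* Round a length up to the order of the next power of two. */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

/*
 * IOVAs are cached in power-of-2 granules, so a length whose rounded-up
 * order is already covered by the rcache needs no domain reallocation.
 */
static bool len_is_cached(unsigned long iova_len)
{
	return order_base_2(iova_len) <= cached_max_order;
}

int main(void)
{
	/* e.g. a max_opt_dma_size of 256K: 64 pages, order 6 */
	unsigned long max_opt_dma_size = 256 * 1024;
	unsigned long iova_len = max_opt_dma_size >> IOVA_SHIFT;

	if (len_is_cached(iova_len))
		printf("order %u already cached, skip realloc\n",
		       order_base_2(iova_len));
	else
		printf("order %u exceeds cached max %u, realloc domain\n",
		       order_base_2(iova_len), cached_max_order);
	return 0;
}

With these assumed values the example takes the realloc path, which
mirrors the case where iommu_reconfig_dev_group_dma() falls through to
iommu_realloc_dev_group().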