--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -170,6 +170,23 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

+static inline dma_addr_t dma_iova_alloc(struct device *dev, size_t size)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+
+ return ops->iova_alloc(dev, size);
+}
+
+static inline void dma_iova_free(struct device *dev, dma_addr_t addr,
+ size_t size)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+
+ ops->iova_free(dev, addr, size);
+}
+
static inline size_t dma_iova_get_free_total(struct device *dev)
{
struct dma_map_ops *ops = get_dma_ops(dev);
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1080,6 +1080,13 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

+static dma_addr_t arm_iommu_iova_alloc(struct device *dev, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ return __alloc_iova(mapping, size);
+}
+
static inline void __free_iova(struct dma_iommu_mapping *mapping,
dma_addr_t addr, size_t size)
{
@@ -1094,6 +1101,14 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
spin_unlock_irqrestore(&mapping->lock, flags);
}

+static void arm_iommu_iova_free(struct device *dev, dma_addr_t addr,
+ size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ __free_iova(mapping, addr, size);
+}
+
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
struct page **pages;
@@ -1773,6 +1788,8 @@ struct dma_map_ops iommu_ops = {
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
+ .iova_alloc = arm_iommu_iova_alloc,
+ .iova_free = arm_iommu_iova_free,
.iova_get_free_total = arm_iommu_iova_get_free_total,
.iova_get_free_max = arm_iommu_iova_get_free_max,
};
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -53,6 +53,8 @@ struct dma_map_ops {
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
#endif
+ dma_addr_t (*iova_alloc)(struct device *dev, size_t size);
+ void (*iova_free)(struct device *dev, dma_addr_t addr, size_t size);
size_t (*iova_get_free_total)(struct device *dev);
size_t (*iova_get_free_max)(struct device *dev);
There are some cases where IOVA allocation and mapping have to be done
separately, especially for performance optimization reasons. This patch
allows client modules to {alloc,free} IOVA space without backing that
area with actual pages.

Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
---
 arch/arm/include/asm/dma-mapping.h |   17 +++++++++++++++++
 arch/arm/mm/dma-mapping.c          |   17 +++++++++++++++++
 include/linux/dma-mapping.h        |    2 ++
 3 files changed, 36 insertions(+), 0 deletions(-)
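
For reference, a minimal usage sketch of the new hooks follows; it is not
part of the patch. The function name example_reserve_iova() is made up for
illustration, the map-at-IOVA step is left as a comment since that helper
is expected from a separate patch, and the DMA_ERROR_CODE failure check
assumes the ARM convention used by __alloc_iova() above.

	/* Usage sketch only, not part of this patch. */
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/dma-mapping.h>

	static int example_reserve_iova(struct device *dev, size_t size)
	{
		dma_addr_t iova;

		/* Reserve a contiguous IOVA range with no pages behind it yet. */
		iova = dma_iova_alloc(dev, size);
		if (iova == DMA_ERROR_CODE)	/* ARM convention, see __alloc_iova() */
			return -ENOMEM;

		/*
		 * ... map pages into [iova, iova + size) on demand here,
		 * using a map-at-IOVA helper from a separate patch ...
		 */

		/* Release the IOVA range once all mappings are gone. */
		dma_iova_free(dev, iova, size);
		return 0;
	}

Keeping allocation separate from mapping lets a driver pay the IOVA
allocator cost once up front and then populate the range page by page in
a hot path, which is the performance case the description refers to.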