diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -405,7 +405,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
- unsigned long shift, iova_len, iova = 0;
+ unsigned long shift, iova_len, iova = IOVA_BAD_ADDR;
if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
cookie->msi_iova += size;
@@ -433,11 +433,13 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false);
- if (!iova)
+ if (iova == IOVA_BAD_ADDR)
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
true);
- return (dma_addr_t)iova << shift;
+ if (iova != IOVA_BAD_ADDR)
+ return (dma_addr_t)iova << shift;
+ return DMA_MAPPING_ERROR;
}
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
@@ -493,8 +495,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
- if (!iova)
- return DMA_MAPPING_ERROR;
+ if (iova == DMA_MAPPING_ERROR)
+ return iova;
if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
@@ -617,7 +619,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
size = iova_align(iovad, size);
iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
- if (!iova)
+ if (iova == DMA_MAPPING_ERROR)
goto out_free_pages;
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
@@ -887,7 +889,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
}
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
- if (!iova)
+ if (iova == DMA_MAPPING_ERROR)
goto out_restore_sg;
/*
@@ -1181,7 +1183,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return NULL;
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
- if (!iova)
+ if (iova == DMA_MAPPING_ERROR)
goto out_free_page;
if (iommu_map(domain, iova, msi_addr, size, prot))
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3416,15 +3416,15 @@ static unsigned long intel_alloc_iova(struct device *dev,
*/
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(DMA_BIT_MASK(32)), false);
- if (iova_pfn)
+ if (iova_pfn != IOVA_BAD_ADDR)
return iova_pfn;
}
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(dma_mask), true);
- if (unlikely(!iova_pfn)) {
+ if (unlikely(iova_pfn == IOVA_BAD_ADDR)) {
dev_err_once(dev, "Allocating %ld-page iova failed\n",
nrpages);
- return 0;
+ return IOVA_BAD_ADDR;
}
return iova_pfn;
@@ -3454,7 +3454,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
size = aligned_nrpages(paddr, size);
iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
- if (!iova_pfn)
+ if (iova_pfn == IOVA_BAD_ADDR)
goto error;
/*
@@ -3663,7 +3663,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
*dev->dma_mask);
- if (!iova_pfn) {
+ if (iova_pfn == IOVA_BAD_ADDR) {
sglist->dma_length = 0;
return 0;
}
@@ -3760,7 +3760,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
nrpages = aligned_nrpages(0, size);
iova_pfn = intel_alloc_iova(dev, domain,
dma_to_mm_pfn(nrpages), dma_mask);
- if (!iova_pfn)
+ if (iova_pfn == IOVA_BAD_ADDR)
return DMA_MAPPING_ERROR;
/*
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(free_iova);
* This function tries to satisfy an iova allocation from the rcache,
* and falls back to regular allocation on failure. If regular allocation
* fails too and the flush_rcache flag is set then the rcache will be flushed.
+ * Returns the pfn that the allocated iova starts at, or IOVA_BAD_ADDR in
+ * the case of a failure.
*/
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
@@ -416,7 +418,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
struct iova *new_iova;
iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
- if (iova_pfn)
+ if (iova_pfn != IOVA_BAD_ADDR)
return iova_pfn;
retry:
@@ -425,7 +427,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned int cpu;
if (!flush_rcache)
- return 0;
+ return IOVA_BAD_ADDR;
/* Try replenishing IOVAs by flushing rcache. */
flush_rcache = false;
@@ -956,7 +958,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
unsigned long limit_pfn)
{
struct iova_cpu_rcache *cpu_rcache;
- unsigned long iova_pfn = 0;
+ unsigned long iova_pfn = IOVA_BAD_ADDR;
bool has_pfn = false;
unsigned long flags;
@@ -998,7 +1000,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned int log_size = order_base_2(size);
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
- return 0;
+ return IOVA_BAD_ADDR;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
diff --git a/include/linux/iova.h b/include/linux/iova.h
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -22,6 +22,8 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
+#define IOVA_BAD_ADDR (~0UL)
+
struct iova_magazine;
struct iova_cpu_rcache;
Zero is a valid DMA and IOVA address on many architectures, so adjust the
IOVA management code to properly handle it. A new value IOVA_BAD_ADDR
(~0UL) is introduced as a generic value for the error case. Adjust all
callers of the alloc_iova_fast() function for the new return value.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 drivers/iommu/dma-iommu.c   | 18 ++++++++++--------
 drivers/iommu/intel/iommu.c | 12 ++++++------
 drivers/iommu/iova.c        | 10 ++++++----
 include/linux/iova.h        |  2 ++
 4 files changed, 24 insertions(+), 18 deletions(-)
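
For illustration only, here is a minimal user-space sketch of the calling
convention this patch establishes. fake_alloc_iova_fast() and map_one() are
hypothetical stand-ins for alloc_iova_fast() and its callers, and the
DMA_MAPPING_ERROR definition below merely mirrors the kernel's
(~(dma_addr_t)0) convention; nothing in this sketch is part of the patch
itself.

#include <stdio.h>

#define IOVA_BAD_ADDR     (~0UL)   /* definition added by this patch */
#define DMA_MAPPING_ERROR (~0UL)   /* stand-in for the kernel's (~(dma_addr_t)0) */

/* Hypothetical stand-in for alloc_iova_fast(): returns the first pfn of
 * the allocation, or IOVA_BAD_ADDR when no IOVA space is available. */
static unsigned long fake_alloc_iova_fast(int fail)
{
	return fail ? IOVA_BAD_ADDR : 0;   /* pfn 0 is a valid allocation */
}

/* Hypothetical caller following the new convention. */
static unsigned long map_one(int fail)
{
	unsigned long iova_pfn = fake_alloc_iova_fast(fail);

	/* The old check "if (!iova_pfn)" would wrongly treat pfn 0 as a
	 * failure; the new contract requires an explicit comparison. */
	if (iova_pfn == IOVA_BAD_ADDR)
		return DMA_MAPPING_ERROR;

	return iova_pfn;
}

int main(void)
{
	printf("success case maps pfn %lu\n", map_one(0));
	printf("failure case returns %#lx\n", map_one(1));
	return 0;
}

The point of the explicit comparison shows up in the success case: a
returned pfn of 0 now flows through as a valid mapping instead of being
misreported as an allocation failure, which is exactly the situation the
old "if (!iova)" checks got wrong.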