From patchwork Wed Jul 17 18:33:48 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [1/5] drm/panfrost: Restructure the GEM object creation
X-Patchwork-Submitter: Rob Herring
X-Patchwork-Id: 169153
Message-Id: <20190717183352.22519-1-robh@kernel.org>
To: dri-devel@lists.freedesktop.org
Cc: Boris Brezillon, Robin Murphy, Alyssa Rosenzweig, Tomeu Vizoso,
 Steven Price
Date: Wed, 17 Jul 2019 12:33:48 -0600
From: Rob Herring <robh@kernel.org>
List-Id: Direct Rendering Infrastructure - Development

Setting the GPU VA when creating the GEM object doesn't allow for any
conditional adjustments. In preparation for supporting adjustments to the
mapping, restructure the GEM object creation so that the GEM object is
mapped after the base shmem object has been created.
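
The resulting creation path is roughly the following (a simplified sketch
of the diff below with error handling elided; not the verbatim driver
code):

	struct panfrost_gem_object *
	panfrost_gem_create_with_handle(struct drm_file *file_priv,
					struct drm_device *dev, size_t size,
					u32 flags, uint32_t *handle)
	{
		struct drm_gem_shmem_object *shmem;

		/* Step 1: create the base shmem object and a handle for it */
		shmem = drm_gem_shmem_create_with_handle(file_priv, dev, size, handle);

		/* Step 2: only then allocate a GPU VA node and map the BO;
		 * keeping this a separate step is what lets later patches
		 * adjust the mapping per BO.
		 */
		panfrost_gem_map(dev->dev_private, to_panfrost_bo(&shmem->base));

		return to_panfrost_bo(&shmem->base);
	}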

Cc: Tomeu Vizoso
Cc: Boris Brezillon
Cc: Robin Murphy
Cc: Steven Price
Cc: Alyssa Rosenzweig
Signed-off-by: Rob Herring
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 21 +++------
 drivers/gpu/drm/panfrost/panfrost_gem.c | 58 ++++++++++++++++++++-----
 drivers/gpu/drm/panfrost/panfrost_gem.h |  5 +++
 3 files changed, 59 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index cb43ff4ebf4a..d354b92964d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -46,29 +46,20 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
-	int ret;
-	struct drm_gem_shmem_object *shmem;
+	struct panfrost_gem_object *bo;
 	struct drm_panfrost_create_bo *args = data;
 
 	if (!args->size || args->flags || args->pad)
 		return -EINVAL;
 
-	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
-						 &args->handle);
-	if (IS_ERR(shmem))
-		return PTR_ERR(shmem);
-
-	ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
-	if (ret)
-		goto err_free;
+	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
+					     &args->handle);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
 
-	args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;
+	args->offset = bo->node.start << PAGE_SHIFT;
 
 	return 0;
-
-err_free:
-	drm_gem_handle_delete(file, args->handle);
-	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 543ab1b81bd5..df70dcf3cb2f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -23,7 +23,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	panfrost_mmu_unmap(bo);
 
 	spin_lock(&pfdev->mm_lock);
-	drm_mm_remove_node(&bo->node);
+	if (drm_mm_node_allocated(&bo->node))
+		drm_mm_remove_node(&bo->node);
 	spin_unlock(&pfdev->mm_lock);
 
 	drm_gem_shmem_free_object(obj);
@@ -50,10 +51,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
  */
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
 {
-	int ret;
-	struct panfrost_device *pfdev = dev->dev_private;
 	struct panfrost_gem_object *obj;
-	u64 align;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
@@ -61,20 +59,52 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
 
 	obj->base.base.funcs = &panfrost_gem_funcs;
 
-	size = roundup(size, PAGE_SIZE);
-	align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+	return &obj->base.base;
+}
+
+static int panfrost_gem_map(struct panfrost_device *pfdev, struct panfrost_gem_object *bo)
+{
+	int ret;
+	size_t size = bo->base.base.size;
+	u64 align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
 	spin_lock(&pfdev->mm_lock);
-	ret = drm_mm_insert_node_generic(&pfdev->mm, &obj->node,
+	ret = drm_mm_insert_node_generic(&pfdev->mm, &bo->node,
 					 size >> PAGE_SHIFT, align, 0, 0);
 	spin_unlock(&pfdev->mm_lock);
+	if (ret)
+		return ret;
+
+	return panfrost_mmu_map(bo);
+}
+
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+				struct drm_device *dev, size_t size,
+				u32 flags,
+				uint32_t *handle)
+{
+	int ret;
+	struct panfrost_device *pfdev = dev->dev_private;
+	struct drm_gem_shmem_object *shmem;
+	struct panfrost_gem_object *bo;
+
+	size = roundup(size, PAGE_SIZE);
+
+	shmem = drm_gem_shmem_create_with_handle(file_priv, dev, size, handle);
+	if (IS_ERR(shmem))
+		return ERR_CAST(shmem);
+
+	bo = to_panfrost_bo(&shmem->base);
+
+	ret = panfrost_gem_map(pfdev, bo);
 	if (ret)
 		goto free_obj;
 
-	return &obj->base.base;
+	return bo;
 
 free_obj:
-	kfree(obj);
+	drm_gem_handle_delete(file_priv, *handle);
 	return ERR_PTR(ret);
 }
 
@@ -83,8 +113,10 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 				   struct dma_buf_attachment *attach,
 				   struct sg_table *sgt)
 {
+	int ret;
 	struct drm_gem_object *obj;
 	struct panfrost_gem_object *pobj;
+	struct panfrost_device *pfdev = dev->dev_private;
 
 	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
 	if (IS_ERR(obj))
@@ -92,7 +124,13 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 
 	pobj = to_panfrost_bo(obj);
 
-	panfrost_mmu_map(pobj);
+	ret = panfrost_gem_map(pfdev, pobj);
+	if (ret)
+		goto free_obj;
 
 	return obj;
+
+free_obj:
+	drm_gem_object_put_unlocked(obj);
+	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 6dbcaba020fc..ce065270720b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -22,6 +22,11 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
 
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+				struct drm_device *dev, size_t size, u32 flags,
+				uint32_t *handle);
+
 struct drm_gem_object *
 panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 				   struct dma_buf_attachment *attach,

From patchwork Wed Jul 17 18:33:49 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [2/5] drm/panfrost: Split panfrost_mmu_map SG list mapping to its own function
X-Patchwork-Submitter: Rob Herring
X-Patchwork-Id: 169154
Message-Id: <20190717183352.22519-2-robh@kernel.org>
To: dri-devel@lists.freedesktop.org
Cc: Boris Brezillon, Robin Murphy, Alyssa Rosenzweig, Tomeu Vizoso,
 Steven Price
Date: Wed, 17 Jul 2019 12:33:49 -0600
From: Rob Herring <robh@kernel.org>
List-Id: Direct Rendering Infrastructure - Development

In preparation for creating partial GPU mappings of BOs on page faults,
split the SG list handling out of panfrost_mmu_map() into its own
function.
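
A worked example of the page-size selection inside the new helper (this
assumes get_pgsize() returns SZ_2M only when the combined address and the
remaining length are both 2MB multiples, consistent with the tail of its
definition visible below; illustration only, not part of the patch):

	/*
	 * One 4MB SG segment, iova = 0x00600000, paddr 2MB-aligned:
	 *   pass 1: get_pgsize() -> SZ_2M, maps [0x00600000, 0x00800000)
	 *   pass 2: get_pgsize() -> SZ_2M, maps [0x00800000, 0x00a00000)
	 * The same segment at iova = 0x00601000 instead degrades to 1024
	 * PAGE_SIZE mappings, since iova | paddr is never 2MB-aligned.
	 */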

Cc: Tomeu Vizoso
Cc: Boris Brezillon
Cc: Robin Murphy
Cc: Steven Price
Cc: Alyssa Rosenzweig
Signed-off-by: Rob Herring
Reviewed-by: Steven Price
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 52 +++++++++++++++----------
 1 file changed, 31 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f502e91be42a..5383b837f04b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -167,27 +167,13 @@ static size_t get_pgsize(u64 addr, size_t size)
 	return SZ_2M;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+static int mmu_map_sg(struct panfrost_device *pfdev, u64 iova,
+		      int prot, struct sg_table *sgt)
 {
-	struct drm_gem_object *obj = &bo->base.base;
-	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
-	u64 iova = bo->node.start << PAGE_SHIFT;
 	unsigned int count;
 	struct scatterlist *sgl;
-	struct sg_table *sgt;
-	int ret;
-
-	if (WARN_ON(bo->is_mapped))
-		return 0;
-
-	sgt = drm_gem_shmem_get_pages_sgt(obj);
-	if (WARN_ON(IS_ERR(sgt)))
-		return PTR_ERR(sgt);
-
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return ret;
+	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+	u64 start_iova = iova;
 
 	mutex_lock(&pfdev->mmu->lock);
 
@@ -200,18 +186,42 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 		while (len) {
 			size_t pgsize = get_pgsize(iova | paddr, len);
 
-			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+			ops->map(ops, iova, paddr, pgsize, prot);
 			iova += pgsize;
 			paddr += pgsize;
 			len -= pgsize;
 		}
 	}
 
-	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+	mmu_hw_do_operation(pfdev, 0, start_iova, iova - start_iova,
+			    AS_COMMAND_FLUSH_PT);
 
 	mutex_unlock(&pfdev->mmu->lock);
 
+	return 0;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo)
+{
+	struct drm_gem_object *obj = &bo->base.base;
+	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+	struct sg_table *sgt;
+	int ret;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	if (WARN_ON(bo->is_mapped))
+		return 0;
+
+	sgt = drm_gem_shmem_get_pages_sgt(obj);
+	if (WARN_ON(IS_ERR(sgt)))
+		return PTR_ERR(sgt);
+
+	ret = pm_runtime_get_sync(pfdev->dev);
+	if (ret < 0)
+		return ret;
+
+	mmu_map_sg(pfdev, bo->node.start << PAGE_SHIFT, prot, sgt);
+
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
 	bo->is_mapped = true;

From patchwork Wed Jul 17 18:33:50 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [3/5] drm/panfrost: Add a no execute flag for BO allocations
X-Patchwork-Submitter: Rob Herring
X-Patchwork-Id: 169157
Message-Id: <20190717183352.22519-3-robh@kernel.org>
To: dri-devel@lists.freedesktop.org
Cc: Boris Brezillon, Robin Murphy, Alyssa Rosenzweig, Tomeu Vizoso,
 Steven Price
Date: Wed, 17 Jul 2019 12:33:50 -0600
From: Rob Herring <robh@kernel.org>
List-Id: Direct Rendering Infrastructure - Development

Executable buffers have an alignment restriction: they can't cross a 16MB
boundary, because the GPU program counter is only 24 bits. This
restriction is currently not handled and we just get lucky. As current
userspace assumes all BOs are executable, that has to remain the default,
so add a new PANFROST_BO_NOEXEC flag to allow userspace to indicate which
BOs are not executable.

There is also a restriction that executable buffers cannot start or end
on a 4GB boundary. This is mostly avoided today, as there is only 4GB of
space and the beginning is already blocked out for NULL pointer
detection, but add support to handle the restriction fully, regardless of
the current constraints.

For existing userspace, all created BOs remain executable, but the GPU VA
alignment will be increased to the size of the BO. This shouldn't matter,
as there is plenty of GPU VA space.
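
Hypothetical userspace usage of the new flag (a sketch using libdrm's
drmIoctl() wrapper; the ioctl and struct layout are from
include/uapi/drm/panfrost_drm.h as modified below):

	#include <drm/panfrost_drm.h>
	#include <xf86drm.h>

	struct drm_panfrost_create_bo create = {
		.size  = 1024 * 1024,		/* plain data buffer */
		.flags = PANFROST_BO_NOEXEC,	/* never executed by the GPU */
	};

	if (drmIoctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create) == 0) {
		/* create.handle is the GEM handle,
		 * create.offset is the assigned GPU VA */
	}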

Cc: Tomeu Vizoso
Cc: Boris Brezillon
Cc: Robin Murphy
Cc: Steven Price
Cc: Alyssa Rosenzweig
Signed-off-by: Rob Herring
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 20 +++++++++++++++++++-
 drivers/gpu/drm/panfrost/panfrost_gem.c | 18 ++++++++++++++++--
 drivers/gpu/drm/panfrost/panfrost_gem.h |  3 ++-
 drivers/gpu/drm/panfrost/panfrost_mmu.c |  3 +++
 include/uapi/drm/panfrost_drm.h         |  2 ++
 5 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index d354b92964d5..b91e991bc6a3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -49,7 +49,8 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 	struct panfrost_gem_object *bo;
 	struct drm_panfrost_create_bo *args = data;
 
-	if (!args->size || args->flags || args->pad)
+	if (!args->size || args->pad ||
+	    (args->flags & ~PANFROST_BO_NOEXEC))
 		return -EINVAL;
 
 	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
@@ -367,6 +368,22 @@ static struct drm_driver panfrost_drm_driver = {
 	.gem_prime_mmap		= drm_gem_prime_mmap,
 };
 
+#define PFN_4G_MASK ((SZ_4G - 1) >> PAGE_SHIFT)
+
+static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
+					 unsigned long color,
+					 u64 *start, u64 *end)
+{
+	/* Executable buffers can't start or end on a 4GB boundary */
+	if (!color) {
+		if ((*start & PFN_4G_MASK) == 0)
+			(*start)++;
+
+		if ((*end & PFN_4G_MASK) == 0)
+			(*end)--;
+	}
+}
+
 static int panfrost_probe(struct platform_device *pdev)
 {
 	struct panfrost_device *pfdev;
@@ -394,6 +411,7 @@ static int panfrost_probe(struct platform_device *pdev)
 	/* 4G enough for now. can be 48-bit */
 	drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT,
 		    (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+	pfdev->mm.color_adjust = panfrost_drm_mm_color_adjust;
 
 	pm_runtime_use_autosuspend(pfdev->dev);
 	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index df70dcf3cb2f..37ffec8391da 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -66,11 +66,23 @@ static int panfrost_gem_map(struct panfrost_device *pfdev, struct panfrost_gem_o
 {
 	int ret;
 	size_t size = bo->base.base.size;
-	u64 align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+	u64 align;
+
+	/*
+	 * Executable buffers cannot cross a 16MB boundary as the program
+	 * counter is 24-bits. We assume executable buffers will be less than
+	 * 16MB and aligning executable buffers to their size will avoid
+	 * crossing a 16MB boundary.
+	 */
+	if (!bo->noexec)
+		align = size >> PAGE_SHIFT;
+	else
+		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
 	spin_lock(&pfdev->mm_lock);
 	ret = drm_mm_insert_node_generic(&pfdev->mm, &bo->node,
-					 size >> PAGE_SHIFT, align, 0, 0);
+					 size >> PAGE_SHIFT, align,
+					 bo->noexec, 0);
 	spin_unlock(&pfdev->mm_lock);
 	if (ret)
 		return ret;
@@ -96,6 +108,7 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
 		return ERR_CAST(shmem);
 
 	bo = to_panfrost_bo(&shmem->base);
+	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
 
 	ret = panfrost_gem_map(pfdev, bo);
 	if (ret)
@@ -123,6 +136,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 		return ERR_CAST(obj);
 
 	pobj = to_panfrost_bo(obj);
+	pobj->noexec = true;
 
 	ret = panfrost_gem_map(pfdev, pobj);
 	if (ret)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index ce065270720b..132f02399b7b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -11,7 +11,8 @@ struct panfrost_gem_object {
 	struct drm_gem_shmem_object base;
 	struct drm_mm_node node;
-	bool is_mapped;
+	bool is_mapped		:1;
+	bool noexec		:1;
 };
 
 static inline
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 5383b837f04b..d18484a07bfa 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -212,6 +212,9 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	if (WARN_ON(bo->is_mapped))
 		return 0;
 
+	if (bo->noexec)
+		prot |= IOMMU_NOEXEC;
+
 	sgt = drm_gem_shmem_get_pages_sgt(obj);
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index b5d370638846..17fb5d200f7a 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -82,6 +82,8 @@ struct drm_panfrost_wait_bo {
 	__s64 timeout_ns;	/* absolute */
 };
 
+#define PANFROST_BO_NOEXEC	1
+
 /**
  * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
  *

From patchwork Wed Jul 17 18:33:51 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [4/5] drm/panfrost: Add support for GPU heap allocations
X-Patchwork-Submitter: Rob Herring
X-Patchwork-Id: 169155
Message-Id: <20190717183352.22519-4-robh@kernel.org>
To: dri-devel@lists.freedesktop.org
Cc: Boris Brezillon, Robin Murphy, Alyssa Rosenzweig, Tomeu Vizoso,
 Steven Price
Date: Wed, 17 Jul 2019 12:33:51 -0600
From: Rob Herring <robh@kernel.org>
List-Id: Direct Rendering Infrastructure - Development

The Midgard/Bifrost GPUs need GPU heap memory that is allocated on GPU
page faults rather than pinned in memory up front. The vendor driver
calls this functionality GROW_ON_GPF.

This implementation assumes that BOs allocated with the PANFROST_BO_HEAP
flag are never mmapped or exported. Both of those may actually work, but
I'm unsure if there's some interaction there. Either would cause the
whole object to be pinned in memory, which would defeat the point of
this.

On faults, we map in 2MB at a time in order to utilize huge pages (if
enabled). Currently, once pages are mapped in, they are only unmapped
when the BO is freed. Once we add shrinker support, the shrinker will be
able to unmap them.
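
Hypothetical userspace usage, plus worked numbers for the fault arithmetic
in panfrost_mmu_map_fault_addr() below (a sketch under the same
conventions as the earlier example; not part of the patch):

	struct drm_panfrost_create_bo create = {
		.size  = 64 << 20,		/* 64MB of GPU VA, no pages pinned yet */
		.flags = PANFROST_BO_HEAP,	/* kernel also forces noexec for heaps */
	};

	drmIoctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create);

	/*
	 * Say the BO's node.start is page 0x5000 (GPU VA 0x05000000) and
	 * the GPU faults at VA 0x05801000:
	 *   addr &= ~((u64)SZ_2M - 1);          // 0x05800000, 2MB chunk base
	 *   page_offset = addr >> PAGE_SHIFT;   // 0x5800
	 *   page_offset -= node->start;         // 0x800, page index into the BO
	 * The handler then reads NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE = 512)
	 * shmem pages from that index and maps the whole 2MB chunk at once.
	 */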

Cc: Tomeu Vizoso
Cc: Boris Brezillon
Cc: Robin Murphy
Cc: Steven Price
Cc: Alyssa Rosenzweig
Signed-off-by: Rob Herring
---
 drivers/gpu/drm/panfrost/TODO           |   2 -
 drivers/gpu/drm/panfrost/panfrost_drv.c |   2 +-
 drivers/gpu/drm/panfrost/panfrost_gem.c |  14 ++-
 drivers/gpu/drm/panfrost/panfrost_gem.h |   8 ++
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 114 +++++++++++++++++++++---
 include/uapi/drm/panfrost_drm.h         |   1 +
 6 files changed, 125 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index c2e44add37d8..64129bf73933 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -14,8 +14,6 @@
   The hard part is handling when more address spaces are needed than what
   the h/w provides.
 
-- Support pinning pages on demand (GPU page faults).
-
 - Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
 
 - Support for madvise and a shrinker.
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b91e991bc6a3..9e87d0060202 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -50,7 +50,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 	struct drm_panfrost_create_bo *args = data;
 
 	if (!args->size || args->pad ||
-	    (args->flags & ~PANFROST_BO_NOEXEC))
+	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
 		return -EINVAL;
 
 	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 37ffec8391da..528396000038 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -87,7 +87,10 @@ static int panfrost_gem_map(struct panfrost_device *pfdev, struct panfrost_gem_o
 	if (ret)
 		return ret;
 
-	return panfrost_mmu_map(bo);
+	if (!bo->is_heap)
+		ret = panfrost_mmu_map(bo);
+
+	return ret;
 }
 
 struct panfrost_gem_object *
@@ -101,7 +104,11 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
 	struct drm_gem_shmem_object *shmem;
 	struct panfrost_gem_object *bo;
 
-	size = roundup(size, PAGE_SIZE);
+	/* Round up heap allocations to 2MB to keep fault handling simple */
+	if (flags & PANFROST_BO_HEAP)
+		size = roundup(size, SZ_2M);
+	else
+		size = roundup(size, PAGE_SIZE);
 
 	shmem = drm_gem_shmem_create_with_handle(file_priv, dev, size, handle);
 	if (IS_ERR(shmem))
@@ -109,6 +116,9 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
 
 	bo = to_panfrost_bo(&shmem->base);
 	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
+	bo->is_heap = !!(flags & PANFROST_BO_HEAP);
+	if (bo->is_heap)
+		bo->noexec = true;
 
 	ret = panfrost_gem_map(pfdev, bo);
 	if (ret)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 132f02399b7b..c500ca6b9072 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -13,6 +13,7 @@ struct panfrost_gem_object {
 	struct drm_mm_node node;
 	bool is_mapped		:1;
 	bool noexec		:1;
+	bool is_heap		:1;
 };
 
 static inline
@@ -21,6 +22,13 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
+static inline
+struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+{
+	return container_of(node, struct panfrost_gem_object, node);
+}
+
+
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
 
 struct panfrost_gem_object *
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index d18484a07bfa..3b95c7027188 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -3,6 +3,7 @@
 /* Copyright (C) 2019 Arm Ltd. */
 #include <linux/bitfield.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -10,6 +11,7 @@
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/shmem_fs.h>
 #include <linux/sizes.h>
 
 #include "panfrost_device.h"
@@ -257,12 +259,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 		size_t unmapped_page;
 		size_t pgsize = get_pgsize(iova, len - unmapped_len);
 
-		unmapped_page = ops->unmap(ops, iova, pgsize);
-		if (!unmapped_page)
-			break;
-
-		iova += unmapped_page;
-		unmapped_len += unmapped_page;
+		if (ops->iova_to_phys(ops, iova)) {
+			unmapped_page = ops->unmap(ops, iova, pgsize);
+			WARN_ON(unmapped_page != pgsize);
+		}
+		iova += pgsize;
+		unmapped_len += pgsize;
 	}
 
 	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
@@ -298,6 +300,86 @@ static const struct iommu_gather_ops mmu_tlb_ops = {
 	.tlb_sync	= mmu_tlb_sync_context,
 };
 
+static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+{
+	struct drm_mm_node *node;
+	u64 offset = addr >> PAGE_SHIFT;
+
+	drm_mm_for_each_node(node, &pfdev->mm) {
+		if (offset >= node->start && offset < (node->start + node->size))
+			return node;
+	}
+	return NULL;
+}
+
+#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
+
+int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+{
+	int ret, i;
+	struct drm_mm_node *node;
+	struct panfrost_gem_object *bo;
+	struct address_space *mapping;
+	pgoff_t page_offset;
+	struct sg_table sgt = {};
+	struct page **pages;
+
+	node = addr_to_drm_mm_node(pfdev, as, addr);
+	if (!node)
+		return -ENOENT;
+
+	bo = drm_mm_node_to_panfrost_bo(node);
+	if (!bo->is_heap) {
+		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+			 node->start << PAGE_SHIFT);
+		return -EINVAL;
+	}
+	/* Assume 2MB alignment and size multiple */
+	addr &= ~((u64)SZ_2M - 1);
+	page_offset = addr >> PAGE_SHIFT;
+	page_offset -= node->start;
+
+	pages = kvmalloc_array(NUM_FAULT_PAGES, sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	mapping = bo->base.base.filp->f_mapping;
+	mapping_set_unevictable(mapping);
+
+	for (i = 0; i < NUM_FAULT_PAGES; i++) {
+		pages[i] = shmem_read_mapping_page(mapping, page_offset + i);
+		if (IS_ERR(pages[i])) {
+			ret = PTR_ERR(pages[i]);
+			goto err_pages;
+		}
+	}
+
+	ret = sg_alloc_table_from_pages(&sgt, pages, NUM_FAULT_PAGES, 0,
+					SZ_2M, GFP_KERNEL);
+	if (ret)
+		goto err_pages;
+
+	if (dma_map_sg(pfdev->dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL) == 0) {
+		ret = -EINVAL;
+		goto err_map;
+	}
+
+	mmu_map_sg(pfdev, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, &sgt);
+
+	mmu_write(pfdev, MMU_INT_CLEAR, BIT(as));
+	bo->is_mapped = true;
+
+	dev_dbg(pfdev->dev, "mapped page fault @ %llx", addr);
+
+	return 0;
+
+err_map:
+	sg_free_table(&sgt);
+err_pages:
+	kvfree(pages);
+	return ret;
+}
+
 static const char *access_type_name(struct panfrost_device *pfdev,
 				    u32 fault_status)
 {
@@ -323,13 +405,11 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
 	u32 status = mmu_read(pfdev, MMU_INT_STAT);
-	int i;
+	int i, ret;
 
 	if (!status)
 		return IRQ_NONE;
 
-	dev_err(pfdev->dev, "mmu irq status=%x\n", status);
-
 	for (i = 0; status; i++) {
 		u32 mask = BIT(i) | BIT(i + 16);
 		u64 addr;
@@ -350,6 +430,17 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 		access_type = (fault_status >> 8) & 0x3;
 		source_id = (fault_status >> 16);
 
+		/* Page fault only */
+		if ((status & mask) == BIT(i)) {
+			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+
+			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+			if (!ret) {
+				status &= ~mask;
+				continue;
+			}
+		}
+
 		/* terminal fault, print info about the fault */
 		dev_err(pfdev->dev,
 			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
@@ -391,8 +482,9 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 	if (irq <= 0)
 		return -ENODEV;
 
-	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
-			       IRQF_SHARED, "mmu", pfdev);
+	err = devm_request_threaded_irq(pfdev->dev, irq, NULL,
+					panfrost_mmu_irq_handler,
+					IRQF_ONESHOT, "mmu", pfdev);
 	if (err) {
 		dev_err(pfdev->dev, "failed to request mmu irq");
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 17fb5d200f7a..9150dd75aad8 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -83,6 +83,7 @@ struct drm_panfrost_wait_bo {
 };
 
 #define PANFROST_BO_NOEXEC	1
+#define PANFROST_BO_HEAP	2
 
 /**
  * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.

From patchwork Wed Jul 17 18:33:52 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [5/5] drm/panfrost: Bump driver version to 1.1
X-Patchwork-Submitter: Rob Herring
X-Patchwork-Id: 169156
Message-Id: <20190717183352.22519-5-robh@kernel.org>
To: dri-devel@lists.freedesktop.org
Cc: Boris Brezillon, Robin Murphy, Alyssa Rosenzweig, Tomeu Vizoso,
 Steven Price
Date: Wed, 17 Jul 2019 12:33:52 -0600
From: Rob Herring <robh@kernel.org>
List-Id: Direct Rendering Infrastructure - Development

Increment the driver version to expose the new BO allocation flags.
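
Userspace can gate its use of the new flags on this bump; a hypothetical
check using libdrm's version query (drmGetVersion() is libdrm API, not
part of this patch):

	drmVersionPtr v = drmGetVersion(fd);
	bool can_use_heap_noexec = v &&
		(v->version_major > 1 ||
		 (v->version_major == 1 && v->version_minor >= 1));
	drmFreeVersion(v);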

Cc: Tomeu Vizoso
Cc: Boris Brezillon
Cc: Robin Murphy
Cc: Steven Price
Cc: Alyssa Rosenzweig
Signed-off-by: Rob Herring
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 9e87d0060202..615bd15fc106 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -348,6 +348,11 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
 
 DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
 
+/*
+ * Panfrost driver version:
+ * - 1.0 - initial interface
+ * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
+ */
 static struct drm_driver panfrost_drm_driver = {
 	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
 	.open			= panfrost_open,
@@ -359,7 +364,7 @@ static struct drm_driver panfrost_drm_driver = {
 	.desc			= "panfrost DRM",
 	.date			= "20180908",
 	.major			= 1,
-	.minor			= 0,
+	.minor			= 1,
 
 	.gem_create_object	= panfrost_gem_create_object,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,