@@ -8,6 +8,34 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+#define vm_dbg(fmt, ...) pr_debug("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/**
+ * struct msm_vm_map_op - create new pgtable mapping
+ */
+struct msm_vm_map_op {
+	/** @iova: start address for mapping */
+	uint64_t iova;
+	/** @range: size of the region to map */
+	uint64_t range;
+	/** @offset: offset into @sgt to map */
+	uint64_t offset;
+	/** @sgt: pages to map, or NULL for a PRR (partially resident region) mapping */
+	struct sg_table *sgt;
+	/** @prot: the mapping protection flags */
+	int prot;
+};
+
+/**
+ * struct msm_vm_unmap_op - unmap a range of pages from pgtable
+ */
+struct msm_vm_unmap_op {
+	/** @iova: start address for unmap */
+	uint64_t iova;
+	/** @range: size of region to unmap */
+	uint64_t range;
+};
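+
+/* These ops are applied to the pgtable by vm_map_op() and vm_unmap_op() below */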
+
static void
msm_gem_vm_free(struct drm_gpuvm *gpuvm)
{
@@ -21,28 +49,45 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 	kfree(vm);
}
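 
+/* Apply an unmap op to the pgtable: */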
+static void
+vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+{
+	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+
+	vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
+}
+
/* Actually unmap memory for the vma */
void msm_gem_vma_unmap(struct drm_gpuva *vma)
{
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
-	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
-	unsigned size = vma->va.range;
 
 	/* Don't do anything if the memory isn't mapped */
 	if (!msm_vma->mapped)
 		return;
 
-	vm->mmu->funcs->unmap(vm->mmu, vma->va.addr, size);
+	vm_unmap_op(to_msm_vm(vma->vm), &(struct msm_vm_unmap_op){
+		.iova = vma->va.addr,
+		.range = vma->va.range,
+	});
 
 	msm_vma->mapped = false;
}
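 
+/* Apply a map op to the pgtable: */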
+static int
+vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
+{
+	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+
+	return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
+				   op->range, op->prot);
+}
+
/* Map and pin vma: */
int
msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
{
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
-	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
 	int ret;
 
 	if (GEM_WARN_ON(!vma->va.addr))
@@ -62,9 +107,13 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
 	 * Revisit this if we can come up with a scheme to pre-alloc pages
 	 * for the pgtable in map/unmap ops.
 	 */
-	ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
-				  vma->gem.offset, vma->va.range,
-				  prot);
+	ret = vm_map_op(to_msm_vm(vma->vm), &(struct msm_vm_map_op){
+		.iova = vma->va.addr,
+		.range = vma->va.range,
+		.offset = vma->gem.offset,
+		.sgt = sgt,
+		.prot = prot,
+	});
 	if (ret) {
 		msm_vma->mapped = false;
 	}