@@ -52,6 +52,7 @@ static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bo
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct msm_context *ctx = file->driver_priv;
+ struct drm_exec exec;
update_ctx_mem(file, -obj->size);
@@ -70,9 +71,9 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, false,
msecs_to_jiffies(1000));
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
put_iova_spaces(obj, &ctx->vm->base, true);
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
}
/*
@@ -538,11 +539,12 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
struct msm_gem_vm *vm, uint64_t *iova,
u64 range_start, u64 range_end)
{
+ struct drm_exec exec;
int ret;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
@@ -562,16 +564,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_vm *vm, uint64_t *iova)
{
struct msm_gem_vma *vma;
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
vma = get_vma_locked(obj, vm, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
*iova = vma->base.va.addr;
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
@@ -600,9 +603,10 @@ static int clear_iova(struct drm_gem_object *obj,
int msm_gem_set_iova(struct drm_gem_object *obj,
struct msm_gem_vm *vm, uint64_t iova)
{
+ struct drm_exec exec;
int ret = 0;
- msm_gem_lock(obj);
+ msm_gem_lock_vm_and_obj(&exec, obj, vm);
if (!iova) {
ret = clear_iova(obj, vm);
} else {
@@ -615,7 +619,7 @@ int msm_gem_set_iova(struct drm_gem_object *obj,
ret = -EBUSY;
}
}
- msm_gem_unlock(obj);
+ drm_exec_fini(&exec); /* drop locks */
return ret;
}
@@ -1007,12 +1011,27 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_exec exec;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
+ /*
+ * We need to lock any VMs the object is still attached to, but not
+ * the object itself (see explanation in msm_gem_assert_locked()),
+ * so just open-code this special case:
+ */
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked (&exec) {
+ struct drm_gpuvm_bo *vm_bo;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(vm_bo->vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ }
put_iova_spaces(obj, NULL, true);
+ drm_exec_fini(&exec); /* drop locks */
if (obj->import_attach) {
GEM_WARN_ON(msm_obj->vaddr);
@@ -62,12 +62,6 @@ struct msm_gem_vm {
*/
struct drm_mm mm;
- /** @mm_lock: protects @mm node allocation/removal */
- struct spinlock mm_lock;
-
- /** @vm_lock: protects gpuvm insert/remove/traverse */
- struct mutex vm_lock;
-
/** @mmu: The mmu object which manages the pgtables */
struct msm_mmu *mmu;
@@ -246,6 +240,37 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
+/**
+ * msm_gem_lock_vm_and_obj() - Helper to lock an obj + VM
+ * @exec: the exec context helper which will be initialized
+ * @obj: the GEM object to lock
+ * @vm: the VM to lock
+ *
+ * Operations which modify a VM frequently need to lock both the VM and
+ * the object being mapped/unmapped/etc. This helper uses drm_exec to
+ * acquire both locks, dealing with potential deadlock/backoff scenarios
+ * which arise when multiple locks are involved.
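+ *
+ * Typical usage, mirroring the callers above (a minimal sketch; the work
+ * done while the locks are held is caller specific):
+ *
+ *	struct drm_exec exec;
+ *
+ *	msm_gem_lock_vm_and_obj(&exec, obj, vm);
+ *	// ... operate on @obj and @vm with both resvs held ...
+ *	drm_exec_fini(&exec);	// drop locks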
+ */
+static inline int
+msm_gem_lock_vm_and_obj(struct drm_exec *exec,
+ struct drm_gem_object *obj,
+ struct msm_gem_vm *vm)
+{
+ int ret = 0;
+
+ drm_exec_init(exec, 0, 2);
+ drm_exec_until_all_locked (exec) {
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(&vm->base));
+ if (!ret && (obj->resv != drm_gpuvm_resv(&vm->base)))
+ ret = drm_exec_lock_obj(exec, obj);
+ drm_exec_retry_on_contention(exec);
+ if (GEM_WARN_ON(ret))
+ break;
+ }
+
+ return ret;
+}
+
static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
@@ -123,7 +123,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
stages[i].freed =
drm_gem_lru_scan(stages[i].lru, nr,
&stages[i].remaining,
- stages[i].shrink);
+ stages[i].shrink);
nr -= stages[i].freed;
freed += stages[i].freed;
remaining += stages[i].remaining;
@@ -247,11 +247,18 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
+ unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;
int ret;
- drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);
+ /* TODO: need to add vm_bind path which locks vm resv + external objs */
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_exec_lock_obj(&submit->exec,
+ drm_gpuvm_resv_obj(&submit->vm->base));
+ drm_exec_retry_on_contention(&submit->exec);
+ if (ret)
+ goto error;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
@@ -92,15 +92,13 @@ void msm_gem_vma_close(struct msm_gem_vma *vma)
GEM_WARN_ON(vma->mapped);
- spin_lock(&vm->mm_lock);
+ drm_gpuvm_resv_assert_held(&vm->base);
+
if (vma->base.va.addr)
drm_mm_remove_node(&vma->node);
- spin_unlock(&vm->mm_lock);
- mutex_lock(&vm->vm_lock);
drm_gpuva_remove(&vma->base);
drm_gpuva_unlink(&vma->base);
- mutex_unlock(&vm->vm_lock);
kfree(vma);
}
@@ -114,16 +112,16 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj,
struct msm_gem_vma *vma;
int ret;
+ drm_gpuvm_resv_assert_held(&vm->base);
+
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
return ERR_PTR(-ENOMEM);
if (vm->managed) {
- spin_lock(&vm->mm_lock);
ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
obj->size, PAGE_SIZE, 0,
range_start, range_end, 0);
- spin_unlock(&vm->mm_lock);
if (ret)
goto err_free_vma;
@@ -137,9 +135,7 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj,
drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0);
vma->mapped = false;
- mutex_lock(&vm->vm_lock);
ret = drm_gpuva_insert(&vm->base, &vma->base);
- mutex_unlock(&vm->vm_lock);
if (ret)
goto err_free_range;
@@ -149,17 +145,13 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj,
goto err_va_remove;
}
- mutex_lock(&vm->vm_lock);
drm_gpuva_link(&vma->base, vm_bo);
- mutex_unlock(&vm->vm_lock);
GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
return vma;
err_va_remove:
- mutex_lock(&vm->vm_lock);
drm_gpuva_remove(&vma->base);
- mutex_unlock(&vm->vm_lock);
err_free_range:
if (vm->managed)
drm_mm_remove_node(&vma->node);
@@ -190,7 +182,9 @@ struct msm_gem_vm *
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
u64 va_start, u64 va_size, bool managed)
{
- enum drm_gpuvm_flags flags = managed ? DRM_GPUVM_VA_WEAK_REF : 0;
+ enum drm_gpuvm_flags flags =
+ DRM_GPUVM_RESV_PROTECTED |
+ (managed ? DRM_GPUVM_VA_WEAK_REF : 0);
struct msm_gem_vm *vm;
struct drm_gem_object *dummy_gem;
int ret = 0;
@@ -212,9 +206,6 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
va_start, va_size, 0, 0, &msm_gpuvm_ops);
drm_gem_object_put(dummy_gem);
- spin_lock_init(&vm->mm_lock);
- mutex_init(&vm->vm_lock);
-
vm->mmu = mmu;
vm->managed = managed;