@@ -1125,6 +1125,8 @@ __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
LIST_HEAD(extobjs);
int ret = 0;
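+ /* Ext-obj tracking is disabled in weak-ref mode, so this helper would
+ * silently skip the objects it is meant to prepare; warn if it is used.
+ */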
+ WARN_ON(gpuvm->flags & DRM_GPUVM_VA_WEAK_REF);
+
for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
if (ret)
@@ -1145,6 +1147,8 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
struct drm_gpuvm_bo *vm_bo;
int ret = 0;
+ WARN_ON(gpuvm->flags & DRM_GPUVM_VA_WEAK_REF);
+
drm_gpuvm_resv_assert_held(gpuvm);
list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
@@ -1386,6 +1390,7 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
struct drm_gpuvm_bo *vm_bo, *next;
int ret = 0;
+ WARN_ON(gpuvm->flags & DRM_GPUVM_VA_WEAK_REF);
drm_gpuvm_resv_assert_held(gpuvm);
list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
@@ -1482,7 +1487,9 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
vm_bo->vm = drm_gpuvm_get(gpuvm);
vm_bo->obj = obj;
- drm_gem_object_get(obj);
+
+ if (!(gpuvm->flags & DRM_GPUVM_VA_WEAK_REF))
+ drm_gem_object_get(obj);
kref_init(&vm_bo->kref);
INIT_LIST_HEAD(&vm_bo->list.gpuva);
@@ -1504,16 +1511,22 @@ drm_gpuvm_bo_destroy(struct kref *kref)
const struct drm_gpuvm_ops *ops = gpuvm->ops;
struct drm_gem_object *obj = vm_bo->obj;
bool lock = !drm_gpuvm_resv_protected(gpuvm);
+ bool unref = !(gpuvm->flags & DRM_GPUVM_VA_WEAK_REF);
if (!lock)
drm_gpuvm_resv_assert_held(gpuvm);
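+ /* With DRM_GPUVM_VA_WEAK_REF the last GEM object reference may already
+ * have been dropped by the time the vm_bo is destroyed; the gpuva lock
+ * can only be asserted while a reference is still held, and a dead
+ * object must already be off the evict/extobj lists.
+ */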
+ if (kref_read(&obj->refcount) > 0) {
+ drm_gem_gpuva_assert_lock_held(obj);
+ } else {
+ WARN_ON(!(gpuvm->flags & DRM_GPUVM_VA_WEAK_REF));
+ WARN_ON(!list_empty(&vm_bo->list.entry.evict));
+ WARN_ON(!list_empty(&vm_bo->list.entry.extobj));
+ }
+
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- if (kref_read(&obj->refcount) > 0)
- drm_gem_gpuva_assert_lock_held(obj);
-
list_del(&vm_bo->list.entry.gem);
if (ops && ops->vm_bo_free)
@@ -1522,7 +1535,8 @@ drm_gpuvm_bo_destroy(struct kref *kref)
kfree(vm_bo);
drm_gpuvm_put(gpuvm);
- drm_gem_object_put(obj);
+ if (unref)
+ drm_gem_object_put(obj);
}
/**
@@ -1678,6 +1692,12 @@ drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
if (!lock)
drm_gpuvm_resv_assert_held(gpuvm);
+ /* If the vm_bo doesn't hold a hard reference to the obj, then the
+ * driver is responsible for object tracking.
+ */
+ if (gpuvm->flags & DRM_GPUVM_VA_WEAK_REF)
+ return;
+
if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
}
@@ -1699,6 +1719,13 @@ drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
bool lock = !drm_gpuvm_resv_protected(gpuvm);
dma_resv_assert_held(obj->resv);
+
+ /* If the vm_bo doesn't hold a hard reference to the obj, then the
+ * driver must track evictions on its own.
+ */
+ if (gpuvm->flags & DRM_GPUVM_VA_WEAK_REF)
+ return;
+
vm_bo->evicted = evict;
/* Can't add external objects to the evicted list directly if not using
@@ -205,10 +205,25 @@ enum drm_gpuvm_flags {
*/
DRM_GPUVM_RESV_PROTECTED = BIT(0),
+ /**
+ * @DRM_GPUVM_VA_WEAK_REF:
+ *
+ * Flag indicating that the &drm_gpuva (or more correctly, the
+ * &drm_gpuvm_bo) only holds a weak reference to the &drm_gem_object.
+ * This mode is intended to ease migration to drm_gpuvm for drivers
+ * where the GEM object holds a reference to the VA, rather than the
+ * other way around.
+ *
+ * In this mode, drm_gpuvm does not track evicted or external objects.
+ * It is intended for legacy submission paths, where every object needed
+ * by a command submission is attached to the submission ioctl, making
+ * this tracking unnecessary.
+ */
+ DRM_GPUVM_VA_WEAK_REF = BIT(1),
+
/**
* @DRM_GPUVM_USERBITS: user defined bits
*/
- DRM_GPUVM_USERBITS = BIT(1),
+ DRM_GPUVM_USERBITS = BIT(2),
};
/**
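Drivers opt in at init time by passing the new flag to drm_gpuvm_init(). A
minimal sketch, assuming hypothetical driver names (my_vm, my_drm, my_r_obj,
my_gpuvm_ops) and an arbitrary 48-bit VA range:

	/* my_vm embeds a struct drm_gpuvm as "base"; all "my_" names are
	 * placeholders, only drm_gpuvm_init() and the flag are real.
	 */
	drm_gpuvm_init(&my_vm->base, "my-vm", DRM_GPUVM_VA_WEAK_REF,
		       my_drm, my_r_obj,
		       0, 1ull << 48,	/* VA space start and range */
		       0, 0,		/* no kernel-reserved region */
		       &my_gpuvm_ops);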
@@ -651,7 +666,7 @@ struct drm_gpuvm_bo {
/**
* @obj: The &drm_gem_object being mapped in @vm. This is a reference
- * counted pointer.
+ * counted pointer, unless the &DRM_GPUVM_VA_WEAK_REF flag is set.
*/
struct drm_gem_object *obj;
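Since the vm_bo no longer keeps the object alive in this mode, the driver's
own reference does, and the VAs must be unlinked before that reference is
dropped. A minimal sketch of a driver unmap path, assuming a hypothetical
my_vma wrapper around &drm_gpuva (drm_gpuva_remove() and drm_gpuva_unlink()
are the existing helpers; the required VM and gpuva locks are taken by the
caller):

	static void my_vma_destroy(struct my_vma *vma)
	{
		struct drm_gem_object *obj = vma->va.gem.obj;

		drm_gpuva_remove(&vma->va);	/* drop from the VA tree */
		drm_gpuva_unlink(&vma->va);	/* put the vm_bo reference */
		drm_gem_object_put(obj);	/* the driver-held object ref */
		kfree(vma);
	}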