@@ -361,6 +361,8 @@ static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
 	kref_put(&submit->ref, __msm_gem_submit_destroy);
 }
 
+void msm_submit_retire(struct msm_gem_submit *submit);
+
 /* helper to determine of a buffer in submit should be dumped, used for both
  * devcoredump and debugfs cmdstream dumping:
  */
@@ -23,8 +23,8 @@
 
 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
 #define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000
-#define BO_PINNED 0x2000
+#define BO_LOCKED 0x4000 /* obj lock is held */
+#define BO_PINNED 0x2000 /* obj is pinned and on active list */
 
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		struct msm_gpu *gpu,
@@ -220,21 +220,33 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
 	return ret;
 }
 
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
-		int i, bool backoff)
+/* Unwind bo state, according to cleanup_flags. In the success case, only
+ * the lock is dropped at the end of the submit (and active/pin ref is dropped
+ * later when the submit is retired).
+ */
+static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
+		unsigned cleanup_flags)
 {
-	struct msm_gem_object *msm_obj = submit->bos[i].obj;
+	struct drm_gem_object *obj = &submit->bos[i].obj->base;
+	unsigned flags = submit->bos[i].flags & cleanup_flags;
 
-	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_unpin_iova_locked(&msm_obj->base, submit->aspace);
+	if (flags & BO_PINNED) {
+		msm_gem_unpin_iova_locked(obj, submit->aspace);
+		msm_gem_active_put(obj);
+	}
 
-	if (submit->bos[i].flags & BO_LOCKED)
-		dma_resv_unlock(msm_obj->base.resv);
+	if (flags & BO_LOCKED)
+		dma_resv_unlock(obj->resv);
 
-	if (backoff && !(submit->bos[i].flags & BO_VALID))
-		submit->bos[i].iova = 0;
+	submit->bos[i].flags &= ~cleanup_flags;
+}
 
-	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+{
+	submit_cleanup_bo(submit, i, BO_PINNED | BO_LOCKED);
+
+	if (!(submit->bos[i].flags & BO_VALID))
+		submit->bos[i].iova = 0;
 }
 
 /* This is where we make sure all the bo's are reserved and pin'd: */
@@ -266,10 +278,10 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
 fail:
 	for (; i >= 0; i--)
-		submit_unlock_unpin_bo(submit, i, true);
+		submit_unlock_unpin_bo(submit, i);
 
 	if (slow_locked > 0)
-		submit_unlock_unpin_bo(submit, slow_locked, true);
+		submit_unlock_unpin_bo(submit, slow_locked);
 
 	if (ret == -EDEADLK) {
 		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -325,16 +337,18 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 	submit->valid = true;
 
 	for (i = 0; i < submit->nr_bos; i++) {
-		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 		uint64_t iova;
 
 		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_and_pin_iova_locked(&msm_obj->base,
+		ret = msm_gem_get_and_pin_iova_locked(obj,
 				submit->aspace, &iova);
 
 		if (ret)
 			break;
 
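+		/* bo stays active until the submit is retired: */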
+		msm_gem_active_get(obj, submit->gpu);
+
 		submit->bos[i].flags |= BO_PINNED;
 
 		if (iova == submit->bos[i].iova) {
@@ -350,6 +364,20 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 	return ret;
 }
 
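+/* Attach submit->fence to each bo's reservation object: exclusive for
+ * writers, shared for readers (previously done in msm_gpu_submit()):
+ */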
+static void submit_attach_object_fences(struct msm_gem_submit *submit)
+{
+	int i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+			dma_resv_add_excl_fence(obj->resv, submit->fence);
+		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+			dma_resv_add_shared_fence(obj->resv, submit->fence);
+	}
+}
+
 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
 		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
 {
@@ -444,18 +472,40 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
 	return ret;
 }
 
-static void submit_cleanup(struct msm_gem_submit *submit)
+/* Cleanup submit at end of ioctl. In the error case, this also drops
+ * references, unpins, and drops active refcnt. In the non-error case,
+ * this is done when the submit is retired.
+ */
+static void submit_cleanup(struct msm_gem_submit *submit, bool error)
 {
+	unsigned cleanup_flags = BO_LOCKED;
 	unsigned i;
 
+	if (error)
+		cleanup_flags |= BO_PINNED;
+
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
-		submit_unlock_unpin_bo(submit, i, false);
+		submit_cleanup_bo(submit, i, cleanup_flags);
 		list_del_init(&msm_obj->submit_entry);
-		drm_gem_object_put(&msm_obj->base);
+		if (error)
+			drm_gem_object_put(&msm_obj->base);
 	}
 }
 
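+/* Called from the retire path (see retire_submit()) to drop the pin/active
+ * refs and the bo references held by the submit:
+ */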
+void msm_submit_retire(struct msm_gem_submit *submit)
+{
+	int i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+		msm_gem_lock(obj);
+		submit_cleanup_bo(submit, i, BO_PINNED);
+		msm_gem_unlock(obj);
+		drm_gem_object_put(obj);
+	}
+}
+
 struct msm_submit_post_dep {
 	struct drm_syncobj *syncobj;
@@ -832,6 +882,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		args->fence_fd = out_fence_fd;
 	}
 
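+	/* attach submit->fence to the bos before queueing to the gpu: */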
+	submit_attach_object_fences(submit);
+
 	msm_gpu_submit(gpu, submit);
 
 	args->fence = submit->fence->seqno;
@@ -844,7 +896,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 out:
 	pm_runtime_put(&gpu->pdev->dev);
 out_pre_pm:
-	submit_cleanup(submit);
+	submit_cleanup(submit, !!ret);
 	if (has_ww_ticket)
 		ww_acquire_fini(&submit->ticket);
 	msm_gem_submit_put(submit);
@@ -647,7 +647,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	volatile struct msm_gpu_submit_stats *stats;
 	u64 elapsed, clock = 0;
 	unsigned long flags;
-	int i;
 
 	stats = &ring->memptrs->stats[index];
 	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
@@ -663,15 +662,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	trace_msm_gpu_submit_retired(submit, elapsed, clock,
 		stats->alwayson_start, stats->alwayson_end);
 
-	for (i = 0; i < submit->nr_bos; i++) {
-		struct drm_gem_object *obj = &submit->bos[i].obj->base;
-
-		msm_gem_lock(obj);
-		msm_gem_active_put(obj);
-		msm_gem_unpin_iova_locked(obj, submit->aspace);
-		msm_gem_unlock(obj);
-		drm_gem_object_put(obj);
-	}
+	msm_submit_retire(submit);
 
 	pm_runtime_mark_last_busy(&gpu->pdev->dev);
 	pm_runtime_put_autosuspend(&gpu->pdev->dev);
@@ -748,7 +739,6 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned long flags;
-	int i;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -762,23 +752,6 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
 	update_sw_cntrs(gpu);
 
-	for (i = 0; i < submit->nr_bos; i++) {
-		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		struct drm_gem_object *drm_obj = &msm_obj->base;
-		uint64_t iova;
-
-		/* submit takes a reference to the bo and iova until retired: */
-		drm_gem_object_get(&msm_obj->base);
-		msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);
-
-		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
-			dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
-		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-			dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
-
-		msm_gem_active_get(drm_obj, gpu);
-	}
-
 	/*
 	 * ring->submits holds a ref to the submit, to deal with the case
 	 * that a submit completes before msm_ioctl_gem_submit() returns.