[3/4] drm/msm: Take lru lock once per submit_pin_objects()

Message ID 20230802222158.11838-4-robdclark@gmail.com
State New
Series [1/4] drm/msm: Take lru lock once per job_run

Commit Message

Rob Clark Aug. 2, 2023, 10:21 p.m. UTC
From: Rob Clark <robdclark@chromium.org>

Split out the pin_count increment and lru update into a separate loop
so that we can take the lru lock just once for all objs.  Since we are
still holding the obj lock, it is safe to split this up.

Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/msm_gem.c        | 45 ++++++++++++++++++----------
 drivers/gpu/drm/msm/msm_gem.h        |  1 +
 drivers/gpu/drm/msm/msm_gem_submit.c | 10 ++++++-
 3 files changed, 39 insertions(+), 17 deletions(-)
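
The change follows a familiar batching pattern: rather than taking and
dropping priv->lru.lock once per object inside msm_gem_pin_pages_locked(),
the pin bookkeeping moves into msm_gem_pin_obj_locked() so that a caller
with many objects (here submit_pin_objects()) can take the lock a single
time around the whole loop.  A minimal userspace sketch of that pattern,
using pthreads in place of the kernel mutex API; the names obj, lru_lock,
pin_each and pin_batch are illustrative only and do not come from the
driver:

/*
 * Illustrative userspace analogue of the locking change, built on
 * pthreads rather than the kernel's mutex_lock()/mutex_unlock().
 */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct obj {
	int pin_count;
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Before: one lock/unlock round trip per object pinned. */
static void pin_each(struct obj *objs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		pthread_mutex_lock(&lru_lock);
		objs[i].pin_count++;	/* pin + lru update under the lock */
		pthread_mutex_unlock(&lru_lock);
	}
}

/* After: the lock is taken once for the whole batch. */
static void pin_batch(struct obj *objs, size_t n)
{
	pthread_mutex_lock(&lru_lock);
	for (size_t i = 0; i < n; i++)
		objs[i].pin_count++;
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	struct obj objs[4] = { 0 };

	pin_each(objs, 4);	/* 4 lock acquisitions */
	pin_batch(objs, 4);	/* 1 lock acquisition  */

	printf("pin_count = %d\n", objs[0].pin_count);	/* prints 2 */
	return 0;
}

Deferring the pin_count update to a second loop is safe here because, as
the commit message notes, each object's own lock is still held across
both loops; only the lru.lock critical sections are coalesced.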

Patch

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6d1dbffc3905..1c81ff6115ac 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -222,9 +222,7 @@  static void put_pages(struct drm_gem_object *obj)
 static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
 					      unsigned madv)
 {
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct page **p;
 
 	msm_gem_assert_locked(obj);
 
@@ -234,16 +232,29 @@  static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
 		return ERR_PTR(-EBUSY);
 	}
 
-	p = get_pages(obj);
-	if (IS_ERR(p))
-		return p;
+	return get_pages(obj);
+}
+
+/*
+ * Update the pin count of the object, call under lru.lock
+ */
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
+{
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	msm_gem_assert_locked(obj);
+
+	to_msm_bo(obj)->pin_count++;
+	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+}
+
+static void pin_obj_locked(struct drm_gem_object *obj)
+{
+	struct msm_drm_private *priv = obj->dev->dev_private;
 
 	mutex_lock(&priv->lru.lock);
-	msm_obj->pin_count++;
-	update_lru_locked(obj);
+	msm_gem_pin_obj_locked(obj);
 	mutex_unlock(&priv->lru.lock);
-
-	return p;
 }
 
 struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
@@ -252,6 +263,8 @@  struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
 
 	msm_gem_lock(obj);
 	p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+	if (!IS_ERR(p))
+		pin_obj_locked(obj);
 	msm_gem_unlock(obj);
 
 	return p;
@@ -463,7 +476,7 @@  int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
-	int ret, prot = IOMMU_READ;
+	int prot = IOMMU_READ;
 
 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 		prot |= IOMMU_WRITE;
@@ -480,11 +493,7 @@  int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
-	if (ret)
-		msm_gem_unpin_locked(obj);
-
-	return ret;
+	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
 }
 
 void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -536,8 +545,10 @@  static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 		return PTR_ERR(vma);
 
 	ret = msm_gem_pin_vma_locked(obj, vma);
-	if (!ret)
+	if (!ret) {
 		*iova = vma->iova;
+		pin_obj_locked(obj);
+	}
 
 	return ret;
 }
@@ -700,6 +711,8 @@  static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 	if (IS_ERR(pages))
 		return ERR_CAST(pages);
 
+	pin_obj_locked(obj);
+
 	/* increment vmap_count *before* vmap() call, so shrinker can
 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 	 * This guarantees that we won't try to msm_gem_vunmap() this
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 31b370474fa8..2ddd896aac68 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -142,6 +142,7 @@  int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
 struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
 void msm_gem_unpin_pages(struct drm_gem_object *obj);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index a03bdded1a15..b17561ebd518 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -384,6 +384,7 @@  static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 
 static int submit_pin_objects(struct msm_gem_submit *submit)
 {
+	struct msm_drm_private *priv = submit->dev->dev_private;
 	int i, ret = 0;
 
 	submit->valid = true;
@@ -403,7 +404,7 @@  static int submit_pin_objects(struct msm_gem_submit *submit)
 		if (ret)
 			break;
 
-		submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
+		submit->bos[i].flags |= BO_VMA_PINNED;
 		submit->bos[i].vma = vma;
 
 		if (vma->iova == submit->bos[i].iova) {
@@ -416,6 +417,13 @@  static int submit_pin_objects(struct msm_gem_submit *submit)
 		}
 	}
 
+	mutex_lock(&priv->lru.lock);
+	for (i = 0; i < submit->nr_bos; i++) {
+		msm_gem_pin_obj_locked(submit->bos[i].obj);
+		submit->bos[i].flags |= BO_OBJ_PINNED;
+	}
+	mutex_unlock(&priv->lru.lock);
+
 	return ret;
 }