@@ -906,14 +906,9 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj) {
-		ret = -ENOENT;
-		goto unlock;
+		return -ENOENT;
 	}
 
 	ret = msm_gem_madvise(obj, args->madv);
@@ -922,10 +917,8 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
 		ret = 0;
 	}
 
-	drm_gem_object_put_locked(obj);
+	drm_gem_object_put(obj);
 
-unlock:
-	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
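For orientation, this is roughly how msm_ioctl_gem_madvise() reads once the
two hunks above are applied. The declarations at the top come from the
surrounding driver context rather than from the hunks themselves, so treat
this as a sketch:

/* madvise ioctl without dev->struct_mutex: only a GEM object reference is
 * taken, and msm_gem_madvise() does its own per-object locking. The dev
 * parameter is now only needed for the ioctl signature.
 */
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		return -ENOENT;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	/* Plain put: struct_mutex is no longer held here. */
	drm_gem_object_put(obj);

	return ret;
}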
@@ -639,8 +639,6 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 
 	mutex_lock(&msm_obj->lock);
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
 	if (msm_obj->madv != __MSM_MADV_PURGED)
 		msm_obj->madv = madv;
 
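The struct_mutex assertion can go because the function already serializes
madv updates on the per-object lock. With the hunk applied it reads roughly
as below; the tail past the hunk's context is reconstructed from the driver
rather than quoted, so treat it as a sketch:

int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	/* A purged object stays purged; otherwise record the new hint. */
	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	/* Tells the ioctl whether the backing pages are still retained. */
	return (madv != __MSM_MADV_PURGED);
}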
@@ -657,7 +655,7 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-	WARN_ON(!is_purgeable(msm_obj));
+	WARN_ON(!is_purgeable(msm_obj, subclass));
 	WARN_ON(obj->import_attach);
 
 	mutex_lock_nested(&msm_obj->lock, subclass);
@@ -749,7 +747,7 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 	struct msm_drm_private *priv = obj->dev->dev_private;
 
 	might_sleep();
-	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
+	WARN_ON(msm_gem_madv(msm_obj, OBJ_LOCK_NORMAL) != MSM_MADV_WILLNEED);
 
 	if (!atomic_fetch_inc(&msm_obj->active_count)) {
 		mutex_lock(&priv->mm_lock);
@@ -97,18 +97,6 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 	return atomic_read(&msm_obj->active_count);
 }
 
-static inline bool is_purgeable(struct msm_gem_object *msm_obj)
-{
-	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
-	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
-			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
-}
-
-static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
-{
-	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
-}
-
 /* The shrinker can be triggered while we hold objA->lock, and need
  * to grab objB->lock to purge it. Lockdep just sees these as a single
  * class of lock, so we use subclasses to teach it the difference.
@@ -125,6 +113,32 @@ enum msm_gem_lock {
 	OBJ_LOCK_SHRINKER,
 };
 
+/* Use this helper to read msm_obj->madv when msm_obj->lock not held: */
+static inline unsigned
+msm_gem_madv(struct msm_gem_object *msm_obj, enum msm_gem_lock subclass)
+{
+	unsigned madv;
+
+	mutex_lock_nested(&msm_obj->lock, subclass);
+	madv = msm_obj->madv;
+	mutex_unlock(&msm_obj->lock);
+
+	return madv;
+}
+
+static inline bool
+is_purgeable(struct msm_gem_object *msm_obj, enum msm_gem_lock subclass)
+{
+	return (msm_gem_madv(msm_obj, subclass) == MSM_MADV_DONTNEED) &&
+			msm_obj->sgt && !msm_obj->base.dma_buf &&
+			!msm_obj->base.import_attach;
+}
+
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
+
 void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
 void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
 
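The subclass argument threads straight into mutex_lock_nested(), which is
what lets the shrinker take a second object's lock while one is already
held, as the comment above enum msm_gem_lock describes. A hypothetical
illustration of that nesting follows; the example_* functions are
illustrative only and not part of the patch:

/* Hypothetical sketch of the nesting OBJ_LOCK_SHRINKER describes: objA's
 * lock is held when reclaim kicks in, and the shrinker then needs objB's
 * lock to decide whether to purge it. Without the subclass, lockdep would
 * report recursive locking of a single lock class.
 * (The real shrinker path also holds other locks not shown here.)
 */
static void example_reclaim_nesting(struct msm_gem_object *obj_a,
				    struct msm_gem_object *obj_b)
{
	mutex_lock(&obj_a->lock);	/* e.g. while populating objA's pages */

	/* ... memory pressure invokes the shrinker here ... */

	/* reads obj_b->madv under obj_b->lock, in the nested class */
	if (is_purgeable(obj_b, OBJ_LOCK_SHRINKER))
		msm_gem_purge(&obj_b->base, OBJ_LOCK_SHRINKER);

	mutex_unlock(&obj_a->lock);
}

/* Ordinary (non-shrinker) paths keep using the default class: */
static inline bool example_still_willneeded(struct msm_gem_object *msm_obj)
{
	return msm_gem_madv(msm_obj, OBJ_LOCK_NORMAL) == MSM_MADV_WILLNEED;
}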
@@ -54,7 +54,7 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	mutex_lock(&priv->mm_lock);
 
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
-		if (is_purgeable(msm_obj))
+		if (is_purgeable(msm_obj, OBJ_LOCK_SHRINKER))
 			count += msm_obj->base.size >> PAGE_SHIFT;
 	}
 
@@ -84,7 +84,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (freed >= sc->nr_to_scan)
 			break;
-		if (is_purgeable(msm_obj)) {
+		if (is_purgeable(msm_obj, OBJ_LOCK_SHRINKER)) {
 			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			freed += msm_obj->base.size >> PAGE_SHIFT;
 		}