@@ -654,7 +654,6 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!is_purgeable(msm_obj));
WARN_ON(obj->import_attach);
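
With the struct_mutex assertion gone, what msm_gem_purge() still asserts here is that the object is actually purgeable and that it is not an imported buffer. For orientation, the purgeability test lives in msm_gem.h; the sketch below is an approximation reconstructed from the fields the msm code uses (madv, sgt, import_attach), not a verbatim quote of that header:

    /* Approximate shape of the purgeability check (see msm_gem.h):
     * userspace marked the buffer DONTNEED via madvise, backing pages
     * exist, and the buffer is not shared through dma-buf.
     */
    static inline bool is_purgeable(struct msm_gem_object *msm_obj)
    {
            return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
                    !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
    }
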
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -8,48 +8,13 @@
#include "msm_gem.h"
#include "msm_gpu_trace.h"
-static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- /* NOTE: we are *closer* to being able to get rid of
- * mutex_trylock_recursive().. the msm_gem code itself does
- * not need struct_mutex, although codepaths that can trigger
- * shrinker are still called in code-paths that hold the
- * struct_mutex.
- *
- * Also, msm_obj->madv is protected by struct_mutex.
- *
- * The next step is probably split out a seperate lock for
- * protecting inactive_list, so that shrinker does not need
- * struct_mutex.
- */
- switch (mutex_trylock_recursive(&dev->struct_mutex)) {
- case MUTEX_TRYLOCK_FAILED:
- return false;
-
- case MUTEX_TRYLOCK_SUCCESS:
- *unlock = true;
- return true;
-
- case MUTEX_TRYLOCK_RECURSIVE:
- *unlock = false;
- return true;
- }
-
- BUG();
-}
-
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned long count = 0;
- bool unlock;
-
- if (!msm_gem_shrinker_lock(dev, &unlock))
- return 0;
mutex_lock(&priv->mm_lock);
@@ -60,9 +25,6 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
mutex_unlock(&priv->mm_lock);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
return count;
}
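
Reassembled with the unchanged context, msm_gem_shrinker_count() now reduces to a walk of the inactive list under the new dedicated lock, which is exactly the "separate lock protecting inactive_list" the removed comment was asking for. The function below is a sketch, since the loop body sits outside the hunks shown here; treat the details as approximate:

    static unsigned long
    msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
    {
            struct msm_drm_private *priv =
                    container_of(shrinker, struct msm_drm_private, shrinker);
            struct msm_gem_object *msm_obj;
            unsigned long count = 0;

            mutex_lock(&priv->mm_lock);

            /* inactive_list is protected by mm_lock, not struct_mutex */
            list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                    if (is_purgeable(msm_obj))
                            count += msm_obj->base.size >> PAGE_SHIFT;
            }

            mutex_unlock(&priv->mm_lock);

            return count;
    }

The intent is that nothing allocates memory while holding mm_lock, so the shrinker can simply block on it; the trylock dance removed above, which guarded against re-taking struct_mutex when reclaim was entered from a path already holding it, is no longer needed.
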
@@ -71,13 +33,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned long freed = 0;
- bool unlock;
-
- if (!msm_gem_shrinker_lock(dev, &unlock))
- return SHRINK_STOP;
mutex_lock(&priv->mm_lock);
@@ -92,9 +49,6 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
mutex_unlock(&priv->mm_lock);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
if (freed > 0)
trace_msm_gem_purge(freed << PAGE_SHIFT);
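
The scan path above and the vmap notifier path below follow the same pattern as the count path sketched earlier; only the registration side, which this patch leaves untouched, is not visible in the diff. For context, msm_gem_shrinker_init() hooks the two shrinker callbacks and the vmap purge notifier up roughly as follows (a sketch of the driver's init path, not part of this diff):

    void msm_gem_shrinker_init(struct drm_device *dev)
    {
            struct msm_drm_private *priv = dev->dev_private;

            /* Core MM invokes these under memory pressure */
            priv->shrinker.count_objects = msm_gem_shrinker_count;
            priv->shrinker.scan_objects = msm_gem_shrinker_scan;
            priv->shrinker.seeks = DEFAULT_SEEKS;
            WARN_ON(register_shrinker(&priv->shrinker));

            /* Called when vmalloc address space runs low; handled below */
            priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
            WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
    }
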
@@ -106,13 +60,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
- struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned unmapped = 0;
- bool unlock;
-
- if (!msm_gem_shrinker_lock(dev, &unlock))
- return NOTIFY_DONE;
mutex_lock(&priv->mm_lock);
@@ -130,9 +79,6 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
mutex_unlock(&priv->mm_lock);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
*(unsigned long *)ptr += unmapped;
if (unmapped > 0)