--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -82,13 +82,6 @@ static int msm_drm_uninit(struct device *dev)
drm_atomic_helper_shutdown(ddev);
}
- /* We must cancel and cleanup any pending vblank enable/disable
- * work before msm_irq_uninstall() to avoid work re-enabling an
- * irq after uninstall has disabled it.
- */
-
- flush_workqueue(priv->wq);
-
msm_gem_shrinker_cleanup(ddev);
msm_perf_debugfs_cleanup(priv);
@@ -104,8 +97,6 @@ static int msm_drm_uninit(struct device *dev)
ddev->dev_private = NULL;
drm_dev_put(ddev);
- destroy_workqueue(priv->wq);
-
return 0;
}
@@ -227,12 +218,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
- priv->wq = alloc_ordered_workqueue("msm", 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto err_put_dev;
- }
-
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
@@ -253,12 +238,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (priv->kms_init) {
ret = drmm_mode_config_init(ddev);
if (ret)
- goto err_destroy_wq;
+ goto err_put_dev;
}
ret = msm_init_vram(ddev);
if (ret)
- goto err_destroy_wq;
+ goto err_put_dev;
dma_set_max_seg_size(dev, UINT_MAX);
@@ -304,8 +289,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
err_deinit_vram:
msm_deinit_vram(ddev);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
BUG_ON(!kms);
+ /* We must cancel and cleanup any pending vblank enable/disable
+ * work before msm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+
+ flush_workqueue(priv->wq);
+
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].worker)
@@ -243,6 +250,8 @@ void msm_drm_kms_uninit(struct device *dev)
if (kms && kms->funcs)
kms->funcs->destroy(kms);
+
+ destroy_workqueue(priv->wq);
}
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
@@ -258,10 +267,14 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
if (ret)
return ret;
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ if (!priv->wq)
+ return -ENOMEM;
+
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- return ret;
+ goto err_msm_uninit;
}
/* Enable normalization of plane zpos */
@@ -319,6 +332,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
return 0;
err_msm_uninit:
+ destroy_workqueue(priv->wq);
return ret;
}
The global workqueue is only used for vblanks inside KMS code. Move
allocation / flushing / deallocation of it to msm_kms.c.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
---
 drivers/gpu/drm/msm/msm_drv.c | 21 ++-------------------
 drivers/gpu/drm/msm/msm_kms.c | 16 +++++++++++++++-
 2 files changed, 17 insertions(+), 20 deletions(-)
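
For context, a minimal, self-contained sketch of the workqueue lifecycle this
patch moves wholly into msm_kms.c. The example_* names and the
vblank_work_example struct are illustrative placeholders, not the driver's
real types; only the workqueue calls (alloc_ordered_workqueue(),
queue_work(), flush_workqueue(), destroy_workqueue()) mirror what the patch
relocates:

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Illustrative stand-in for the driver's per-vblank work item. */
struct vblank_work_example {
	struct work_struct base;
	bool enable;
};

static struct workqueue_struct *example_wq;

static void vblank_work_fn(struct work_struct *work)
{
	struct vblank_work_example *vbl =
		container_of(work, struct vblank_work_example, base);

	/* Enable or disable the vblank irq here. */
	(void)vbl;
}

static int example_kms_init(void)
{
	/* Ordered: at most one item runs at a time, in queueing order. */
	example_wq = alloc_ordered_workqueue("msm", 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_queue_vblank(struct vblank_work_example *vbl, bool enable)
{
	vbl->enable = enable;
	/* A real driver would INIT_WORK() once at setup time. */
	INIT_WORK(&vbl->base, vblank_work_fn);
	queue_work(example_wq, &vbl->base);
}

static void example_kms_uninit(void)
{
	/* Drain any pending vblank work before the irq is uninstalled... */
	flush_workqueue(example_wq);
	/* ...then free the workqueue itself at the end of teardown. */
	destroy_workqueue(example_wq);
}

Because the workqueue is ordered, queued enable/disable items execute one at
a time in submission order, so a single flush_workqueue() before
msm_irq_uninstall() is enough to quiesce it before destroy_workqueue().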