
[v3,4/9] PM / QoS: Decouple request alloc from dev_pm_qos_mtx

Message ID 20230803220202.78036-5-robdclark@gmail.com
State New
Series drm/msm+PM+icc: Make job_run() reclaim-safe

Commit Message

Rob Clark Aug. 3, 2023, 10:01 p.m. UTC
From: Rob Clark <robdclark@chromium.org>

Similar to the previous patch, move the allocation out from under
dev_pm_qos_mtx by performing it speculatively, and handle any race
detected after acquiring dev_pm_qos_mtx by freeing the redundant
allocation.

Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/base/power/qos.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
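
For context, the change applies the common "allocate speculatively before
taking the lock, free the redundant copy if another thread won the race"
idiom. A minimal user-space sketch of that idiom (hypothetical names, not
the kernel code itself):

#include <pthread.h>
#include <stdlib.h>

struct lazy_obj {
	pthread_mutex_t lock;
	void *obj;		/* lazily allocated, protected by lock */
};

static void *lazy_obj_get(struct lazy_obj *c, size_t size)
{
	void *fresh = NULL;

	/* The allocation may sleep (or recurse into reclaim), so do it unlocked. */
	if (!c->obj)
		fresh = calloc(1, size);

	pthread_mutex_lock(&c->lock);
	if (!c->obj) {
		/* We won the race (or nobody raced): install our allocation. */
		c->obj = fresh;
	} else {
		/* Lost the race: drop the redundant allocation. */
		free(fresh);
	}
	pthread_mutex_unlock(&c->lock);

	return c->obj;
}

The patch below follows the same shape, with the extra wrinkle that its
early-exit paths also need to dispose of the unused allocation (see the
review discussion that follows).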

Comments

Dan Carpenter Aug. 4, 2023, 9:29 a.m. UTC | #1
Hi Rob,

kernel test robot noticed the following build warnings:

url:    https://github.com/intel-lab-lkp/linux/commits/Rob-Clark/PM-devfreq-Drop-unneed-locking-to-appease-lockdep/20230804-060505
base:   git://anongit.freedesktop.org/drm/drm-misc drm-misc-next
patch link:    https://lore.kernel.org/r/20230803220202.78036-5-robdclark%40gmail.com
patch subject: [PATCH v3 4/9] PM / QoS: Decouple request alloc from dev_pm_qos_mtx
config: i386-randconfig-m021-20230730 (https://download.01.org/0day-ci/archive/20230804/202308041725.Npwswh0L-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce: (https://download.01.org/0day-ci/archive/20230804/202308041725.Npwswh0L-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
| Closes: https://lore.kernel.org/r/202308041725.Npwswh0L-lkp@intel.com/

smatch warnings:
drivers/base/power/qos.c:973 dev_pm_qos_update_user_latency_tolerance() warn: possible memory leak of 'req'

vim +/req +973 drivers/base/power/qos.c

2d984ad132a87c Rafael J. Wysocki 2014-02-11  923  int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
2d984ad132a87c Rafael J. Wysocki 2014-02-11  924  {
b5ac35ff4296f7 Rob Clark         2023-08-03  925  	struct dev_pm_qos_request *req = NULL;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  926  	int ret;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  927  
211fb32e3a0547 Rob Clark         2023-08-03  928  	ret = dev_pm_qos_constraints_ensure_allocated(dev);
211fb32e3a0547 Rob Clark         2023-08-03  929  	if (ret)
211fb32e3a0547 Rob Clark         2023-08-03  930  		return ret;
211fb32e3a0547 Rob Clark         2023-08-03  931  
b5ac35ff4296f7 Rob Clark         2023-08-03  932  	if (!dev->power.qos->latency_tolerance_req)
b5ac35ff4296f7 Rob Clark         2023-08-03  933  		req = kzalloc(sizeof(*req), GFP_KERNEL);
b5ac35ff4296f7 Rob Clark         2023-08-03  934  
2d984ad132a87c Rafael J. Wysocki 2014-02-11  935  	mutex_lock(&dev_pm_qos_mtx);
2d984ad132a87c Rafael J. Wysocki 2014-02-11  936  
211fb32e3a0547 Rob Clark         2023-08-03  937  	if (!dev->power.qos->latency_tolerance_req) {
2d984ad132a87c Rafael J. Wysocki 2014-02-11  938  		if (val < 0) {
80a6f7c79b7822 Andrew Lutomirski 2016-11-29  939  			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
80a6f7c79b7822 Andrew Lutomirski 2016-11-29  940  				ret = 0;
80a6f7c79b7822 Andrew Lutomirski 2016-11-29  941  			else
2d984ad132a87c Rafael J. Wysocki 2014-02-11  942  				ret = -EINVAL;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  943  			goto out;

kfree(req);?

2d984ad132a87c Rafael J. Wysocki 2014-02-11  944  		}
2d984ad132a87c Rafael J. Wysocki 2014-02-11  945  		if (!req) {
2d984ad132a87c Rafael J. Wysocki 2014-02-11  946  			ret = -ENOMEM;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  947  			goto out;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  948  		}
2d984ad132a87c Rafael J. Wysocki 2014-02-11  949  		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
2d984ad132a87c Rafael J. Wysocki 2014-02-11  950  		if (ret < 0) {
2d984ad132a87c Rafael J. Wysocki 2014-02-11  951  			kfree(req);
2d984ad132a87c Rafael J. Wysocki 2014-02-11  952  			goto out;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  953  		}
2d984ad132a87c Rafael J. Wysocki 2014-02-11  954  		dev->power.qos->latency_tolerance_req = req;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  955  	} else {
b5ac35ff4296f7 Rob Clark         2023-08-03  956  		/*
b5ac35ff4296f7 Rob Clark         2023-08-03  957  		 * If we raced with another thread to allocate the request,
b5ac35ff4296f7 Rob Clark         2023-08-03  958  		 * simply free the redundant allocation and move on.
b5ac35ff4296f7 Rob Clark         2023-08-03  959  		 */
b5ac35ff4296f7 Rob Clark         2023-08-03  960  		if (req)
b5ac35ff4296f7 Rob Clark         2023-08-03  961  			kfree(req);
b5ac35ff4296f7 Rob Clark         2023-08-03  962  
2d984ad132a87c Rafael J. Wysocki 2014-02-11  963  		if (val < 0) {
2d984ad132a87c Rafael J. Wysocki 2014-02-11  964  			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
2d984ad132a87c Rafael J. Wysocki 2014-02-11  965  			ret = 0;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  966  		} else {
2d984ad132a87c Rafael J. Wysocki 2014-02-11  967  			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
2d984ad132a87c Rafael J. Wysocki 2014-02-11  968  		}
2d984ad132a87c Rafael J. Wysocki 2014-02-11  969  	}
2d984ad132a87c Rafael J. Wysocki 2014-02-11  970  
2d984ad132a87c Rafael J. Wysocki 2014-02-11  971   out:
2d984ad132a87c Rafael J. Wysocki 2014-02-11  972  	mutex_unlock(&dev_pm_qos_mtx);
2d984ad132a87c Rafael J. Wysocki 2014-02-11 @973  	return ret;
2d984ad132a87c Rafael J. Wysocki 2014-02-11  974  }
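
One way the leak flagged above could be plugged is to free the speculative
allocation on the val < 0 early-exit path as well. An untested sketch of
that branch (not necessarily what a later revision of this patch does):

	if (!dev->power.qos->latency_tolerance_req) {
		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			/* 'req' was allocated above but never stored, so free it */
			kfree(req);
			goto out;
		}

Since kfree(NULL) is a no-op, the call is safe even when no speculative
allocation was made. Alternatively, the kfree() could live under the "out:"
label, provided it is skipped (or req is cleared) on the path where the
request was successfully stored in dev->power.qos->latency_tolerance_req.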

Patch

diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index f3e0c6b65635..4537d93ddb45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -922,17 +922,19 @@  s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
  */
 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
 {
+	struct dev_pm_qos_request *req = NULL;
 	int ret;
 
 	ret = dev_pm_qos_constraints_ensure_allocated(dev);
 	if (ret)
 		return ret;
 
+	if (!dev->power.qos->latency_tolerance_req)
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	if (!dev->power.qos->latency_tolerance_req) {
-		struct dev_pm_qos_request *req;
-
 		if (val < 0) {
 			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
 				ret = 0;
@@ -940,7 +942,6 @@  int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
 				ret = -EINVAL;
 			goto out;
 		}
-		req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req) {
 			ret = -ENOMEM;
 			goto out;
@@ -952,6 +953,13 @@  int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
 		}
 		dev->power.qos->latency_tolerance_req = req;
 	} else {
+		/*
+		 * If we raced with another thread to allocate the request,
+		 * simply free the redundant allocation and move on.
+		 */
+		if (req)
+			kfree(req);
+
 		if (val < 0) {
 			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
 			ret = 0;