[v1] ufs: core: Unify function names for clk-scaling

Message ID 20220811012533.19761-1-stanley.chu@mediatek.com
State New
Series [v1] ufs: core: Unify function names for clk-scaling

Commit Message

Stanley Chu Aug. 11, 2022, 1:25 a.m. UTC
Currently, the naming style of the clk-scaling related functions is not
unified. Unify their names for better readability.

This patch does not change any functionality.
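
For reference, the functions renamed by this patch are:

  ufshcd_is_clkscaling_supported()  -> ufshcd_is_clk_scaling_supported()
  ufshcd_clkscale_enable_show()     -> ufshcd_clk_scaling_enable_show()
  ufshcd_clkscale_enable_store()    -> ufshcd_clk_scaling_enable_store()
  ufshcd_suspend_clkscaling()       -> __ufshcd_clk_scaling_suspend()
  ufshcd_resume_clkscaling()        -> __ufshcd_clk_scaling_resume()
  __ufshcd_suspend_clkscaling()     -> ufshcd_devfreq_suspend()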

Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
---
 drivers/ufs/core/ufs-sysfs.c |  3 +-
 drivers/ufs/core/ufshcd.c    | 56 ++++++++++++++++++------------------
 include/ufs/ufshcd.h         |  2 +-
 3 files changed, 31 insertions(+), 30 deletions(-)
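
Not part of the patch itself, just a sketch of how a hypothetical caller
would look after the rename; only the spelling of the predicate changes,
its semantics do not:

	/* Hypothetical caller, for illustration only; assumes <ufs/ufshcd.h>
	 * is included for struct ufs_hba and the renamed predicate.
	 */
	static void example_handle_clk_scaling(struct ufs_hba *hba)
	{
		if (!ufshcd_is_clk_scaling_supported(hba))
			return;

		/* clk-scaling specific handling would go here */
	}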

Patch

diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 0a088b47d557..a64d069db64c 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -225,7 +225,8 @@  static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
 	unsigned int wb_enable;
 	ssize_t res;
 
-	if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
+	if (!ufshcd_is_wb_allowed(hba) ||
+	    ufshcd_is_clk_scaling_supported(hba)) {
 		/*
 		 * If the platform supports UFSHCD_CAP_CLK_SCALING, turn WB
 		 * on/off will be done while clock scaling up/down.
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 581d88af07ab..d2bff2b1bdff 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -253,9 +253,9 @@  static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
-static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
-static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_clk_scaling_resume(struct ufs_hba *hba);
+static void __ufshcd_clk_scaling_suspend(struct ufs_hba *hba);
+static void ufshcd_devfreq_suspend(struct ufs_hba *hba);
 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
@@ -1321,7 +1321,7 @@  static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 	hba->clk_scaling.is_suspended = true;
 	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
-	__ufshcd_suspend_clkscaling(hba);
+	ufshcd_devfreq_suspend(hba);
 }
 
 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
@@ -1352,7 +1352,7 @@  static int ufshcd_devfreq_target(struct device *dev,
 	struct ufs_clk_info *clki;
 	unsigned long irq_flags;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
+	if (!ufshcd_is_clk_scaling_supported(hba))
 		return -EINVAL;
 
 	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
@@ -1409,7 +1409,7 @@  static int ufshcd_devfreq_get_dev_status(struct device *dev,
 	struct ufs_clk_info *clki;
 	ktime_t curr_t;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
+	if (!ufshcd_is_clk_scaling_supported(hba))
 		return -EINVAL;
 
 	memset(stat, 0, sizeof(*stat));
@@ -1498,7 +1498,7 @@  static void ufshcd_devfreq_remove(struct ufs_hba *hba)
 	dev_pm_opp_remove(hba->dev, clki->max_freq);
 }
 
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+static void ufshcd_devfreq_suspend(struct ufs_hba *hba)
 {
 	unsigned long flags;
 
@@ -1508,7 +1508,7 @@  static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
-static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+static void __ufshcd_clk_scaling_suspend(struct ufs_hba *hba)
 {
 	unsigned long flags;
 	bool suspend = false;
@@ -1524,10 +1524,10 @@  static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (suspend)
-		__ufshcd_suspend_clkscaling(hba);
+		ufshcd_devfreq_suspend(hba);
 }
 
-static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+static void __ufshcd_clk_scaling_resume(struct ufs_hba *hba)
 {
 	unsigned long flags;
 	bool resume = false;
@@ -1543,7 +1543,7 @@  static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 		devfreq_resume_device(hba->devfreq);
 }
 
-static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+static ssize_t ufshcd_clk_scaling_enable_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
@@ -1551,7 +1551,7 @@  static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
 	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
 }
 
-static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+static ssize_t ufshcd_clk_scaling_enable_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
@@ -1577,9 +1577,9 @@  static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 	hba->clk_scaling.is_enabled = value;
 
 	if (value) {
-		ufshcd_resume_clkscaling(hba);
+		__ufshcd_clk_scaling_resume(hba);
 	} else {
-		ufshcd_suspend_clkscaling(hba);
+		__ufshcd_clk_scaling_suspend(hba);
 		err = ufshcd_devfreq_scale(hba, true);
 		if (err)
 			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
@@ -1595,8 +1595,8 @@  static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 
 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
 {
-	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
-	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+	hba->clk_scaling.enable_attr.show = ufshcd_clk_scaling_enable_show;
+	hba->clk_scaling.enable_attr.store = ufshcd_clk_scaling_enable_store;
 	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
 	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
 	hba->clk_scaling.enable_attr.attr.mode = 0644;
@@ -1614,7 +1614,7 @@  static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
 {
 	char wq_name[sizeof("ufs_clkscaling_00")];
 
-	if (!ufshcd_is_clkscaling_supported(hba))
+	if (!ufshcd_is_clk_scaling_supported(hba))
 		return;
 
 	if (!hba->clk_scaling.min_gear)
@@ -2016,7 +2016,7 @@  static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 	ktime_t curr_t = ktime_get();
 	unsigned long flags;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
+	if (!ufshcd_is_clk_scaling_supported(hba))
 		return;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2050,7 +2050,7 @@  static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
 	unsigned long flags;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
+	if (!ufshcd_is_clk_scaling_supported(hba))
 		return;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -6055,12 +6055,12 @@  static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
 {
 	if (suspend) {
 		if (hba->clk_scaling.is_enabled)
-			ufshcd_suspend_clkscaling(hba);
+			__ufshcd_clk_scaling_suspend(hba);
 		ufshcd_clk_scaling_allow(hba, false);
 	} else {
 		ufshcd_clk_scaling_allow(hba, true);
 		if (hba->clk_scaling.is_enabled)
-			ufshcd_resume_clkscaling(hba);
+			__ufshcd_clk_scaling_resume(hba);
 	}
 }
 
@@ -6089,9 +6089,9 @@  static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
 		ufshcd_hold(hba, false);
-		if (ufshcd_is_clkscaling_supported(hba) &&
+		if (ufshcd_is_clk_scaling_supported(hba) &&
 		    hba->clk_scaling.is_enabled)
-			ufshcd_suspend_clkscaling(hba);
+			__ufshcd_clk_scaling_suspend(hba);
 		ufshcd_clk_scaling_allow(hba, false);
 	}
 	ufshcd_scsi_block_requests(hba);
@@ -6104,7 +6104,7 @@  static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
 	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_release(hba);
-	if (ufshcd_is_clkscaling_supported(hba))
+	if (ufshcd_is_clk_scaling_supported(hba))
 		ufshcd_clk_scaling_suspend(hba, false);
 	ufshcd_rpm_put(hba);
 }
@@ -8111,7 +8111,7 @@  static int ufshcd_add_lus(struct ufs_hba *hba)
 		goto out;
 
 	/* Initialize devfreq after UFS device is detected */
-	if (ufshcd_is_clkscaling_supported(hba)) {
+	if (ufshcd_is_clk_scaling_supported(hba)) {
 		memcpy(&hba->clk_scaling.saved_pwr_info.info,
 			&hba->pwr_info,
 			sizeof(struct ufs_pa_layer_attr));
@@ -8955,7 +8955,7 @@  static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
-	if (ufshcd_is_clkscaling_supported(hba))
+	if (ufshcd_is_clk_scaling_supported(hba))
 		ufshcd_clk_scaling_suspend(hba, true);
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
@@ -9061,7 +9061,7 @@  static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_scaling:
-	if (ufshcd_is_clkscaling_supported(hba))
+	if (ufshcd_is_clk_scaling_supported(hba))
 		ufshcd_clk_scaling_suspend(hba, false);
 
 	hba->dev_info.b_rpm_dev_flush_capable = false;
@@ -9144,7 +9144,7 @@  static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (hba->ee_usr_mask)
 		ufshcd_write_ee_control(hba);
 
-	if (ufshcd_is_clkscaling_supported(hba))
+	if (ufshcd_is_clk_scaling_supported(hba))
 		ufshcd_clk_scaling_suspend(hba, false);
 
 	if (hba->dev_info.b_rpm_dev_flush_capable) {
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 7fe1a926cd99..5fd99a9cacb7 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -976,7 +976,7 @@  static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
 {
 	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 }
-static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
+static inline int ufshcd_is_clk_scaling_supported(struct ufs_hba *hba)
 {
 	return hba->caps & UFSHCD_CAP_CLK_SCALING;
 }