@@ -1604,3 +1604,85 @@ Description:
prevent the UFS from frequently performing clock gating/ungating.
The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/hid_trigger
+What: /sys/bus/platform/devices/*.ufs/hid_trigger
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host can enable or disable the complete HID flow.
+
+ ======== =============================================================
+ enable   Schedule kworker(ufs_hid_enable_work) to run the complete HID flow
+ disable  Cancel kworker(ufs_hid_enable_work) and disable HID
+ ======== =============================================================
+
+ The attribute is write only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/defrag_operation
+What: /sys/bus/platform/devices/*.ufs/attributes/defrag_operation
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host can enable or disable HID analysis and HID defrag operations.
+
+ =============== ==================================================
+ all_disable HID analysis and HID defrag operation are disabled
+ analysis_enable HID analysis is enabled
+ all_enable HID analysis and HID defrag operation are enabled
+ =============== ==================================================
+
+ The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/hid_available_size
+What: /sys/bus/platform/devices/*.ufs/attributes/hid_available_size
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The total fragmented size in the device is reported through this
+ attribute.
+
+ The attribute is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/hid_size
+What: /sys/bus/platform/devices/*.ufs/attributes/hid_size
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The host sets the size to be defragmented by an HID defrag operation.
+
+ The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/hid_progress_ratio
+What: /sys/bus/platform/devices/*.ufs/attributes/hid_progress_ratio
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ Defrag progress is reported by this attribute. It indicates the
+ ratio of the completed defrag size to the requested defrag size.
+
+ ==== ======================================
+ 01h 1%
+ ...
+ 64h 100%
+ ==== ======================================
+
+ The attribute is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/attributes/hid_state
+What: /sys/bus/platform/devices/*.ufs/attributes/hid_state
+Date: April 2025
+Contact: Huan Tang <tanghuan@vivo.com>
+Description:
+ The HID state is reported by this attribute.
+
+ ==== ======================================
+ 00h Idle (analysis required)
+ 01h Analysis in progress
+ 02h Defrag required
+ 03h Defrag in progress
+ 04h Defrag completed
+ 05h Defrag is not required
+ ==== ======================================
+
+ The attribute is read only.
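Taken together, the entries above describe a simple user-space flow: write "enable" to hid_trigger, then poll hid_state and hid_progress_ratio under the attributes group until the device reports that defrag has completed or is not required. The sketch below illustrates that flow. It is not part of the patch; the platform device name ("1d84000.ufs"), the helper names, and the assumption that the attribute files read back in the usual "0x%08X" format are illustrative only.

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	/* Placeholder device path; substitute the real platform device name. */
	#define UFS_SYSFS "/sys/bus/platform/devices/1d84000.ufs"

	static int sysfs_write(const char *file, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), UFS_SYSFS "/%s", file);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	static long sysfs_read_num(const char *file)
	{
		char path[256], buf[64];
		FILE *f;

		snprintf(path, sizeof(path), UFS_SYSFS "/%s", file);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (!fgets(buf, sizeof(buf), f))
			buf[0] = '\0';
		fclose(f);
		return strtol(buf, NULL, 0);	/* accepts "0x..." or decimal */
	}

	int main(void)
	{
		long state;
		int i;

		/* Kick off the complete HID flow handled by ufs_hid_enable_work. */
		if (sysfs_write("hid_trigger", "enable"))
			return 1;

		/* Poll state and progress at the driver's one-second cadence. */
		for (i = 0; i < 300; i++) {
			sleep(1);
			state = sysfs_read_num("attributes/hid_state");
			printf("hid_state=%ld progress=%ld%%\n", state,
			       sysfs_read_num("attributes/hid_progress_ratio"));
			if (state < 0 || state == 4 || state == 5)
				break;	/* error, defrag completed, or not required */
		}
		return 0;
	}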
@@ -50,3 +50,12 @@ config SCSI_UFS_HWMON
a hardware monitoring device will be created for the UFS device.
If unsure, say N.
+
+config SCSI_UFS_HID
+ bool "Support UFS Host Initiated Defrag"
+ help
+ The UFS HID feature allows the host to query whether the device
+ needs defragmentation and to explicitly start the device's
+ internal defrag operation.
+
+ If unsure, say N.
@@ -7,3 +7,4 @@ ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HID) += ufs-hid.o
new file mode 100644
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Flash Storage Host Initiated Defragmentation
+ * Authors:
+ * Huan Tang <tanghuan@vivo.com>
+ * Wenxing Cheng <wenxing.cheng@vivo.com>
+ */
+
+#include <linux/unaligned.h>
+#include <linux/device.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/container_of.h>
+#include "ufshcd-priv.h"
+
+/* Give up re-arming the HID enable work after this many one-second polls. */
+#define HID_SCHED_COUNT_LIMIT 300
+
+static int hid_sched_cnt;
+
+static void ufs_hid_enable_work_fn(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int ret;
+ u32 defrag_op;
+ u32 hid_state;
+ u32 hid_ahit;
+ bool hid_flag = false;
+
+ hba = container_of(work, struct ufs_hba, ufs_hid_enable_work.work);
+
+ if (!hba->dev_info.hid_sup)
+ return;
+
+ down(&hba->host_sem);
+
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ hid_ahit = hba->ahit;
+ ufshcd_auto_hibern8_update(hba, 0);
+
+ /* Refresh the cached HID state; treat a query failure as idle. */
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_STATE, 0, 0, &hid_state);
+ if (ret)
+ hid_state = HID_IDLE;
+ hba->dev_info.hid_state = hid_state;
+
+ switch (hba->dev_info.hid_state) {
+ case HID_IDLE:
+ defrag_op = HID_ANALYSIS_ENABLE;
+ hid_flag = true;
+ break;
+ case DEFRAG_REQUIRED:
+ defrag_op = HID_ANALYSIS_AND_DEFRAG_ENABLE;
+ hid_flag = true;
+ break;
+ case DEFRAG_COMPLETED:
+ case DEFRAG_IS_NOT_REQUIRED:
+ defrag_op = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ hid_flag = true;
+ break;
+ case ANALYSIS_IN_PROGRESS:
+ case DEFRAG_IN_PROGRESS:
+ default:
+ break;
+ }
+
+ if (hid_flag) {
+ ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, 0, 0, &defrag_op);
+ hid_flag = false;
+ }
+ /* Re-read the state to decide whether another polling pass is needed. */
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_STATE, 0, 0, &hid_state);
+ if (ret)
+ hid_state = HID_IDLE;
+ hba->dev_info.hid_state = hid_state;
+
+ ufshcd_auto_hibern8_update(hba, hid_ahit);
+ ufshcd_rpm_put_sync(hba);
+ up(&hba->host_sem);
+
+ if (hba->dev_info.hid_state != HID_IDLE &&
+ hid_sched_cnt++ < HID_SCHED_COUNT_LIMIT)
+ schedule_delayed_work(&hba->ufs_hid_enable_work,
+ msecs_to_jiffies(1000));
+ else
+ hid_sched_cnt = 0;
+}
+
+int ufs_hid_disable(struct ufs_hba *hba)
+{
+ u32 defrag_op = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ u32 hid_ahit;
+ int ret;
+
+ down(&hba->host_sem);
+
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ hid_ahit = hba->ahit;
+ ufshcd_auto_hibern8_update(hba, 0);
+
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, 0, 0, &defrag_op);
+
+ ufshcd_auto_hibern8_update(hba, hid_ahit);
+ ufshcd_rpm_put_sync(hba);
+ up(&hba->host_sem);
+
+ return ret;
+}
+
+void ufs_hid_init(struct ufs_hba *hba, const u8 *desc_buf)
+{
+ u32 ext_ufs_feature;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+ if (ext_ufs_feature & UFS_DEV_HID_SUPPORT) {
+ dev_info->hid_sup = true;
+ INIT_DELAYED_WORK(&hba->ufs_hid_enable_work,
+ ufs_hid_enable_work_fn);
+ }
+}
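One note on the rescheduling logic above: hid_sched_cnt is a file-scope static, so on a system with more than one UFS host all controllers share a single retry budget. A per-HBA variant is sketched below; the hid_sched_cnt member of struct ufs_hba is an assumption of the sketch and does not exist in this patch.

	/*
	 * Sketch only: assumes a "u32 hid_sched_cnt" counter were added to
	 * struct ufs_hba next to ufs_hid_enable_work.
	 */
	if (hba->dev_info.hid_state != HID_IDLE &&
	    hba->hid_sched_cnt++ < HID_SCHED_COUNT_LIMIT)
		schedule_delayed_work(&hba->ufs_hid_enable_work,
				      msecs_to_jiffies(1000));
	else
		hba->hid_sched_cnt = 0;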
@@ -81,6 +81,21 @@ static const char *ufshcd_ufs_dev_pwr_mode_to_string(
}
}
+static const char *ufs_hid_defrag_operation_to_string(
+ enum ufs_hid_defrag_operation op)
+{
+ switch (op) {
+ case HID_ANALYSIS_AND_DEFRAG_DISABLE:
+ return "all_disable";
+ case HID_ANALYSIS_ENABLE:
+ return "analysis_enable";
+ case HID_ANALYSIS_AND_DEFRAG_ENABLE:
+ return "all_enable";
+ default:
+ return "unknown";
+ }
+}
+
static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count,
@@ -466,6 +481,34 @@ static ssize_t critical_health_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hba->critical_health_count);
}
+static const char * const hid_trigger_mode[] = {
+ "disable", "enable"
+};
+
+static ssize_t hid_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ int ret = 0;
+
+ if (!hba->dev_info.hid_sup)
+ return -EOPNOTSUPP;
+
+ mode = sysfs_match_string(hid_trigger_mode, buf);
+ if (mode < 0)
+ return -EINVAL;
+
+ if (mode) {
+ schedule_delayed_work(&hba->ufs_hid_enable_work, 0);
+ } else {
+ cancel_delayed_work_sync(&hba->ufs_hid_enable_work);
+ ret = ufs_hid_disable(hba);
+ }
+
+ return ret < 0 ? ret : count;
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -479,6 +522,7 @@ static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_WO(hid_trigger);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -494,6 +538,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
&dev_attr_critical_health.attr,
+ &dev_attr_hid_trigger.attr,
NULL
};
@@ -1495,6 +1540,101 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
+static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u32 *attr_val)
+{
+ int ret;
+
+ if (!hba->dev_info.hid_sup)
+ return -EOPNOTSUPP;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t defrag_operation_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_hid_defrag_operation_to_string(value));
+}
+
+static const char * const hid_defrag_operation[] = {
+ [HID_ANALYSIS_AND_DEFRAG_DISABLE] = "all_disable",
+ [HID_ANALYSIS_ENABLE] = "analysis_enable",
+ [HID_ANALYSIS_AND_DEFRAG_ENABLE] = "all_enable",
+};
+
+static ssize_t defrag_operation_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int defrag_op;
+ u32 value;
+ int ret;
+
+ defrag_op = sysfs_match_string(hid_defrag_operation, buf);
+ if (defrag_op < 0)
+ return -EINVAL;
+
+ value = defrag_op;
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &value);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(defrag_operation);
+
+static ssize_t hid_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%08X\n", value);
+}
+
+static ssize_t hid_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(hid_size);
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -1545,7 +1685,9 @@ UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
-
+UFS_ATTRIBUTE(hid_available_size, _HID_AVAILABLE_SIZE);
+UFS_ATTRIBUTE(hid_progress_ratio, _HID_PROGRESS_RATIO);
+UFS_ATTRIBUTE(hid_state, _HID_STATE);
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
@@ -1568,6 +1710,11 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_wb_avail_buf.attr,
&dev_attr_wb_life_time_est.attr,
&dev_attr_wb_cur_buf.attr,
+ &dev_attr_defrag_operation.attr,
+ &dev_attr_hid_available_size.attr,
+ &dev_attr_hid_size.attr,
+ &dev_attr_hid_progress_ratio.attr,
+ &dev_attr_hid_state.attr,
NULL,
};
@@ -42,6 +42,14 @@ static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
#endif
+#ifdef CONFIG_SCSI_UFS_HID
+void ufs_hid_init(struct ufs_hba *hba, const u8 *desc_buf);
+int ufs_hid_disable(struct ufs_hba *hba);
+#else
+static inline void ufs_hid_init(struct ufs_hba *hba, const u8 *desc_buf) {}
+static inline int ufs_hid_disable(struct ufs_hba *hba) { return 0; }
+#endif
+
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
enum desc_idn idn, u8 index,
@@ -6536,6 +6536,9 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_uic_err, hba->force_reset,
ufshcd_is_link_broken(hba) ? "; link is broken" : "");
+ /* The HID work queries the device; stop it before starting recovery. */
+ if (hba->dev_info.hid_sup)
+ cancel_delayed_work_sync(&hba->ufs_hid_enable_work);
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
@@ -8317,6 +8320,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ ufs_hid_init(hba, desc_buf);
+
if (dev_info->wspecversion >= 0x410) {
hba->critical_health_count = 0;
ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
@@ -9614,6 +9619,8 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
+ /* Stop the HID work before the device is put into a low power state. */
+ if (hba->dev_info.hid_sup)
+ cancel_delayed_work_sync(&hba->ufs_hid_enable_work);
+
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@@ -180,7 +180,12 @@ enum attr_idn {
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
- QUERY_ATTR_IDN_TIMESTAMP = 0x30
+ QUERY_ATTR_IDN_TIMESTAMP = 0x30,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION = 0x35,
+ QUERY_ATTR_IDN_HID_AVAILABLE_SIZE = 0x36,
+ QUERY_ATTR_IDN_HID_SIZE = 0x37,
+ QUERY_ATTR_IDN_HID_PROGRESS_RATIO = 0x38,
+ QUERY_ATTR_IDN_HID_STATE = 0x39,
};
/* Descriptor idn for Query requests */
@@ -390,6 +395,7 @@ enum {
UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+ UFS_DEV_HID_SUPPORT = BIT(13),
};
#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
@@ -454,6 +460,23 @@ enum ufs_ref_clk_freq {
REF_CLK_FREQ_INVAL = -1,
};
+/* bDefragOperation attribute values */
+enum ufs_hid_defrag_operation {
+ HID_ANALYSIS_AND_DEFRAG_DISABLE = 0,
+ HID_ANALYSIS_ENABLE = 1,
+ HID_ANALYSIS_AND_DEFRAG_ENABLE = 2,
+};
+
+/* bHIDState attribute values */
+enum ufs_hid_state {
+ HID_IDLE = 0,
+ ANALYSIS_IN_PROGRESS = 1,
+ DEFRAG_REQUIRED = 2,
+ DEFRAG_IN_PROGRESS = 3,
+ DEFRAG_COMPLETED = 4,
+ DEFRAG_IS_NOT_REQUIRED = 5,
+};
+
/* Query response result code */
enum {
QUERY_RESULT_SUCCESS = 0x00,
@@ -590,6 +613,9 @@ struct ufs_dev_info {
u32 rtc_update_period;
u8 rtt_cap; /* bDeviceRTTCap */
+
+ bool hid_sup;
+ enum ufs_hid_state hid_state;
};
/*
@@ -1138,6 +1138,8 @@ struct ufs_hba {
bool pm_qos_enabled;
int critical_health_count;
+
+ struct delayed_work ufs_hid_enable_work;
};
/**