
[RFC,v1,5/8] scsi: Add scsi multipath sysfs hooks

Message ID 20241109044529.992935-6-himanshu.madhani@oracle.com
State New
Series scsi: Multipath support for scsi disk devices.

Commit Message

Himanshu Madhani Nov. 9, 2024, 4:45 a.m. UTC
From: Himanshu Madhani <himanshu.madhani@oracle.com>

Add sysfs hooks to:
- show the current multipath state
- show and update the multipath I/O policy
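
For reference, a minimal user-space sketch of how the new attributes could
be exercised (not part of the patch; the /sys/bus/scsi/devices path and the
0:0:0:0 device address are only illustrative and will differ per setup):

  #include <stdio.h>

  #define SDEV_SYSFS_DIR "/sys/bus/scsi/devices/0:0:0:0"

  int main(void)
  {
  	char state[64];
  	FILE *f;

  	/* Read the current path state, e.g. "active/optimized". */
  	f = fopen(SDEV_SYSFS_DIR "/multipath_state", "r");
  	if (!f || !fgets(state, sizeof(state), f)) {
  		perror("multipath_state");
  		return 1;
  	}
  	fclose(f);
  	printf("state: %s", state);

  	/* Select the Round-Robin I/O policy; sysfs_streq() ignores the '\n'. */
  	f = fopen(SDEV_SYSFS_DIR "/multipath_iopolicy", "w");
  	if (!f || fputs("Round-Robin\n", f) == EOF) {
  		perror("multipath_iopolicy");
  		return 1;
  	}
  	return fclose(f) ? 1 : 0;
  }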

Signed-off-by: Himanshu Madhani <himanshu.madhani@oracle.com>
---
 drivers/scsi/scsi_sysfs.c | 104 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
Patch

diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 32f94db6d6bf..cc7dc5c30d2c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1198,6 +1198,103 @@  sdev_show_preferred_path(struct device *dev,
 static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL);
 #endif
 
+#ifdef CONFIG_SCSI_MULTIPATH
+static const struct {
+	unsigned char	value;
+	const char	*name;
+} scsi_multipath_iopolicy[] = {
+	{ SCSI_MPATH_IOPOLICY_NUMA, "NUMA" },
+	{ SCSI_MPATH_IOPOLICY_RR, "Round-Robin" },
+};
+static const char *scsi_mpath_policy_name(unsigned char policy)
+{
+	int i;
+	const char *name = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(scsi_multipath_iopolicy); i++) {
+		if (scsi_multipath_iopolicy[i].value == policy) {
+			name = scsi_multipath_iopolicy[i].name;
+			break;
+		}
+	}
+	return name;
+}
+
+static ssize_t
+sdev_show_multipath_iopolicy(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	const char *name;
+
+	if (!sdev->mpath_disk)
+		return -EINVAL;
+	name = scsi_mpath_policy_name(sdev->mpath_iopolicy);
+	return sysfs_emit(buf, "%s\n", name ? name : "unknown");
+}
+
+static ssize_t sdev_store_multipath_iopolicy(struct device *dev,
+    struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(scsi_multipath_iopolicy); i++) {
+		if (sysfs_streq(buf, scsi_multipath_iopolicy[i].name)) {
+			scsi_multipath_iopolicy_update(sdev, scsi_multipath_iopolicy[i].value);
+			return count;
+		}
+	}
+
+	return -EINVAL;
+}
+static DEVICE_ATTR(multipath_iopolicy, S_IRUGO | S_IWUSR,
+		   sdev_show_multipath_iopolicy, sdev_store_multipath_iopolicy);
+
+static const struct {
+	unsigned char	value;
+	const char	*name;
+} scsi_mpath_states[] = {
+	{ SCSI_MPATH_OPTIMAL,	"active/optimized" },
+	{ SCSI_MPATH_ACTIVE,	"active/non-optimized" },
+	{ SCSI_MPATH_STANDBY,	"standby" },
+	{ SCSI_MPATH_UNAVAILABLE, "unavailable" },
+	{ SCSI_MPATH_LBA,	"lba-dependent" },
+	{ SCSI_MPATH_OFFLINE,	"offline" },
+	{ SCSI_MPATH_TRANSITIONING, "transitioning" },
+};
+
+static const char *scsi_mpath_state_names(unsigned char state)
+{
+	int i;
+	const char *name = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(scsi_mpath_states); i++) {
+		if (scsi_mpath_states[i].value == state) {
+			name = scsi_mpath_states[i].name;
+			break;
+		}
+	}
+	return name;
+}
+
+static ssize_t
+sdev_show_multipath_state(struct device *dev,
+			  struct device_attribute *attr,
+			  char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	const char *name;
+
+	if (!sdev->mpath_disk)
+		return -EINVAL;
+	name = scsi_mpath_state_names(sdev->mpath_state);
+	return sysfs_emit(buf, "%s\n", name ? name : "unknown");
+}
+static DEVICE_ATTR(multipath_state, S_IRUGO, sdev_show_multipath_state, NULL);
+#endif
+
 static ssize_t
 sdev_show_queue_ramp_up_period(struct device *dev,
 			       struct device_attribute *attr,
@@ -1335,6 +1432,10 @@  static struct attribute *scsi_sdev_attrs[] = {
 	&dev_attr_dh_state.attr,
 	&dev_attr_access_state.attr,
 	&dev_attr_preferred_path.attr,
+#endif
+#ifdef CONFIG_SCSI_MULTIPATH
+	&dev_attr_multipath_iopolicy.attr,
+	&dev_attr_multipath_state.attr,
 #endif
 	&dev_attr_queue_ramp_up_period.attr,
 	&dev_attr_cdl_supported.attr,
@@ -1500,6 +1601,9 @@  void __scsi_remove_device(struct scsi_device *sdev)
 	} else
 		put_device(&sdev->sdev_dev);
 
+	if (scsi_is_sdev_multipath(sdev))
+		scsi_mpath_dev_release(sdev);
+
 	/*
 	 * Stop accepting new requests and wait until all queuecommand() and
 	 * scsi_run_queue() invocations have finished before tearing down the