new file mode 100644
@@ -0,0 +1,69 @@
+What: /sys/bus/edac/devices/<dev-name>/scrub
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ The sysfs edac bus devices /<dev-name>/scrub subdirectory
+ belongs to the memory scrub control feature, where <dev-name>
+ directory corresponds to a device/memory region registered
+ with the edac scrub driver and thus registered with the
+ generic edac ras driver too.
+ The sysfs scrub attr nodes are present only if the
+ client driver has implemented the corresponding attr
+ callback function and passed the operations (ops) to the
+ EDAC RAS feature driver during registration.
+
+What: /sys/bus/edac/devices/<dev-name>/scrub/addr_range_base
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The base of the address range of the memory region
+ to be scrubbed (on-demand scrubbing).
+
+What: /sys/bus/edac/devices/<dev-name>/scrub/addr_range_size
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The size of the address range of the memory region
+ to be scrubbed (on-demand scrubbing).
+
+What: /sys/bus/edac/devices/<dev-name>/scrub/enable_background
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) Start/Stop background (patrol) scrubbing if supported.
+
+What: /sys/bus/edac/devices/<dev-name>/scrub/enable_on_demand
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) Start/Stop on-demand scrubbing the memory region
+ if supported.
+
+What: /sys/bus/edac/devices/<dev-name>/scrub/min_cycle_duration
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RO) Minimum scrub cycle duration in seconds supported by
+ the memory scrubber.
+
+What: /sys/bus/edac/devices/<dev-name>/scrub/max_cycle_duration
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RO) Maximum scrub cycle duration in seconds supported by
+ the memory scrubber.
+
+What: /sys/bus/edac/devices/<dev-name>/scrub/current_cycle_duration
+Date: Oct 2024
+KernelVersion: 6.12
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The current scrub cycle duration in seconds, which must
+ be within the range supported by the memory scrubber.
@@ -10,6 +10,7 @@ obj-$(CONFIG_EDAC) := edac_core.o
edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
edac_core-y += edac_module.o edac_device_sysfs.o wq.o
+edac_core-y += edac_scrub.o
edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o
@@ -612,6 +612,7 @@ static int edac_dev_feat_init(struct device *parent,
case RAS_FEAT_SCRUB:
dev_data->scrub_ops = ras_feat->scrub_ops;
dev_data->private = ras_feat->scrub_ctx;
+ attr_groups[0] = edac_scrub_get_desc();
return 1;
case RAS_FEAT_ECS:
num = ras_feat->ecs_info.num_media_frus;
new file mode 100644
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic EDAC scrub driver supports controlling the memory
+ * scrubbers in the system and the common sysfs scrub interface
+ * promotes unambiguous access from the userspace.
+ *
+ * Copyright (c) 2024 HiSilicon Limited.
+ */
+
+#define pr_fmt(fmt) "EDAC SCRUB: " fmt
+
+#include <linux/edac.h>
+
+static ssize_t addr_range_base_show(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ u64 base, size;
+ int ret;
+
+ ret = ops->read_range(ras_feat_dev->parent, ctx->scrub.private, &base, &size);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%llx\n", base);
+}
+
+static ssize_t addr_range_size_show(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ u64 base, size;
+ int ret;
+
+ ret = ops->read_range(ras_feat_dev->parent, ctx->scrub.private, &base, &size);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%llx\n", size);
+}
+
+static ssize_t addr_range_base_store(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ u64 base, size;
+ int ret;
+
+ ret = ops->read_range(ras_feat_dev->parent, ctx->scrub.private, &base, &size);
+ if (ret)
+ return ret;
+
+ ret = kstrtou64(buf, 0, &base);
+ if (ret < 0)
+ return ret;
+
+ ret = ops->write_range(ras_feat_dev->parent, ctx->scrub.private, base, size);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t addr_range_size_store(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ u64 base, size;
+ int ret;
+
+ ret = ops->read_range(ras_feat_dev->parent, ctx->scrub.private, &base, &size);
+ if (ret)
+ return ret;
+
+ ret = kstrtou64(buf, 0, &size);
+ if (ret < 0)
+ return ret;
+
+ ret = ops->write_range(ras_feat_dev->parent, ctx->scrub.private, base, size);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t enable_background_store(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ ret = ops->set_enabled_bg(ras_feat_dev->parent, ctx->scrub.private, enable);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t enable_background_show(struct device *ras_feat_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ bool enable;
+ int ret;
+
+ ret = ops->get_enabled_bg(ras_feat_dev->parent, ctx->scrub.private, &enable);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", enable);
+}
+
+static ssize_t enable_on_demand_show(struct device *ras_feat_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ bool enable;
+ int ret;
+
+ ret = ops->get_enabled_od(ras_feat_dev->parent, ctx->scrub.private, &enable);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", enable);
+}
+
+static ssize_t enable_on_demand_store(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ ret = ops->set_enabled_od(ras_feat_dev->parent, ctx->scrub.private, enable);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t min_cycle_duration_show(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ u32 val;
+ int ret;
+
+ ret = ops->min_cycle_read(ras_feat_dev->parent, ctx->scrub.private, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t max_cycle_duration_show(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ u32 val;
+ int ret;
+
+ ret = ops->max_cycle_read(ras_feat_dev->parent, ctx->scrub.private, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t current_cycle_duration_show(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+ u32 val;
+ int ret;
+
+ ret = ops->cycle_duration_read(ras_feat_dev->parent, ctx->scrub.private, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t current_cycle_duration_store(struct device *ras_feat_dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+	struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+	const struct edac_scrub_ops *ops = ctx->scrub.scrub_ops;
+	u32 val;
+	int ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	ret = ops->cycle_duration_write(ras_feat_dev->parent, ctx->scrub.private, val);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static DEVICE_ATTR_RW(addr_range_base);
+static DEVICE_ATTR_RW(addr_range_size);
+static DEVICE_ATTR_RW(enable_background);
+static DEVICE_ATTR_RW(enable_on_demand);
+static DEVICE_ATTR_RO(min_cycle_duration);
+static DEVICE_ATTR_RO(max_cycle_duration);
+static DEVICE_ATTR_RW(current_cycle_duration);
+
+static struct attribute *scrub_attrs[] = {
+ &dev_attr_addr_range_base.attr,
+ &dev_attr_addr_range_size.attr,
+ &dev_attr_enable_background.attr,
+ &dev_attr_enable_on_demand.attr,
+ &dev_attr_min_cycle_duration.attr,
+ &dev_attr_max_cycle_duration.attr,
+ &dev_attr_current_cycle_duration.attr,
+ NULL
+};
+
+static umode_t scrub_attr_visible(struct kobject *kobj,
+ struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct edac_dev_feat_ctx *ctx;
+ const struct edac_scrub_ops *ops;
+
+ ctx = dev_get_drvdata(ras_feat_dev);
+ if (!ctx)
+ return 0;
+
+ ops = ctx->scrub.scrub_ops;
+ if (a == &dev_attr_addr_range_base.attr ||
+ a == &dev_attr_addr_range_size.attr) {
+ if (ops->read_range && ops->write_range)
+ return a->mode;
+ if (ops->read_range)
+ return 0444;
+ return 0;
+ }
+ if (a == &dev_attr_enable_background.attr) {
+ if (ops->set_enabled_bg && ops->get_enabled_bg)
+ return a->mode;
+ if (ops->get_enabled_bg)
+ return 0444;
+ return 0;
+ }
+ if (a == &dev_attr_enable_on_demand.attr) {
+ if (ops->set_enabled_od && ops->get_enabled_od)
+ return a->mode;
+ if (ops->get_enabled_od)
+ return 0444;
+ return 0;
+ }
+ if (a == &dev_attr_min_cycle_duration.attr)
+ return ops->min_cycle_read ? a->mode : 0;
+ if (a == &dev_attr_max_cycle_duration.attr)
+ return ops->max_cycle_read ? a->mode : 0;
+ if (a == &dev_attr_current_cycle_duration.attr) {
+ if (ops->cycle_duration_read && ops->cycle_duration_write)
+ return a->mode;
+ if (ops->cycle_duration_read)
+ return 0444;
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct attribute_group scrub_attr_group = {
+ .name = "scrub",
+ .attrs = scrub_attrs,
+ .is_visible = scrub_attr_visible,
+};
+
+/**
+ * edac_scrub_get_desc - get edac scrub's attr descriptor
+ *
+ * Return: attribute_group for the scrub feature.
+ */
+const struct attribute_group *edac_scrub_get_desc(void)
+{
+	return &scrub_attr_group;
+}
@@ -673,6 +673,34 @@ enum edac_dev_feat {
RAS_FEAT_MAX
};
+/**
+ * struct edac_scrub_ops - scrub device operations (all elements optional)
+ * @read_range: read base and size of scrubbing range.
+ * @write_range: set the base and size of the scrubbing range.
+ * @get_enabled_bg: check if currently performing background scrub.
+ * @set_enabled_bg: start or stop a bg-scrub.
+ * @get_enabled_od: check if currently performing on-demand scrub.
+ * @set_enabled_od: start or stop an on-demand scrub.
+ * @min_cycle_read: minimum supported scrub cycle duration in seconds.
+ * @max_cycle_read: maximum supported scrub cycle duration in seconds.
+ * @cycle_duration_read: get the scrub cycle duration in seconds.
+ * @cycle_duration_write: set the scrub cycle duration in seconds.
+ */
+struct edac_scrub_ops {
+	int (*read_range)(struct device *dev, void *drv_data, u64 *base, u64 *size);
+	int (*write_range)(struct device *dev, void *drv_data, u64 base, u64 size);
+	int (*get_enabled_bg)(struct device *dev, void *drv_data, bool *enable);
+	int (*set_enabled_bg)(struct device *dev, void *drv_data, bool enable);
+	int (*get_enabled_od)(struct device *dev, void *drv_data, bool *enable);
+	int (*set_enabled_od)(struct device *dev, void *drv_data, bool enable);
+	int (*min_cycle_read)(struct device *dev, void *drv_data, u32 *min);
+	int (*max_cycle_read)(struct device *dev, void *drv_data, u32 *max);
+	int (*cycle_duration_read)(struct device *dev, void *drv_data, u32 *cycle);
+	int (*cycle_duration_write)(struct device *dev, void *drv_data, u32 cycle);
+};
+
+const struct attribute_group *edac_scrub_get_desc(void);
+
struct edac_ecs_ex_info {
u16 num_media_frus;
};