@@ -3267,10 +3267,117 @@ static void arm_smmu_domain_nested_free(struct iommu_domain *domain)
kfree(container_of(domain, struct arm_smmu_nested_domain, domain));
}

+/*
+ * Convert, in place, the raw invalidation command into an internal format that
+ * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
+ * stored in CPU endian.
+ *
+ * Enforce the VMID on the command.
+ */
+static int
+arm_smmu_convert_user_cmd(struct arm_smmu_domain *s2_parent,
+ struct iommu_hwpt_arm_smmuv3_invalidate *cmd)
+{
+ u16 vmid = s2_parent->s2_cfg.vmid;
+
+ cmd->cmd[0] = le64_to_cpu(cmd->cmd[0]);
+ cmd->cmd[1] = le64_to_cpu(cmd->cmd[1]);
+
+ switch (cmd->cmd[0] & CMDQ_0_OP) {
+ case CMDQ_OP_TLBI_NSNH_ALL:
+		/* Convert an NSNH_ALL to an NH_ALL scoped to this VMID */
+ cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
+ FIELD_PREP(CMDQ_TLBI_0_VMID, vmid);
+ cmd->cmd[1] = 0;
+ break;
+ case CMDQ_OP_TLBI_NH_VA:
+ case CMDQ_OP_TLBI_NH_VAA:
+ case CMDQ_OP_TLBI_NH_ALL:
+ case CMDQ_OP_TLBI_NH_ASID:
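+		/* Stamp the S2 parent's VMID over whatever the guest set */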
+ cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
+ cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vmid);
+ break;
+ default:
+ return -EIO;
+ }
+ return 0;
+}
+
+static int __arm_smmu_cache_invalidate_user(struct arm_smmu_domain *s2_parent,
+ struct iommu_user_data_array *array)
+{
+ struct arm_smmu_device *smmu = s2_parent->smmu;
+ struct iommu_hwpt_arm_smmuv3_invalidate *last_batch;
+ struct iommu_hwpt_arm_smmuv3_invalidate *cmds;
+ struct iommu_hwpt_arm_smmuv3_invalidate *cur;
+ struct iommu_hwpt_arm_smmuv3_invalidate *end;
+ struct arm_smmu_cmdq_ent ent;
+ struct arm_smmu_cmdq *cmdq;
+ int ret;
+
+ /* A zero-length array is allowed to validate the array type */
+ if (array->entry_num == 0 &&
+ array->type == IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3) {
+ array->entry_num = 0;
+ return 0;
+ }
+
+ cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
+ if (!cmds)
+ return -ENOMEM;
+ cur = cmds;
+ end = cmds + array->entry_num;
+
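+	/*
+	 * The uapi struct must be exactly one 16-byte CMDQ entry, so that the
+	 * converted array can be handed to arm_smmu_cmdq_issue_cmdlist() in
+	 * place.
+	 */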
+ static_assert(sizeof(*cmds) == 2 * sizeof(u64));
+ ret = iommu_copy_struct_from_full_user_array(
+ cmds, sizeof(*cmds), array,
+ IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3);
+ if (ret)
+ goto out;
+
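+	/* All commands are issued on one cmdq, picked by the first opcode */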
+	ent.opcode = le64_to_cpu(cmds->cmd[0]) & CMDQ_0_OP;
+ cmdq = arm_smmu_get_cmdq(smmu, &ent);
+
+ last_batch = cmds;
+ while (cur != end) {
+ ret = arm_smmu_convert_user_cmd(s2_parent, cur);
+ if (ret)
+ goto out;
+
+ /* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
+ cur++;
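+		/*
+		 * Issue up to CMDQ_BATCH_ENTRIES - 1 commands at a time,
+		 * leaving room for the CMD_SYNC that sync=true appends below.
+		 */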
+ if (cur != end && (cur - last_batch) != CMDQ_BATCH_ENTRIES - 1)
+ continue;
+
+ ret = arm_smmu_cmdq_issue_cmdlist(smmu, cmdq, last_batch->cmd,
+ cur - last_batch, true);
+ if (ret) {
+ cur--;
+ goto out;
+ }
+ last_batch = cur;
+ }
+out:
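+	/* Report to userspace how many commands were consumed */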
+ array->entry_num = cur - cmds;
+ kfree(cmds);
+ return ret;
+}
+
+static int arm_smmu_cache_invalidate_user(struct iommu_domain *domain,
+ struct iommu_user_data_array *array)
+{
+ struct arm_smmu_nested_domain *nested_domain =
+ container_of(domain, struct arm_smmu_nested_domain, domain);
+
+ return __arm_smmu_cache_invalidate_user(
+ nested_domain->s2_parent, array);
+}
+
static const struct iommu_domain_ops arm_smmu_nested_ops = {
.get_msi_mapping_domain = arm_smmu_get_msi_mapping_domain,
.attach_dev = arm_smmu_attach_dev_nested,
.free = arm_smmu_domain_nested_free,
+ .cache_invalidate_user = arm_smmu_cache_invalidate_user,
};

static struct iommu_domain *
@@ -3298,6 +3405,14 @@ arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
!(master->smmu->features & ARM_SMMU_FEAT_S2FWB))
return ERR_PTR(-EOPNOTSUPP);

+ /*
+ * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW
+ * defect is needed to determine if arm_smmu_cache_invalidate_user()
+ * needs any change to remove this.
+ */
+ if (WARN_ON(master->smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
+ return ERR_PTR(-EOPNOTSUPP);
+
ret = iommu_copy_struct_from_user(&arg, user_data,
IOMMU_HWPT_DATA_ARM_SMMUV3, ste);
if (ret)
@@ -523,6 +523,7 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_TLBI_NH_ALL 0x10
#define CMDQ_OP_TLBI_NH_ASID 0x11
#define CMDQ_OP_TLBI_NH_VA 0x12
+ #define CMDQ_OP_TLBI_NH_VAA 0x13
#define CMDQ_OP_TLBI_EL2_ALL 0x20
#define CMDQ_OP_TLBI_EL2_ASID 0x21
#define CMDQ_OP_TLBI_EL2_VA 0x22
@@ -685,9 +685,11 @@ struct iommu_hwpt_get_dirty_bitmap {
* enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
* Data Type
* @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ * @IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
*/
enum iommu_hwpt_invalidate_data_type {
IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
+ IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3 = 1,
};

/**
@@ -726,6 +728,25 @@ struct iommu_hwpt_vtd_s1_invalidate {
__u32 __reserved;
};

+/**
+ * struct iommu_hwpt_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
+ *                                           (IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3)
+ * @cmd: 128-bit cache invalidation command that runs in the SMMU CMDQ.
+ *       Must be little-endian.
+ *
+ * Supported command list:
+ * CMDQ_OP_TLBI_NSNH_ALL
+ * CMDQ_OP_TLBI_NH_VA
+ * CMDQ_OP_TLBI_NH_VAA
+ * CMDQ_OP_TLBI_NH_ALL
+ * CMDQ_OP_TLBI_NH_ASID
+ *
+ * -EIO will be returned if the command is not supported.
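+ *
+ * As a sketch (assuming a little-endian host; field layout per the SMMUv3
+ * spec, e.g. ASID in bits [63:48] of the first word), a TLBI_NH_ASID for
+ * ASID 1 would be passed as:
+ *
+ *	struct iommu_hwpt_arm_smmuv3_invalidate inv = {
+ *		.cmd = { 0x11ULL | (1ULL << 48), 0 },
+ *	};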
+ */
+struct iommu_hwpt_arm_smmuv3_invalidate {
+ __aligned_u64 cmd[2];
+};
+
/**
* struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
* @size: sizeof(struct iommu_hwpt_invalidate)