@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/iommu.h>
+#include <linux/iommufd.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/sizes.h>
@@ -835,7 +836,7 @@ struct arm_smmu_domain {
struct arm_smmu_nested_domain {
struct iommu_domain domain;
- struct arm_smmu_domain *s2_parent;
+ struct arm_vsmmu *vsmmu;
__le64 ste[2];
};
@@ -1005,21 +1006,22 @@ tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
}
#endif /* CONFIG_TEGRA241_CMDQV */
+struct arm_vsmmu {
+ struct iommufd_viommu core;
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_domain *s2_parent;
+ u16 vmid;
+};
+
#if IS_ENABLED(CONFIG_ARM_SMMU_V3_IOMMUFD)
void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type);
-struct iommu_domain *
-arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
- struct iommu_domain *parent,
- const struct iommu_user_data *user_data);
+struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
+ struct iommu_domain *parent,
+ struct iommufd_ctx *ictx,
+ unsigned int viommu_type);
#else
#define arm_smmu_hw_info NULL
-static inline struct iommu_domain *
-arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
- struct iommu_domain *parent,
- const struct iommu_user_data *user_data)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
+#define arm_vsmmu_alloc NULL
#endif /* CONFIG_ARM_SMMU_V3_IOMMUFD */
#endif /* _ARM_SMMU_V3_H */
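Side note on the stub change above: iommu_ops->viommu_alloc is an optional
op, and the iommufd core bails out before ever calling it when it is
absent, along these lines (a sketch of the core-side check, not verbatim
code from this series):

	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);

	if (!ops->viommu_alloc)
		return ERR_PTR(-EOPNOTSUPP);

so the old inline ERR_PTR(-EOPNOTSUPP) wrapper can be reduced to a plain
#define arm_vsmmu_alloc NULL.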
@@ -859,9 +859,11 @@ struct iommu_fault_alloc {
/**
* enum iommu_viommu_type - Virtual IOMMU Type
* @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
*/
enum iommu_viommu_type {
IOMMU_VIOMMU_TYPE_DEFAULT = 0,
+ IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
};
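From userspace, the new type is selected through the IOMMU_VIOMMU_ALLOC
ioctl added earlier in this series; a minimal sketch (the ids are
placeholders, setup and error handling omitted):

	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3,
		.dev_id = dev_id,	/* device behind the physical SMMUv3 */
		.hwpt_id = s2_hwpt_id,	/* stage-2 HWPT created with NEST_PARENT */
	};

	ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &cmd);
	/* on success, cmd.out_viommu_id names the new vSMMU object */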
/**
@@ -34,7 +34,8 @@ static void arm_smmu_make_nested_cd_table_ste(
struct arm_smmu_ste *target, struct arm_smmu_master *master,
struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
- arm_smmu_make_s2_domain_ste(target, master, nested_domain->s2_parent,
+ arm_smmu_make_s2_domain_ste(target, master,
+ nested_domain->vsmmu->s2_parent,
ats_enabled);
target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
@@ -75,7 +76,8 @@ static void arm_smmu_make_nested_domain_ste(
break;
case STRTAB_STE_0_CFG_BYPASS:
arm_smmu_make_s2_domain_ste(
- target, master, nested_domain->s2_parent, ats_enabled);
+ target, master, nested_domain->vsmmu->s2_parent,
+ ats_enabled);
break;
case STRTAB_STE_0_CFG_ABORT:
default:
@@ -100,7 +102,7 @@ static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
struct arm_smmu_ste ste;
int ret;
- if (nested_domain->s2_parent->smmu != master->smmu)
+ if (nested_domain->vsmmu->smmu != master->smmu)
return -EINVAL;
if (arm_smmu_ssids_in_use(&master->cd_table))
return -EBUSY;
@@ -151,36 +153,15 @@ static int arm_smmu_validate_vste(struct iommu_hwpt_arm_smmuv3 *arg)
return 0;
}
-struct iommu_domain *
-arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
- struct iommu_domain *parent,
+static struct iommu_domain *
+arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu,
const struct iommu_user_data *user_data)
{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
struct arm_smmu_nested_domain *nested_domain;
- struct arm_smmu_domain *smmu_parent;
struct iommu_hwpt_arm_smmuv3 arg;
int ret;
- if (flags || !(master->smmu->features & ARM_SMMU_FEAT_NESTING))
- return ERR_PTR(-EOPNOTSUPP);
-
- /*
- * Must support some way to prevent the VM from bypassing the cache
- * because VFIO currently does not do any cache maintenance.
- */
- if (!arm_smmu_master_canwbs(master) &&
- !(master->smmu->features & ARM_SMMU_FEAT_S2FWB))
- return ERR_PTR(-EOPNOTSUPP);
-
- /*
- * The core code checks that parent was created with
- * IOMMU_HWPT_ALLOC_NEST_PARENT
- */
- smmu_parent = to_smmu_domain(parent);
- if (smmu_parent->smmu != master->smmu)
- return ERR_PTR(-EINVAL);
-
ret = iommu_copy_struct_from_user(&arg, user_data,
IOMMU_HWPT_DATA_ARM_SMMUV3, ste);
if (ret)
@@ -196,9 +177,52 @@ arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
nested_domain->domain.ops = &arm_smmu_nested_ops;
- nested_domain->s2_parent = smmu_parent;
+ nested_domain->vsmmu = vsmmu;
nested_domain->ste[0] = arg.ste[0];
nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);
return &nested_domain->domain;
}
+
+static const struct iommufd_viommu_ops arm_vsmmu_ops = {
+ .alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
+};
+
+struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
+ struct iommu_domain *parent,
+ struct iommufd_ctx *ictx,
+ unsigned int viommu_type)
+{
+ struct arm_smmu_device *smmu =
+ iommu_get_iommu_dev(dev, struct arm_smmu_device, iommu);
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_smmu_domain *s2_parent = to_smmu_domain(parent);
+ struct arm_vsmmu *vsmmu;
+
+ if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /*
+ * Must support some way to prevent the VM from bypassing the cache
+ * because VFIO currently does not do any cache maintenance.
+ */
+ if (!arm_smmu_master_canwbs(master) &&
+ !(smmu->features & ARM_SMMU_FEAT_S2FWB))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ vsmmu = iommufd_viommu_alloc(ictx, struct arm_vsmmu, core,
+ &arm_vsmmu_ops);
+ if (IS_ERR(vsmmu))
+ return ERR_CAST(vsmmu);
+
+ vsmmu->smmu = smmu;
+ vsmmu->s2_parent = s2_parent;
+ /* FIXME Move VMID allocation from the S2 domain allocation to here */
+ vsmmu->vmid = s2_parent->s2_cfg.vmid;
+
+ return &vsmmu->core;
+}
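Putting the two entry points together (a sketch of the core-side dispatch
from earlier patches in this series, not verbatim code):
IOMMU_VIOMMU_ALLOC reaches arm_vsmmu_alloc() through
iommu_ops->viommu_alloc, and a later IOMMU_HWPT_ALLOC whose pt_id names
the vSMMU reaches arm_vsmmu_alloc_domain_nested() through the viommu ops
installed above:

	/* IOMMUFD_CMD_VIOMMU_ALLOC */
	viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
				   ucmd->ictx, cmd->type);

	/* IOMMUFD_CMD_HWPT_ALLOC with pt_id naming the vIOMMU */
	domain = viommu->ops->alloc_domain_nested(viommu, &user_data);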
@@ -2661,7 +2661,7 @@ to_smmu_domain_devices(struct iommu_domain *domain)
domain->type == IOMMU_DOMAIN_SVA)
return to_smmu_domain(domain);
if (domain->type == IOMMU_DOMAIN_NESTED)
- return to_smmu_nested_domain(domain)->s2_parent;
+ return to_smmu_nested_domain(domain)->vsmmu->s2_parent;
return NULL;
}
@@ -3126,13 +3126,9 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
struct arm_smmu_domain *smmu_domain;
int ret;
- if (parent)
- return arm_smmu_domain_alloc_nesting(dev, flags, parent,
- user_data);
-
if (flags & ~PAGING_FLAGS)
return ERR_PTR(-EOPNOTSUPP);
- if (user_data)
+ if (parent || user_data)
return ERR_PTR(-EOPNOTSUPP);
smmu_domain = arm_smmu_domain_alloc();
@@ -3541,6 +3537,7 @@ static struct iommu_ops arm_smmu_ops = {
.dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
+ .viommu_alloc = arm_vsmmu_alloc,
.user_pasid_table = 1,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
Add a new driver-type for ARM SMMUv3 to enum iommu_viommu_type. Implement
an arm_vsmmu_alloc() with its viommu op arm_vsmmu_domain_alloc_nested(),
to replace arm_smmu_domain_alloc_nesting(). As an initial step, copy the
VMID from the s2_parent; a later cleanup series is required to move the
VMID allocation out of the stage-2 domain allocation routine into this
one. After that, replace nested_domain->s2_parent with
nested_domain->vsmmu.

Note that the validating conditions for a nested_domain allocation are
moved from arm_vsmmu_domain_alloc_nested() to arm_vsmmu_alloc(), since
there is no point in creating a vIOMMU (vsmmu) in the first place if it
would not support a nested_domain.

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 26 +++---
 include/uapi/linux/iommufd.h                |  2 +
 .../arm/arm-smmu-v3/arm-smmu-v3-iommufd.c   | 80 ++++++++++++-------
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  9 +--
 4 files changed, 71 insertions(+), 46 deletions(-)
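For reference, the nested side then looks like this from userspace (a
sketch assuming the viommu-based IOMMU_HWPT_ALLOC support from earlier in
this series, continuing the IOMMU_VIOMMU_ALLOC snippet shown next to the
enum above; guest_ste_0/1 stand for the two STE words trapped from the
VM):

	struct iommu_hwpt_arm_smmuv3 ste_data = {
		.ste = { guest_ste_0, guest_ste_1 },
	};
	struct iommu_hwpt_alloc halloc = {
		.size = sizeof(halloc),
		.dev_id = dev_id,
		.pt_id = viommu_id,	/* the vSMMU, not the S2 HWPT */
		.data_type = IOMMU_HWPT_DATA_ARM_SMMUV3,
		.data_len = sizeof(ste_data),
		.data_uptr = (uintptr_t)&ste_data,
	};

	ioctl(iommufd, IOMMU_HWPT_ALLOC, &halloc);
	/* halloc.out_hwpt_id is the nested domain to attach the device to */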