
[20/20] iommu: Convert remaining simple drivers to domain_alloc_paging()

Message ID 20-v1-21cc72fcfb22+a7a-iommu_all_defdom_jgg@nvidia.com
State New
Series iommu: Make default_domain's mandatory

Commit Message

Jason Gunthorpe May 1, 2023, 6:03 p.m. UTC
These drivers don't support IOMMU_DOMAIN_DMA, so this commit effectively
allows them to support that mode.

The prior work to require default_domains makes this safe because every
one of these drivers is either compilation incompatible with dma-iommu.c,
or already establishing a default_domain. In both cases alloc_domain()
will never be called with IOMMU_DOMAIN_DMA for these drivers so it is safe
to drop the test.

Removing these tests clarifies that the domain allocation path is only
about the functionality of a paging domain and has nothing to do with
policy of how the paging domain is used for UNMANAGED/DMA/DMA_FQ.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/fsl_pamu_domain.c | 7 ++-----
 drivers/iommu/msm_iommu.c       | 7 ++-----
 drivers/iommu/mtk_iommu_v1.c    | 7 ++-----
 drivers/iommu/omap-iommu.c      | 7 ++-----
 drivers/iommu/s390-iommu.c      | 7 ++-----
 drivers/iommu/tegra-gart.c      | 7 ++-----
 6 files changed, 12 insertions(+), 30 deletions(-)

Comments

Niklas Schnelle May 2, 2023, 2:52 p.m. UTC | #1
On Mon, 2023-05-01 at 15:03 -0300, Jason Gunthorpe wrote:
> These drivers don't support IOMMU_DOMAIN_DMA, so this commit effectively
> allows them to support that mode.
> 
> The prior work to require default_domains makes this safe because every
> one of these drivers is either compilation incompatible with dma-iommu.c,
> or already establishing a default_domain. In both cases alloc_domain()
> will never be called with IOMMU_DOMAIN_DMA for these drivers so it is safe
> to drop the test.
> 
> Removing these tests clarifies that the domain allocation path is only
> about the functionality of a paging domain and has nothing to do with
> policy of how the paging domain is used for UNMANAGED/DMA/DMA_FQ.
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>  drivers/iommu/fsl_pamu_domain.c | 7 ++-----
>  drivers/iommu/msm_iommu.c       | 7 ++-----
>  drivers/iommu/mtk_iommu_v1.c    | 7 ++-----
>  drivers/iommu/omap-iommu.c      | 7 ++-----
>  drivers/iommu/s390-iommu.c      | 7 ++-----
>  drivers/iommu/tegra-gart.c      | 7 ++-----
>  6 files changed, 12 insertions(+), 30 deletions(-)
> 
> 
---8<---
> -static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
> +static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
>  {
>  	struct s390_domain *s390_domain;
>  
> -	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
> -		return NULL;
> -
>  	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
>  	if (!s390_domain)
>  		return NULL;
> @@ -447,7 +444,7 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
>  static const struct iommu_ops s390_iommu_ops = {
>  	.default_domain = &s390_iommu_platform_domain,
>  	.capable = s390_iommu_capable,
> -	.domain_alloc = s390_domain_alloc,
> +	.domain_alloc_paging = s390_domain_alloc_paging,

Leaving .domain_alloc unset here leads to an Oops with your GitHub
branch (iommu_mandatory_default) when I try to use vfio-pci for KVM
pass-through via the following call chain:

...
vfio_group_fops_unl_ioctl()
   vfio_group_ioctl_set_container()
      vfio_container_attach_group()
         iommu_group_claim_dma_owner()
            __iommu_take_dma_ownership()
               __iommu_group_alloc_blocking_domain()
               __iommu_domain_alloc(…, IOMMU_DOMAIN_BLOCKED)

The problem is that in __iommu_domain_alloc() a call to
bus->iommu_ops->domain_alloc() is attempted for IOMMU_DOMAIN_BLOCKED
even if the function pointer is unset.

So I tried with the obvious fix:

@@ -1947,7 +1948,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
        if ((type == IOMMU_DOMAIN_UNMANAGED || type == IOMMU_DOMAIN_DMA) &&
            bus->iommu_ops->domain_alloc_paging)
                domain = bus->iommu_ops->domain_alloc_paging(dev);
-       else
+       else if (bus->iommu_ops->domain_alloc)
                domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;

This then uses the fallback of an empty IOMMU_DOMAIN_UNMANAGED and I
get a working device in the guest. Also tried hot unplug where the
device is taken over by the host again. I think with my DMA API
conversion patches we can support blocking domains properly but for a
temporary solution the above may be acceptable.
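
For context, the fallback comes from __iommu_group_alloc_blocking_domain()
in iommu.c, which upstream looks roughly like this (paraphrased from
memory; the exact __iommu_domain_alloc() signature may differ on your
branch):

	static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
	{
		struct group_device *dev =
			list_first_entry(&group->devices, struct group_device, list);

		if (group->blocking_domain)
			return 0;

		/* Preferred: a real IOMMU_DOMAIN_BLOCKED domain from the driver */
		group->blocking_domain =
			__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
		if (!group->blocking_domain) {
			/*
			 * For drivers that do not yet understand
			 * IOMMU_DOMAIN_BLOCKED, fall back to an empty
			 * UNMANAGED (paging) domain instead.
			 */
			group->blocking_domain = __iommu_domain_alloc(
				dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
			if (!group->blocking_domain)
				return -EINVAL;
		}
		return 0;
	}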

>  	.probe_device = s390_iommu_probe_device,
>  	.release_device = s390_iommu_release_device,
>  	.device_group = generic_device_group,
Jason Gunthorpe May 2, 2023, 3:25 p.m. UTC | #2
On Tue, May 02, 2023 at 04:52:32PM +0200, Niklas Schnelle wrote:
> @@ -1947,7 +1948,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
>         if ((type == IOMMU_DOMAIN_UNMANAGED || type == IOMMU_DOMAIN_DMA) &&
>             bus->iommu_ops->domain_alloc_paging)
>                 domain = bus->iommu_ops->domain_alloc_paging(dev);
> -       else
> +       else if (bus->iommu_ops->domain_alloc)
>                 domain = bus->iommu_ops->domain_alloc(type);
>         if (!domain)
>                 return NULL;

Agh, yes, it should fail, this is right, I'll fold it in, thanks

> This then uses the fallback of an empty IOMMU_DOMAIN_UNMANAGED and I
> get a working device in the guest. Also tried hot unplug where the
> device is taken over by the host again.

Great, thanks, I'll add your tested-by for the s390 drivers.

> I think with my DMA API
> conversion patches we can support blocking domains properly but for a
> temporary solution the above may be acceptable.

Yes, this is a good idea, I encourage all drivers to implement at
least one of BLOCKING or IDENTITY as global static singletons that
can't fail - this will allow us to have cleaner error recovery flows.
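
For example, mirroring the static identity domains already added earlier
in this series (the foo_ names are only placeholders; a BLOCKED singleton
looks the same with IOMMU_DOMAIN_BLOCKED and an attach_dev that shuts
translation off):

	static int foo_identity_attach(struct iommu_domain *domain,
				       struct device *dev)
	{
		/* hardware specific: put the device into bypass/identity mode */
		return 0;
	}

	static const struct iommu_domain_ops foo_identity_ops = {
		.attach_dev = foo_identity_attach,
	};

	static struct iommu_domain foo_identity_domain = {
		.type = IOMMU_DOMAIN_IDENTITY,
		.ops = &foo_identity_ops,
	};

	static const struct iommu_ops foo_iommu_ops = {
		.identity_domain = &foo_identity_domain,
		/* ... remaining ops ... */
	};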

Jason
Niklas Schnelle May 2, 2023, 6:02 p.m. UTC | #3
On Tue, 2023-05-02 at 12:25 -0300, Jason Gunthorpe wrote:
> On Tue, May 02, 2023 at 04:52:32PM +0200, Niklas Schnelle wrote:
> > @@ -1947,7 +1948,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
> >         if ((type == IOMMU_DOMAIN_UNMANAGED || type == IOMMU_DOMAIN_DMA) &&
> >             bus->iommu_ops->domain_alloc_paging)
> >                 domain = bus->iommu_ops->domain_alloc_paging(dev);
> > -       else
> > +       else if (bus->iommu_ops->domain_alloc)
> >                 domain = bus->iommu_ops->domain_alloc(type);
> >         if (!domain)
> >                 return NULL;
> 
> Agh, yes, it should fail, this is right, I'll fold it in, thanks
> 
> > This then uses the fallback of an empty IOMMU_DOMAIN_UNMANAGED and I
> > get a working device in the guest. Also tried hot unplug where the
> > device is taken over by the host again.
> 
> Great, thanks, I'll add your tested-by for the s390 drivers.

Yes, and with the above change feel free to add my Acked-by for this
patch; see my other reply for the s390-specific change.

Acked-by: Niklas Schnelle <schnelle@linux.ibm.com> # s390

(I've recently been added as an additional S390 IOMMU (PCI) maintainer.)

Patch

diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 4c65f1adfe7511..641e89f7d7f444 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -192,13 +192,10 @@  static void fsl_pamu_domain_free(struct iommu_domain *domain)
 	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
 }
 
-static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
+static struct iommu_domain *fsl_pamu_domain_alloc_paging(struct device *dev)
 {
 	struct fsl_dma_domain *dma_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
 	if (!dma_domain)
 		return NULL;
@@ -473,7 +470,7 @@  static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
 static const struct iommu_ops fsl_pamu_ops = {
 	.default_domain = &fsl_pamu_platform_domain,
 	.capable	= fsl_pamu_capable,
-	.domain_alloc	= fsl_pamu_domain_alloc,
+	.domain_alloc_paging = fsl_pamu_domain_alloc_paging,
 	.probe_device	= fsl_pamu_probe_device,
 	.device_group   = fsl_pamu_device_group,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 26ed81cfeee897..a163cee0b7242d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -302,13 +302,10 @@  static void __program_context(void __iomem *base, int ctx,
 	SET_M(base, ctx, 1);
 }
 
-static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct msm_priv *priv;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		goto fail_nomem;
@@ -691,7 +688,7 @@  irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
 
 static struct iommu_ops msm_iommu_ops = {
 	.identity_domain = &msm_iommu_identity_domain,
-	.domain_alloc = msm_iommu_domain_alloc,
+	.domain_alloc_paging = msm_iommu_domain_alloc_paging,
 	.probe_device = msm_iommu_probe_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 7c0c1d50df5f75..67e044c1a7d93b 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -270,13 +270,10 @@  static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
 	return 0;
 }
 
-static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
 {
 	struct mtk_iommu_v1_domain *dom;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 	if (!dom)
 		return NULL;
@@ -585,7 +582,7 @@  static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
 
 static const struct iommu_ops mtk_iommu_v1_ops = {
 	.identity_domain = &mtk_iommu_v1_identity_domain,
-	.domain_alloc	= mtk_iommu_v1_domain_alloc,
+	.domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
 	.probe_device	= mtk_iommu_v1_probe_device,
 	.probe_finalize = mtk_iommu_v1_probe_finalize,
 	.release_device	= mtk_iommu_v1_release_device,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 34340ef15241bc..fcf99bd195b32e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1580,13 +1580,10 @@  static struct iommu_domain omap_iommu_identity_domain = {
 	.ops = &omap_iommu_identity_ops,
 };
 
-static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct omap_iommu_domain *omap_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
 	if (!omap_domain)
 		return NULL;
@@ -1748,7 +1745,7 @@  static struct iommu_group *omap_iommu_device_group(struct device *dev)
 
 static const struct iommu_ops omap_iommu_ops = {
 	.identity_domain = &omap_iommu_identity_domain,
-	.domain_alloc	= omap_iommu_domain_alloc,
+	.domain_alloc_paging = omap_iommu_domain_alloc_paging,
 	.probe_device	= omap_iommu_probe_device,
 	.release_device	= omap_iommu_release_device,
 	.device_group	= omap_iommu_device_group,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index f0c867c57a5b9b..5695ad71d60e24 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -39,13 +39,10 @@  static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
 	}
 }
 
-static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
+static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
 {
 	struct s390_domain *s390_domain;
 
-	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
 	if (!s390_domain)
 		return NULL;
@@ -447,7 +444,7 @@  void zpci_destroy_iommu(struct zpci_dev *zdev)
 static const struct iommu_ops s390_iommu_ops = {
 	.default_domain = &s390_iommu_platform_domain,
 	.capable = s390_iommu_capable,
-	.domain_alloc = s390_domain_alloc,
+	.domain_alloc_paging = s390_domain_alloc_paging,
 	.probe_device = s390_iommu_probe_device,
 	.release_device = s390_iommu_release_device,
 	.device_group = generic_device_group,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 09865889ff2480..b90801dddef413 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -167,13 +167,10 @@  static struct iommu_domain gart_iommu_platform_domain = {
 	.ops = &gart_iommu_platform_ops,
 };
 
-static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *gart_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct iommu_domain *domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (domain) {
 		domain->geometry.aperture_start = gart_handle->iovmm_base;
@@ -294,7 +291,7 @@  static void gart_iommu_sync(struct iommu_domain *domain,
 
 static const struct iommu_ops gart_iommu_ops = {
 	.default_domain = &gart_iommu_platform_domain,
-	.domain_alloc	= gart_iommu_domain_alloc,
+	.domain_alloc_paging = gart_iommu_domain_alloc_paging,
 	.probe_device	= gart_iommu_probe_device,
 	.device_group	= generic_device_group,
 	.pgsize_bitmap	= GART_IOMMU_PGSIZES,