[v7,2/6] vfio/type1: Check reserve region conflict and update iova list

Message ID 20190626151248.11776-3-shameerali.kolothum.thodi@huawei.com
State New
Series vfio/type1: Add support for valid iova list management

Commit Message

Shameerali Kolothum Thodi June 26, 2019, 3:12 p.m. UTC
This retrieves the reserved regions associated with dev group and
checks for conflicts with any existing dma mappings. Also update
the iova list excluding the reserved regions.

Reserved regions with type IOMMU_RESV_DIRECT_RELAXABLE are
excluded from above checks as they are considered as directly
mapped regions which are known to be relaxable.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>

---
 drivers/vfio/vfio_iommu_type1.c | 96 +++++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+)

-- 
2.17.1

Comments

Alex Williamson July 3, 2019, 8:34 p.m. UTC | #1
On Wed, 26 Jun 2019 16:12:44 +0100
Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> wrote:

> This retrieves the reserved regions associated with dev group and
> checks for conflicts with any existing dma mappings. Also update
> the iova list excluding the reserved regions.
> 
> Reserved regions with type IOMMU_RESV_DIRECT_RELAXABLE are
> excluded from above checks as they are considered as directly
> mapped regions which are known to be relaxable.
> 
> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> ---
>  drivers/vfio/vfio_iommu_type1.c | 96 +++++++++++++++++++++++++++++++++
>  1 file changed, 96 insertions(+)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 970d1ec06aed..b6bfdfa16c33 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -1559,6 +1641,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  	phys_addr_t resv_msi_base;
>  	struct iommu_domain_geometry geo;
>  	LIST_HEAD(iova_copy);
> +	LIST_HEAD(group_resv_regions);
>  
>  	mutex_lock(&iommu->lock);
>  
> @@ -1644,6 +1727,13 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  		goto out_detach;
>  	}
>  
> +	iommu_get_group_resv_regions(iommu_group, &group_resv_regions);

This can fail and should have an error case.  I assume we'd fail the
group attach on failure.  Thanks,

Alex
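
A minimal sketch of the kind of check being asked for here, assuming the
return value is simply propagated and the attach is failed through the
existing out_detach path of vfio_iommu_type1_attach_group() (the exact
placement is an assumption, not the final patch):

	/* Sketch only: fail the group attach if fetching reserved regions fails */
	ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
	if (ret)
		goto out_detach;
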
Shameerali Kolothum Thodi July 4, 2019, 12:51 p.m. UTC | #2
> -----Original Message-----
> From: kvm-owner@vger.kernel.org [mailto:kvm-owner@vger.kernel.org] On
> Behalf Of Alex Williamson
> Sent: 03 July 2019 21:34
> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> Cc: eric.auger@redhat.com; pmorel@linux.vnet.ibm.com;
> kvm@vger.kernel.org; linux-kernel@vger.kernel.org;
> iommu@lists.linux-foundation.org; Linuxarm <linuxarm@huawei.com>; John
> Garry <john.garry@huawei.com>; xuwei (O) <xuwei5@huawei.com>;
> kevin.tian@intel.com
> Subject: Re: [PATCH v7 2/6] vfio/type1: Check reserve region conflict and
> update iova list
> 
> On Wed, 26 Jun 2019 16:12:44 +0100
> Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> wrote:
> 
> > This retrieves the reserved regions associated with dev group and
> > checks for conflicts with any existing dma mappings. Also update
> > the iova list excluding the reserved regions.
> >
> > Reserved regions with type IOMMU_RESV_DIRECT_RELAXABLE are
> > excluded from above checks as they are considered as directly
> > mapped regions which are known to be relaxable.
> >
> > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> > ---
> >  drivers/vfio/vfio_iommu_type1.c | 96 +++++++++++++++++++++++++++++++++
> >  1 file changed, 96 insertions(+)
> >
> > diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> > index 970d1ec06aed..b6bfdfa16c33 100644
> > --- a/drivers/vfio/vfio_iommu_type1.c
> > +++ b/drivers/vfio/vfio_iommu_type1.c
> > @@ -1559,6 +1641,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> >  	phys_addr_t resv_msi_base;
> >  	struct iommu_domain_geometry geo;
> >  	LIST_HEAD(iova_copy);
> > +	LIST_HEAD(group_resv_regions);
> >
> >  	mutex_lock(&iommu->lock);
> >
> > @@ -1644,6 +1727,13 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> >  		goto out_detach;
> >  	}
> >
> > +	iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
> 
> This can fail and should have an error case.  I assume we'd fail the
> group attach on failure.  Thanks,

Right. I will add the check. Do you think we should do the same in vfio_iommu_has_sw_msi()
as well? (In fact, it looks like iommu_get_group_resv_regions() ret is not checked anywhere in
kernel). 

Thanks,
Shameer
Shameerali Kolothum Thodi July 4, 2019, 10:06 p.m. UTC | #3
> -----Original Message-----
> From: Shameerali Kolothum Thodi
> Sent: 04 July 2019 13:51
> To: 'Alex Williamson' <alex.williamson@redhat.com>
> Cc: eric.auger@redhat.com; kvm@vger.kernel.org;
> linux-kernel@vger.kernel.org; iommu@lists.linux-foundation.org; Linuxarm
> <linuxarm@huawei.com>; John Garry <john.garry@huawei.com>; xuwei (O)
> <xuwei5@huawei.com>; kevin.tian@intel.com
> Subject: RE: [PATCH v7 2/6] vfio/type1: Check reserve region conflict and
> update iova list
> 
> > -----Original Message-----
> > From: kvm-owner@vger.kernel.org [mailto:kvm-owner@vger.kernel.org] On
> > Behalf Of Alex Williamson
> > Sent: 03 July 2019 21:34
> > To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> > Cc: eric.auger@redhat.com; pmorel@linux.vnet.ibm.com;
> > kvm@vger.kernel.org; linux-kernel@vger.kernel.org;
> > iommu@lists.linux-foundation.org; Linuxarm <linuxarm@huawei.com>; John
> > Garry <john.garry@huawei.com>; xuwei (O) <xuwei5@huawei.com>;
> > kevin.tian@intel.com
> > Subject: Re: [PATCH v7 2/6] vfio/type1: Check reserve region conflict and
> > update iova list
> >
> > On Wed, 26 Jun 2019 16:12:44 +0100
> > Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> wrote:
> >
> > > This retrieves the reserved regions associated with dev group and
> > > checks for conflicts with any existing dma mappings. Also update
> > > the iova list excluding the reserved regions.
> > >
> > > Reserved regions with type IOMMU_RESV_DIRECT_RELAXABLE are
> > > excluded from above checks as they are considered as directly
> > > mapped regions which are known to be relaxable.
> > >
> > > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> > > ---
> > >  drivers/vfio/vfio_iommu_type1.c | 96 +++++++++++++++++++++++++++++++++
> > >  1 file changed, 96 insertions(+)
> > >
> > > diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> > > index 970d1ec06aed..b6bfdfa16c33 100644
> > > --- a/drivers/vfio/vfio_iommu_type1.c
> > > +++ b/drivers/vfio/vfio_iommu_type1.c
> > > @@ -1559,6 +1641,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> > >  	phys_addr_t resv_msi_base;
> > >  	struct iommu_domain_geometry geo;
> > >  	LIST_HEAD(iova_copy);
> > > +	LIST_HEAD(group_resv_regions);
> > >
> > >  	mutex_lock(&iommu->lock);
> > >
> > > @@ -1644,6 +1727,13 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> > >  		goto out_detach;
> > >  	}
> > >
> > > +	iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
> >
> > This can fail and should have an error case.  I assume we'd fail the
> > group attach on failure.  Thanks,
> 
> Right. I will add the check. Do you think we should do the same in
> vfio_iommu_has_sw_msi()
> as well? (In fact, it looks like iommu_get_group_resv_regions() ret is not
> checked anywhere in
> kernel).

Ah..forgot the fact that patch 6/6 should take care of vfio_iommu_has_sw_msi() 
case as we are removing the duplicate iommu_get_group_resv_regions() there. 

Thanks,
Shameer
Eric Auger July 5, 2019, 12:09 p.m. UTC | #4
Hi Shameer,

On 7/4/19 2:51 PM, Shameerali Kolothum Thodi wrote:
> 
>> -----Original Message-----
>> From: kvm-owner@vger.kernel.org [mailto:kvm-owner@vger.kernel.org] On
>> Behalf Of Alex Williamson
>> Sent: 03 July 2019 21:34
>> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
>> Cc: eric.auger@redhat.com; pmorel@linux.vnet.ibm.com;
>> kvm@vger.kernel.org; linux-kernel@vger.kernel.org;
>> iommu@lists.linux-foundation.org; Linuxarm <linuxarm@huawei.com>; John
>> Garry <john.garry@huawei.com>; xuwei (O) <xuwei5@huawei.com>;
>> kevin.tian@intel.com
>> Subject: Re: [PATCH v7 2/6] vfio/type1: Check reserve region conflict and
>> update iova list
>>
>> On Wed, 26 Jun 2019 16:12:44 +0100
>> Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> wrote:
>>
>>> This retrieves the reserved regions associated with dev group and
>>> checks for conflicts with any existing dma mappings. Also update
>>> the iova list excluding the reserved regions.
>>>
>>> Reserved regions with type IOMMU_RESV_DIRECT_RELAXABLE are
>>> excluded from above checks as they are considered as directly
>>> mapped regions which are known to be relaxable.
>>>
>>> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
>>> ---
>>>  drivers/vfio/vfio_iommu_type1.c | 96 +++++++++++++++++++++++++++++++++
>>>  1 file changed, 96 insertions(+)
>>>
>>> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
>>> index 970d1ec06aed..b6bfdfa16c33 100644
>>> --- a/drivers/vfio/vfio_iommu_type1.c
>>> +++ b/drivers/vfio/vfio_iommu_type1.c
>>> @@ -1559,6 +1641,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>>>  	phys_addr_t resv_msi_base;
>>>  	struct iommu_domain_geometry geo;
>>>  	LIST_HEAD(iova_copy);
>>> +	LIST_HEAD(group_resv_regions);
>>>
>>>  	mutex_lock(&iommu->lock);
>>>
>>> @@ -1644,6 +1727,13 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>>>  		goto out_detach;
>>>  	}
>>>
>>> +	iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
>>
>> This can fail and should have an error case.  I assume we'd fail the
>> group attach on failure.  Thanks,
> 
> Right. I will add the check. Do you think we should do the same in vfio_iommu_has_sw_msi()
> as well? (In fact, it looks like iommu_get_group_resv_regions() ret is not checked anywhere in
> kernel).

I think that can be the topic of another series. I just noticed that in
iommu_insert_resv_region(), which is recursive in case of merge, I
failed to propagate the returned value of recursive calls. This also needs
to be fixed. I volunteer to work on those changes if you prefer. Just
let me know.

Thanks

Eric
> 
> Thanks,
> Shameer
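
For illustration only, the bug class mentioned above (dropping the return
value of a recursive call while merging ranges) can be sketched with the
hypothetical userspace helper below; it is not the actual
iommu_insert_resv_region() code, and the node layout and helper names are
assumptions:

	#include <errno.h>
	#include <stdlib.h>

	/* Hypothetical range node, not the kernel's struct iommu_resv_region */
	struct range_node {
		unsigned long start, end;
		struct range_node *next;
	};

	/*
	 * Recursively insert [start, end] into a sorted, non-overlapping list,
	 * merging with any node it overlaps. The point of the sketch: every
	 * recursive call's return value is propagated, so an allocation
	 * failure deep in the recursion still reaches the original caller.
	 */
	static int insert_range(struct range_node **pos, unsigned long start,
				unsigned long end)
	{
		struct range_node *n = *pos;

		if (!n || end < n->start) {
			/* Right spot found: link a new node here */
			struct range_node *new = malloc(sizeof(*new));

			if (!new)
				return -ENOMEM;	/* must not be silently dropped */
			new->start = start;
			new->end = end;
			new->next = n;
			*pos = new;
			return 0;
		}

		if (start > n->end)	/* entirely after this node: keep walking */
			return insert_range(&n->next, start, end);

		/* Overlap: absorb this node and re-insert the merged range */
		start = start < n->start ? start : n->start;
		end = end > n->end ? end : n->end;
		*pos = n->next;
		free(n);
		return insert_range(pos, start, end);
	}
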
Eric Auger July 7, 2019, 3:02 p.m. UTC | #5
Hi Shameer,

On 6/26/19 5:12 PM, Shameer Kolothum wrote:
> This retrieves the reserved regions associated with dev group and
> checks for conflicts with any existing dma mappings. Also update
> the iova list excluding the reserved regions.

s/reserve/reserved in the commit title

> 
> Reserved regions with type IOMMU_RESV_DIRECT_RELAXABLE are
> excluded from above checks as they are considered as directly
> mapped regions which are known to be relaxable.
> 
> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> ---
>  drivers/vfio/vfio_iommu_type1.c | 96 +++++++++++++++++++++++++++++++++
>  1 file changed, 96 insertions(+)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 970d1ec06aed..b6bfdfa16c33 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -1508,6 +1508,88 @@ static int vfio_iommu_aper_resize(struct list_head *iova,
>  	return 0;
>  }
>  
> +/*
> + * Check reserved region conflicts with existing dma mappings
> + */
> +static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu,
> +				     struct list_head *resv_regions)
> +{
> +	struct iommu_resv_region *region;
> +
> +	/* Check for conflict with existing dma mappings */
> +	list_for_each_entry(region, resv_regions, list) {
> +		if (region->type == IOMMU_RESV_DIRECT_RELAXABLE)
> +			continue;
> +
> +		if (vfio_find_dma(iommu, region->start, region->length))
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
> +/*
> + * Check iova region overlap with reserved regions and
> + * exclude them from the iommu iova range
> + */
> +static int vfio_iommu_resv_exclude(struct list_head *iova,
> +				   struct list_head *resv_regions)
> +{
> +	struct iommu_resv_region *resv;
> +	struct vfio_iova *n, *next;
> +
> +	list_for_each_entry(resv, resv_regions, list) {
> +		phys_addr_t start, end;
> +
> +		if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
> +			continue;
> +
> +		start = resv->start;
> +		end = resv->start + resv->length - 1;
> +
> +		list_for_each_entry_safe(n, next, iova, list) {
> +			int ret = 0;
> +
> +			/* No overlap */
> +			if (start > n->end || end < n->start)
> +				continue;
> +			/*
> +			 * Insert a new node if current node overlaps with the
> +			 * reserved region to exclude that from the valid iova
> +			 * range. Note that the new node is inserted before the
> +			 * current node and finally the current node is deleted,
> +			 * keeping the list updated and sorted.
> +			 */
> +			if (start > n->start)
> +				ret = vfio_iommu_iova_insert(&n->list, n->start,
> +							     start - 1);
> +			if (!ret && end < n->end)
> +				ret = vfio_iommu_iova_insert(&n->list, end + 1,
> +							     n->end);
> +			if (ret)
> +				return ret;
> +
> +			list_del(&n->list);
> +			kfree(n);
> +		}
> +	}
> +
> +	if (list_empty(iova))
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static void vfio_iommu_resv_free(struct list_head *resv_regions)
> +{
> +	struct iommu_resv_region *n, *next;
> +
> +	list_for_each_entry_safe(n, next, resv_regions, list) {
> +		list_del(&n->list);
> +		kfree(n);
> +	}
> +}
> +
>  static void vfio_iommu_iova_free(struct list_head *iova)
>  {
>  	struct vfio_iova *n, *next;
> @@ -1559,6 +1641,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  	phys_addr_t resv_msi_base;
>  	struct iommu_domain_geometry geo;
>  	LIST_HEAD(iova_copy);
> +	LIST_HEAD(group_resv_regions);
>  
>  	mutex_lock(&iommu->lock);
>  
> @@ -1644,6 +1727,13 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  		goto out_detach;
>  	}
>  
> +	iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
> +
> +	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
> +		ret = -EINVAL;
> +		goto out_detach;
> +	}
> +
>  	/* Get a copy of the current iova list and work on it */
>  	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
>  	if (ret)
> @@ -1654,6 +1744,10 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  	if (ret)
>  		goto out_detach;
>  
> +	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
> +	if (ret)
> +		goto out_detach;
> +
>  	resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
>  
>  	INIT_LIST_HEAD(&domain->group_list);
> @@ -1714,6 +1808,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  	/* Delete the old one and insert new iova list */
>  	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
>  	mutex_unlock(&iommu->lock);
> +	vfio_iommu_resv_free(&group_resv_regions);
>  
>  	return 0;
>  
> @@ -1722,6 +1817,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>  out_domain:
>  	iommu_domain_free(domain->domain);
>  	vfio_iommu_iova_free(&iova_copy);
> +	vfio_iommu_resv_free(&group_resv_regions);
>  out_free:
>  	kfree(domain);
>  	kfree(group);
> 

Thanks

Eric
Shameerali Kolothum Thodi July 8, 2019, 6:59 a.m. UTC | #6
Hi Eric

> -----Original Message-----
> From: Auger Eric [mailto:eric.auger@redhat.com]
> Sent: 05 July 2019 13:10
> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>;
> Alex Williamson <alex.williamson@redhat.com>
> Cc: kvm@vger.kernel.org; linux-kernel@vger.kernel.org;
> iommu@lists.linux-foundation.org; Linuxarm <linuxarm@huawei.com>; John
> Garry <john.garry@huawei.com>; xuwei (O) <xuwei5@huawei.com>;
> kevin.tian@intel.com
> Subject: Re: [PATCH v7 2/6] vfio/type1: Check reserve region conflict and
> update iova list
> 
> Hi Shameer,
> 
> On 7/4/19 2:51 PM, Shameerali Kolothum Thodi wrote:
> >
> >> -----Original Message-----
> >> From: kvm-owner@vger.kernel.org [mailto:kvm-owner@vger.kernel.org] On
> >> Behalf Of Alex Williamson
> >> Sent: 03 July 2019 21:34
> >> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> >> Cc: eric.auger@redhat.com; pmorel@linux.vnet.ibm.com;
> >> kvm@vger.kernel.org; linux-kernel@vger.kernel.org;
> >> iommu@lists.linux-foundation.org; Linuxarm <linuxarm@huawei.com>; John
> >> Garry <john.garry@huawei.com>; xuwei (O) <xuwei5@huawei.com>;
> >> kevin.tian@intel.com
> >> Subject: Re: [PATCH v7 2/6] vfio/type1: Check reserve region conflict and
> >> update iova list
> >>
> >> On Wed, 26 Jun 2019 16:12:44 +0100
> >> Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> wrote:
> >>
> >>> This retrieves the reserved regions associated with dev group and
> >>> checks for conflicts with any existing dma mappings. Also update
> >>> the iova list excluding the reserved regions.
> >>>
> >>> Reserved regions with type IOMMU_RESV_DIRECT_RELAXABLE are
> >>> excluded from above checks as they are considered as directly
> >>> mapped regions which are known to be relaxable.
> >>>
> >>> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> >>> ---
> >>>  drivers/vfio/vfio_iommu_type1.c | 96 +++++++++++++++++++++++++++++++++
> >>>  1 file changed, 96 insertions(+)
> >>>
> >>> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> >>> index 970d1ec06aed..b6bfdfa16c33 100644
> >>> --- a/drivers/vfio/vfio_iommu_type1.c
> >>> +++ b/drivers/vfio/vfio_iommu_type1.c
> >>> @@ -1559,6 +1641,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> >>>  	phys_addr_t resv_msi_base;
> >>>  	struct iommu_domain_geometry geo;
> >>>  	LIST_HEAD(iova_copy);
> >>> +	LIST_HEAD(group_resv_regions);
> >>>
> >>>  	mutex_lock(&iommu->lock);
> >>>
> >>> @@ -1644,6 +1727,13 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
> >>>  		goto out_detach;
> >>>  	}
> >>>
> >>> +	iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
> >>
> >> This can fail and should have an error case.  I assume we'd fail the
> >> group attach on failure.  Thanks,
> >
> > Right. I will add the check. Do you think we should do the same in vfio_iommu_has_sw_msi()
> > as well? (In fact, it looks like iommu_get_group_resv_regions() ret is not checked anywhere in
> > kernel).
> 
> I think that can be the topic of another series. I just noticed that in
> iommu_insert_resv_region(), which is recursive in case of merge, I
> failed to propagate the returned value of recursive calls. This also needs
> to be fixed. I volunteer to work on those changes if you prefer. Just
> let me know.

Ok. Please go ahead.

Thanks,
Shameer

Patch

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 970d1ec06aed..b6bfdfa16c33 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1508,6 +1508,88 @@  static int vfio_iommu_aper_resize(struct list_head *iova,
 	return 0;
 }
 
+/*
+ * Check reserved region conflicts with existing dma mappings
+ */
+static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu,
+				     struct list_head *resv_regions)
+{
+	struct iommu_resv_region *region;
+
+	/* Check for conflict with existing dma mappings */
+	list_for_each_entry(region, resv_regions, list) {
+		if (region->type == IOMMU_RESV_DIRECT_RELAXABLE)
+			continue;
+
+		if (vfio_find_dma(iommu, region->start, region->length))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Check iova region overlap with reserved regions and
+ * exclude them from the iommu iova range
+ */
+static int vfio_iommu_resv_exclude(struct list_head *iova,
+				   struct list_head *resv_regions)
+{
+	struct iommu_resv_region *resv;
+	struct vfio_iova *n, *next;
+
+	list_for_each_entry(resv, resv_regions, list) {
+		phys_addr_t start, end;
+
+		if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
+			continue;
+
+		start = resv->start;
+		end = resv->start + resv->length - 1;
+
+		list_for_each_entry_safe(n, next, iova, list) {
+			int ret = 0;
+
+			/* No overlap */
+			if (start > n->end || end < n->start)
+				continue;
+			/*
+			 * Insert a new node if current node overlaps with the
+			 * reserved region to exclude that from the valid iova
+			 * range. Note that the new node is inserted before the
+			 * current node and finally the current node is deleted,
+			 * keeping the list updated and sorted.
+			 */
+			if (start > n->start)
+				ret = vfio_iommu_iova_insert(&n->list, n->start,
+							     start - 1);
+			if (!ret && end < n->end)
+				ret = vfio_iommu_iova_insert(&n->list, end + 1,
+							     n->end);
+			if (ret)
+				return ret;
+
+			list_del(&n->list);
+			kfree(n);
+		}
+	}
+
+	if (list_empty(iova))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void vfio_iommu_resv_free(struct list_head *resv_regions)
+{
+	struct iommu_resv_region *n, *next;
+
+	list_for_each_entry_safe(n, next, resv_regions, list) {
+		list_del(&n->list);
+		kfree(n);
+	}
+}
+
 static void vfio_iommu_iova_free(struct list_head *iova)
 {
 	struct vfio_iova *n, *next;
@@ -1559,6 +1641,7 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 	phys_addr_t resv_msi_base;
 	struct iommu_domain_geometry geo;
 	LIST_HEAD(iova_copy);
+	LIST_HEAD(group_resv_regions);
 
 	mutex_lock(&iommu->lock);
 
@@ -1644,6 +1727,13 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 		goto out_detach;
 	}
 
+	iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
+
+	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
+		ret = -EINVAL;
+		goto out_detach;
+	}
+
 	/* Get a copy of the current iova list and work on it */
 	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
 	if (ret)
@@ -1654,6 +1744,10 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_detach;
 
+	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
+	if (ret)
+		goto out_detach;
+
 	resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
 
 	INIT_LIST_HEAD(&domain->group_list);
@@ -1714,6 +1808,7 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 	/* Delete the old one and insert new iova list */
 	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
 	mutex_unlock(&iommu->lock);
+	vfio_iommu_resv_free(&group_resv_regions);
 
 	return 0;
 
@@ -1722,6 +1817,7 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 out_domain:
 	iommu_domain_free(domain->domain);
 	vfio_iommu_iova_free(&iova_copy);
+	vfio_iommu_resv_free(&group_resv_regions);
 out_free:
 	kfree(domain);
 	kfree(group);
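
As a self-contained illustration of the interval arithmetic performed by
vfio_iommu_resv_exclude() above, the userspace sketch below shows a reserved
window punching a hole in a valid iova range; the helper name, example
addresses and the two-piece output array are illustrative assumptions, not
part of the patch:

	/*
	 * Userspace sketch (not kernel code) of the splitting done by
	 * vfio_iommu_resv_exclude(): a reserved range may leave a piece of
	 * the valid range below it, above it, both, or none.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct range { uint64_t start, end; };	/* inclusive bounds, as in the patch */

	/* Split 'valid' around 'resv'; returns how many pieces survive (0-2) */
	static int exclude_range(struct range valid, struct range resv,
				 struct range out[2])
	{
		int n = 0;

		/* No overlap: the valid range survives untouched */
		if (resv.start > valid.end || resv.end < valid.start) {
			out[n++] = valid;
			return n;
		}
		/* Piece below the reserved region */
		if (resv.start > valid.start)
			out[n++] = (struct range){ valid.start, resv.start - 1 };
		/* Piece above the reserved region */
		if (resv.end < valid.end)
			out[n++] = (struct range){ resv.end + 1, valid.end };
		return n;
	}

	int main(void)
	{
		/* Example: a 4G aperture with a reserved window at 0x8000000 */
		struct range aperture = { 0x0, 0xffffffffULL };
		struct range resv = { 0x8000000, 0x80fffff };
		struct range out[2];
		int i, n = exclude_range(aperture, resv, out);

		for (i = 0; i < n; i++)
			printf("valid iova: 0x%llx - 0x%llx\n",
			       (unsigned long long)out[i].start,
			       (unsigned long long)out[i].end);
		return 0;
	}

With these example values the aperture splits into [0x0, 0x7ffffff] and
[0x8100000, 0xffffffff], which mirrors what the patch achieves with
vfio_iommu_iova_insert() followed by list_del()/kfree() of the original node.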