diff mbox series

[v7,3/6] vfio/type1: Update iova list on detach

Message ID 20190626151248.11776-4-shameerali.kolothum.thodi@huawei.com
State New
Headers show
Series vfio/type1: Add support for valid iova list management | expand

Commit Message

Shameerali Kolothum Thodi June 26, 2019, 3:12 p.m. UTC
Get a copy of the iova list on _group_detach and try to update the list.
On success, replace the current one with the copy. Leave the list as
it is if the update fails.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>

---
 drivers/vfio/vfio_iommu_type1.c | 91 +++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)

-- 
2.17.1

Comments

Alex Williamson July 3, 2019, 8:34 p.m. UTC | #1
On Wed, 26 Jun 2019 16:12:45 +0100
Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> wrote:

> Get a copy of iova list on _group_detach and try to update the list.

> On success replace the current one with the copy. Leave the list as

> it is if update fails.

> 

> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>

> ---

>  drivers/vfio/vfio_iommu_type1.c | 91 +++++++++++++++++++++++++++++++++

>  1 file changed, 91 insertions(+)

> 

> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c

> index b6bfdfa16c33..e872fb3a0f39 100644

> --- a/drivers/vfio/vfio_iommu_type1.c

> +++ b/drivers/vfio/vfio_iommu_type1.c

> @@ -1873,12 +1873,88 @@ static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)

>  	WARN_ON(iommu->notifier.head);

>  }

>  

> +/*

> + * Called when a domain is removed in detach. It is possible that

> + * the removed domain decided the iova aperture window. Modify the

> + * iova aperture with the smallest window among existing domains.

> + */

> +static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,

> +				   struct list_head *iova_copy)

> +{

> +	struct vfio_domain *domain;

> +	struct iommu_domain_geometry geo;

> +	struct vfio_iova *node;

> +	dma_addr_t start = 0;

> +	dma_addr_t end = (dma_addr_t)~0;

> +

> +	list_for_each_entry(domain, &iommu->domain_list, next) {

> +		iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,

> +				      &geo);

> +		if (geo.aperture_start > start)

> +			start = geo.aperture_start;

> +		if (geo.aperture_end < end)

> +			end = geo.aperture_end;

> +	}

> +

> +	/* Modify aperture limits. The new aper is either same or bigger */

> +	node = list_first_entry(iova_copy, struct vfio_iova, list);

> +	node->start = start;

> +	node = list_last_entry(iova_copy, struct vfio_iova, list);

> +	node->end = end;

> +}

> +

> +/*

> + * Called when a group is detached. The reserved regions for that

> + * group can be part of valid iova now. But since reserved regions

> + * may be duplicated among groups, populate the iova valid regions

> + * list again.

> + */

> +static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,

> +				   struct list_head *iova_copy)

> +{

> +	struct vfio_domain *d;

> +	struct vfio_group *g;

> +	struct vfio_iova *node;

> +	dma_addr_t start, end;

> +	LIST_HEAD(resv_regions);

> +	int ret;

> +

> +	list_for_each_entry(d, &iommu->domain_list, next) {

> +		list_for_each_entry(g, &d->group_list, next)

> +			iommu_get_group_resv_regions(g->iommu_group,

> +						     &resv_regions);


Need to account for failure case here too.

> +	}

> +

> +	if (list_empty(&resv_regions))

> +		return 0;

> +

> +	node = list_first_entry(iova_copy, struct vfio_iova, list);

> +	start = node->start;

> +	node = list_last_entry(iova_copy, struct vfio_iova, list);

> +	end = node->end;

> +

> +	/* purge the iova list and create new one */

> +	vfio_iommu_iova_free(iova_copy);

> +

> +	ret = vfio_iommu_aper_resize(iova_copy, start, end);

> +	if (ret)

> +		goto done;

> +

> +	/* Exclude current reserved regions from iova ranges */

> +	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);

> +done:

> +	vfio_iommu_resv_free(&resv_regions);

> +	return ret;

> +}

> +

>  static void vfio_iommu_type1_detach_group(void *iommu_data,

>  					  struct iommu_group *iommu_group)

>  {

>  	struct vfio_iommu *iommu = iommu_data;

>  	struct vfio_domain *domain;

>  	struct vfio_group *group;

> +	bool iova_copy_fail;

> +	LIST_HEAD(iova_copy);

>  

>  	mutex_lock(&iommu->lock);

>  

> @@ -1901,6 +1977,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,

>  		}

>  	}

>  

> +	/*

> +	 * Get a copy of iova list. If success, use copy to update the

> +	 * list and to replace the current one.

> +	 */

> +	iova_copy_fail = !!vfio_iommu_iova_get_copy(iommu, &iova_copy);

> +

>  	list_for_each_entry(domain, &iommu->domain_list, next) {

>  		group = find_iommu_group(domain, iommu_group);

>  		if (!group)

> @@ -1926,10 +2008,19 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,

>  			iommu_domain_free(domain->domain);

>  			list_del(&domain->next);

>  			kfree(domain);

> +			if (!iova_copy_fail && !list_empty(&iommu->domain_list))

> +				vfio_iommu_aper_expand(iommu, &iova_copy);

>  		}

>  		break;

>  	}

>  

> +	if (!iova_copy_fail && !list_empty(&iommu->domain_list)) {

> +		if (!vfio_iommu_resv_refresh(iommu, &iova_copy))

> +			vfio_iommu_iova_insert_copy(iommu, &iova_copy);

> +		else

> +			vfio_iommu_iova_free(&iova_copy);

> +	}


The iova_copy_fail and list_empty tests are rather ugly, could we avoid
them by pushing the tests to the expand and refresh functions?  ie. it
looks like vfio_iommu_aper_expand() could test list_empty(iova_copy),
the list_for_each on domain_list doesn't need special handling.  Same
for vfio_iommu_resv_refresh().  This would also fix the bug above that
I think we don't free iova_copy if domain_list becomes empty during
this operation.  Thanks,

Alex
Shameerali Kolothum Thodi July 4, 2019, 12:53 p.m. UTC | #2
> -----Original Message-----

> From: kvm-owner@vger.kernel.org [mailto:kvm-owner@vger.kernel.org] On

> Behalf Of Alex Williamson

> Sent: 03 July 2019 21:35

> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>

> Cc: eric.auger@redhat.com; pmorel@linux.vnet.ibm.com;

> kvm@vger.kernel.org; linux-kernel@vger.kernel.org;

> iommu@lists.linux-foundation.org; Linuxarm <linuxarm@huawei.com>; John

> Garry <john.garry@huawei.com>; xuwei (O) <xuwei5@huawei.com>;

> kevin.tian@intel.com

> Subject: Re: [PATCH v7 3/6] vfio/type1: Update iova list on detach

> 

> On Wed, 26 Jun 2019 16:12:45 +0100

> Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> wrote:

> 

> > Get a copy of iova list on _group_detach and try to update the list.

> > On success replace the current one with the copy. Leave the list as

> > it is if update fails.

> >

> > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>

> > ---

> >  drivers/vfio/vfio_iommu_type1.c | 91

> +++++++++++++++++++++++++++++++++

> >  1 file changed, 91 insertions(+)

> >

> > diff --git a/drivers/vfio/vfio_iommu_type1.c

> b/drivers/vfio/vfio_iommu_type1.c

> > index b6bfdfa16c33..e872fb3a0f39 100644

> > --- a/drivers/vfio/vfio_iommu_type1.c

> > +++ b/drivers/vfio/vfio_iommu_type1.c

> > @@ -1873,12 +1873,88 @@ static void vfio_sanity_check_pfn_list(struct

> vfio_iommu *iommu)

> >  	WARN_ON(iommu->notifier.head);

> >  }

> >

> > +/*

> > + * Called when a domain is removed in detach. It is possible that

> > + * the removed domain decided the iova aperture window. Modify the

> > + * iova aperture with the smallest window among existing domains.

> > + */

> > +static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,

> > +				   struct list_head *iova_copy)

> > +{

> > +	struct vfio_domain *domain;

> > +	struct iommu_domain_geometry geo;

> > +	struct vfio_iova *node;

> > +	dma_addr_t start = 0;

> > +	dma_addr_t end = (dma_addr_t)~0;

> > +

> > +	list_for_each_entry(domain, &iommu->domain_list, next) {

> > +		iommu_domain_get_attr(domain->domain,

> DOMAIN_ATTR_GEOMETRY,

> > +				      &geo);

> > +		if (geo.aperture_start > start)

> > +			start = geo.aperture_start;

> > +		if (geo.aperture_end < end)

> > +			end = geo.aperture_end;

> > +	}

> > +

> > +	/* Modify aperture limits. The new aper is either same or bigger */

> > +	node = list_first_entry(iova_copy, struct vfio_iova, list);

> > +	node->start = start;

> > +	node = list_last_entry(iova_copy, struct vfio_iova, list);

> > +	node->end = end;

> > +}

> > +

> > +/*

> > + * Called when a group is detached. The reserved regions for that

> > + * group can be part of valid iova now. But since reserved regions

> > + * may be duplicated among groups, populate the iova valid regions

> > + * list again.

> > + */

> > +static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,

> > +				   struct list_head *iova_copy)

> > +{

> > +	struct vfio_domain *d;

> > +	struct vfio_group *g;

> > +	struct vfio_iova *node;

> > +	dma_addr_t start, end;

> > +	LIST_HEAD(resv_regions);

> > +	int ret;

> > +

> > +	list_for_each_entry(d, &iommu->domain_list, next) {

> > +		list_for_each_entry(g, &d->group_list, next)

> > +			iommu_get_group_resv_regions(g->iommu_group,

> > +						     &resv_regions);

> 

> Need to account for failure case here too.


Ok.

> > +	}

> > +

> > +	if (list_empty(&resv_regions))

> > +		return 0;

> > +

> > +	node = list_first_entry(iova_copy, struct vfio_iova, list);

> > +	start = node->start;

> > +	node = list_last_entry(iova_copy, struct vfio_iova, list);

> > +	end = node->end;

> > +

> > +	/* purge the iova list and create new one */

> > +	vfio_iommu_iova_free(iova_copy);

> > +

> > +	ret = vfio_iommu_aper_resize(iova_copy, start, end);

> > +	if (ret)

> > +		goto done;

> > +

> > +	/* Exclude current reserved regions from iova ranges */

> > +	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);

> > +done:

> > +	vfio_iommu_resv_free(&resv_regions);

> > +	return ret;

> > +}

> > +

> >  static void vfio_iommu_type1_detach_group(void *iommu_data,

> >  					  struct iommu_group *iommu_group)

> >  {

> >  	struct vfio_iommu *iommu = iommu_data;

> >  	struct vfio_domain *domain;

> >  	struct vfio_group *group;

> > +	bool iova_copy_fail;

> > +	LIST_HEAD(iova_copy);

> >

> >  	mutex_lock(&iommu->lock);

> >

> > @@ -1901,6 +1977,12 @@ static void vfio_iommu_type1_detach_group(void

> *iommu_data,

> >  		}

> >  	}

> >

> > +	/*

> > +	 * Get a copy of iova list. If success, use copy to update the

> > +	 * list and to replace the current one.

> > +	 */

> > +	iova_copy_fail = !!vfio_iommu_iova_get_copy(iommu, &iova_copy);

> > +

> >  	list_for_each_entry(domain, &iommu->domain_list, next) {

> >  		group = find_iommu_group(domain, iommu_group);

> >  		if (!group)

> > @@ -1926,10 +2008,19 @@ static void

> vfio_iommu_type1_detach_group(void *iommu_data,

> >  			iommu_domain_free(domain->domain);

> >  			list_del(&domain->next);

> >  			kfree(domain);

> > +			if (!iova_copy_fail && !list_empty(&iommu->domain_list))

> > +				vfio_iommu_aper_expand(iommu, &iova_copy);

> >  		}

> >  		break;

> >  	}

> >

> > +	if (!iova_copy_fail && !list_empty(&iommu->domain_list)) {

> > +		if (!vfio_iommu_resv_refresh(iommu, &iova_copy))

> > +			vfio_iommu_iova_insert_copy(iommu, &iova_copy);

> > +		else

> > +			vfio_iommu_iova_free(&iova_copy);

> > +	}

> 

> The iova_copy_fail and list_empty tests are rather ugly, could we avoid

> them by pushing the tests to the expand and refresh functions?  ie. it

> looks like vfio_iommu_aper_expand() could test list_empty(iova_copy),

> the list_for_each on domain_list doesn't need special handling.  Same

> for vfio_iommu_resv_refresh().  This would also fix the bug above that

> I think we don't free iova_copy if domain_list becomes empty during

> this operation.  Thanks,


Agree. I will change that in next revision.

Thanks,
Shameer
Eric Auger July 7, 2019, 3:03 p.m. UTC | #3
Hi Shameer,

On 6/26/19 5:12 PM, Shameer Kolothum wrote:
> Get a copy of iova list on _group_detach and try to update the list.

> On success replace the current one with the copy. Leave the list as

> it is if update fails.

> 

> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>

> ---

>  drivers/vfio/vfio_iommu_type1.c | 91 +++++++++++++++++++++++++++++++++

>  1 file changed, 91 insertions(+)

> 

> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c

> index b6bfdfa16c33..e872fb3a0f39 100644

> --- a/drivers/vfio/vfio_iommu_type1.c

> +++ b/drivers/vfio/vfio_iommu_type1.c

> @@ -1873,12 +1873,88 @@ static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)

>  	WARN_ON(iommu->notifier.head);

>  }

>  

> +/*

> + * Called when a domain is removed in detach. It is possible that

> + * the removed domain decided the iova aperture window. Modify the

> + * iova aperture with the smallest window among existing domains.

> + */

> +static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,

> +				   struct list_head *iova_copy)

Maybe you could just remove iova_copy for the args and return start,
size. See comment below.
> +{

> +	struct vfio_domain *domain;

> +	struct iommu_domain_geometry geo;

> +	struct vfio_iova *node;

> +	dma_addr_t start = 0;

> +	dma_addr_t end = (dma_addr_t)~0;

> +

> +	list_for_each_entry(domain, &iommu->domain_list, next) {

> +		iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,

> +				      &geo);

> +		if (geo.aperture_start > start)

> +			start = geo.aperture_start;

> +		if (geo.aperture_end < end)

> +			end = geo.aperture_end;

> +	}

> +

> +	/* Modify aperture limits. The new aper is either same or bigger */

> +	node = list_first_entry(iova_copy, struct vfio_iova, list);

> +	node->start = start;

> +	node = list_last_entry(iova_copy, struct vfio_iova, list);

> +	node->end = end;

> +}

> +

> +/*

> + * Called when a group is detached. The reserved regions for that

> + * group can be part of valid iova now. But since reserved regions

> + * may be duplicated among groups, populate the iova valid regions

> + * list again.

> + */

> +static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,

> +				   struct list_head *iova_copy)

> +{

> +	struct vfio_domain *d;

> +	struct vfio_group *g;

> +	struct vfio_iova *node;

> +	dma_addr_t start, end;

> +	LIST_HEAD(resv_regions);

> +	int ret;

> +

> +	list_for_each_entry(d, &iommu->domain_list, next) {

> +		list_for_each_entry(g, &d->group_list, next)

> +			iommu_get_group_resv_regions(g->iommu_group,

> +						     &resv_regions);

> +	}

> +

> +	if (list_empty(&resv_regions))

> +		return 0;

vfio_iommu_aper_expand() just extended the start/end of the first and last
node respectively.  If the iova_copy previously had reserved regions
excluded and there are none anymore, the previous holes will remain,
unless I am missing something?

You may unconditionally recompute start/end, free the copy,
aper_resize() with new start/end and exclude resv regions again?

Thanks

Eric

> +

> +	node = list_first_entry(iova_copy, struct vfio_iova, list);

> +	start = node->start;

> +	node = list_last_entry(iova_copy, struct vfio_iova, list);

> +	end = node->end;

> +

> +	/* purge the iova list and create new one */

> +	vfio_iommu_iova_free(iova_copy);

> +

> +	ret = vfio_iommu_aper_resize(iova_copy, start, end);

> +	if (ret)

> +		goto done;

> +

> +	/* Exclude current reserved regions from iova ranges */

> +	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);

> +done:

> +	vfio_iommu_resv_free(&resv_regions);

> +	return ret;

> +}

> +

>  static void vfio_iommu_type1_detach_group(void *iommu_data,

>  					  struct iommu_group *iommu_group)

>  {

>  	struct vfio_iommu *iommu = iommu_data;

>  	struct vfio_domain *domain;

>  	struct vfio_group *group;

> +	bool iova_copy_fail;

> +	LIST_HEAD(iova_copy);

>  

>  	mutex_lock(&iommu->lock);

>  

> @@ -1901,6 +1977,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,

>  		}

>  	}

>  

> +	/*

> +	 * Get a copy of iova list. If success, use copy to update the

> +	 * list and to replace the current one.

> +	 */

> +	iova_copy_fail = !!vfio_iommu_iova_get_copy(iommu, &iova_copy);

> +

>  	list_for_each_entry(domain, &iommu->domain_list, next) {

>  		group = find_iommu_group(domain, iommu_group);

>  		if (!group)

> @@ -1926,10 +2008,19 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,

>  			iommu_domain_free(domain->domain);

>  			list_del(&domain->next);

>  			kfree(domain);

> +			if (!iova_copy_fail && !list_empty(&iommu->domain_list))

> +				vfio_iommu_aper_expand(iommu, &iova_copy);

>  		}

>  		break;

>  	}

>  

> +	if (!iova_copy_fail && !list_empty(&iommu->domain_list)) {

> +		if (!vfio_iommu_resv_refresh(iommu, &iova_copy))

> +			vfio_iommu_iova_insert_copy(iommu, &iova_copy);

> +		else

> +			vfio_iommu_iova_free(&iova_copy);

> +	}

> +

>  detach_group_done:

>  	mutex_unlock(&iommu->lock);

>  }

>
Shameerali Kolothum Thodi July 8, 2019, 7:10 a.m. UTC | #4
Hi Eric,

> -----Original Message-----

> From: Auger Eric [mailto:eric.auger@redhat.com]

> Sent: 07 July 2019 16:03

> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>;

> alex.williamson@redhat.com; pmorel@linux.vnet.ibm.com

> Cc: kvm@vger.kernel.org; linux-kernel@vger.kernel.org;

> iommu@lists.linux-foundation.org; Linuxarm <linuxarm@huawei.com>; John

> Garry <john.garry@huawei.com>; xuwei (O) <xuwei5@huawei.com>;

> kevin.tian@intel.com

> Subject: Re: [PATCH v7 3/6] vfio/type1: Update iova list on detach

> 

> Hi Shameer,

> 

> On 6/26/19 5:12 PM, Shameer Kolothum wrote:

> > Get a copy of iova list on _group_detach and try to update the list.

> > On success replace the current one with the copy. Leave the list as

> > it is if update fails.

> >

> > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>

> > ---

> >  drivers/vfio/vfio_iommu_type1.c | 91

> +++++++++++++++++++++++++++++++++

> >  1 file changed, 91 insertions(+)

> >

> > diff --git a/drivers/vfio/vfio_iommu_type1.c

> b/drivers/vfio/vfio_iommu_type1.c

> > index b6bfdfa16c33..e872fb3a0f39 100644

> > --- a/drivers/vfio/vfio_iommu_type1.c

> > +++ b/drivers/vfio/vfio_iommu_type1.c

> > @@ -1873,12 +1873,88 @@ static void vfio_sanity_check_pfn_list(struct

> vfio_iommu *iommu)

> >  	WARN_ON(iommu->notifier.head);

> >  }

> >

> > +/*

> > + * Called when a domain is removed in detach. It is possible that

> > + * the removed domain decided the iova aperture window. Modify the

> > + * iova aperture with the smallest window among existing domains.

> > + */

> > +static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,

> > +				   struct list_head *iova_copy)

> Maybe you could just remove iova_copy for the args and return start,

> size. See comment below.

> > +{

> > +	struct vfio_domain *domain;

> > +	struct iommu_domain_geometry geo;

> > +	struct vfio_iova *node;

> > +	dma_addr_t start = 0;

> > +	dma_addr_t end = (dma_addr_t)~0;

> > +

> > +	list_for_each_entry(domain, &iommu->domain_list, next) {

> > +		iommu_domain_get_attr(domain->domain,

> DOMAIN_ATTR_GEOMETRY,

> > +				      &geo);

> > +		if (geo.aperture_start > start)

> > +			start = geo.aperture_start;

> > +		if (geo.aperture_end < end)

> > +			end = geo.aperture_end;

> > +	}

> > +

> > +	/* Modify aperture limits. The new aper is either same or bigger */

> > +	node = list_first_entry(iova_copy, struct vfio_iova, list);

> > +	node->start = start;

> > +	node = list_last_entry(iova_copy, struct vfio_iova, list);

> > +	node->end = end;

> > +}

> > +

> > +/*

> > + * Called when a group is detached. The reserved regions for that

> > + * group can be part of valid iova now. But since reserved regions

> > + * may be duplicated among groups, populate the iova valid regions

> > + * list again.

> > + */

> > +static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,

> > +				   struct list_head *iova_copy)

> > +{

> > +	struct vfio_domain *d;

> > +	struct vfio_group *g;

> > +	struct vfio_iova *node;

> > +	dma_addr_t start, end;

> > +	LIST_HEAD(resv_regions);

> > +	int ret;

> > +

> > +	list_for_each_entry(d, &iommu->domain_list, next) {

> > +		list_for_each_entry(g, &d->group_list, next)

> > +			iommu_get_group_resv_regions(g->iommu_group,

> > +						     &resv_regions);

> > +	}

> > +

> > +	if (list_empty(&resv_regions))

> > +		return 0;

> vfio_iommu_aper_expand() just extended the start/end of first & last

> node respectively.  In case the iova_copy() featured excluded resv

> regions before and now you don't have any anymore, the previous holes

> will stay if I don't miss anything?


Good catch! Yes, I think there is a problem here.

> 

> You may unconditionally recompute start/end, free the copy,

> aper_resize() with new start/end and exclude resv regions again?


Ok. I will fix this in next revision.

Cheers,
Shameer
diff mbox series

Patch

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b6bfdfa16c33..e872fb3a0f39 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1873,12 +1873,88 @@  static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
 	WARN_ON(iommu->notifier.head);
 }
 
+/*
+ * Called when a domain is removed in detach. It is possible that
+ * the removed domain decided the iova aperture window. Modify the
+ * iova aperture with the smallest window among existing domains.
+ */
+static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
+				   struct list_head *iova_copy)
+{
+	struct vfio_domain *domain;
+	struct iommu_domain_geometry geo;
+	struct vfio_iova *node;
+	dma_addr_t start = 0;
+	dma_addr_t end = (dma_addr_t)~0;
+
+	list_for_each_entry(domain, &iommu->domain_list, next) {
+		iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY,
+				      &geo);
+		if (geo.aperture_start > start)
+			start = geo.aperture_start;
+		if (geo.aperture_end < end)
+			end = geo.aperture_end;
+	}
+
+	/* Modify aperture limits. The new aper is either same or bigger */
+	node = list_first_entry(iova_copy, struct vfio_iova, list);
+	node->start = start;
+	node = list_last_entry(iova_copy, struct vfio_iova, list);
+	node->end = end;
+}
+
+/*
+ * Called when a group is detached. The reserved regions for that
+ * group can be part of valid iova now. But since reserved regions
+ * may be duplicated among groups, populate the iova valid regions
+ * list again.
+ */
+static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
+				   struct list_head *iova_copy)
+{
+	struct vfio_domain *d;
+	struct vfio_group *g;
+	struct vfio_iova *node;
+	dma_addr_t start, end;
+	LIST_HEAD(resv_regions);
+	int ret;
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		list_for_each_entry(g, &d->group_list, next)
+			iommu_get_group_resv_regions(g->iommu_group,
+						     &resv_regions);
+	}
+
+	if (list_empty(&resv_regions))
+		return 0;
+
+	node = list_first_entry(iova_copy, struct vfio_iova, list);
+	start = node->start;
+	node = list_last_entry(iova_copy, struct vfio_iova, list);
+	end = node->end;
+
+	/* purge the iova list and create new one */
+	vfio_iommu_iova_free(iova_copy);
+
+	ret = vfio_iommu_aper_resize(iova_copy, start, end);
+	if (ret)
+		goto done;
+
+	/* Exclude current reserved regions from iova ranges */
+	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);
+done:
+	vfio_iommu_resv_free(&resv_regions);
+	return ret;
+}
+
 static void vfio_iommu_type1_detach_group(void *iommu_data,
 					  struct iommu_group *iommu_group)
 {
 	struct vfio_iommu *iommu = iommu_data;
 	struct vfio_domain *domain;
 	struct vfio_group *group;
+	bool iova_copy_fail;
+	LIST_HEAD(iova_copy);
 
 	mutex_lock(&iommu->lock);
 
@@ -1901,6 +1977,12 @@  static void vfio_iommu_type1_detach_group(void *iommu_data,
 		}
 	}
 
+	/*
+	 * Get a copy of iova list. If success, use copy to update the
+	 * list and to replace the current one.
+	 */
+	iova_copy_fail = !!vfio_iommu_iova_get_copy(iommu, &iova_copy);
+
 	list_for_each_entry(domain, &iommu->domain_list, next) {
 		group = find_iommu_group(domain, iommu_group);
 		if (!group)
@@ -1926,10 +2008,19 @@  static void vfio_iommu_type1_detach_group(void *iommu_data,
 			iommu_domain_free(domain->domain);
 			list_del(&domain->next);
 			kfree(domain);
+			if (!iova_copy_fail && !list_empty(&iommu->domain_list))
+				vfio_iommu_aper_expand(iommu, &iova_copy);
 		}
 		break;
 	}
 
+	if (!iova_copy_fail && !list_empty(&iommu->domain_list)) {
+		if (!vfio_iommu_resv_refresh(iommu, &iova_copy))
+			vfio_iommu_iova_insert_copy(iommu, &iova_copy);
+		else
+			vfio_iommu_iova_free(&iova_copy);
+	}
+
 detach_group_done:
 	mutex_unlock(&iommu->lock);
 }