Do not allow unmapping an iopt_area that is owned by an object, giving
this owner a choice to lock the mapping in the iopt. It will be used by
vIOMMU's HW QUEUE structure, which must prevent the queue memory from
being unmapped in the nesting parent IO page table until user space
destroys the HW QUEUE object first.

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/iommufd/io_pagetable.h |  7 ++++---
 drivers/iommu/iommufd/device.c       |  8 +++++---
 drivers/iommu/iommufd/io_pagetable.c |  6 ++++++
 drivers/iommu/iommufd/pages.c        | 12 ++++++++++--
 4 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
@@ -48,6 +48,7 @@ struct iopt_area {
int iommu_prot;
bool prevent_access : 1;
unsigned int num_accesses;
+ unsigned int num_owners;
};
struct iopt_allowed {
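For clarity, the new counter is a subset counter: every owner also holds a regular access, so num_owners can never exceed num_accesses. A minimal sanity-check helper (illustrative only, not part of this patch; the function name is hypothetical) would look like:

static void example_check_area_counters(struct iopt_area *area)
{
	/* Both counters are only modified under pages->mutex */
	lockdep_assert_held(&area->pages->mutex);
	WARN_ON(area->num_owners > area->num_accesses);
}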
@@ -235,12 +236,11 @@ int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start,
unsigned long last, struct page **out_pages);
void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start,
unsigned long last);
-
int iopt_area_add_access(struct iopt_area *area, unsigned long start,
unsigned long last, struct page **out_pages,
- unsigned int flags);
+ unsigned int flags, bool is_owner);
void iopt_area_remove_access(struct iopt_area *area, unsigned long start,
- unsigned long last);
+ unsigned long last, bool is_owner);
int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
void *data, unsigned long length, unsigned int flags);
@@ -250,6 +250,7 @@ int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
*/
struct iopt_pages_access {
struct interval_tree_node node;
+ unsigned int owners;
unsigned int users;
};
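To sketch how the new flag is meant to be used (illustrative only; no such caller exists in this patch, and the helper name is hypothetical), an in-kernel owner object would take its reference with is_owner = true; the matching release is sketched after the remove path below:

/* Hypothetical owner object pinning a range it must keep mapped */
static int example_owner_take(struct iopt_area *area,
			      unsigned long start_index,
			      unsigned long last_index,
			      struct page **out_pages)
{
	/*
	 * Bumps area->num_accesses and area->num_owners, so
	 * iopt_unmap_iova_range() will return -EBUSY for this area.
	 */
	return iopt_area_add_access(area, start_index, last_index,
				    out_pages, 0, true);
}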
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
@@ -1265,7 +1265,8 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
area, iopt_area_iova_to_index(area, iter.cur_iova),
iopt_area_iova_to_index(
area,
- min(last_iova, iopt_area_last_iova(area))));
+ min(last_iova, iopt_area_last_iova(area))),
+ false);
WARN_ON(!iopt_area_contig_done(&iter));
up_read(&iopt->iova_rwsem);
mutex_unlock(&access->ioas_lock);
@@ -1356,7 +1357,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
}
rc = iopt_area_add_access(area, index, last_index, out_pages,
- flags);
+ flags, false);
if (rc)
goto err_remove;
out_pages += last_index - index + 1;
@@ -1379,7 +1380,8 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
iopt_area_iova_to_index(area, iter.cur_iova),
iopt_area_iova_to_index(
area, min(last_iova,
- iopt_area_last_iova(area))));
+ iopt_area_last_iova(area))),
+ false);
}
up_read(&iopt->iova_rwsem);
mutex_unlock(&access->ioas_lock);
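Both external-access call sites above pass is_owner = false, so the exported pin/unpin API keeps its existing semantics. A typical driver-side consumer (a sketch, assuming an emulated-DMA style user; the function name is hypothetical) is unaffected:

static int example_emulated_dma(struct iommufd_access *access,
				unsigned long iova, unsigned long length,
				struct page **pages)
{
	int rc;

	/* Pins as a plain access; cannot block IOMMU_IOAS_UNMAP */
	rc = iommufd_access_pin_pages(access, iova, length, pages,
				      IOMMUFD_ACCESS_RW_WRITE);
	if (rc)
		return rc;
	/* ... read/write the pinned pages ... */
	iommufd_access_unpin_pages(access, iova, length);
	return 0;
}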
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
@@ -719,6 +719,12 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
goto out_unlock_iova;
}
+ /* The area is held by an object that has not been destroyed */
+ if (area->num_owners) {
+ rc = -EBUSY;
+ goto out_unlock_iova;
+ }
+
if (area_first < start || area_last > last) {
rc = -ENOENT;
goto out_unlock_iova;
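The userspace-visible effect of the new check is that IOMMU_IOAS_UNMAP fails with EBUSY while an owner object holds the range. A minimal sketch, assuming a mapping currently locked by such an owner:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int example_try_unmap(int iommufd, __u32 ioas_id, __u64 iova,
			     __u64 length)
{
	struct iommu_ioas_unmap unmap = {
		.size = sizeof(unmap),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};

	/* Fails with errno == EBUSY until the owner object is destroyed */
	return ioctl(iommufd, IOMMU_IOAS_UNMAP, &unmap);
}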
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
@@ -2111,7 +2111,7 @@ iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
*/
int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
unsigned long last_index, struct page **out_pages,
- unsigned int flags)
+ unsigned int flags, bool is_owner)
{
struct iopt_pages *pages = area->pages;
struct iopt_pages_access *access;
@@ -2124,6 +2124,8 @@ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
access = iopt_pages_get_exact_access(pages, start_index, last_index);
if (access) {
area->num_accesses++;
+ if (is_owner)
+ area->num_owners++;
access->users++;
iopt_pages_fill_from_xarray(pages, start_index, last_index,
out_pages);
@@ -2145,6 +2147,8 @@ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
access->node.last = last_index;
access->users = 1;
area->num_accesses++;
+ if (is_owner)
+ area->num_owners++;
interval_tree_insert(&access->node, &pages->access_itree);
mutex_unlock(&pages->mutex);
return 0;
@@ -2166,7 +2170,7 @@ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
* must stop using the PFNs before calling this.
*/
void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
- unsigned long last_index)
+ unsigned long last_index, bool is_owner)
{
struct iopt_pages *pages = area->pages;
struct iopt_pages_access *access;
@@ -2177,6 +2181,10 @@ void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
goto out_unlock;
WARN_ON(area->num_accesses == 0 || access->users == 0);
+ if (is_owner) {
+ WARN_ON(area->num_owners == 0);
+ area->num_owners--;
+ }
area->num_accesses--;
access->users--;
if (access->users)
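Completing the sketch started after the header changes (the helper name remains hypothetical): the owner's destroy path drops the owner reference, after which the same range can be unmapped normally:

static void example_owner_release(struct iopt_area *area,
				  unsigned long start_index,
				  unsigned long last_index)
{
	/*
	 * Drops the counts taken by example_owner_take(); once
	 * area->num_owners reaches zero, iopt_unmap_iova_range()
	 * stops returning -EBUSY for this area.
	 */
	iopt_area_remove_access(area, start_index, last_index, true);
}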