@@ -7,6 +7,7 @@
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
@@ -44,6 +45,7 @@ struct iommufd_ctx {
struct xarray groups;
wait_queue_head_t destroy_wait;
struct rw_semaphore ioas_creation_lock;
+ struct maple_tree mt_mmap;
struct mutex sw_msi_lock;
struct list_head sw_msi_list;
@@ -55,6 +57,18 @@ struct iommufd_ctx {
struct iommufd_ioas *vfio_ioas;
};
+/* Entry for iommufd_ctx::mt_mmap */
+struct iommufd_mmap {
+ struct iommufd_object *owner;
+
+ /* Allocated start position in mt_mmap tree */
+ unsigned long startp;
+
+ /* Physical range for io_remap_pfn_range() */
+ unsigned long mmio_pfn;
+ unsigned long num_pfns;
+};
+
/*
* The IOVA to PFN map. The map automatically copies the PFNs into multiple
* domains and permits sharing of PFNs between io_pagetable instances. This
@@ -276,6 +276,11 @@ int _iommufd_object_depend(struct iommufd_object *obj_dependent,
struct iommufd_object *obj_depended);
void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
struct iommufd_object *obj_depended);
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset);
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset);
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
unsigned long vdev_id);
int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
@@ -310,6 +315,20 @@ _iommufd_object_undepend(struct iommufd_object *obj_dependent,
{
}
+static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ unsigned long offset)
+{
+}
+
static inline struct device *
iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
{
@@ -403,4 +422,30 @@ static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
_iommufd_object_undepend(&dependent->member.obj, \
&depended->member.obj); \
})
+
+/*
+ * Helpers for IOMMU driver to alloc/destroy an mmappable area for a structure.
+ *
+ * To support an mmappable MMIO region, a kernel driver must first register it
+ * with the iommufd core to allocate an @out_offset, in the context of a
+ * driver-struct allocation (e.g. the viommu_alloc op). Then, it should report
+ * this @out_offset and the @length of the MMIO region to user space for the
+ * mmap() syscall.
+ */
+#define iommufd_viommu_alloc_mmap(viommu, member, mmio, length, out_offset) \
+ ({ \
+ static_assert(__same_type(struct iommufd_viommu, \
+ viommu->member)); \
+ static_assert(offsetof(typeof(*viommu), member.obj) == 0); \
+ _iommufd_alloc_mmap(viommu->member.ictx, &viommu->member.obj, \
+ mmio, length, out_offset); \
+ })
+
+#define iommufd_viommu_destroy_mmap(viommu, member, offset) \
+ ({ \
+ static_assert(__same_type(struct iommufd_viommu, \
+ viommu->member)); \
+ static_assert(offsetof(typeof(*viommu), member.obj) == 0); \
+ _iommufd_destroy_mmap(viommu->member.ictx, \
+ &viommu->member.obj, offset); \
+ })
#endif
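
To illustrate the intended call pattern, here is a minimal driver-side sketch. The structure and function names (my_driver_viommu, my_viommu_init, the mmio_base/mmio_len parameters) are hypothetical; only the two helpers and the embedding rule their static_asserts enforce come from the header above:

#include <linux/iommufd.h>

/* Hypothetical driver structure; the core object must sit at offset 0 */
struct my_driver_viommu {
	struct iommufd_viommu core;
	unsigned long mmap_offset;
	size_t mmap_length;
};

static int my_viommu_init(struct my_driver_viommu *my_viommu,
			  phys_addr_t mmio_base, size_t mmio_len)
{
	int rc;

	/* Both mmio_base and mmio_len must be page-aligned, or -EINVAL */
	rc = iommufd_viommu_alloc_mmap(my_viommu, core, mmio_base,
				       mmio_len, &my_viommu->mmap_offset);
	if (rc)
		return rc;
	my_viommu->mmap_length = mmio_len;
	/*
	 * Report my_viommu->mmap_offset and my_viommu->mmap_length to
	 * user space, e.g. in the driver's vIOMMU allocation response.
	 */
	return 0;
}

static void my_viommu_destroy(struct my_driver_viommu *my_viommu)
{
	iommufd_viommu_destroy_mmap(my_viommu, core,
				    my_viommu->mmap_offset);
}
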
@@ -82,6 +82,57 @@ void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
}
EXPORT_SYMBOL_NS_GPL(_iommufd_object_undepend, "IOMMUFD");
+/*
+ * Allocate an @offset to return to user space for use in an mmap() syscall
+ *
+ * Drivers should use the per-structure helpers in include/linux/iommufd.h
+ */
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ struct iommufd_mmap *immap;
+ unsigned long startp;
+ int rc;
+
+ if (!PAGE_ALIGNED(mmio_addr))
+ return -EINVAL;
+ if (!length || !PAGE_ALIGNED(length))
+ return -EINVAL;
+
+ immap = kzalloc(sizeof(*immap), GFP_KERNEL);
+ if (!immap)
+ return -ENOMEM;
+ immap->owner = owner;
+ immap->num_pfns = length >> PAGE_SHIFT;
+ immap->mmio_pfn = mmio_addr >> PAGE_SHIFT;
+
+ rc = mtree_alloc_range(&ictx->mt_mmap, &startp, immap, immap->num_pfns,
+ 0, U32_MAX >> PAGE_SHIFT, GFP_KERNEL);
+ if (rc < 0) {
+ kfree(immap);
+ return rc;
+ }
+
+ immap->startp = startp;
+ /* The mmap() syscall will right-shift this offset into vma->vm_pgoff */
+ *offset = startp << PAGE_SHIFT;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(_iommufd_alloc_mmap, "IOMMUFD");
+
+/* Drivers should use the per-structure helpers in include/linux/iommufd.h */
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset)
+{
+ struct iommufd_mmap *immap;
+
+ immap = mtree_erase(&ictx->mt_mmap, offset >> PAGE_SHIFT);
+ WARN_ON_ONCE(!immap || immap->owner != owner);
+ kfree(immap);
+}
+EXPORT_SYMBOL_NS_GPL(_iommufd_destroy_mmap, "IOMMUFD");
+
/* Caller should xa_lock(&viommu->vdevs) to protect the return value */
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
unsigned long vdev_id)
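
The maple tree here acts as an allocating range map from page offsets to iommufd_mmap entries. A self-contained sketch of the same pattern (all demo_* names are illustrative, not part of the patch) also shows why iommufd_fops_mmap() below must re-check vm_pgoff against startp: mtree_load() returns the stored entry for any index inside the allocated range, not just its start:

#include <linux/gfp.h>
#include <linux/maple_tree.h>

/* An allocation tree, as mt_init_flags(MT_FLAGS_ALLOC_RANGE) sets up */
static struct maple_tree demo_mt = MTREE_INIT(demo_mt, MT_FLAGS_ALLOC_RANGE);

static int demo_register(void *entry, unsigned long npfns,
			 unsigned long *out_pgoff)
{
	/* Reserve a free [*out_pgoff, *out_pgoff + npfns - 1] range */
	return mtree_alloc_range(&demo_mt, out_pgoff, entry, npfns,
				 0, U32_MAX >> PAGE_SHIFT, GFP_KERNEL);
}

static void *demo_find(unsigned long pgoff)
{
	/*
	 * Returns the entry for ANY pgoff inside the range, so callers
	 * must still compare pgoff against the recorded start.
	 */
	return mtree_load(&demo_mt, pgoff);
}

static void *demo_unregister(unsigned long pgoff)
{
	/* mtree_erase() removes the whole range and returns the entry */
	return mtree_erase(&demo_mt, pgoff);
}

The byte offset handed to user space is simply the allocated start left-shifted by PAGE_SHIFT (e.g. with 4 KiB pages, a startp of 0x10 becomes an offset of 0x10000); mmap() undoes the shift when it populates vma->vm_pgoff, so the same number round-trips as the tree index.
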
@@ -225,6 +225,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
xa_init(&ictx->groups);
ictx->file = filp;
+ mt_init_flags(&ictx->mt_mmap, MT_FLAGS_ALLOC_RANGE);
init_waitqueue_head(&ictx->destroy_wait);
mutex_init(&ictx->sw_msi_lock);
INIT_LIST_HEAD(&ictx->sw_msi_list);
@@ -429,11 +430,71 @@ static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
return ret;
}
+static void iommufd_fops_vma_open(struct vm_area_struct *vma)
+{
+ struct iommufd_mmap *immap = vma->vm_private_data;
+
+ refcount_inc(&immap->owner->users);
+}
+
+static void iommufd_fops_vma_close(struct vm_area_struct *vma)
+{
+ struct iommufd_mmap *immap = vma->vm_private_data;
+
+ refcount_dec(&immap->owner->users);
+}
+
+static const struct vm_operations_struct iommufd_vma_ops = {
+ .open = iommufd_fops_vma_open,
+ .close = iommufd_fops_vma_close,
+};
+
+/* The vm_pgoff must be pre-allocated from mt_mmap and given to user space */
+static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct iommufd_ctx *ictx = filp->private_data;
+ size_t length = vma->vm_end - vma->vm_start;
+ struct iommufd_mmap *immap;
+ int rc;
+
+ if (!PAGE_ALIGNED(length))
+ return -EINVAL;
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+ if (vma->vm_flags & VM_EXEC)
+ return -EPERM;
+
+ /* vma->vm_pgoff carries the index of an mtree entry (immap) */
+ immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff);
+ if (!immap)
+ return -ENXIO;
+ /* Validate the vm_pgoff and length against the registered region */
+ if (vma->vm_pgoff != immap->startp)
+ return -ENXIO;
+ if (length != immap->num_pfns << PAGE_SHIFT)
+ return -ENXIO;
+
+ vma->vm_pgoff = 0;
+ vma->vm_private_data = immap;
+ vma->vm_ops = &iommufd_vma_ops;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ rc = io_remap_pfn_range(vma, vma->vm_start, immap->mmio_pfn, length,
+ vma->vm_page_prot);
+ if (rc)
+ return rc;
+
+ /* vm_ops->open() is not called for the initial mmap(), so take the reference here */
+ refcount_inc(&immap->owner->users);
+ return 0;
+}
+
static const struct file_operations iommufd_fops = {
.owner = THIS_MODULE,
.open = iommufd_fops_open,
.release = iommufd_fops_release,
.unlocked_ioctl = iommufd_fops_ioctl,
+ .mmap = iommufd_fops_mmap,
};
/**
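
For completeness, user space consumption might look like the sketch below. How the (offset, length) pair reaches user space is driver-specific uAPI and is assumed here; only the mmap() constraints come from iommufd_fops_mmap() above:

#include <stdint.h>
#include <sys/mman.h>

/*
 * Sketch: map a registered MMIO window through the iommufd fd.
 * @offset and @length must be exactly what the kernel driver reported
 * for this region; a partial or misaligned mapping fails with ENXIO.
 */
static void *map_mmio_region(int iommufd, uint64_t offset, size_t length)
{
	/* MAP_SHARED is mandatory and PROT_EXEC is refused (EPERM) */
	return mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    iommufd, (off_t)offset);
}

Here iommufd would be the file descriptor from opening the iommufd char device, and the returned pointer gives uncached access to the device MMIO window, per the pgprot_noncached() call in iommufd_fops_mmap().
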