@@ -68,3 +68,9 @@ void arch_iommu_domain_destroy(struct domain *d)
{
iommu_dt_domain_destroy(d);
}
+
+int arch_iommu_populate_page_table(struct domain *d)
+{
+ /* The IOMMU shares the p2m with the CPU */
+ return -ENOSYS;
+}
@@ -41,6 +41,18 @@ int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev)
if ( !list_empty(&dev->domain_list) )
goto fail;
+ if ( need_iommu(d) <= 0 )
+ {
+ /*
+ * The hwdom is forced to use IOMMU for protecting assigned
+ * device. Therefore the IOMMU data is already set up.
+ */
+ ASSERT(!is_hardware_domain(d));
+ rc = iommu_construct(d);
+ if ( rc )
+ goto fail;
+ }
+
rc = hd->platform_ops->assign_device(d, 0, dt_to_dev(dev));
if ( rc )
@@ -187,6 +187,32 @@ void iommu_teardown(struct domain *d)
tasklet_schedule(&iommu_pt_cleanup_tasklet);
}
+int iommu_construct(struct domain *d)
+{
+ int rc = 0;
+
+ if ( need_iommu(d) > 0 )
+ return 0;
+
+ if ( !iommu_use_hap_pt(d) )
+ {
+ rc = arch_iommu_populate_page_table(d);
+ if ( rc )
+ return rc;
+ }
+
+ d->need_iommu = 1;
+ /*
+ * There may be dirty cache lines when a device is assigned
+ * and before need_iommu(d) becoming true, this will cause
+ * memory_type_changed lose effect if memory type changes.
+ * Call memory_type_changed here to amend this.
+ */
+ memory_type_changed(d);
+
+ return rc;
+}
+
void iommu_domain_destroy(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
@@ -1353,25 +1353,11 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
if ( !spin_trylock(&pcidevs_lock) )
return -ERESTART;
- if ( need_iommu(d) <= 0 )
+ rc = iommu_construct(d);
+ if ( rc )
{
- if ( !iommu_use_hap_pt(d) )
- {
- rc = arch_iommu_populate_page_table(d);
- if ( rc )
- {
- spin_unlock(&pcidevs_lock);
- return rc;
- }
- }
- d->need_iommu = 1;
- /*
- * There may be dirty cache lines when a device is assigned
- * and before need_iommu(d) becoming true, this will cause
- * memory_type_changed lose effect if memory type changes.
- * Call memory_type_changed here to amend this.
- */
- memory_type_changed(d);
+ spin_unlock(&pcidevs_lock);
+ return rc;
}
pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
@@ -65,6 +65,8 @@ int arch_iommu_domain_init(struct domain *d);
int arch_iommu_populate_page_table(struct domain *d);
void arch_iommu_check_autotranslated_hwdom(struct domain *d);
+int iommu_construct(struct domain *d);
+
/* Function used internally, use iommu_domain_destroy */
void iommu_teardown(struct domain *d);
This new function will correctly initialize the IOMMU page table for the current domain. Also use it in iommu_assign_dt_device even though the current IOMMU implementation on ARM shares the P2M with the processor. Signed-off-by: Julien Grall <julien.grall@linaro.org> Cc: Jan Beulich <jbeulich@suse.com> --- Changes in v4: - Move memory_type_changed into iommu_construct. Added by commit 06ed8cc "x86: avoid needless EPT table ajustment and cache flush" - Add an ASSERT and a comment in iommu_assign_dt_device to explain why the call is safe for DOM0 Changes in v3: - The ASSERT in iommu_construct was redundant with the if () - Remove d->need_iommu = 1 in assign_device as it's already done by iommu_construct. - Simplify the code in the caller of iommu_construct Changes in v2: - Add missing Signed-off-by - Rename iommu_buildup to iommu_construct --- xen/drivers/passthrough/arm/iommu.c | 6 ++++++ xen/drivers/passthrough/device_tree.c | 12 ++++++++++++ xen/drivers/passthrough/iommu.c | 26 ++++++++++++++++++++++++++ xen/drivers/passthrough/pci.c | 22 ++++------------------ xen/include/xen/iommu.h | 2 ++ 5 files changed, 50 insertions(+), 18 deletions(-)