--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -745,7 +745,7 @@ long arch_do_domctl(
"ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
- list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+ list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
if (g2m_ioport->mport == fmp )
{
g2m_ioport->gport = fgp;
@@ -764,7 +764,7 @@ long arch_do_domctl(
g2m_ioport->gport = fgp;
g2m_ioport->mport = fmp;
g2m_ioport->np = np;
- list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
+ list_add_tail(&g2m_ioport->list, &hd->arch.g2m_ioport_list);
}
if ( !ret )
ret = ioports_permit_access(d, fmp, fmp + np - 1);
@@ -779,7 +779,7 @@ long arch_do_domctl(
printk(XENLOG_G_INFO
"ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
- list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+ list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
if ( g2m_ioport->mport == fmp )
{
list_del(&g2m_ioport->list);
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -451,7 +451,7 @@ int dpci_ioport_intercept(ioreq_t *p)
unsigned int s = 0, e = 0;
int rc;
- list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
+ list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
{
s = g2m_ioport->gport;
e = s + g2m_ioport->np;
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -230,7 +230,8 @@ static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
if ( !is_idle_domain(d) )
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- update_iommu_mac(&ctx, hd->pgd_maddr, agaw_to_level(hd->agaw));
+ update_iommu_mac(&ctx, hd->arch.pgd_maddr,
+ agaw_to_level(hd->arch.agaw));
}
}
--- a/xen/drivers/passthrough/amd/iommu_cmd.c
+++ b/xen/drivers/passthrough/amd/iommu_cmd.c
@@ -355,7 +355,7 @@ static void _amd_iommu_flush_pages(struct domain *d,
unsigned long flags;
struct amd_iommu *iommu;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- unsigned int dom_id = hd->domain_id;
+ unsigned int dom_id = hd->arch.domain_id;
/* send INVALIDATE_IOMMU_PAGES command */
for_each_amd_iommu ( iommu )
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -60,12 +60,12 @@ static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
static inline struct guest_iommu *domain_iommu(struct domain *d)
{
- return domain_hvm_iommu(d)->g_iommu;
+ return domain_hvm_iommu(d)->arch.g_iommu;
}
static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
{
- return domain_hvm_iommu(v->domain)->g_iommu;
+ return domain_hvm_iommu(v->domain)->arch.g_iommu;
}
static void guest_iommu_enable(struct guest_iommu *iommu)
@@ -886,7 +886,7 @@ int guest_iommu_init(struct domain* d)
guest_iommu_reg_init(iommu);
iommu->domain = d;
- hd->g_iommu = iommu;
+ hd->arch.g_iommu = iommu;
tasklet_init(&iommu->cmd_buffer_tasklet,
guest_iommu_process_command, (unsigned long)d);
@@ -907,7 +907,7 @@ void guest_iommu_destroy(struct domain *d)
tasklet_kill(&iommu->cmd_buffer_tasklet);
xfree(iommu);
- domain_hvm_iommu(d)->g_iommu = NULL;
+ domain_hvm_iommu(d)->arch.g_iommu = NULL;
}
static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -344,7 +344,7 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
struct hvm_iommu *hd = domain_hvm_iommu(d);
bool_t ok = 0;
- ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+ ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
next_level = merge_level - 1;
@@ -398,7 +398,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
unsigned long first_mfn;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+ ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
table = map_domain_page(pt_mfn);
pde = table + pfn_to_pde_idx(gfn, merge_level);
@@ -448,8 +448,8 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
struct page_info *table;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- table = hd->root_table;
- level = hd->paging_mode;
+ table = hd->arch.root_table;
+ level = hd->arch.paging_mode;
BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 ||
level > IOMMU_PAGING_MODE_LEVEL_6 );
@@ -557,11 +557,11 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
unsigned long old_root_mfn;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- level = hd->paging_mode;
- old_root = hd->root_table;
+ level = hd->arch.paging_mode;
+ old_root = hd->arch.root_table;
offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
- ASSERT(spin_is_locked(&hd->mapping_lock) && is_hvm_domain(d));
+ ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
while ( offset >= PTE_PER_TABLE_SIZE )
{
@@ -587,8 +587,8 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
if ( new_root != NULL )
{
- hd->paging_mode = level;
- hd->root_table = new_root;
+ hd->arch.paging_mode = level;
+ hd->arch.root_table = new_root;
if ( !spin_is_locked(&pcidevs_lock) )
AMD_IOMMU_DEBUG("%s Try to access pdev_list "
@@ -613,9 +613,9 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
/* valid = 0 only works for dom0 passthrough mode */
amd_iommu_set_root_page_table((u32 *)device_entry,
- page_to_maddr(hd->root_table),
- hd->domain_id,
- hd->paging_mode, 1);
+ page_to_maddr(hd->arch.root_table),
+ hd->arch.domain_id,
+ hd->arch.paging_mode, 1);
amd_iommu_flush_device(iommu, req_id);
bdf += pdev->phantom_stride;
@@ -638,14 +638,14 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
unsigned long pt_mfn[7];
unsigned int merge_level;
- BUG_ON( !hd->root_table );
+ BUG_ON( !hd->arch.root_table );
if ( iommu_use_hap_pt(d) )
return 0;
memset(pt_mfn, 0, sizeof(pt_mfn));
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* Since HVM domain is initialized with 2 level IO page table,
* we might need a deeper page table for lager gfn now */
@@ -653,7 +653,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
{
if ( update_paging_mode(d, gfn) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
@@ -662,7 +662,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
@@ -684,7 +684,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
amd_iommu_flush_pages(d, gfn, 0);
for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
- merge_level <= hd->paging_mode; merge_level++ )
+ merge_level <= hd->arch.paging_mode; merge_level++ )
{
if ( pt_mfn[merge_level] == 0 )
break;
@@ -697,7 +697,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn,
flags, merge_level) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
"gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
domain_crash(d);
@@ -706,7 +706,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
}
out:
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return 0;
}
@@ -715,14 +715,14 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
unsigned long pt_mfn[7];
struct hvm_iommu *hd = domain_hvm_iommu(d);
- BUG_ON( !hd->root_table );
+ BUG_ON( !hd->arch.root_table );
if ( iommu_use_hap_pt(d) )
return 0;
memset(pt_mfn, 0, sizeof(pt_mfn));
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* Since HVM domain is initialized with 2 level IO page table,
* we might need a deeper page table for lager gfn now */
@@ -730,7 +730,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
if ( update_paging_mode(d, gfn) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
@@ -739,7 +739,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
domain_crash(d);
return -EFAULT;
@@ -747,7 +747,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
/* mark PTE as 'page not present' */
clear_iommu_pte_present(pt_mfn[1], gfn);
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
amd_iommu_flush_pages(d, gfn, 0);
@@ -792,13 +792,13 @@ void amd_iommu_share_p2m(struct domain *d)
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
p2m_table = mfn_to_page(mfn_x(pgd_mfn));
- if ( hd->root_table != p2m_table )
+ if ( hd->arch.root_table != p2m_table )
{
- free_amd_iommu_pgtable(hd->root_table);
- hd->root_table = p2m_table;
+ free_amd_iommu_pgtable(hd->arch.root_table);
+ hd->arch.root_table = p2m_table;
/* When sharing p2m with iommu, paging mode = 4 */
- hd->paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
+ hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n",
mfn_x(pgd_mfn));
}
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -120,7 +120,8 @@ static void amd_iommu_setup_domain_device(
struct hvm_iommu *hd = domain_hvm_iommu(domain);
- BUG_ON( !hd->root_table || !hd->paging_mode || !iommu->dev_table.buffer );
+ BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
+ !iommu->dev_table.buffer );
if ( iommu_passthrough && (domain->domain_id == 0) )
valid = 0;
@@ -138,8 +139,8 @@ static void amd_iommu_setup_domain_device(
{
/* bind DTE to domain page-tables */
amd_iommu_set_root_page_table(
- (u32 *)dte, page_to_maddr(hd->root_table), hd->domain_id,
- hd->paging_mode, valid);
+ (u32 *)dte, page_to_maddr(hd->arch.root_table), hd->arch.domain_id,
+ hd->arch.paging_mode, valid);
if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
@@ -151,8 +152,8 @@ static void amd_iommu_setup_domain_device(
"root table = %#"PRIx64", "
"domain = %d, paging mode = %d\n",
req_id, pdev->type,
- page_to_maddr(hd->root_table),
- hd->domain_id, hd->paging_mode);
+ page_to_maddr(hd->arch.root_table),
+ hd->arch.domain_id, hd->arch.paging_mode);
}
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -225,17 +226,17 @@ int __init amd_iov_detect(void)
static int allocate_domain_resources(struct hvm_iommu *hd)
{
/* allocate root table */
- spin_lock(&hd->mapping_lock);
- if ( !hd->root_table )
+ spin_lock(&hd->arch.mapping_lock);
+ if ( !hd->arch.root_table )
{
- hd->root_table = alloc_amd_iommu_pgtable();
- if ( !hd->root_table )
+ hd->arch.root_table = alloc_amd_iommu_pgtable();
+ if ( !hd->arch.root_table )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
}
}
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return 0;
}
@@ -262,18 +263,18 @@ static int amd_iommu_domain_init(struct domain *d)
/* allocate page directroy */
if ( allocate_domain_resources(hd) != 0 )
{
- if ( hd->root_table )
- free_domheap_page(hd->root_table);
+ if ( hd->arch.root_table )
+ free_domheap_page(hd->arch.root_table);
return -ENOMEM;
}
/* For pv and dom0, stick with get_paging_mode(max_page)
* For HVM dom0, use 2 level page table at first */
- hd->paging_mode = is_hvm_domain(d) ?
+ hd->arch.paging_mode = is_hvm_domain(d) ?
IOMMU_PAGING_MODE_LEVEL_2 :
get_paging_mode(max_page);
- hd->domain_id = d->domain_id;
+ hd->arch.domain_id = d->domain_id;
guest_iommu_init(d);
@@ -333,8 +334,8 @@ void amd_iommu_disable_domain_device(struct domain *domain,
AMD_IOMMU_DEBUG("Disable: device id = %#x, "
"domain = %d, paging mode = %d\n",
- req_id, domain_hvm_iommu(domain)->domain_id,
- domain_hvm_iommu(domain)->paging_mode);
+ req_id, domain_hvm_iommu(domain)->arch.domain_id,
+ domain_hvm_iommu(domain)->arch.paging_mode);
}
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -374,7 +375,7 @@ static int reassign_device(struct domain *source, struct domain *target,
/* IO page tables might be destroyed after pci-detach the last device
* In this case, we have to re-allocate root table for next pci-attach.*/
- if ( t->root_table == NULL )
+ if ( t->arch.root_table == NULL )
allocate_domain_resources(t);
amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
@@ -456,13 +457,13 @@ static void deallocate_iommu_page_tables(struct domain *d)
if ( iommu_use_hap_pt(d) )
return;
- spin_lock(&hd->mapping_lock);
- if ( hd->root_table )
+ spin_lock(&hd->arch.mapping_lock);
+ if ( hd->arch.root_table )
{
- deallocate_next_page_table(hd->root_table, hd->paging_mode);
- hd->root_table = NULL;
+ deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
+ hd->arch.root_table = NULL;
}
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
}
@@ -593,11 +594,11 @@ static void amd_dump_p2m_table(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- if ( !hd->root_table )
+ if ( !hd->arch.root_table )
return;
- printk("p2m table has %d levels\n", hd->paging_mode);
- amd_dump_p2m_table_level(hd->root_table, hd->paging_mode, 0, 0);
+ printk("p2m table has %d levels\n", hd->arch.paging_mode);
+ amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
}
const struct iommu_ops amd_iommu_ops = {
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -117,10 +117,11 @@ static void __init parse_iommu_param(char *s)
int iommu_domain_init(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
+ int ret = 0;
- spin_lock_init(&hd->mapping_lock);
- INIT_LIST_HEAD(&hd->g2m_ioport_list);
- INIT_LIST_HEAD(&hd->mapped_rmrrs);
+ ret = arch_iommu_domain_init(d);
+ if ( ret )
+ return ret;
if ( !iommu_enabled )
return 0;
@@ -190,10 +191,7 @@ void iommu_teardown(struct domain *d)
void iommu_domain_destroy(struct domain *d)
{
- struct hvm_iommu *hd = domain_hvm_iommu(d);
- struct list_head *ioport_list, *rmrr_list, *tmp;
- struct g2m_ioport *ioport;
- struct mapped_rmrr *mrmrr;
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops )
return;
@@ -201,20 +199,8 @@ void iommu_domain_destroy(struct domain *d)
if ( need_iommu(d) )
iommu_teardown(d);
- list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
- {
- ioport = list_entry(ioport_list, struct g2m_ioport, list);
- list_del(&ioport->list);
- xfree(ioport);
- }
-
- list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
- {
- mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
- list_del(&mrmrr->list);
- xfree(mrmrr);
- }
-}
+ arch_iommu_domain_destroy(d);
+}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
unsigned int flags)
@@ -328,14 +314,6 @@ void iommu_suspend()
ops->suspend();
}
-void iommu_share_p2m_table(struct domain* d)
-{
- const struct iommu_ops *ops = iommu_get_ops();
-
- if ( iommu_enabled && is_hvm_domain(d) )
- ops->share_p2m(d);
-}
-
void iommu_crash_shutdown(void)
{
const struct iommu_ops *ops = iommu_get_ops();
--- a/xen/drivers/passthrough/iommu_x86.c
+++ b/xen/drivers/passthrough/iommu_x86.c
@@ -55,6 +55,47 @@ int __init iommu_setup_hpet_msi(struct msi_desc *msi)
return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;
}
+void iommu_share_p2m_table(struct domain* d)
+{
+ const struct iommu_ops *ops = iommu_get_ops();
+
+ if ( iommu_enabled && is_hvm_domain(d) )
+ ops->share_p2m(d);
+}
+
+int arch_iommu_domain_init(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+ spin_lock_init(&hd->arch.mapping_lock);
+ INIT_LIST_HEAD(&hd->arch.g2m_ioport_list);
+ INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
+
+ return 0;
+}
+
+void arch_iommu_domain_destroy(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+ struct list_head *ioport_list, *rmrr_list, *tmp;
+ struct g2m_ioport *ioport;
+ struct mapped_rmrr *mrmrr;
+
+ list_for_each_safe ( ioport_list, tmp, &hd->arch.g2m_ioport_list )
+ {
+ ioport = list_entry(ioport_list, struct g2m_ioport, list);
+ list_del(&ioport->list);
+ xfree(ioport);
+ }
+
+ list_for_each_safe ( rmrr_list, tmp, &hd->arch.mapped_rmrrs )
+ {
+ mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
+ list_del(&mrmrr->list);
+ xfree(mrmrr);
+ }
+}
+
/*
* Local variables:
* mode: C
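
The x86 hooks above simply carry over the existing initialisation and teardown
logic. A port to another architecture would only need trivial counterparts; the
sketch below is purely illustrative (it is not part of this patch) and assumes
only the arch_iommu_domain_{init,destroy} declarations added to xen/iommu.h
further down, plus an arch header providing a (possibly empty) struct
arch_hvm_iommu:

    /* Illustrative stub only -- not part of this patch.  An architecture
     * with no per-domain IOMMU state yet could satisfy the new hooks
     * like this. */
    #include <xen/iommu.h>

    int arch_iommu_domain_init(struct domain *d)
    {
        /* Nothing to set up yet for this (hypothetical) architecture. */
        return 0;
    }

    void arch_iommu_domain_destroy(struct domain *d)
    {
        /* Nothing to tear down either. */
    }
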
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -249,16 +249,16 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
struct acpi_drhd_unit *drhd;
struct pci_dev *pdev;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
- int addr_width = agaw_to_width(hd->agaw);
+ int addr_width = agaw_to_width(hd->arch.agaw);
struct dma_pte *parent, *pte = NULL;
- int level = agaw_to_level(hd->agaw);
+ int level = agaw_to_level(hd->arch.agaw);
int offset;
u64 pte_maddr = 0, maddr;
u64 *vaddr = NULL;
addr &= (((u64)1) << addr_width) - 1;
- ASSERT(spin_is_locked(&hd->mapping_lock));
- if ( hd->pgd_maddr == 0 )
+ ASSERT(spin_is_locked(&hd->arch.mapping_lock));
+ if ( hd->arch.pgd_maddr == 0 )
{
/*
* just get any passthrough device in the domainr - assume user
@@ -266,11 +266,11 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
*/
pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
drhd = acpi_find_matched_drhd_unit(pdev);
- if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
+ if ( !alloc || ((hd->arch.pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
goto out;
}
- parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
+ parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.pgd_maddr);
while ( level > 1 )
{
offset = address_level_offset(addr, level);
@@ -580,7 +580,7 @@ static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
{
iommu = drhd->iommu;
- if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+ if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
continue;
flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
@@ -622,12 +622,12 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
u64 pg_maddr;
struct mapped_rmrr *mrmrr;
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* get last level pte */
pg_maddr = addr_to_dma_page_maddr(domain, addr, 0);
if ( pg_maddr == 0 )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return;
}
@@ -636,13 +636,13 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
if ( !dma_pte_present(*pte) )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
return;
}
dma_clear_pte(*pte);
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
if ( !this_cpu(iommu_dont_flush_iotlb) )
@@ -653,8 +653,8 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
/* if the cleared address is between mapped RMRR region,
* remove the mapped RMRR
*/
- spin_lock(&hd->mapping_lock);
- list_for_each_entry ( mrmrr, &hd->mapped_rmrrs, list )
+ spin_lock(&hd->arch.mapping_lock);
+ list_for_each_entry ( mrmrr, &hd->arch.mapped_rmrrs, list )
{
if ( addr >= mrmrr->base && addr <= mrmrr->end )
{
@@ -663,7 +663,7 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
break;
}
}
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
}
static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1248,7 +1248,7 @@ static int intel_iommu_domain_init(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
- hd->agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ hd->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
return 0;
}
@@ -1345,16 +1345,16 @@ int domain_context_mapping_one(
}
else
{
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
/* Ensure we have pagetables allocated down to leaf PTE. */
- if ( hd->pgd_maddr == 0 )
+ if ( hd->arch.pgd_maddr == 0 )
{
addr_to_dma_page_maddr(domain, 0, 1);
- if ( hd->pgd_maddr == 0 )
+ if ( hd->arch.pgd_maddr == 0 )
{
nomem:
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
spin_unlock(&iommu->lock);
unmap_vtd_domain_page(context_entries);
return -ENOMEM;
@@ -1362,7 +1362,7 @@ int domain_context_mapping_one(
}
/* Skip top levels of page tables for 2- and 3-level DRHDs. */
- pgd_maddr = hd->pgd_maddr;
+ pgd_maddr = hd->arch.pgd_maddr;
for ( agaw = level_to_agaw(4);
agaw != level_to_agaw(iommu->nr_pt_levels);
agaw-- )
@@ -1380,7 +1380,7 @@ int domain_context_mapping_one(
else
context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
}
if ( context_set_domain_id(context, domain, iommu) )
@@ -1406,7 +1406,7 @@ int domain_context_mapping_one(
iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
}
- set_bit(iommu->index, &hd->iommu_bitmap);
+ set_bit(iommu->index, &hd->arch.iommu_bitmap);
unmap_vtd_domain_page(context_entries);
@@ -1652,7 +1652,7 @@ static int domain_context_unmap(
struct hvm_iommu *hd = domain_hvm_iommu(domain);
int iommu_domid;
- clear_bit(iommu->index, &hd->iommu_bitmap);
+ clear_bit(iommu->index, &hd->arch.iommu_bitmap);
iommu_domid = domain_iommu_domid(domain, iommu);
if ( iommu_domid == -1 )
@@ -1711,10 +1711,10 @@ static void iommu_domain_teardown(struct domain *d)
if ( iommu_use_hap_pt(d) )
return;
- spin_lock(&hd->mapping_lock);
- iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
- hd->pgd_maddr = 0;
- spin_unlock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
+ iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
+ hd->arch.pgd_maddr = 0;
+ spin_unlock(&hd->arch.mapping_lock);
}
static int intel_iommu_map_page(
@@ -1733,12 +1733,12 @@ static int intel_iommu_map_page(
if ( iommu_passthrough && (d->domain_id == 0) )
return 0;
- spin_lock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
if ( pg_maddr == 0 )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
}
page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -1755,14 +1755,14 @@ static int intel_iommu_map_page(
if ( old.val == new.val )
{
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
return 0;
}
*pte = new;
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
- spin_unlock(&hd->mapping_lock);
+ spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
if ( !this_cpu(iommu_dont_flush_iotlb) )
@@ -1796,7 +1796,7 @@ void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
for_each_drhd_unit ( drhd )
{
iommu = drhd->iommu;
- if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+ if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
continue;
flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
@@ -1837,7 +1837,7 @@ static void iommu_set_pgd(struct domain *d)
return;
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
- hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+ hd->arch.pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
}
static int rmrr_identity_mapping(struct domain *d,
@@ -1852,10 +1852,10 @@ static int rmrr_identity_mapping(struct domain *d,
ASSERT(rmrr->base_address < rmrr->end_address);
/*
- * No need to acquire hd->mapping_lock, as the only theoretical race is
+ * No need to acquire hd->arch.mapping_lock, as the only theoretical race is
* with the insertion below (impossible due to holding pcidevs_lock).
*/
- list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
+ list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list )
{
if ( mrmrr->base == rmrr->base_address &&
mrmrr->end == rmrr->end_address )
@@ -1880,9 +1880,9 @@ static int rmrr_identity_mapping(struct domain *d,
return -ENOMEM;
mrmrr->base = rmrr->base_address;
mrmrr->end = rmrr->end_address;
- spin_lock(&hd->mapping_lock);
- list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
- spin_unlock(&hd->mapping_lock);
+ spin_lock(&hd->arch.mapping_lock);
+ list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);
+ spin_unlock(&hd->arch.mapping_lock);
return 0;
}
@@ -2427,8 +2427,8 @@ static void vtd_dump_p2m_table(struct domain *d)
return;
hd = domain_hvm_iommu(d);
- printk("p2m table has %d levels\n", agaw_to_level(hd->agaw));
- vtd_dump_p2m_table_level(hd->pgd_maddr, agaw_to_level(hd->agaw), 0, 0);
+ printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw));
+ vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
}
const struct iommu_ops intel_iommu_ops = {
--- a/xen/include/asm-x86/hvm/iommu.h
+++ b/xen/include/asm-x86/hvm/iommu.h
@@ -39,4 +39,33 @@ static inline int iommu_hardware_setup(void)
return 0;
}
+struct g2m_ioport {
+ struct list_head list;
+ unsigned int gport;
+ unsigned int mport;
+ unsigned int np;
+};
+
+struct mapped_rmrr {
+ struct list_head list;
+ u64 base;
+ u64 end;
+};
+
+struct arch_hvm_iommu
+{
+ u64 pgd_maddr; /* io page directory machine address */
+ int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
+ u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
+ /* amd iommu support */
+ int domain_id;
+ int paging_mode;
+ struct page_info *root_table;
+ struct guest_iommu *g_iommu;
+
+ struct list_head g2m_ioport_list; /* guest to machine ioport mapping */
+ struct list_head mapped_rmrrs;
+ spinlock_t mapping_lock; /* io page table lock */
+};
+
#endif /* __ASM_X86_HVM_IOMMU_H__ */
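
For clarity, the net effect on callers: every former hd->FIELD access in the
hunks above now goes through the new arch member, while the way a domain's
hvm_iommu is obtained is unchanged. A schematic summary (hypothetical helper,
shown only to illustrate the pattern, not part of this patch):

    /* Schematic recap of the converted access pattern -- hypothetical
     * helper, not part of this patch. */
    static void show_new_access_path(struct domain *d)
    {
        struct hvm_iommu *hd = domain_hvm_iommu(d); /* &d->arch.hvm_domain.hvm_iommu */

        spin_lock(&hd->arch.mapping_lock);          /* was hd->mapping_lock */
        /* ... walk hd->arch.root_table or hd->arch.pgd_maddr ... */
        spin_unlock(&hd->arch.mapping_lock);
    }
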
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -19,6 +19,10 @@
#include <asm/msi.h>
+/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
+#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
+#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
+
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);
--- a/xen/include/xen/hvm/iommu.h
+++ b/xen/include/xen/hvm/iommu.h
@@ -23,32 +23,8 @@
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
-struct g2m_ioport {
- struct list_head list;
- unsigned int gport;
- unsigned int mport;
- unsigned int np;
-};
-
-struct mapped_rmrr {
- struct list_head list;
- u64 base;
- u64 end;
-};
-
struct hvm_iommu {
- u64 pgd_maddr; /* io page directory machine address */
- spinlock_t mapping_lock; /* io page table lock */
- int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
- struct list_head g2m_ioport_list; /* guest to machine ioport mapping */
- u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
- struct list_head mapped_rmrrs;
-
- /* amd iommu support */
- int domain_id;
- int paging_mode;
- struct page_info *root_table;
- struct guest_iommu *g_iommu;
+ struct arch_hvm_iommu arch;
/* iommu_ops */
const struct iommu_ops *platform_ops;
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -35,11 +35,6 @@ extern bool_t iommu_hap_pt_share;
extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;
-/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
-#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
-
-#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
-
#define PAGE_SHIFT_4K (12)
#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
@@ -55,6 +50,9 @@ void iommu_dom0_init(struct domain *d);
void iommu_domain_destroy(struct domain *d);
int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn);
+void arch_iommu_domain_destroy(struct domain *d);
+int arch_iommu_domain_init(struct domain *d);
+
/* Function used internally, use iommu_domain_destroy */
void iommu_teardown(struct domain *d);
Currently the structure hvm_iommu (xen/include/xen/hvm/iommu.h) contains x86 specific fields. This patch creates: - arch_hvm_iommu structure which will contain architecture depend fields - arch_iommu_domain_{init,destroy} function to execute arch specific during domain creation/destruction Also move iommu_use_hap_pt and domain_hvm_iommu in asm-x86/iommu.h. Signed-off-by: Julien Grall <julien.grall@linaro.org> Cc: Keir Fraser <keir@xen.org> Cc: Jan Beulich <jbeulich@suse.com> Cc: Joseph Cihula <joseph.cihula@intel.com> Cc: Gang Wei <gang.wei@intel.com> Cc: Shane Wang <shane.wang@intel.com> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Cc: Xiantao Zhang <xiantao.zhang@intel.com> --- xen/arch/x86/domctl.c | 6 +- xen/arch/x86/hvm/io.c | 2 +- xen/arch/x86/tboot.c | 3 +- xen/drivers/passthrough/amd/iommu_cmd.c | 2 +- xen/drivers/passthrough/amd/iommu_guest.c | 8 +-- xen/drivers/passthrough/amd/iommu_map.c | 56 +++++++++---------- xen/drivers/passthrough/amd/pci_amd_iommu.c | 53 +++++++++--------- xen/drivers/passthrough/iommu.c | 36 +++--------- xen/drivers/passthrough/iommu_x86.c | 41 ++++++++++++++ xen/drivers/passthrough/vtd/iommu.c | 80 +++++++++++++-------------- xen/include/asm-x86/hvm/iommu.h | 29 ++++++++++ xen/include/asm-x86/iommu.h | 4 ++ xen/include/xen/hvm/iommu.h | 26 +-------- xen/include/xen/iommu.h | 8 +-- 14 files changed, 191 insertions(+), 163 deletions(-)