
[Xen-devel,v3,07/13] xen/passthrough: iommu: Introduce arch specific code

Message ID 1394552999-14171-8-git-send-email-julien.grall@linaro.org
State Superseded, archived
Headers show

Commit Message

Julien Grall March 11, 2014, 3:49 p.m. UTC
Currently the structure hvm_iommu (xen/include/xen/hvm/iommu.h) contains
x86-specific fields.

This patch creates:
    - an arch_hvm_iommu structure which will contain architecture-dependent
    fields
    - arch_iommu_domain_{init,destroy} functions to execute arch-specific
    code during domain creation/destruction

Also move iommu_use_hap_pt and domain_hvm_iommu to asm-x86/iommu.h.
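
For illustration, the net effect is that the x86-only fields end up in a new
arch_hvm_iommu embedded in the common structure, and callers gain an extra
".arch" step (abridged sketch of the hunks below, not the complete
definitions):

struct hvm_iommu {
    struct arch_hvm_iommu arch;            /* pgd_maddr, agaw, iommu_bitmap,
                                              paging_mode, root_table, g_iommu,
                                              g2m_ioport_list, mapped_rmrrs,
                                              mapping_lock */
    const struct iommu_ops *platform_ops;  /* iommu_ops, unchanged */
};

/* Callers in the VT-d/AMD drivers and x86 code switch accordingly, e.g.: */
spin_lock(&hd->arch.mapping_lock);         /* was: spin_lock(&hd->mapping_lock); */

/* Common code defers the x86-only setup/teardown to the new hooks: */
int arch_iommu_domain_init(struct domain *d);      /* lock and list initialisation */
void arch_iommu_domain_destroy(struct domain *d);  /* frees the g2m_ioport and mapped_rmrr lists */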

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Joseph Cihula <joseph.cihula@intel.com>
Cc: Gang Wei <gang.wei@intel.com>
Cc: Shane Wang <shane.wang@intel.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Xiantao Zhang <xiantao.zhang@intel.com>
---
 xen/arch/x86/domctl.c                       |    6 +-
 xen/arch/x86/hvm/io.c                       |    2 +-
 xen/arch/x86/tboot.c                        |    3 +-
 xen/drivers/passthrough/amd/iommu_guest.c   |    8 +--
 xen/drivers/passthrough/amd/iommu_map.c     |   54 +++++++++---------
 xen/drivers/passthrough/amd/pci_amd_iommu.c |   49 ++++++++--------
 xen/drivers/passthrough/iommu.c             |   28 +++-------
 xen/drivers/passthrough/vtd/iommu.c         |   80 +++++++++++++--------------
 xen/drivers/passthrough/x86/iommu.c         |   41 ++++++++++++++
 xen/include/asm-x86/hvm/iommu.h             |   28 ++++++++++
 xen/include/asm-x86/iommu.h                 |    4 +-
 xen/include/xen/hvm/iommu.h                 |   25 +--------
 xen/include/xen/iommu.h                     |   16 +++---
 13 files changed, 190 insertions(+), 154 deletions(-)

Comments

Julien Grall March 11, 2014, 4:15 p.m. UTC | #1
On 03/11/2014 03:49 PM, Julien Grall wrote:
> +void iommu_share_p2m_table(struct domain* d)
> +{
> +    const struct iommu_ops *ops = iommu_get_ops();
> +
> +    if ( iommu_enabled && is_hvm_domain(d) )
> +        ops->share_p2m(d);
> +}

Hmmm ... I should have removed this function from
passthrough/x86/iommu.c. By mistake it duplicates the one in
passthrough/iommu.c
Jan Beulich March 11, 2014, 4:53 p.m. UTC | #2
>>> On 11.03.14 at 16:49, Julien Grall <julien.grall@linaro.org> wrote:
> Currently the structure hvm_iommu (xen/include/xen/hvm/iommu.h) contains
> x86 specific fields.
> 
> This patch creates:
>     - arch_hvm_iommu structure which will contain architecture depend
>     fields
>     - arch_iommu_domain_{init,destroy} function to execute arch
>     specific during domain creation/destruction
> 
> Also move iommu_use_hap_pt and domain_hvm_iommu in asm-x86/iommu.h.
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>
> Cc: Keir Fraser <keir@xen.org>

Acked-by: Jan Beulich <jbeulich@suse.com>

> Cc: Joseph Cihula <joseph.cihula@intel.com>
> Cc: Gang Wei <gang.wei@intel.com>
> Cc: Shane Wang <shane.wang@intel.com>
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> Cc: Xiantao Zhang <xiantao.zhang@intel.com>
> ---
>  xen/arch/x86/domctl.c                       |    6 +-
>  xen/arch/x86/hvm/io.c                       |    2 +-
>  xen/arch/x86/tboot.c                        |    3 +-
>  xen/drivers/passthrough/amd/iommu_guest.c   |    8 +--
>  xen/drivers/passthrough/amd/iommu_map.c     |   54 +++++++++---------
>  xen/drivers/passthrough/amd/pci_amd_iommu.c |   49 ++++++++--------
>  xen/drivers/passthrough/iommu.c             |   28 +++-------
>  xen/drivers/passthrough/vtd/iommu.c         |   80 +++++++++++++--------------
>  xen/drivers/passthrough/x86/iommu.c         |   41 ++++++++++++++
>  xen/include/asm-x86/hvm/iommu.h             |   28 ++++++++++
>  xen/include/asm-x86/iommu.h                 |    4 +-
>  xen/include/xen/hvm/iommu.h                 |   25 +--------
>  xen/include/xen/iommu.h                     |   16 +++---
>  13 files changed, 190 insertions(+), 154 deletions(-)
> 
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 26635ff..e55d9d5 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -745,7 +745,7 @@ long arch_do_domctl(
>                     "ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
>                     d->domain_id, fgp, fmp, np);
>  
> -            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
> +            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
>                  if (g2m_ioport->mport == fmp )
>                  {
>                      g2m_ioport->gport = fgp;
> @@ -764,7 +764,7 @@ long arch_do_domctl(
>                  g2m_ioport->gport = fgp;
>                  g2m_ioport->mport = fmp;
>                  g2m_ioport->np = np;
> -                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
> +                list_add_tail(&g2m_ioport->list, &hd->arch.g2m_ioport_list);
>              }
>              if ( !ret )
>                  ret = ioports_permit_access(d, fmp, fmp + np - 1);
> @@ -779,7 +779,7 @@ long arch_do_domctl(
>              printk(XENLOG_G_INFO
>                     "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
>                     d->domain_id, fgp, fmp, np);
> -            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
> +            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
>                  if ( g2m_ioport->mport == fmp )
>                  {
>                      list_del(&g2m_ioport->list);
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index bf6309d..ddb03f8 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -451,7 +451,7 @@ int dpci_ioport_intercept(ioreq_t *p)
>      unsigned int s = 0, e = 0;
>      int rc;
>  
> -    list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
> +    list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
>      {
>          s = g2m_ioport->gport;
>          e = s + g2m_ioport->np;
> diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
> index ccde4a0..c40fe12 100644
> --- a/xen/arch/x86/tboot.c
> +++ b/xen/arch/x86/tboot.c
> @@ -230,7 +230,8 @@ static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
>          if ( !is_idle_domain(d) )
>          {
>              struct hvm_iommu *hd = domain_hvm_iommu(d);
> -            update_iommu_mac(&ctx, hd->pgd_maddr, agaw_to_level(hd->agaw));
> +            update_iommu_mac(&ctx, hd->arch.pgd_maddr,
> +                             agaw_to_level(hd->arch.agaw));
>          }
>      }
>  
> diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
> index 477de20..bd31bb5 100644
> --- a/xen/drivers/passthrough/amd/iommu_guest.c
> +++ b/xen/drivers/passthrough/amd/iommu_guest.c
> @@ -60,12 +60,12 @@ static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
>  
>  static inline struct guest_iommu *domain_iommu(struct domain *d)
>  {
> -    return domain_hvm_iommu(d)->g_iommu;
> +    return domain_hvm_iommu(d)->arch.g_iommu;
>  }
>  
>  static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
>  {
> -    return domain_hvm_iommu(v->domain)->g_iommu;
> +    return domain_hvm_iommu(v->domain)->arch.g_iommu;
>  }
>  
>  static void guest_iommu_enable(struct guest_iommu *iommu)
> @@ -886,7 +886,7 @@ int guest_iommu_init(struct domain* d)
>  
>      guest_iommu_reg_init(iommu);
>      iommu->domain = d;
> -    hd->g_iommu = iommu;
> +    hd->arch.g_iommu = iommu;
>  
>      tasklet_init(&iommu->cmd_buffer_tasklet,
>                   guest_iommu_process_command, (unsigned long)d);
> @@ -907,7 +907,7 @@ void guest_iommu_destroy(struct domain *d)
>      tasklet_kill(&iommu->cmd_buffer_tasklet);
>      xfree(iommu);
>  
> -    domain_hvm_iommu(d)->g_iommu = NULL;
> +    domain_hvm_iommu(d)->arch.g_iommu = NULL;
>  }
>  
>  static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
> diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
> index b79e470..ceb1c28 100644
> --- a/xen/drivers/passthrough/amd/iommu_map.c
> +++ b/xen/drivers/passthrough/amd/iommu_map.c
> @@ -344,7 +344,7 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
>      struct hvm_iommu *hd = domain_hvm_iommu(d);
>      bool_t ok = 0;
>  
> -    ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
> +    ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
>  
>      next_level = merge_level - 1;
>  
> @@ -398,7 +398,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
>      unsigned long first_mfn;
>      struct hvm_iommu *hd = domain_hvm_iommu(d);
>  
> -    ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
> +    ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
>  
>      table = map_domain_page(pt_mfn);
>      pde = table + pfn_to_pde_idx(gfn, merge_level);
> @@ -448,8 +448,8 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
>      struct page_info *table;
>      struct hvm_iommu *hd = domain_hvm_iommu(d);
>  
> -    table = hd->root_table;
> -    level = hd->paging_mode;
> +    table = hd->arch.root_table;
> +    level = hd->arch.paging_mode;
>  
>      BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 || 
>              level > IOMMU_PAGING_MODE_LEVEL_6 );
> @@ -557,11 +557,11 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
>      unsigned long old_root_mfn;
>      struct hvm_iommu *hd = domain_hvm_iommu(d);
>  
> -    level = hd->paging_mode;
> -    old_root = hd->root_table;
> +    level = hd->arch.paging_mode;
> +    old_root = hd->arch.root_table;
>      offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
>  
> -    ASSERT(spin_is_locked(&hd->mapping_lock) && is_hvm_domain(d));
> +    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
>  
>      while ( offset >= PTE_PER_TABLE_SIZE )
>      {
> @@ -587,8 +587,8 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
>  
>      if ( new_root != NULL )
>      {
> -        hd->paging_mode = level;
> -        hd->root_table = new_root;
> +        hd->arch.paging_mode = level;
> +        hd->arch.root_table = new_root;
>  
>          if ( !spin_is_locked(&pcidevs_lock) )
>              AMD_IOMMU_DEBUG("%s Try to access pdev_list "
> @@ -613,9 +613,9 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
>  
>                  /* valid = 0 only works for dom0 passthrough mode */
>                  amd_iommu_set_root_page_table((u32 *)device_entry,
> -                                              page_to_maddr(hd->root_table),
> +                                              page_to_maddr(hd->arch.root_table),
>                                                d->domain_id,
> -                                              hd->paging_mode, 1);
> +                                              hd->arch.paging_mode, 1);
>  
>                  amd_iommu_flush_device(iommu, req_id);
>                  bdf += pdev->phantom_stride;
> @@ -638,14 +638,14 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
>      unsigned long pt_mfn[7];
>      unsigned int merge_level;
>  
> -    BUG_ON( !hd->root_table );
> +    BUG_ON( !hd->arch.root_table );
>  
>      if ( iommu_use_hap_pt(d) )
>          return 0;
>  
>      memset(pt_mfn, 0, sizeof(pt_mfn));
>  
> -    spin_lock(&hd->mapping_lock);
> +    spin_lock(&hd->arch.mapping_lock);
>  
>      /* Since HVM domain is initialized with 2 level IO page table,
>       * we might need a deeper page table for lager gfn now */
> @@ -653,7 +653,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
>      {
>          if ( update_paging_mode(d, gfn) )
>          {
> -            spin_unlock(&hd->mapping_lock);
> +            spin_unlock(&hd->arch.mapping_lock);
>              AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
>              domain_crash(d);
>              return -EFAULT;
> @@ -662,7 +662,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
>  
>      if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
>      {
> -        spin_unlock(&hd->mapping_lock);
> +        spin_unlock(&hd->arch.mapping_lock);
>          AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
>          domain_crash(d);
>          return -EFAULT;
> @@ -684,7 +684,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
>          amd_iommu_flush_pages(d, gfn, 0);
>  
>      for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
> -          merge_level <= hd->paging_mode; merge_level++ )
> +          merge_level <= hd->arch.paging_mode; merge_level++ )
>      {
>          if ( pt_mfn[merge_level] == 0 )
>              break;
> @@ -697,7 +697,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
>          if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn, 
>                                 flags, merge_level) )
>          {
> -            spin_unlock(&hd->mapping_lock);
> +            spin_unlock(&hd->arch.mapping_lock);
>              AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
>                              "gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
>              domain_crash(d);
> @@ -706,7 +706,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
>      }
>  
>  out:
> -    spin_unlock(&hd->mapping_lock);
> +    spin_unlock(&hd->arch.mapping_lock);
>      return 0;
>  }
>  
> @@ -715,14 +715,14 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
>      unsigned long pt_mfn[7];
>      struct hvm_iommu *hd = domain_hvm_iommu(d);
>  
> -    BUG_ON( !hd->root_table );
> +    BUG_ON( !hd->arch.root_table );
>  
>      if ( iommu_use_hap_pt(d) )
>          return 0;
>  
>      memset(pt_mfn, 0, sizeof(pt_mfn));
>  
> -    spin_lock(&hd->mapping_lock);
> +    spin_lock(&hd->arch.mapping_lock);
>  
>      /* Since HVM domain is initialized with 2 level IO page table,
>       * we might need a deeper page table for lager gfn now */
> @@ -730,7 +730,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
>      {
>          if ( update_paging_mode(d, gfn) )
>          {
> -            spin_unlock(&hd->mapping_lock);
> +            spin_unlock(&hd->arch.mapping_lock);
>              AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
>              domain_crash(d);
>              return -EFAULT;
> @@ -739,7 +739,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
>  
>      if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
>      {
> -        spin_unlock(&hd->mapping_lock);
> +        spin_unlock(&hd->arch.mapping_lock);
>          AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
>          domain_crash(d);
>          return -EFAULT;
> @@ -747,7 +747,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
>  
>      /* mark PTE as 'page not present' */
>      clear_iommu_pte_present(pt_mfn[1], gfn);
> -    spin_unlock(&hd->mapping_lock);
> +    spin_unlock(&hd->arch.mapping_lock);
>  
>      amd_iommu_flush_pages(d, gfn, 0);
>  
> @@ -792,13 +792,13 @@ void amd_iommu_share_p2m(struct domain *d)
>      pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
>      p2m_table = mfn_to_page(mfn_x(pgd_mfn));
>  
> -    if ( hd->root_table != p2m_table )
> +    if ( hd->arch.root_table != p2m_table )
>      {
> -        free_amd_iommu_pgtable(hd->root_table);
> -        hd->root_table = p2m_table;
> +        free_amd_iommu_pgtable(hd->arch.root_table);
> +        hd->arch.root_table = p2m_table;
>  
>          /* When sharing p2m with iommu, paging mode = 4 */
> -        hd->paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
> +        hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
>          AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n",
>                          mfn_x(pgd_mfn));
>      }
> diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> index 79f4a77..aeefabb 100644
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -120,7 +120,8 @@ static void amd_iommu_setup_domain_device(
>  
>      struct hvm_iommu *hd = domain_hvm_iommu(domain);
>  
> -    BUG_ON( !hd->root_table || !hd->paging_mode || !iommu->dev_table.buffer );
> +    BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
> +            !iommu->dev_table.buffer );
>  
>      if ( iommu_passthrough && (domain->domain_id == 0) )
>          valid = 0;
> @@ -138,8 +139,8 @@ static void amd_iommu_setup_domain_device(
>      {
>          /* bind DTE to domain page-tables */
>          amd_iommu_set_root_page_table(
> -            (u32 *)dte, page_to_maddr(hd->root_table), domain->domain_id,
> -            hd->paging_mode, valid);
> +            (u32 *)dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
> +            hd->arch.paging_mode, valid);
>  
>          if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
>               iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
> @@ -151,8 +152,8 @@ static void amd_iommu_setup_domain_device(
>                          "root table = %#"PRIx64", "
>                          "domain = %d, paging mode = %d\n",
>                          req_id, pdev->type,
> -                        page_to_maddr(hd->root_table),
> -                        domain->domain_id, hd->paging_mode);
> +                        page_to_maddr(hd->arch.root_table),
> +                        domain->domain_id, hd->arch.paging_mode);
>      }
>  
>      spin_unlock_irqrestore(&iommu->lock, flags);
> @@ -225,17 +226,17 @@ int __init amd_iov_detect(void)
>  static int allocate_domain_resources(struct hvm_iommu *hd)
>  {
>      /* allocate root table */
> -    spin_lock(&hd->mapping_lock);
> -    if ( !hd->root_table )
> +    spin_lock(&hd->arch.mapping_lock);
> +    if ( !hd->arch.root_table )
>      {
> -        hd->root_table = alloc_amd_iommu_pgtable();
> -        if ( !hd->root_table )
> +        hd->arch.root_table = alloc_amd_iommu_pgtable();
> +        if ( !hd->arch.root_table )
>          {
> -            spin_unlock(&hd->mapping_lock);
> +            spin_unlock(&hd->arch.mapping_lock);
>              return -ENOMEM;
>          }
>      }
> -    spin_unlock(&hd->mapping_lock);
> +    spin_unlock(&hd->arch.mapping_lock);
>      return 0;
>  }
>  
> @@ -262,14 +263,14 @@ static int amd_iommu_domain_init(struct domain *d)
>      /* allocate page directroy */
>      if ( allocate_domain_resources(hd) != 0 )
>      {
> -        if ( hd->root_table )
> -            free_domheap_page(hd->root_table);
> +        if ( hd->arch.root_table )
> +            free_domheap_page(hd->arch.root_table);
>          return -ENOMEM;
>      }
>  
>      /* For pv and dom0, stick with get_paging_mode(max_page)
>       * For HVM dom0, use 2 level page table at first */
> -    hd->paging_mode = is_hvm_domain(d) ?
> +    hd->arch.paging_mode = is_hvm_domain(d) ?
>                        IOMMU_PAGING_MODE_LEVEL_2 :
>                        get_paging_mode(max_page);
>  
> @@ -332,7 +333,7 @@ void amd_iommu_disable_domain_device(struct domain *domain,
>          AMD_IOMMU_DEBUG("Disable: device id = %#x, "
>                          "domain = %d, paging mode = %d\n",
>                          req_id,  domain->domain_id,
> -                        domain_hvm_iommu(domain)->paging_mode);
> +                        domain_hvm_iommu(domain)->arch.paging_mode);
>      }
>      spin_unlock_irqrestore(&iommu->lock, flags);
>  
> @@ -372,7 +373,7 @@ static int reassign_device(struct domain *source, struct domain *target,
>  
>      /* IO page tables might be destroyed after pci-detach the last device
>       * In this case, we have to re-allocate root table for next pci-attach.*/
> -    if ( t->root_table == NULL )
> +    if ( t->arch.root_table == NULL )
>          allocate_domain_resources(t);
>  
>      amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
> @@ -454,13 +455,13 @@ static void deallocate_iommu_page_tables(struct domain *d)
>      if ( iommu_use_hap_pt(d) )
>          return;
>  
> -    spin_lock(&hd->mapping_lock);
> -    if ( hd->root_table )
> +    spin_lock(&hd->arch.mapping_lock);
> +    if ( hd->arch.root_table )
>      {
> -        deallocate_next_page_table(hd->root_table, hd->paging_mode);
> -        hd->root_table = NULL;
> +        deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
> +        hd->arch.root_table = NULL;
>      }
> -    spin_unlock(&hd->mapping_lock);
> +    spin_unlock(&hd->arch.mapping_lock);
>  }
>  
>  
> @@ -591,11 +592,11 @@ static void amd_dump_p2m_table(struct domain *d)
>  {
>      struct hvm_iommu *hd  = domain_hvm_iommu(d);
>  
> -    if ( !hd->root_table ) 
> +    if ( !hd->arch.root_table ) 
>          return;
>  
> -    printk("p2m table has %d levels\n", hd->paging_mode);
> -    amd_dump_p2m_table_level(hd->root_table, hd->paging_mode, 0, 0);
> +    printk("p2m table has %d levels\n", hd->arch.paging_mode);
> +    amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
>  }
>  
>  const struct iommu_ops amd_iommu_ops = {
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index 8a2fdea..9cd996a 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -117,10 +117,11 @@ static void __init parse_iommu_param(char *s)
>  int iommu_domain_init(struct domain *d)
>  {
>      struct hvm_iommu *hd = domain_hvm_iommu(d);
> +    int ret = 0;
>  
> -    spin_lock_init(&hd->mapping_lock);
> -    INIT_LIST_HEAD(&hd->g2m_ioport_list);
> -    INIT_LIST_HEAD(&hd->mapped_rmrrs);
> +    ret = arch_iommu_domain_init(d);
> +    if ( ret )
> +        return ret;
>  
>      if ( !iommu_enabled )
>          return 0;
> @@ -189,10 +190,7 @@ void iommu_teardown(struct domain *d)
>  
>  void iommu_domain_destroy(struct domain *d)
>  {
> -    struct hvm_iommu *hd  = domain_hvm_iommu(d);
> -    struct list_head *ioport_list, *rmrr_list, *tmp;
> -    struct g2m_ioport *ioport;
> -    struct mapped_rmrr *mrmrr;
> +    struct hvm_iommu *hd = domain_hvm_iommu(d);
>  
>      if ( !iommu_enabled || !hd->platform_ops )
>          return;
> @@ -200,20 +198,8 @@ void iommu_domain_destroy(struct domain *d)
>      if ( need_iommu(d) )
>          iommu_teardown(d);
>  
> -    list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
> -    {
> -        ioport = list_entry(ioport_list, struct g2m_ioport, list);
> -        list_del(&ioport->list);
> -        xfree(ioport);
> -    }
> -
> -    list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
> -    {
> -        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
> -        list_del(&mrmrr->list);
> -        xfree(mrmrr);
> -    }
> -}
> +    arch_iommu_domain_destroy(d);
> + }
>  
>  int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
>                     unsigned int flags)
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index d4be75c..8efe6f9 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -248,16 +248,16 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
>      struct acpi_drhd_unit *drhd;
>      struct pci_dev *pdev;
>      struct hvm_iommu *hd = domain_hvm_iommu(domain);
> -    int addr_width = agaw_to_width(hd->agaw);
> +    int addr_width = agaw_to_width(hd->arch.agaw);
>      struct dma_pte *parent, *pte = NULL;
> -    int level = agaw_to_level(hd->agaw);
> +    int level = agaw_to_level(hd->arch.agaw);
>      int offset;
>      u64 pte_maddr = 0, maddr;
>      u64 *vaddr = NULL;
>  
>      addr &= (((u64)1) << addr_width) - 1;
> -    ASSERT(spin_is_locked(&hd->mapping_lock));
> -    if ( hd->pgd_maddr == 0 )
> +    ASSERT(spin_is_locked(&hd->arch.mapping_lock));
> +    if ( hd->arch.pgd_maddr == 0 )
>      {
>          /*
>           * just get any passthrough device in the domainr - assume user
> @@ -265,11 +265,11 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
>           */
>          pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
>          drhd = acpi_find_matched_drhd_unit(pdev);
> -        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
> +        if ( !alloc || ((hd->arch.pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
>              goto out;
>      }
>  
> -    parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
> +    parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.pgd_maddr);
>      while ( level > 1 )
>      {
>          offset = address_level_offset(addr, level);
> @@ -579,7 +579,7 @@ static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
>      {
>          iommu = drhd->iommu;
>  
> -        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
> +        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
>              continue;
>  
>          flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
> @@ -621,12 +621,12 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
>      u64 pg_maddr;
>      struct mapped_rmrr *mrmrr;
>  
> -    spin_lock(&hd->mapping_lock);
> +    spin_lock(&hd->arch.mapping_lock);
>      /* get last level pte */
>      pg_maddr = addr_to_dma_page_maddr(domain, addr, 0);
>      if ( pg_maddr == 0 )
>      {
> -        spin_unlock(&hd->mapping_lock);
> +        spin_unlock(&hd->arch.mapping_lock);
>          return;
>      }
>  
> @@ -635,13 +635,13 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
>  
>      if ( !dma_pte_present(*pte) )
>      {
> -        spin_unlock(&hd->mapping_lock);
> +        spin_unlock(&hd->arch.mapping_lock);
>          unmap_vtd_domain_page(page);
>          return;
>      }
>  
>      dma_clear_pte(*pte);
> -    spin_unlock(&hd->mapping_lock);
> +    spin_unlock(&hd->arch.mapping_lock);
>      iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
>  
>      if ( !this_cpu(iommu_dont_flush_iotlb) )
> @@ -652,8 +652,8 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
>      /* if the cleared address is between mapped RMRR region,
>       * remove the mapped RMRR
>       */
> -    spin_lock(&hd->mapping_lock);
> -    list_for_each_entry ( mrmrr, &hd->mapped_rmrrs, list )
> +    spin_lock(&hd->arch.mapping_lock);
> +    list_for_each_entry ( mrmrr, &hd->arch.mapped_rmrrs, list )
>      {
>          if ( addr >= mrmrr->base && addr <= mrmrr->end )
>          {
> @@ -662,7 +662,7 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
>              break;
>          }
>      }
> -    spin_unlock(&hd->mapping_lock);
> +    spin_unlock(&hd->arch.mapping_lock);
>  }
>  
>  static void iommu_free_pagetable(u64 pt_maddr, int level)
> @@ -1247,7 +1247,7 @@ static int intel_iommu_domain_init(struct domain *d)
>  {
>      struct hvm_iommu *hd = domain_hvm_iommu(d);
>  
> -    hd->agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
> +    hd->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
>  
>      return 0;
>  }
> @@ -1344,16 +1344,16 @@ int domain_context_mapping_one(
>      }
>      else
>      {
> -        spin_lock(&hd->mapping_lock);
> +        spin_lock(&hd->arch.mapping_lock);
>  
>          /* Ensure we have pagetables allocated down to leaf PTE. */
> -        if ( hd->pgd_maddr == 0 )
> +        if ( hd->arch.pgd_maddr == 0 )
>          {
>              addr_to_dma_page_maddr(domain, 0, 1);
> -            if ( hd->pgd_maddr == 0 )
> +            if ( hd->arch.pgd_maddr == 0 )
>              {
>              nomem:
> -                spin_unlock(&hd->mapping_lock);
> +                spin_unlock(&hd->arch.mapping_lock);
>                  spin_unlock(&iommu->lock);
>                  unmap_vtd_domain_page(context_entries);
>                  return -ENOMEM;
> @@ -1361,7 +1361,7 @@ int domain_context_mapping_one(
>          }
>  
>          /* Skip top levels of page tables for 2- and 3-level DRHDs. */
> -        pgd_maddr = hd->pgd_maddr;
> +        pgd_maddr = hd->arch.pgd_maddr;
>          for ( agaw = level_to_agaw(4);
>                agaw != level_to_agaw(iommu->nr_pt_levels);
>                agaw-- )
> @@ -1379,7 +1379,7 @@ int domain_context_mapping_one(
>          else
>              context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
>  
> -        spin_unlock(&hd->mapping_lock);
> +        spin_unlock(&hd->arch.mapping_lock);
>      }
>  
>      if ( context_set_domain_id(context, domain, iommu) )
> @@ -1405,7 +1405,7 @@ int domain_context_mapping_one(
>          iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
>      }
>  
> -    set_bit(iommu->index, &hd->iommu_bitmap);
> +    set_bit(iommu->index, &hd->arch.iommu_bitmap);
>  
>      unmap_vtd_domain_page(context_entries);
>  
> @@ -1648,7 +1648,7 @@ static int domain_context_unmap(
>          struct hvm_iommu *hd = domain_hvm_iommu(domain);
>          int iommu_domid;
>  
> -        clear_bit(iommu->index, &hd->iommu_bitmap);
> +        clear_bit(iommu->index, &hd->arch.iommu_bitmap);
>  
>          iommu_domid = domain_iommu_domid(domain, iommu);
>          if ( iommu_domid == -1 )
> @@ -1707,10 +1707,10 @@ static void iommu_domain_teardown(struct domain *d)
>      if ( iommu_use_hap_pt(d) )
>          return;
>  
> -    spin_lock(&hd->mapping_lock);
> -    iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
> -    hd->pgd_maddr = 0;
> -    spin_unlock(&hd->mapping_lock);
> +    spin_lock(&hd->arch.mapping_lock);
> +    iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
> +    hd->arch.pgd_maddr = 0;
> +    spin_unlock(&hd->arch.mapping_lock);
>  }
>  
>  static int intel_iommu_map_page(
> @@ -1729,12 +1729,12 @@ static int intel_iommu_map_page(
>      if ( iommu_passthrough && (d->domain_id == 0) )
>          return 0;
>  
> -    spin_lock(&hd->mapping_lock);
> +    spin_lock(&hd->arch.mapping_lock);
>  
>      pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
>      if ( pg_maddr == 0 )
>      {
> -        spin_unlock(&hd->mapping_lock);
> +        spin_unlock(&hd->arch.mapping_lock);
>          return -ENOMEM;
>      }
>      page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
> @@ -1751,14 +1751,14 @@ static int intel_iommu_map_page(
>  
>      if ( old.val == new.val )
>      {
> -        spin_unlock(&hd->mapping_lock);
> +        spin_unlock(&hd->arch.mapping_lock);
>          unmap_vtd_domain_page(page);
>          return 0;
>      }
>      *pte = new;
>  
>      iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
> -    spin_unlock(&hd->mapping_lock);
> +    spin_unlock(&hd->arch.mapping_lock);
>      unmap_vtd_domain_page(page);
>  
>      if ( !this_cpu(iommu_dont_flush_iotlb) )
> @@ -1792,7 +1792,7 @@ void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
>      for_each_drhd_unit ( drhd )
>      {
>          iommu = drhd->iommu;
> -        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
> +        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
>              continue;
>  
>          flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
> @@ -1833,7 +1833,7 @@ static void iommu_set_pgd(struct domain *d)
>          return;
>  
>      pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
> -    hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
> +    hd->arch.pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
>  }
>  
>  static int rmrr_identity_mapping(struct domain *d,
> @@ -1848,10 +1848,10 @@ static int rmrr_identity_mapping(struct domain *d,
>      ASSERT(rmrr->base_address < rmrr->end_address);
>  
>      /*
> -     * No need to acquire hd->mapping_lock, as the only theoretical race is
> +     * No need to acquire hd->arch.mapping_lock, as the only theoretical race is
>       * with the insertion below (impossible due to holding pcidevs_lock).
>       */
> -    list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
> +    list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list )
>      {
>          if ( mrmrr->base == rmrr->base_address &&
>               mrmrr->end == rmrr->end_address )
> @@ -1876,9 +1876,9 @@ static int rmrr_identity_mapping(struct domain *d,
>          return -ENOMEM;
>      mrmrr->base = rmrr->base_address;
>      mrmrr->end = rmrr->end_address;
> -    spin_lock(&hd->mapping_lock);
> -    list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
> -    spin_unlock(&hd->mapping_lock);
> +    spin_lock(&hd->arch.mapping_lock);
> +    list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);
> +    spin_unlock(&hd->arch.mapping_lock);
>  
>      return 0;
>  }
> @@ -2423,8 +2423,8 @@ static void vtd_dump_p2m_table(struct domain *d)
>          return;
>  
>      hd = domain_hvm_iommu(d);
> -    printk("p2m table has %d levels\n", agaw_to_level(hd->agaw));
> -    vtd_dump_p2m_table_level(hd->pgd_maddr, agaw_to_level(hd->agaw), 0, 0);
> +    printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw));
> +    vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
>  }
>  
>  const struct iommu_ops intel_iommu_ops = {
> diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
> index c857ba8..68e308c 100644
> --- a/xen/drivers/passthrough/x86/iommu.c
> +++ b/xen/drivers/passthrough/x86/iommu.c
> @@ -40,6 +40,47 @@ int __init iommu_setup_hpet_msi(struct msi_desc *msi)
>      return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;
>  }
>  
> +void iommu_share_p2m_table(struct domain* d)
> +{
> +    const struct iommu_ops *ops = iommu_get_ops();
> +
> +    if ( iommu_enabled && is_hvm_domain(d) )
> +        ops->share_p2m(d);
> +}
> +
> +int arch_iommu_domain_init(struct domain *d)
> +{
> +    struct hvm_iommu *hd = domain_hvm_iommu(d);
> +
> +    spin_lock_init(&hd->arch.mapping_lock);
> +    INIT_LIST_HEAD(&hd->arch.g2m_ioport_list);
> +    INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
> +
> +    return 0;
> +}
> +
> +void arch_iommu_domain_destroy(struct domain *d)
> +{
> +   struct hvm_iommu *hd  = domain_hvm_iommu(d);
> +   struct list_head *ioport_list, *rmrr_list, *tmp;
> +   struct g2m_ioport *ioport;
> +   struct mapped_rmrr *mrmrr;
> +
> +   list_for_each_safe ( ioport_list, tmp, &hd->arch.g2m_ioport_list )
> +   {
> +       ioport = list_entry(ioport_list, struct g2m_ioport, list);
> +       list_del(&ioport->list);
> +       xfree(ioport);
> +   }
> +
> +    list_for_each_safe ( rmrr_list, tmp, &hd->arch.mapped_rmrrs )
> +    {
> +        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
> +        list_del(&mrmrr->list);
> +        xfree(mrmrr);
> +    }
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/asm-x86/hvm/iommu.h b/xen/include/asm-x86/hvm/iommu.h
> index d488edf..927a02d 100644
> --- a/xen/include/asm-x86/hvm/iommu.h
> +++ b/xen/include/asm-x86/hvm/iommu.h
> @@ -39,4 +39,32 @@ static inline int iommu_hardware_setup(void)
>      return 0;
>  }
>  
> +struct g2m_ioport {
> +    struct list_head list;
> +    unsigned int gport;
> +    unsigned int mport;
> +    unsigned int np;
> +};
> +
> +struct mapped_rmrr {
> +    struct list_head list;
> +    u64 base;
> +    u64 end;
> +};
> +
> +struct arch_hvm_iommu
> +{
> +    u64 pgd_maddr;                 /* io page directory machine address */
> +    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
> +    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
> +    /* amd iommu support */
> +    int paging_mode;
> +    struct page_info *root_table;
> +    struct guest_iommu *g_iommu;
> +
> +    struct list_head g2m_ioport_list;   /* guest to machine ioport mapping */
> +    struct list_head mapped_rmrrs;
> +    spinlock_t mapping_lock;            /* io page table lock */
> +};
> +
>  #endif /* __ASM_X86_HVM_IOMMU_H__ */
> diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
> index 946291c..dc06ceb 100644
> --- a/xen/include/asm-x86/iommu.h
> +++ b/xen/include/asm-x86/iommu.h
> @@ -17,7 +17,9 @@
>  
>  #define MAX_IOMMUS 32
>  
> -#include <asm/msi.h>
> +/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
> +#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
> +#define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
>  
>  void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
>  unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
> diff --git a/xen/include/xen/hvm/iommu.h b/xen/include/xen/hvm/iommu.h
> index c9c10c1..f8f8a93 100644
> --- a/xen/include/xen/hvm/iommu.h
> +++ b/xen/include/xen/hvm/iommu.h
> @@ -23,31 +23,8 @@
>  #include <xen/iommu.h>
>  #include <asm/hvm/iommu.h>
>  
> -struct g2m_ioport {
> -    struct list_head list;
> -    unsigned int gport;
> -    unsigned int mport;
> -    unsigned int np;
> -};
> -
> -struct mapped_rmrr {
> -    struct list_head list;
> -    u64 base;
> -    u64 end;
> -};
> -
>  struct hvm_iommu {
> -    u64 pgd_maddr;                 /* io page directory machine address */
> -    spinlock_t mapping_lock;       /* io page table lock */
> -    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
> -    struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
> -    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
> -    struct list_head mapped_rmrrs;
> -
> -    /* amd iommu support */
> -    int paging_mode;
> -    struct page_info *root_table;
> -    struct guest_iommu *g_iommu;
> +    struct arch_hvm_iommu arch;
>  
>      /* iommu_ops */
>      const struct iommu_ops *platform_ops;
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index cf61d163..f556a7e 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -35,11 +35,6 @@ extern bool_t iommu_hap_pt_share;
>  extern bool_t iommu_debug;
>  extern bool_t amd_iommu_perdev_intremap;
>  
> -/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
> -#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
> -
> -#define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
> -
>  #define PAGE_SHIFT_4K       (12)
>  #define PAGE_SIZE_4K        (1UL << PAGE_SHIFT_4K)
>  #define PAGE_MASK_4K        (((u64)-1) << PAGE_SHIFT_4K)
> @@ -55,6 +50,9 @@ void iommu_dom0_init(struct domain *d);
>  void iommu_domain_destroy(struct domain *d);
>  int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn);
>  
> +void arch_iommu_domain_destroy(struct domain *d);
> +int arch_iommu_domain_init(struct domain *d);
> +
>  /* Function used internally, use iommu_domain_destroy */
>  void iommu_teardown(struct domain *d);
>  
>  struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
>  void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
>  bool_t pt_irq_need_timer(uint32_t flags);
>  
> -int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
> -void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);
> -
>  #define PT_IRQ_TIME_OUT MILLISECS(8)
>  #endif /* HAS_PCI */
>  
> @@ -127,6 +122,11 @@ struct iommu_ops {
>      void (*dump_p2m_table)(struct domain *d);
>  };
>  
> +#ifdef HAS_PCI
> +int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
> +void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);
> +#endif
> +
>  void iommu_suspend(void);
>  void iommu_resume(void);
>  void iommu_crash_shutdown(void);
> -- 
> 1.7.10.4
Ian Campbell March 18, 2014, 4:27 p.m. UTC | #3
On Tue, 2014-03-11 at 15:49 +0000, Julien Grall wrote:
> +struct arch_hvm_iommu
> +{
> +    u64 pgd_maddr;                 /* io page directory machine address */
> +    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
> +    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */

Blank line here for clarity?

> +    /* amd iommu support */
> +    int paging_mode;
> +    struct page_info *root_table;
> +    struct guest_iommu *g_iommu;
> +

I don't think the following are AMD-specific; in their original home
they were up with pgd_maddr and co anyway. If they are to stay here,
perhaps a new /* heading */ comment would help?

> +    struct list_head g2m_ioport_list;   /* guest to machine ioport mapping */
> +    struct list_head mapped_rmrrs;
> +    spinlock_t mapping_lock;            /* io page table lock */
> +};
> +

Ian.
Julien Grall March 18, 2014, 7:40 p.m. UTC | #4
Hi Ian,

On 03/18/2014 04:27 PM, Ian Campbell wrote:
> On Tue, 2014-03-11 at 15:49 +0000, Julien Grall wrote:
>> +struct arch_hvm_iommu
>> +{
>> +    u64 pgd_maddr;                 /* io page directory machine address */
>> +    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
>> +    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
> 
> Blank line here for clarity?

Sure.

>> +    /* amd iommu support */
>> +    int paging_mode;
>> +    struct page_info *root_table;
>> +    struct guest_iommu *g_iommu;
>> +
> 
> I don't think the following are amd specific, in their original home
> they were up with pgd_maddr and co any way. If they are to stay here
> perhaps a new /* heading */ comment would help?

I don't remember why I changed the order. I will go back to the original
order in the next version.
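
For illustration, restoring the original field order inside the new structure
would give roughly the following (sketch only, not necessarily the exact v4
layout):

struct arch_hvm_iommu
{
    u64 pgd_maddr;                 /* io page directory machine address */
    spinlock_t mapping_lock;       /* io page table lock */
    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
    struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
    struct list_head mapped_rmrrs;

    /* amd iommu support */
    int paging_mode;
    struct page_info *root_table;
    struct guest_iommu *g_iommu;
};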

Regards,

Patch

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 26635ff..e55d9d5 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -745,7 +745,7 @@  long arch_do_domctl(
                    "ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
                    d->domain_id, fgp, fmp, np);
 
-            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
                 if (g2m_ioport->mport == fmp )
                 {
                     g2m_ioport->gport = fgp;
@@ -764,7 +764,7 @@  long arch_do_domctl(
                 g2m_ioport->gport = fgp;
                 g2m_ioport->mport = fmp;
                 g2m_ioport->np = np;
-                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
+                list_add_tail(&g2m_ioport->list, &hd->arch.g2m_ioport_list);
             }
             if ( !ret )
                 ret = ioports_permit_access(d, fmp, fmp + np - 1);
@@ -779,7 +779,7 @@  long arch_do_domctl(
             printk(XENLOG_G_INFO
                    "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
                    d->domain_id, fgp, fmp, np);
-            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
                 if ( g2m_ioport->mport == fmp )
                 {
                     list_del(&g2m_ioport->list);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index bf6309d..ddb03f8 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -451,7 +451,7 @@  int dpci_ioport_intercept(ioreq_t *p)
     unsigned int s = 0, e = 0;
     int rc;
 
-    list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
+    list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
     {
         s = g2m_ioport->gport;
         e = s + g2m_ioport->np;
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index ccde4a0..c40fe12 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -230,7 +230,8 @@  static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
         if ( !is_idle_domain(d) )
         {
             struct hvm_iommu *hd = domain_hvm_iommu(d);
-            update_iommu_mac(&ctx, hd->pgd_maddr, agaw_to_level(hd->agaw));
+            update_iommu_mac(&ctx, hd->arch.pgd_maddr,
+                             agaw_to_level(hd->arch.agaw));
         }
     }
 
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 477de20..bd31bb5 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -60,12 +60,12 @@  static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
 
 static inline struct guest_iommu *domain_iommu(struct domain *d)
 {
-    return domain_hvm_iommu(d)->g_iommu;
+    return domain_hvm_iommu(d)->arch.g_iommu;
 }
 
 static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
 {
-    return domain_hvm_iommu(v->domain)->g_iommu;
+    return domain_hvm_iommu(v->domain)->arch.g_iommu;
 }
 
 static void guest_iommu_enable(struct guest_iommu *iommu)
@@ -886,7 +886,7 @@  int guest_iommu_init(struct domain* d)
 
     guest_iommu_reg_init(iommu);
     iommu->domain = d;
-    hd->g_iommu = iommu;
+    hd->arch.g_iommu = iommu;
 
     tasklet_init(&iommu->cmd_buffer_tasklet,
                  guest_iommu_process_command, (unsigned long)d);
@@ -907,7 +907,7 @@  void guest_iommu_destroy(struct domain *d)
     tasklet_kill(&iommu->cmd_buffer_tasklet);
     xfree(iommu);
 
-    domain_hvm_iommu(d)->g_iommu = NULL;
+    domain_hvm_iommu(d)->arch.g_iommu = NULL;
 }
 
 static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index b79e470..ceb1c28 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -344,7 +344,7 @@  static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     bool_t ok = 0;
 
-    ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+    ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
 
     next_level = merge_level - 1;
 
@@ -398,7 +398,7 @@  static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
     unsigned long first_mfn;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    ASSERT( spin_is_locked(&hd->mapping_lock) && pt_mfn );
+    ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
 
     table = map_domain_page(pt_mfn);
     pde = table + pfn_to_pde_idx(gfn, merge_level);
@@ -448,8 +448,8 @@  static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
     struct page_info *table;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    table = hd->root_table;
-    level = hd->paging_mode;
+    table = hd->arch.root_table;
+    level = hd->arch.paging_mode;
 
     BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 || 
             level > IOMMU_PAGING_MODE_LEVEL_6 );
@@ -557,11 +557,11 @@  static int update_paging_mode(struct domain *d, unsigned long gfn)
     unsigned long old_root_mfn;
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    level = hd->paging_mode;
-    old_root = hd->root_table;
+    level = hd->arch.paging_mode;
+    old_root = hd->arch.root_table;
     offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
 
-    ASSERT(spin_is_locked(&hd->mapping_lock) && is_hvm_domain(d));
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock) && is_hvm_domain(d));
 
     while ( offset >= PTE_PER_TABLE_SIZE )
     {
@@ -587,8 +587,8 @@  static int update_paging_mode(struct domain *d, unsigned long gfn)
 
     if ( new_root != NULL )
     {
-        hd->paging_mode = level;
-        hd->root_table = new_root;
+        hd->arch.paging_mode = level;
+        hd->arch.root_table = new_root;
 
         if ( !spin_is_locked(&pcidevs_lock) )
             AMD_IOMMU_DEBUG("%s Try to access pdev_list "
@@ -613,9 +613,9 @@  static int update_paging_mode(struct domain *d, unsigned long gfn)
 
                 /* valid = 0 only works for dom0 passthrough mode */
                 amd_iommu_set_root_page_table((u32 *)device_entry,
-                                              page_to_maddr(hd->root_table),
+                                              page_to_maddr(hd->arch.root_table),
                                               d->domain_id,
-                                              hd->paging_mode, 1);
+                                              hd->arch.paging_mode, 1);
 
                 amd_iommu_flush_device(iommu, req_id);
                 bdf += pdev->phantom_stride;
@@ -638,14 +638,14 @@  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     unsigned long pt_mfn[7];
     unsigned int merge_level;
 
-    BUG_ON( !hd->root_table );
+    BUG_ON( !hd->arch.root_table );
 
     if ( iommu_use_hap_pt(d) )
         return 0;
 
     memset(pt_mfn, 0, sizeof(pt_mfn));
 
-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
 
     /* Since HVM domain is initialized with 2 level IO page table,
      * we might need a deeper page table for lager gfn now */
@@ -653,7 +653,7 @@  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     {
         if ( update_paging_mode(d, gfn) )
         {
-            spin_unlock(&hd->mapping_lock);
+            spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
             domain_crash(d);
             return -EFAULT;
@@ -662,7 +662,7 @@  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
 
     if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
         domain_crash(d);
         return -EFAULT;
@@ -684,7 +684,7 @@  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
         amd_iommu_flush_pages(d, gfn, 0);
 
     for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
-          merge_level <= hd->paging_mode; merge_level++ )
+          merge_level <= hd->arch.paging_mode; merge_level++ )
     {
         if ( pt_mfn[merge_level] == 0 )
             break;
@@ -697,7 +697,7 @@  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
         if ( iommu_merge_pages(d, pt_mfn[merge_level], gfn, 
                                flags, merge_level) )
         {
-            spin_unlock(&hd->mapping_lock);
+            spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
                             "gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
             domain_crash(d);
@@ -706,7 +706,7 @@  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     }
 
 out:
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
     return 0;
 }
 
@@ -715,14 +715,14 @@  int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     unsigned long pt_mfn[7];
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    BUG_ON( !hd->root_table );
+    BUG_ON( !hd->arch.root_table );
 
     if ( iommu_use_hap_pt(d) )
         return 0;
 
     memset(pt_mfn, 0, sizeof(pt_mfn));
 
-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
 
     /* Since HVM domain is initialized with 2 level IO page table,
      * we might need a deeper page table for lager gfn now */
@@ -730,7 +730,7 @@  int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     {
         if ( update_paging_mode(d, gfn) )
         {
-            spin_unlock(&hd->mapping_lock);
+            spin_unlock(&hd->arch.mapping_lock);
             AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
             domain_crash(d);
             return -EFAULT;
@@ -739,7 +739,7 @@  int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
 
     if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
         domain_crash(d);
         return -EFAULT;
@@ -747,7 +747,7 @@  int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
 
     /* mark PTE as 'page not present' */
     clear_iommu_pte_present(pt_mfn[1], gfn);
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
 
     amd_iommu_flush_pages(d, gfn, 0);
 
@@ -792,13 +792,13 @@  void amd_iommu_share_p2m(struct domain *d)
     pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
     p2m_table = mfn_to_page(mfn_x(pgd_mfn));
 
-    if ( hd->root_table != p2m_table )
+    if ( hd->arch.root_table != p2m_table )
     {
-        free_amd_iommu_pgtable(hd->root_table);
-        hd->root_table = p2m_table;
+        free_amd_iommu_pgtable(hd->arch.root_table);
+        hd->arch.root_table = p2m_table;
 
         /* When sharing p2m with iommu, paging mode = 4 */
-        hd->paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
+        hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4;
         AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n",
                         mfn_x(pgd_mfn));
     }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 79f4a77..aeefabb 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -120,7 +120,8 @@  static void amd_iommu_setup_domain_device(
 
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
-    BUG_ON( !hd->root_table || !hd->paging_mode || !iommu->dev_table.buffer );
+    BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
+            !iommu->dev_table.buffer );
 
     if ( iommu_passthrough && (domain->domain_id == 0) )
         valid = 0;
@@ -138,8 +139,8 @@  static void amd_iommu_setup_domain_device(
     {
         /* bind DTE to domain page-tables */
         amd_iommu_set_root_page_table(
-            (u32 *)dte, page_to_maddr(hd->root_table), domain->domain_id,
-            hd->paging_mode, valid);
+            (u32 *)dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
+            hd->arch.paging_mode, valid);
 
         if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
              iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
@@ -151,8 +152,8 @@  static void amd_iommu_setup_domain_device(
                         "root table = %#"PRIx64", "
                         "domain = %d, paging mode = %d\n",
                         req_id, pdev->type,
-                        page_to_maddr(hd->root_table),
-                        domain->domain_id, hd->paging_mode);
+                        page_to_maddr(hd->arch.root_table),
+                        domain->domain_id, hd->arch.paging_mode);
     }
 
     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -225,17 +226,17 @@  int __init amd_iov_detect(void)
 static int allocate_domain_resources(struct hvm_iommu *hd)
 {
     /* allocate root table */
-    spin_lock(&hd->mapping_lock);
-    if ( !hd->root_table )
+    spin_lock(&hd->arch.mapping_lock);
+    if ( !hd->arch.root_table )
     {
-        hd->root_table = alloc_amd_iommu_pgtable();
-        if ( !hd->root_table )
+        hd->arch.root_table = alloc_amd_iommu_pgtable();
+        if ( !hd->arch.root_table )
         {
-            spin_unlock(&hd->mapping_lock);
+            spin_unlock(&hd->arch.mapping_lock);
             return -ENOMEM;
         }
     }
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
     return 0;
 }
 
@@ -262,14 +263,14 @@  static int amd_iommu_domain_init(struct domain *d)
     /* allocate page directroy */
     if ( allocate_domain_resources(hd) != 0 )
     {
-        if ( hd->root_table )
-            free_domheap_page(hd->root_table);
+        if ( hd->arch.root_table )
+            free_domheap_page(hd->arch.root_table);
         return -ENOMEM;
     }
 
     /* For pv and dom0, stick with get_paging_mode(max_page)
      * For HVM dom0, use 2 level page table at first */
-    hd->paging_mode = is_hvm_domain(d) ?
+    hd->arch.paging_mode = is_hvm_domain(d) ?
                       IOMMU_PAGING_MODE_LEVEL_2 :
                       get_paging_mode(max_page);
 
@@ -332,7 +333,7 @@  void amd_iommu_disable_domain_device(struct domain *domain,
         AMD_IOMMU_DEBUG("Disable: device id = %#x, "
                         "domain = %d, paging mode = %d\n",
                         req_id,  domain->domain_id,
-                        domain_hvm_iommu(domain)->paging_mode);
+                        domain_hvm_iommu(domain)->arch.paging_mode);
     }
     spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -372,7 +373,7 @@  static int reassign_device(struct domain *source, struct domain *target,
 
     /* IO page tables might be destroyed after pci-detach the last device
      * In this case, we have to re-allocate root table for next pci-attach.*/
-    if ( t->root_table == NULL )
+    if ( t->arch.root_table == NULL )
         allocate_domain_resources(t);
 
     amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
@@ -454,13 +455,13 @@  static void deallocate_iommu_page_tables(struct domain *d)
     if ( iommu_use_hap_pt(d) )
         return;
 
-    spin_lock(&hd->mapping_lock);
-    if ( hd->root_table )
+    spin_lock(&hd->arch.mapping_lock);
+    if ( hd->arch.root_table )
     {
-        deallocate_next_page_table(hd->root_table, hd->paging_mode);
-        hd->root_table = NULL;
+        deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);
+        hd->arch.root_table = NULL;
     }
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
 }
 
 
@@ -591,11 +592,11 @@  static void amd_dump_p2m_table(struct domain *d)
 {
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
 
-    if ( !hd->root_table ) 
+    if ( !hd->arch.root_table )
         return;
 
-    printk("p2m table has %d levels\n", hd->paging_mode);
-    amd_dump_p2m_table_level(hd->root_table, hd->paging_mode, 0, 0);
+    printk("p2m table has %d levels\n", hd->arch.paging_mode);
+    amd_dump_p2m_table_level(hd->arch.root_table, hd->arch.paging_mode, 0, 0);
 }
 
 const struct iommu_ops amd_iommu_ops = {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 8a2fdea..9cd996a 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -117,10 +117,11 @@  static void __init parse_iommu_param(char *s)
 int iommu_domain_init(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
+    int ret = 0;
 
-    spin_lock_init(&hd->mapping_lock);
-    INIT_LIST_HEAD(&hd->g2m_ioport_list);
-    INIT_LIST_HEAD(&hd->mapped_rmrrs);
+    ret = arch_iommu_domain_init(d);
+    if ( ret )
+        return ret;
 
     if ( !iommu_enabled )
         return 0;
@@ -189,10 +190,7 @@  void iommu_teardown(struct domain *d)
 
 void iommu_domain_destroy(struct domain *d)
 {
-    struct hvm_iommu *hd  = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *rmrr_list, *tmp;
-    struct g2m_ioport *ioport;
-    struct mapped_rmrr *mrmrr;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
 
     if ( !iommu_enabled || !hd->platform_ops )
         return;
@@ -200,20 +198,8 @@  void iommu_domain_destroy(struct domain *d)
     if ( need_iommu(d) )
         iommu_teardown(d);
 
-    list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
-    {
-        ioport = list_entry(ioport_list, struct g2m_ioport, list);
-        list_del(&ioport->list);
-        xfree(ioport);
-    }
-
-    list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
-    {
-        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
-        list_del(&mrmrr->list);
-        xfree(mrmrr);
-    }
-}
+    arch_iommu_domain_destroy(d);
+}
 
 int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                    unsigned int flags)
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index d4be75c..8efe6f9 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -248,16 +248,16 @@  static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
     struct acpi_drhd_unit *drhd;
     struct pci_dev *pdev;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    int addr_width = agaw_to_width(hd->agaw);
+    int addr_width = agaw_to_width(hd->arch.agaw);
     struct dma_pte *parent, *pte = NULL;
-    int level = agaw_to_level(hd->agaw);
+    int level = agaw_to_level(hd->arch.agaw);
     int offset;
     u64 pte_maddr = 0, maddr;
     u64 *vaddr = NULL;
 
     addr &= (((u64)1) << addr_width) - 1;
-    ASSERT(spin_is_locked(&hd->mapping_lock));
-    if ( hd->pgd_maddr == 0 )
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock));
+    if ( hd->arch.pgd_maddr == 0 )
     {
         /*
          * just get any passthrough device in the domainr - assume user
@@ -265,11 +265,11 @@  static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
          */
         pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
         drhd = acpi_find_matched_drhd_unit(pdev);
-        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
+        if ( !alloc || ((hd->arch.pgd_maddr = alloc_pgtable_maddr(drhd, 1)) == 0) )
             goto out;
     }
 
-    parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
+    parent = (struct dma_pte *)map_vtd_domain_page(hd->arch.pgd_maddr);
     while ( level > 1 )
     {
         offset = address_level_offset(addr, level);
@@ -579,7 +579,7 @@  static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
     {
         iommu = drhd->iommu;
 
-        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
             continue;
 
         flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
@@ -621,12 +621,12 @@  static void dma_pte_clear_one(struct domain *domain, u64 addr)
     u64 pg_maddr;
     struct mapped_rmrr *mrmrr;
 
-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
     /* get last level pte */
     pg_maddr = addr_to_dma_page_maddr(domain, addr, 0);
     if ( pg_maddr == 0 )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         return;
     }
 
@@ -635,13 +635,13 @@  static void dma_pte_clear_one(struct domain *domain, u64 addr)
 
     if ( !dma_pte_present(*pte) )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         unmap_vtd_domain_page(page);
         return;
     }
 
     dma_clear_pte(*pte);
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
@@ -652,8 +652,8 @@  static void dma_pte_clear_one(struct domain *domain, u64 addr)
     /* if the cleared address is between mapped RMRR region,
      * remove the mapped RMRR
      */
-    spin_lock(&hd->mapping_lock);
-    list_for_each_entry ( mrmrr, &hd->mapped_rmrrs, list )
+    spin_lock(&hd->arch.mapping_lock);
+    list_for_each_entry ( mrmrr, &hd->arch.mapped_rmrrs, list )
     {
         if ( addr >= mrmrr->base && addr <= mrmrr->end )
         {
@@ -662,7 +662,7 @@  static void dma_pte_clear_one(struct domain *domain, u64 addr)
             break;
         }
     }
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1247,7 +1247,7 @@  static int intel_iommu_domain_init(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
-    hd->agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+    hd->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
 
     return 0;
 }
@@ -1344,16 +1344,16 @@  int domain_context_mapping_one(
     }
     else
     {
-        spin_lock(&hd->mapping_lock);
+        spin_lock(&hd->arch.mapping_lock);
 
         /* Ensure we have pagetables allocated down to leaf PTE. */
-        if ( hd->pgd_maddr == 0 )
+        if ( hd->arch.pgd_maddr == 0 )
         {
             addr_to_dma_page_maddr(domain, 0, 1);
-            if ( hd->pgd_maddr == 0 )
+            if ( hd->arch.pgd_maddr == 0 )
             {
             nomem:
-                spin_unlock(&hd->mapping_lock);
+                spin_unlock(&hd->arch.mapping_lock);
                 spin_unlock(&iommu->lock);
                 unmap_vtd_domain_page(context_entries);
                 return -ENOMEM;
@@ -1361,7 +1361,7 @@  int domain_context_mapping_one(
         }
 
         /* Skip top levels of page tables for 2- and 3-level DRHDs. */
-        pgd_maddr = hd->pgd_maddr;
+        pgd_maddr = hd->arch.pgd_maddr;
         for ( agaw = level_to_agaw(4);
               agaw != level_to_agaw(iommu->nr_pt_levels);
               agaw-- )
@@ -1379,7 +1379,7 @@  int domain_context_mapping_one(
         else
             context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
 
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
     }
 
     if ( context_set_domain_id(context, domain, iommu) )
@@ -1405,7 +1405,7 @@  int domain_context_mapping_one(
         iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
     }
 
-    set_bit(iommu->index, &hd->iommu_bitmap);
+    set_bit(iommu->index, &hd->arch.iommu_bitmap);
 
     unmap_vtd_domain_page(context_entries);
 
@@ -1648,7 +1648,7 @@  static int domain_context_unmap(
         struct hvm_iommu *hd = domain_hvm_iommu(domain);
         int iommu_domid;
 
-        clear_bit(iommu->index, &hd->iommu_bitmap);
+        clear_bit(iommu->index, &hd->arch.iommu_bitmap);
 
         iommu_domid = domain_iommu_domid(domain, iommu);
         if ( iommu_domid == -1 )
@@ -1707,10 +1707,10 @@  static void iommu_domain_teardown(struct domain *d)
     if ( iommu_use_hap_pt(d) )
         return;
 
-    spin_lock(&hd->mapping_lock);
-    iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
-    hd->pgd_maddr = 0;
-    spin_unlock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
+    iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
+    hd->arch.pgd_maddr = 0;
+    spin_unlock(&hd->arch.mapping_lock);
 }
 
 static int intel_iommu_map_page(
@@ -1729,12 +1729,12 @@  static int intel_iommu_map_page(
     if ( iommu_passthrough && (d->domain_id == 0) )
         return 0;
 
-    spin_lock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
 
     pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
     if ( pg_maddr == 0 )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         return -ENOMEM;
     }
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -1751,14 +1751,14 @@  static int intel_iommu_map_page(
 
     if ( old.val == new.val )
     {
-        spin_unlock(&hd->mapping_lock);
+        spin_unlock(&hd->arch.mapping_lock);
         unmap_vtd_domain_page(page);
         return 0;
     }
     *pte = new;
 
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
-    spin_unlock(&hd->mapping_lock);
+    spin_unlock(&hd->arch.mapping_lock);
     unmap_vtd_domain_page(page);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
@@ -1792,7 +1792,7 @@  void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
-        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
             continue;
 
         flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
@@ -1833,7 +1833,7 @@  static void iommu_set_pgd(struct domain *d)
         return;
 
     pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
-    hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+    hd->arch.pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
 }
 
 static int rmrr_identity_mapping(struct domain *d,
@@ -1848,10 +1848,10 @@  static int rmrr_identity_mapping(struct domain *d,
     ASSERT(rmrr->base_address < rmrr->end_address);
 
     /*
-     * No need to acquire hd->mapping_lock, as the only theoretical race is
+     * No need to acquire hd->arch.mapping_lock, as the only theoretical race is
      * with the insertion below (impossible due to holding pcidevs_lock).
      */
-    list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
+    list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list )
     {
         if ( mrmrr->base == rmrr->base_address &&
              mrmrr->end == rmrr->end_address )
@@ -1876,9 +1876,9 @@  static int rmrr_identity_mapping(struct domain *d,
         return -ENOMEM;
     mrmrr->base = rmrr->base_address;
     mrmrr->end = rmrr->end_address;
-    spin_lock(&hd->mapping_lock);
-    list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
-    spin_unlock(&hd->mapping_lock);
+    spin_lock(&hd->arch.mapping_lock);
+    list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs);
+    spin_unlock(&hd->arch.mapping_lock);
 
     return 0;
 }
@@ -2423,8 +2423,8 @@  static void vtd_dump_p2m_table(struct domain *d)
         return;
 
     hd = domain_hvm_iommu(d);
-    printk("p2m table has %d levels\n", agaw_to_level(hd->agaw));
-    vtd_dump_p2m_table_level(hd->pgd_maddr, agaw_to_level(hd->agaw), 0, 0);
+    printk("p2m table has %d levels\n", agaw_to_level(hd->arch.agaw));
+    vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);
 }
 
 const struct iommu_ops intel_iommu_ops = {
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index c857ba8..68e308c 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -40,6 +40,47 @@  int __init iommu_setup_hpet_msi(struct msi_desc *msi)
     return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;
 }
 
+void iommu_share_p2m_table(struct domain* d)
+{
+    const struct iommu_ops *ops = iommu_get_ops();
+
+    if ( iommu_enabled && is_hvm_domain(d) )
+        ops->share_p2m(d);
+}
+
+int arch_iommu_domain_init(struct domain *d)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    spin_lock_init(&hd->arch.mapping_lock);
+    INIT_LIST_HEAD(&hd->arch.g2m_ioport_list);
+    INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
+
+    return 0;
+}
+
+void arch_iommu_domain_destroy(struct domain *d)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    struct list_head *ioport_list, *rmrr_list, *tmp;
+    struct g2m_ioport *ioport;
+    struct mapped_rmrr *mrmrr;
+
+    list_for_each_safe ( ioport_list, tmp, &hd->arch.g2m_ioport_list )
+    {
+        ioport = list_entry(ioport_list, struct g2m_ioport, list);
+        list_del(&ioport->list);
+        xfree(ioport);
+    }
+
+    list_for_each_safe ( rmrr_list, tmp, &hd->arch.mapped_rmrrs )
+    {
+        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
+        list_del(&mrmrr->list);
+        xfree(mrmrr);
+    }
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/hvm/iommu.h b/xen/include/asm-x86/hvm/iommu.h
index d488edf..927a02d 100644
--- a/xen/include/asm-x86/hvm/iommu.h
+++ b/xen/include/asm-x86/hvm/iommu.h
@@ -39,4 +39,32 @@  static inline int iommu_hardware_setup(void)
     return 0;
 }
 
+struct g2m_ioport {
+    struct list_head list;
+    unsigned int gport;
+    unsigned int mport;
+    unsigned int np;
+};
+
+struct mapped_rmrr {
+    struct list_head list;
+    u64 base;
+    u64 end;
+};
+
+struct arch_hvm_iommu
+{
+    u64 pgd_maddr;                 /* io page directory machine address */
+    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
+    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
+    /* amd iommu support */
+    int paging_mode;
+    struct page_info *root_table;
+    struct guest_iommu *g_iommu;
+
+    struct list_head g2m_ioport_list;   /* guest to machine ioport mapping */
+    struct list_head mapped_rmrrs;
+    spinlock_t mapping_lock;            /* io page table lock */
+};
+
 #endif /* __ASM_X86_HVM_IOMMU_H__ */
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 946291c..dc06ceb 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -17,7 +17,9 @@ 
 
 #define MAX_IOMMUS 32
 
-#include <asm/msi.h>
+/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
+#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
+#define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
 
 void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
 unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg);
diff --git a/xen/include/xen/hvm/iommu.h b/xen/include/xen/hvm/iommu.h
index c9c10c1..f8f8a93 100644
--- a/xen/include/xen/hvm/iommu.h
+++ b/xen/include/xen/hvm/iommu.h
@@ -23,31 +23,8 @@ 
 #include <xen/iommu.h>
 #include <asm/hvm/iommu.h>
 
-struct g2m_ioport {
-    struct list_head list;
-    unsigned int gport;
-    unsigned int mport;
-    unsigned int np;
-};
-
-struct mapped_rmrr {
-    struct list_head list;
-    u64 base;
-    u64 end;
-};
-
 struct hvm_iommu {
-    u64 pgd_maddr;                 /* io page directory machine address */
-    spinlock_t mapping_lock;       /* io page table lock */
-    int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
-    struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
-    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
-    struct list_head mapped_rmrrs;
-
-    /* amd iommu support */
-    int paging_mode;
-    struct page_info *root_table;
-    struct guest_iommu *g_iommu;
+    struct arch_hvm_iommu arch;
 
     /* iommu_ops */
     const struct iommu_ops *platform_ops;
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index cf61d163..f556a7e 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -35,11 +35,6 @@  extern bool_t iommu_hap_pt_share;
 extern bool_t iommu_debug;
 extern bool_t amd_iommu_perdev_intremap;
 
-/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
-#define iommu_use_hap_pt(d) (hap_enabled(d) && iommu_hap_pt_share)
-
-#define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
-
 #define PAGE_SHIFT_4K       (12)
 #define PAGE_SIZE_4K        (1UL << PAGE_SHIFT_4K)
 #define PAGE_MASK_4K        (((u64)-1) << PAGE_SHIFT_4K)
@@ -55,6 +50,9 @@  void iommu_dom0_init(struct domain *d);
 void iommu_domain_destroy(struct domain *d);
 int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn);
 
+void arch_iommu_domain_destroy(struct domain *d);
+int arch_iommu_domain_init(struct domain *d);
+
 /* Function used internally, use iommu_domain_destroy */
 void iommu_teardown(struct domain *d);
 
@@ -81,9 +79,6 @@  struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);
 bool_t pt_irq_need_timer(uint32_t flags);
 
-int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
-void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);
-
 #define PT_IRQ_TIME_OUT MILLISECS(8)
 #endif /* HAS_PCI */
 
@@ -127,6 +122,11 @@  struct iommu_ops {
     void (*dump_p2m_table)(struct domain *d);
 };
 
+#ifdef HAS_PCI
+int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
+void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg);
+#endif
+
 void iommu_suspend(void);
 void iommu_resume(void);
 void iommu_crash_shutdown(void);