Message ID | 53E4EE93.3020607@amd.com |
---|---|
State | New |
Headers | show |
On 08/08/14 17:36, Suravee Suthikulpanit wrote: > On 7/22/2014 12:39 PM, Roger Pau Monné wrote: >> On 22/07/14 19:30, Suravee Suthikulpanit wrote: >>> Roger, >>> >>> I am not quite sure why you would disable "iommu_hap_pt_share" for AMD >>> IOMMU. The current implementation assumes that the p2m can be shared. >>> >>> Also, I feel that simply just set iommu_hap_pt_share = 0 (while still >>> having several places in the AMD iommu drivers and p2m-pt.c assuming >>> that it can be shared) seems a bit messy. >> >> According to the comment in p2m.h, AMD IOMMU only supports bit 52 to bit >> 58 in the pte to be 0, otherwise the hw generates page faults. >> >> If we want to support doing IO to devices behind an IOMMU from page >> types different than p2m_ram_rw the p2m tables cannot be shared, because >> the bits from 52 to 58 will indeed be different than 0, and will >> generate page faults. >> >> Roger. >> > > As you have mentioned, they cannot be shared due to the 52 and 58 bits. > However, what I was trying to say is that, besides just simply set the > flag to 0, we probably should remove existing logic in various places > that assumes that AMD IOMMU can have share_p2m_table=1. > > If you agree, the attachment is the patch that should do that. > > I have tested device-passthrough w/ the amd-iommu: disable > iommu_hap_pt_share with AMD IOMMUs, and my patch and it is working. > > Acked-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > > Thanks, > > Suravee > > iommu-removal-of-share_p2m_table-from-AMD-IOMMU.patch > > > From f28838679867fbbc3be6286556eed7f908eea559 Mon Sep 17 00:00:00 2001 > From: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com> > Date: Fri, 8 Aug 2014 07:26:15 -0500 > Subject: [PATCH] iommu: Removal of share_p2m_table from AMD IOMMU > > This patch removes existing logic which assumes iommu_hap_pt_share > is enabled for AMD IOMMU. 
> > Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > Cc: Roger Pau Monné <roger.pau@citrix.com> > Cc: Xiantao Zhang <xiantao.zhang@intel.com> > Cc: Jan Beulich <jbeulich@suse.com> > --- > NOTES: This patch depends on the "amd-iommu: disable iommu_hap_pt_share with AMD IOMMUs" > patch from Roger Pau Monne <roger.pau@citrix.com>. > > xen/arch/x86/mm/p2m-pt.c | 23 ++++++-------------- > xen/drivers/passthrough/amd/iommu_map.c | 33 ----------------------------- > xen/drivers/passthrough/amd/pci_amd_iommu.c | 4 ---- > xen/drivers/passthrough/iommu.c | 2 +- > 4 files changed, 8 insertions(+), 54 deletions(-) > > diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c > index 085ab6f..6bec0e9 100644 > --- a/xen/arch/x86/mm/p2m-pt.c > +++ b/xen/arch/x86/mm/p2m-pt.c > @@ -36,7 +36,6 @@ > #include <xen/event.h> > #include <xen/trace.h> > #include <asm/hvm/nestedhvm.h> > -#include <asm/hvm/svm/amd-iommu-proto.h> > > #include "mm-locks.h" > > @@ -653,22 +652,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, > > if ( iommu_enabled && need_iommu(p2m->domain) ) > { > - if ( iommu_hap_pt_share ) > - { > - if ( old_mfn && (old_mfn != mfn_x(mfn)) ) > - amd_iommu_flush_pages(p2m->domain, gfn, page_order); > - } > + unsigned int flags = p2m_get_iommu_flags(p2mt); > + > + if ( flags != 0 ) > + for ( i = 0; i < (1UL << page_order); i++ ) > + iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags); > else > - { > - unsigned int flags = p2m_get_iommu_flags(p2mt); > - > - if ( flags != 0 ) > - for ( i = 0; i < (1UL << page_order); i++ ) > - iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags); > - else > - for ( int i = 0; i < (1UL << page_order); i++ ) > - iommu_unmap_page(p2m->domain, gfn+i); > - } > + for ( int i = 0; i < (1UL << page_order); i++ ) > + iommu_unmap_page(p2m->domain, gfn+i); > } > > out: > diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c > index a8c60ec..2808c31 
100644 > --- a/xen/drivers/passthrough/amd/iommu_map.c > +++ b/xen/drivers/passthrough/amd/iommu_map.c > @@ -640,9 +640,6 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn, > > BUG_ON( !hd->arch.root_table ); > > - if ( iommu_use_hap_pt(d) ) > - return 0; > - > memset(pt_mfn, 0, sizeof(pt_mfn)); > > spin_lock(&hd->arch.mapping_lock); > @@ -718,9 +715,6 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn) > > BUG_ON( !hd->arch.root_table ); > > - if ( iommu_use_hap_pt(d) ) > - return 0; > - > memset(pt_mfn, 0, sizeof(pt_mfn)); > > spin_lock(&hd->arch.mapping_lock); > @@ -777,30 +771,3 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain, > } > return 0; > } > - > -/* Share p2m table with iommu. */ > -void amd_iommu_share_p2m(struct domain *d) > -{ > - struct hvm_iommu *hd = domain_hvm_iommu(d); > - struct page_info *p2m_table; > - mfn_t pgd_mfn; > - > - ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled ); > - > - if ( !iommu_use_hap_pt(d) ) > - return; > - > - pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))); > - p2m_table = mfn_to_page(mfn_x(pgd_mfn)); > - > - if ( hd->arch.root_table != p2m_table ) > - { > - free_amd_iommu_pgtable(hd->arch.root_table); > - hd->arch.root_table = p2m_table; > - > - /* When sharing p2m with iommu, paging mode = 4 */ > - hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4; > - AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n", > - mfn_x(pgd_mfn)); > - } > -} > diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c > index 0b301b3..c893dea 100644 > --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c > +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c > @@ -453,9 +453,6 @@ static void deallocate_iommu_page_tables(struct domain *d) > { > struct hvm_iommu *hd = domain_hvm_iommu(d); > > - if ( iommu_use_hap_pt(d) ) > - return; > - > spin_lock(&hd->arch.mapping_lock); > if ( hd->arch.root_table ) > { > 
@@ -619,7 +616,6 @@ const struct iommu_ops amd_iommu_ops = { > .setup_hpet_msi = amd_setup_hpet_msi, > .suspend = amd_iommu_suspend, > .resume = amd_iommu_resume, > - .share_p2m = amd_iommu_share_p2m, > .crash_shutdown = amd_iommu_suspend, > .dump_p2m_table = amd_dump_p2m_table, > }; > diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c > index cc12735..5d3299a 100644 > --- a/xen/drivers/passthrough/iommu.c > +++ b/xen/drivers/passthrough/iommu.c > @@ -332,7 +332,7 @@ void iommu_share_p2m_table(struct domain* d) > { > const struct iommu_ops *ops = iommu_get_ops(); > > - if ( iommu_enabled && is_hvm_domain(d) ) > + if ( iommu_enabled && is_hvm_domain(d) && ops->share_p2m) > ops->share_p2m(d); Looks fine to me, however I'm not sure if it would be clearer to use iommu_use_hap_pt(d) instead of checking ops->share_p2m directly. Roger.
From f28838679867fbbc3be6286556eed7f908eea559 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com> Date: Fri, 8 Aug 2014 07:26:15 -0500 Subject: [PATCH] iommu: Removal of share_p2m_table from AMD IOMMU This patch removes existing logic which assumes iommu_hap_pt_share is enabled for AMD IOMMU. Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Xiantao Zhang <xiantao.zhang@intel.com> Cc: Jan Beulich <jbeulich@suse.com> --- NOTES: This patch depends on the "amd-iommu: disable iommu_hap_pt_share with AMD IOMMUs" patch from Roger Pau Monne <roger.pau@citrix.com>. xen/arch/x86/mm/p2m-pt.c | 23 ++++++-------------- xen/drivers/passthrough/amd/iommu_map.c | 33 ----------------------------- xen/drivers/passthrough/amd/pci_amd_iommu.c | 4 ---- xen/drivers/passthrough/iommu.c | 2 +- 4 files changed, 8 insertions(+), 54 deletions(-) diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c index 085ab6f..6bec0e9 100644 --- a/xen/arch/x86/mm/p2m-pt.c +++ b/xen/arch/x86/mm/p2m-pt.c @@ -36,7 +36,6 @@ #include <xen/event.h> #include <xen/trace.h> #include <asm/hvm/nestedhvm.h> -#include <asm/hvm/svm/amd-iommu-proto.h> #include "mm-locks.h" @@ -653,22 +652,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, if ( iommu_enabled && need_iommu(p2m->domain) ) { - if ( iommu_hap_pt_share ) - { - if ( old_mfn && (old_mfn != mfn_x(mfn)) ) - amd_iommu_flush_pages(p2m->domain, gfn, page_order); - } + unsigned int flags = p2m_get_iommu_flags(p2mt); + + if ( flags != 0 ) + for ( i = 0; i < (1UL << page_order); i++ ) + iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags); else - { - unsigned int flags = p2m_get_iommu_flags(p2mt); - - if ( flags != 0 ) - for ( i = 0; i < (1UL << page_order); i++ ) - iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i, flags); - else - for ( int i = 0; i < (1UL << page_order); i++ ) - iommu_unmap_page(p2m->domain, gfn+i); - } + for ( 
int i = 0; i < (1UL << page_order); i++ ) + iommu_unmap_page(p2m->domain, gfn+i); } out: diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c index a8c60ec..2808c31 100644 --- a/xen/drivers/passthrough/amd/iommu_map.c +++ b/xen/drivers/passthrough/amd/iommu_map.c @@ -640,9 +640,6 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn, BUG_ON( !hd->arch.root_table ); - if ( iommu_use_hap_pt(d) ) - return 0; - memset(pt_mfn, 0, sizeof(pt_mfn)); spin_lock(&hd->arch.mapping_lock); @@ -718,9 +715,6 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn) BUG_ON( !hd->arch.root_table ); - if ( iommu_use_hap_pt(d) ) - return 0; - memset(pt_mfn, 0, sizeof(pt_mfn)); spin_lock(&hd->arch.mapping_lock); @@ -777,30 +771,3 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain, } return 0; } - -/* Share p2m table with iommu. */ -void amd_iommu_share_p2m(struct domain *d) -{ - struct hvm_iommu *hd = domain_hvm_iommu(d); - struct page_info *p2m_table; - mfn_t pgd_mfn; - - ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled ); - - if ( !iommu_use_hap_pt(d) ) - return; - - pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))); - p2m_table = mfn_to_page(mfn_x(pgd_mfn)); - - if ( hd->arch.root_table != p2m_table ) - { - free_amd_iommu_pgtable(hd->arch.root_table); - hd->arch.root_table = p2m_table; - - /* When sharing p2m with iommu, paging mode = 4 */ - hd->arch.paging_mode = IOMMU_PAGING_MODE_LEVEL_4; - AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n", - mfn_x(pgd_mfn)); - } -} diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c index 0b301b3..c893dea 100644 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c @@ -453,9 +453,6 @@ static void deallocate_iommu_page_tables(struct domain *d) { struct hvm_iommu *hd = domain_hvm_iommu(d); - if ( iommu_use_hap_pt(d) ) - 
return; - spin_lock(&hd->arch.mapping_lock); if ( hd->arch.root_table ) { @@ -619,7 +616,6 @@ const struct iommu_ops amd_iommu_ops = { .setup_hpet_msi = amd_setup_hpet_msi, .suspend = amd_iommu_suspend, .resume = amd_iommu_resume, - .share_p2m = amd_iommu_share_p2m, .crash_shutdown = amd_iommu_suspend, .dump_p2m_table = amd_dump_p2m_table, }; diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c index cc12735..5d3299a 100644 --- a/xen/drivers/passthrough/iommu.c +++ b/xen/drivers/passthrough/iommu.c @@ -332,7 +332,7 @@ void iommu_share_p2m_table(struct domain* d) { const struct iommu_ops *ops = iommu_get_ops(); - if ( iommu_enabled && is_hvm_domain(d) ) + if ( iommu_enabled && is_hvm_domain(d) && ops->share_p2m) ops->share_p2m(d); } -- 1.9.0