| Message ID | 1387208731-18534-1-git-send-email-julien.grall@linaro.org |
|---|---|
| State | Superseded, archived |
On Mon, 2013-12-16 at 15:45 +0000, Julien Grall wrote:
> With the lake of iommu, dom0 must have a 1:1 memory mapping for all
> these guest physical address. When the ballon decides to give back a
> page to the kernel, this page must have the same address as previously.
> Otherwise, we will loose the 1:1 mapping and will break DMA-capable
> devices.
> 
> Signed-off-by: Julien Grall <julien.grall@linaro.org>
> Cc: Keir Fraser <keir@xen.org>
> Cc: Jan Beulich <jbeulich@suse.com>
> 
> ---
> Release: This is a bug that prevents DMA-capable devices to work after
> a guest has started.
> 
> Changes in v3:
>     - Remove spurious page = NULL
>     - Rename is_dom0_mapped_11 to is_domain_direct_mapped
>     - Coding style
> Changes in v2:
>     - Drop CONFIG_ARM and add is_dom0_mapped_11
> ---
>  xen/arch/arm/domain_build.c  |  5 +++++
>  xen/common/memory.c          | 30 +++++++++++++++++++++++++++++-
>  xen/include/asm-arm/domain.h |  2 ++
>  xen/include/asm-x86/domain.h |  2 ++
>  4 files changed, 38 insertions(+), 1 deletion(-)
> 
> diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> index faff88e..2bbee36 100644
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -65,6 +65,11 @@ struct vcpu *__init alloc_dom0_vcpu0(void)
>      return alloc_vcpu(dom0, 0, 0);
>  }
>  
> +int is_domain_direct_mapped(struct domain *d)
> +{
> +    return (dom0_11_mapping && d == dom0);
> +}
> +
>  static void allocate_memory_11(struct domain *d, struct kernel_info *kinfo)
>  {
>      paddr_t start;
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 61791a4..45dbf07 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -122,7 +122,29 @@ static void populate_physmap(struct memop_args *a)
>          }
>          else
>          {
> -            page = alloc_domheap_pages(d, a->extent_order, a->memflags);
> +            if ( is_domain_direct_mapped(d) )
> +            {
> +                mfn = gpfn;
> +                if ( !mfn_valid(mfn) )
> +                {
> +                    gdprintk(XENLOG_INFO, "Invalid mfn 0x%"PRI_xen_pfn"\n",
> +                             mfn);
> +                    goto out;
> +                }
> +
> +                page = mfn_to_page(mfn);
> +                if ( !get_page(page, d) )
> +                {
> +                    gdprintk(XENLOG_INFO,
> +                             "mfn 0x%"PRI_xen_pfn" doesn't belong to dom0\n",

With the new setup d is not necessarily dom0.

> +                             mfn);
> +                    goto out;
> +                }
> +                put_page(page);
> +            }
> +            else
> +                page = alloc_domheap_pages(d, a->extent_order, a->memflags);
> +
>              if ( unlikely(page == NULL) )
>              {
>                  if ( !opt_tmem || (a->extent_order != 0) )
> @@ -270,6 +292,12 @@ static void decrease_reservation(struct memop_args *a)
>               && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
>              continue;
>  
> +        /* With the lake for iommu on some ARM platform, dom0 must retrieve

s/lake/lack/ and another stray reference to dom0.

> +         * the same pfn when the hypercall populate_physmap is called.
> +         */
> +        if ( is_domain_direct_mapped(a->domain) )
> +            continue;
> +
>          for ( j = 0; j < (1 << a->extent_order); j++ )
>              if ( !guest_remove_page(a->domain, gmfn + j) )
>                  goto out;
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 28d39a0..dbc1389 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -86,6 +86,8 @@ enum domain_type {
>  #define is_pv64_domain(d) (0)
>  #endif
>  
> +int is_domain_direct_mapped(struct domain *d);
> +
>  struct vtimer {
>      struct vcpu *v;
>      int irq;
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index 9d39061..2c7f809 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -16,6 +16,8 @@
>  #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
>  #define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
>  
> +#define is_domain_direct_mapped(d) (0)
> +
>  #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
>          d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
>  #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
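The hunk discussed in the reply takes a page reference only to confirm that the frame already belongs to the requesting domain, then drops it straight away. A minimal user-space sketch of that probe pattern; the toy_* names below are invented for the illustration and are not Xen's real struct page_info, get_page() or put_page():

```c
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
    int owner;          /* owning domain id, -1 if unowned */
    int refcount;
};

/* Take a reference iff the page is owned by @domid. */
static bool toy_get_page(struct toy_page *pg, int domid)
{
    if ( pg->owner != domid )
        return false;   /* page belongs to someone else */
    pg->refcount++;
    return true;
}

static void toy_put_page(struct toy_page *pg)
{
    pg->refcount--;
}

/* Returns true iff @pg is owned by @domid, leaving the refcount unchanged:
 * the reference is taken only to ask the ownership question, then dropped. */
static bool owned_by(struct toy_page *pg, int domid)
{
    if ( !toy_get_page(pg, domid) )
        return false;
    toy_put_page(pg);
    return true;
}

int main(void)
{
    struct toy_page pg = { .owner = 0, .refcount = 1 };

    printf("owned by d0: %s\n", owned_by(&pg, 0) ? "yes" : "no");
    printf("owned by d3: %s\n", owned_by(&pg, 3) ? "yes" : "no");
    printf("refcount still %d\n", pg.refcount);
    return 0;
}
```

The full patch as posted follows.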
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index faff88e..2bbee36 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -65,6 +65,11 @@ struct vcpu *__init alloc_dom0_vcpu0(void)
     return alloc_vcpu(dom0, 0, 0);
 }
 
+int is_domain_direct_mapped(struct domain *d)
+{
+    return (dom0_11_mapping && d == dom0);
+}
+
 static void allocate_memory_11(struct domain *d, struct kernel_info *kinfo)
 {
     paddr_t start;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 61791a4..45dbf07 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -122,7 +122,29 @@ static void populate_physmap(struct memop_args *a)
         }
         else
         {
-            page = alloc_domheap_pages(d, a->extent_order, a->memflags);
+            if ( is_domain_direct_mapped(d) )
+            {
+                mfn = gpfn;
+                if ( !mfn_valid(mfn) )
+                {
+                    gdprintk(XENLOG_INFO, "Invalid mfn 0x%"PRI_xen_pfn"\n",
+                             mfn);
+                    goto out;
+                }
+
+                page = mfn_to_page(mfn);
+                if ( !get_page(page, d) )
+                {
+                    gdprintk(XENLOG_INFO,
+                             "mfn 0x%"PRI_xen_pfn" doesn't belong to dom0\n",
+                             mfn);
+                    goto out;
+                }
+                put_page(page);
+            }
+            else
+                page = alloc_domheap_pages(d, a->extent_order, a->memflags);
+
             if ( unlikely(page == NULL) )
             {
                 if ( !opt_tmem || (a->extent_order != 0) )
@@ -270,6 +292,12 @@ static void decrease_reservation(struct memop_args *a)
              && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
             continue;
 
+        /* With the lake for iommu on some ARM platform, dom0 must retrieve
+         * the same pfn when the hypercall populate_physmap is called.
+         */
+        if ( is_domain_direct_mapped(a->domain) )
+            continue;
+
         for ( j = 0; j < (1 << a->extent_order); j++ )
             if ( !guest_remove_page(a->domain, gmfn + j) )
                 goto out;
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 28d39a0..dbc1389 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -86,6 +86,8 @@ enum domain_type {
 #define is_pv64_domain(d) (0)
 #endif
 
+int is_domain_direct_mapped(struct domain *d);
+
 struct vtimer {
     struct vcpu *v;
     int irq;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9d39061..2c7f809 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -16,6 +16,8 @@
 #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
 #define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
 
+#define is_domain_direct_mapped(d) (0)
+
 #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
         d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
 #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
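A rough, self-contained sketch of the allocation decision the patch adds to populate_physmap(): a direct-mapped (1:1) domain must be backed by the frame with the same number as the requested gpfn, which is validated rather than freshly allocated, while any other domain simply gets a frame from the allocator. The toy_* names and the flat frame table are invented for the example; none of it is Xen code:

```c
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_MFN 16

struct toy_page { int owner; };                 /* -1 means unowned */
static struct toy_page frames[TOY_MAX_MFN];     /* toy frame table  */

static bool toy_mfn_valid(unsigned long mfn) { return mfn < TOY_MAX_MFN; }

static struct toy_page *toy_get_backing(unsigned long gpfn, int domid,
                                        bool direct_mapped)
{
    if ( direct_mapped )
    {
        unsigned long mfn = gpfn;               /* 1:1: gpfn == mfn */

        /* Refuse anything invalid or not already owned by the domain. */
        if ( !toy_mfn_valid(mfn) || frames[mfn].owner != domid )
            return NULL;
        return &frames[mfn];
    }

    /* Non direct-mapped domains get any free frame (first fit here). */
    for ( unsigned long mfn = 0; mfn < TOY_MAX_MFN; mfn++ )
        if ( frames[mfn].owner == -1 )
        {
            frames[mfn].owner = domid;
            return &frames[mfn];
        }
    return NULL;
}

int main(void)
{
    for ( unsigned long mfn = 0; mfn < TOY_MAX_MFN; mfn++ )
        frames[mfn].owner = (mfn < 8) ? 0 : -1; /* d0 owns mfns 0-7 */

    printf("direct-mapped gpfn 5  -> %s\n",
           toy_get_backing(5, 0, true)  ? "reused frame 5" : "refused");
    printf("direct-mapped gpfn 12 -> %s\n",
           toy_get_backing(12, 0, true) ? "reused frame 12" : "refused");
    return 0;
}
```

In the real hypervisor the ownership question is answered by get_page() against the frame table, as in the hunk above; the toy only mirrors the reuse-versus-allocate split. The patch's commit message, as posted, follows.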
With the lake of iommu, dom0 must have a 1:1 memory mapping for all
these guest physical address. When the ballon decides to give back a
page to the kernel, this page must have the same address as previously.
Otherwise, we will loose the 1:1 mapping and will break DMA-capable
devices.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>

---
Release: This is a bug that prevents DMA-capable devices to work after
a guest has started.

Changes in v3:
    - Remove spurious page = NULL
    - Rename is_dom0_mapped_11 to is_domain_direct_mapped
    - Coding style
Changes in v2:
    - Drop CONFIG_ARM and add is_dom0_mapped_11
---
 xen/arch/arm/domain_build.c  |  5 +++++
 xen/common/memory.c          | 30 +++++++++++++++++++++++++++++-
 xen/include/asm-arm/domain.h |  2 ++
 xen/include/asm-x86/domain.h |  2 ++
 4 files changed, 38 insertions(+), 1 deletion(-)
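To make the commit message's argument concrete: without an IOMMU a DMA master addresses machine frames directly, so the guest-frame-to-machine-frame mapping must stay 1:1 or the CPU and the device end up looking at different memory. A tiny user-space model of that failure mode (invented names, not Xen or Linux code):

```c
#include <stdio.h>

#define NFRAMES 4

static int machine_memory[NFRAMES];     /* one int per machine frame    */
static int p2m[NFRAMES];                /* guest frame -> machine frame */

static void device_dma_write(int gfn, int value)
{
    machine_memory[gfn] = value;        /* no IOMMU: gfn used as mfn    */
}

static int cpu_read(int gfn)
{
    return machine_memory[p2m[gfn]];    /* CPU goes through the p2m     */
}

int main(void)
{
    for ( int gfn = 0; gfn < NFRAMES; gfn++ )
        p2m[gfn] = gfn;                 /* initial 1:1 mapping          */

    p2m[1] = 3;                         /* balloon repopulates gfn 1 with a
                                           *different* machine frame    */

    device_dma_write(1, 42);            /* device still targets frame 1 */
    printf("CPU reads %d (expected 42)\n", cpu_read(1));    /* prints 0 */

    p2m[1] = 1;                         /* restore the 1:1 mapping      */
    printf("CPU reads %d after 1:1 restore\n", cpu_read(1));/* prints 42 */
    return 0;
}
```

Restoring the original frame, which the patch forces populate_physmap() to do for direct-mapped domains, is what keeps the device and the CPU looking at the same memory.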