
[Xen-devel,v5,3/8] xen/arm: return int from flush_page_to_ram and *_dcache_va_range

Message ID 1413212324-664-3-git-send-email-stefano.stabellini@eu.citrix.com
State: New

Commit Message

Stefano Stabellini Oct. 13, 2014, 2:58 p.m. UTC
These functions cannot really fail on ARM, but their x86 equivalents can
(with -EOPNOTSUPP). Change the prototypes to return int.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen/arch/arm/mm.c          |    6 ++++--
 xen/include/asm-arm/page.h |    8 +++++---
 xen/include/asm-x86/page.h |    3 ++-
 3 files changed, 11 insertions(+), 6 deletions(-)

Comments

Julien Grall Oct. 13, 2014, 3:35 p.m. UTC | #1
On 10/13/2014 03:58 PM, Stefano Stabellini wrote:
> These functions cannot really fail on ARM, but their x86 equivalents can
> (with -EOPNOTSUPP). Change the prototypes to return int.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> ---
>  xen/arch/arm/mm.c          |    6 ++++--
>  xen/include/asm-arm/page.h |    8 +++++---
>  xen/include/asm-x86/page.h |    3 ++-
>  3 files changed, 11 insertions(+), 6 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 996687b..f6f20aa 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -377,12 +377,14 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>  }
>  #endif
>  
> -void flush_page_to_ram(unsigned long mfn)
> +int flush_page_to_ram(unsigned long mfn)

If you decide to change the prototype of flush_page_to_ram, you need to
check the return value at every call site.

IMHO, as this function is implemented as a no-op on x86, I would keep
the void return type and add an ASSERT below to check ret.

Regards,
Stefano Stabellini Oct. 16, 2014, 11:03 a.m. UTC | #2
On Mon, 13 Oct 2014, Julien Grall wrote:
> On 10/13/2014 03:58 PM, Stefano Stabellini wrote:
> > These functions cannot really fail on ARM, but their x86 equivalents can
> > (with -EOPNOTSUPP). Change the prototypes to return int.
> > 
> > Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> > ---
> >  xen/arch/arm/mm.c          |    6 ++++--
> >  xen/include/asm-arm/page.h |    8 +++++---
> >  xen/include/asm-x86/page.h |    3 ++-
> >  3 files changed, 11 insertions(+), 6 deletions(-)
> > 
> > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> > index 996687b..f6f20aa 100644
> > --- a/xen/arch/arm/mm.c
> > +++ b/xen/arch/arm/mm.c
> > @@ -377,12 +377,14 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
> >  }
> >  #endif
> >  
> > -void flush_page_to_ram(unsigned long mfn)
> > +int flush_page_to_ram(unsigned long mfn)
> 
> If you decide to change the prototype of flush_page_to_ram, you need to
> check the return value at every call site.
> 
> IMHO, as this function is implemented as a no-op on x86, I would keep
> the void return type and add an ASSERT below to check ret.

I think you are right, I'll do that.
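
For reference, a minimal sketch of the direction agreed above: keep the
void return type and ASSERT on the result of the range flush, which
cannot fail on ARM. The actual follow-up revision may differ.

    /* Sketch only, not part of this patch: flush_page_to_ram keeps its
     * void return type and the result of the range flush is checked
     * with an ASSERT, per the review suggestion. */
    void flush_page_to_ram(unsigned long mfn)
    {
        int ret;
        void *v = map_domain_page(mfn);

        ret = clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
        ASSERT(ret == 0);
        unmap_domain_page(v);
    }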

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 996687b..f6f20aa 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -377,12 +377,14 @@  unsigned long domain_page_map_to_mfn(const void *ptr)
 }
 #endif
 
-void flush_page_to_ram(unsigned long mfn)
+int flush_page_to_ram(unsigned long mfn)
 {
+    int ret;
     void *v = map_domain_page(mfn);
 
-    clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
+    ret = clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
     unmap_domain_page(v);
+    return ret;
 }
 
 void __init arch_init_memory(void)
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 1327b00..6265c45 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -268,16 +268,17 @@  extern size_t cacheline_bytes;
 /* Functions for flushing medium-sized areas.
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
-static inline void clean_dcache_va_range(const void *p, unsigned long size)
+static inline int clean_dcache_va_range(const void *p, unsigned long size)
 {
     const void *end;
     dsb(sy);           /* So the CPU issues all writes to the range */
     for ( end = p + size; p < end; p += cacheline_bytes )
         asm volatile (__clean_dcache_one(0) : : "r" (p));
     dsb(sy);           /* So we know the flushes happen before continuing */
+    return 0;
 }
 
-static inline void clean_and_invalidate_dcache_va_range
+static inline int clean_and_invalidate_dcache_va_range
     (const void *p, unsigned long size)
 {
     const void *end;
@@ -285,6 +286,7 @@  static inline void clean_and_invalidate_dcache_va_range
     for ( end = p + size; p < end; p += cacheline_bytes )
         asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
     dsb(sy);         /* So we know the flushes happen before continuing */
+    return 0;
 }
 
 /* Macros for flushing a single small item.  The predicate is always
@@ -353,7 +355,7 @@  static inline void flush_xen_data_tlb_range_va(unsigned long va,
 }
 
 /* Flush the dcache for an entire page. */
-void flush_page_to_ram(unsigned long mfn);
+int flush_page_to_ram(unsigned long mfn);
 
 /*
  * Print a walk of a page table or p2m
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 9aa780e..006e3fa 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -21,6 +21,7 @@ 
 #endif
 
 #include <asm/x86_64/page.h>
+#include <xen/errno.h>
 
 /* Read a pte atomically from memory. */
 #define l1e_read_atomic(l1ep) \
@@ -345,7 +346,7 @@  static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
 }
 
 /* No cache maintenance required on x86 architecture. */
-static inline void flush_page_to_ram(unsigned long mfn) {}
+static inline int flush_page_to_ram(unsigned long mfn) { return -EOPNOTSUPP; }
 
 /* return true if permission increased */
 static inline bool_t
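
To illustrate the call-site point raised in the review: if the int
prototype were kept, every caller of flush_page_to_ram would need a
check along these lines. The caller below is hypothetical, not part of
this patch.

    /* Hypothetical call site, for illustration only: the error from
     * flush_page_to_ram is propagated to the caller. With this patch
     * as posted, the x86 stub always returns -EOPNOTSUPP. */
    static int prepare_page_example(unsigned long mfn)
    {
        int rc;

        /* ... write to the page here ... */

        rc = flush_page_to_ram(mfn);
        if ( rc )
            return rc;

        return 0;
    }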