
[v3,1/4] ARM: dma-mapping: atomic_pool with struct page **pages

Message ID 1345796945-21115-2-git-send-email-hdoyu@nvidia.com
State New

Commit Message

Hiroshi Doyu Aug. 24, 2012, 8:29 a.m. UTC
struct page **pages is necessary to align with the non-atomic path in
__iommu_get_pages(). atomic_pool now holds an initialized **pages array
instead of just a single *page.

Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
---
 arch/arm/mm/dma-mapping.c |   17 +++++++++++++----
 1 files changed, 13 insertions(+), 4 deletions(-)

Comments

Konrad Rzeszutek Wilk Aug. 24, 2012, 11:13 a.m. UTC | #1
On Fri, Aug 24, 2012 at 11:29:02AM +0300, Hiroshi Doyu wrote:
> struct page **pages is necessary to align with the non-atomic path in
> __iommu_get_pages(). atomic_pool now holds an initialized **pages array
> instead of just a single *page.
> 
> Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
> ---
>  arch/arm/mm/dma-mapping.c |   17 +++++++++++++----
>  1 files changed, 13 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
> index 601da7a..b14ee64 100644
> --- a/arch/arm/mm/dma-mapping.c
> +++ b/arch/arm/mm/dma-mapping.c
> @@ -296,7 +296,7 @@ struct dma_pool {
>  	unsigned long *bitmap;
>  	unsigned long nr_pages;
>  	void *vaddr;
> -	struct page *page;
> +	struct page **pages;
>  };
>  
>  static struct dma_pool atomic_pool = {
> @@ -335,12 +335,16 @@ static int __init atomic_pool_init(void)
>  	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
>  	unsigned long *bitmap;
>  	struct page *page;
> +	struct page **pages;
>  	void *ptr;
>  	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
> +	size_t size = nr_pages * sizeof(struct page *);
>  
> -	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
> +	size += bitmap_size;
> +	bitmap = kzalloc(size, GFP_KERNEL);
>  	if (!bitmap)
>  		goto no_bitmap;
> +	pages = (void *)bitmap + bitmap_size;

So you stuck a bitmap field in front of the array then?
Why not just define a structure where this is clearly defined
instead of doing the casting.
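For illustration, one possible shape of that suggestion -- hypothetical
names, not code from this thread -- keeps the single allocation but
spells the layout out in a type. C permits only one flexible array
member per struct, so the second variable-length region still needs one
computed pointer, which is the constraint Hiroshi raises below:

	#include <linux/slab.h>

	struct pool_backing {
		unsigned long *bitmap;	/* set to point just past pages[] */
		struct page *pages[];	/* nr_pages entries; bitmap words follow */
	};

	static struct pool_backing *pool_backing_alloc(unsigned long nr_pages)
	{
		int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
		struct pool_backing *pb;

		pb = kzalloc(sizeof(*pb) + nr_pages * sizeof(struct page *) +
			     bitmap_size, GFP_KERNEL);
		if (!pb)
			return NULL;
		/* one computed pointer in one place, instead of
		 * open-coded (void *) arithmetic at the call site */
		pb->bitmap = (unsigned long *)&pb->pages[nr_pages];
		return pb;
	}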

>  
>  	if (IS_ENABLED(CONFIG_CMA))
>  		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
> @@ -348,9 +352,14 @@ static int __init atomic_pool_init(void)
>  		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
>  					   &page, NULL);
>  	if (ptr) {
> +		int i;
> +
> +		for (i = 0; i < nr_pages; i++)
> +			pages[i] = page + i;
> +
>  		spin_lock_init(&pool->lock);
>  		pool->vaddr = ptr;
> -		pool->page = page;
> +		pool->pages = pages;
>  		pool->bitmap = bitmap;
>  		pool->nr_pages = nr_pages;
>  		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
> @@ -481,7 +490,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
>  	if (pageno < pool->nr_pages) {
>  		bitmap_set(pool->bitmap, pageno, count);
>  		ptr = pool->vaddr + PAGE_SIZE * pageno;
> -		*ret_page = pool->page + pageno;
> +		*ret_page = pool->pages[pageno];
>  	} else {
>  		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
>  			    "Please increase it with coherent_pool= kernel parameter!\n",
> -- 
> 1.7.5.4
>
Hiroshi Doyu Aug. 24, 2012, 11:52 a.m. UTC | #2
Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote @ Fri, 24 Aug 2012 13:13:23 +0200:

> On Fri, Aug 24, 2012 at 11:29:02AM +0300, Hiroshi Doyu wrote:
> > struct page **pages is necessary to align with the non-atomic path in
> > __iommu_get_pages(). atomic_pool now holds an initialized **pages array
> > instead of just a single *page.
> > 
> > Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
> > ---
> >  arch/arm/mm/dma-mapping.c |   17 +++++++++++++----
> >  1 files changed, 13 insertions(+), 4 deletions(-)
> > 
> > diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
> > index 601da7a..b14ee64 100644
> > --- a/arch/arm/mm/dma-mapping.c
> > +++ b/arch/arm/mm/dma-mapping.c
> > @@ -296,7 +296,7 @@ struct dma_pool {
> >  	unsigned long *bitmap;
> >  	unsigned long nr_pages;
> >  	void *vaddr;
> > -	struct page *page;
> > +	struct page **pages;
> >  };
> >  
> >  static struct dma_pool atomic_pool = {
> > @@ -335,12 +335,16 @@ static int __init atomic_pool_init(void)
> >  	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
> >  	unsigned long *bitmap;
> >  	struct page *page;
> > +	struct page **pages;
> >  	void *ptr;
> >  	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
> > +	size_t size = nr_pages * sizeof(struct page *);
> >  
> > -	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
> > +	size += bitmap_size;
> > +	bitmap = kzalloc(size, GFP_KERNEL);
> >  	if (!bitmap)
> >  		goto no_bitmap;
> > +	pages = (void *)bitmap + bitmap_size;
> 
> So you stuck a bitmap field in front of the array then?
> Why not just define a structure where this is clearly defined
> instead of doing the casting.

I just wanted to allocate the members "pool->bitmap" and "pool->pages"
in a single allocation. Since the size of the whole bitmap isn't known
in advance, I couldn't find any fixed type for it whose pointer could be
advanced past the bitmap without casting. IOW, both members are
variable length.
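
For reference, the layout the patch carves out of the single kzalloc()
looks like this (sizes as computed in the hunk above):

	/*
	 * bitmap                        pages
	 * +---------------------------+---------------------------------+
	 * | BITS_TO_LONGS(nr_pages)   | nr_pages                        |
	 * |   * sizeof(long) bytes    |   * sizeof(struct page *) bytes |
	 * +---------------------------+---------------------------------+
	 *
	 * Both region sizes scale with nr_pages, so neither fits a
	 * fixed-size struct member; hence the (void *) arithmetic.
	 */
	pages = (void *)bitmap + bitmap_size;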
Marek Szyprowski Aug. 24, 2012, 12:21 p.m. UTC | #3
Hello,

On Friday, August 24, 2012 1:52 PM Hiroshi Doyu wrote:

> Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote @ Fri, 24 Aug 2012 13:13:23 +0200:
> 
> > On Fri, Aug 24, 2012 at 11:29:02AM +0300, Hiroshi Doyu wrote:
> > > struct page **pages is necessary to align with the non-atomic path in
> > > __iommu_get_pages(). atomic_pool now holds an initialized **pages array
> > > instead of just a single *page.
> > >
> > > Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
> > > ---
> > >  arch/arm/mm/dma-mapping.c |   17 +++++++++++++----
> > >  1 files changed, 13 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
> > > index 601da7a..b14ee64 100644
> > > --- a/arch/arm/mm/dma-mapping.c
> > > +++ b/arch/arm/mm/dma-mapping.c
> > > @@ -296,7 +296,7 @@ struct dma_pool {
> > >  	unsigned long *bitmap;
> > >  	unsigned long nr_pages;
> > >  	void *vaddr;
> > > -	struct page *page;
> > > +	struct page **pages;
> > >  };
> > >
> > >  static struct dma_pool atomic_pool = {
> > > @@ -335,12 +335,16 @@ static int __init atomic_pool_init(void)
> > >  	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
> > >  	unsigned long *bitmap;
> > >  	struct page *page;
> > > +	struct page **pages;
> > >  	void *ptr;
> > >  	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
> > > +	size_t size = nr_pages * sizeof(struct page *);
> > >
> > > -	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
> > > +	size += bitmap_size;
> > > +	bitmap = kzalloc(size, GFP_KERNEL);
> > >  	if (!bitmap)
> > >  		goto no_bitmap;
> > > +	pages = (void *)bitmap + bitmap_size;
> >
> > So you stuck a bitmap field in front of the array then?
> > Why not just define a structure where this is clearly defined
> > instead of doing the casting.
> 
> I just wanted to allocate the members "pool->bitmap" and "pool->pages"
> in a single allocation. Since the size of the whole bitmap isn't known
> in advance, I couldn't find any fixed type for it whose pointer could be
> advanced past the bitmap without casting. IOW, both members are
> variable length.

IMHO it is better to avoid such non-trivial constructs in generic arch code. Merging
those 2 allocations doesn't save any significant amount of memory and might confuse
someone. Better to just allocate them separately.
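
Sketched, that separate-allocation variant would be along these lines
(hypothetical error label; not a posted patch):

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;	/* hypothetical label: kfree(bitmap) there */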

Best regards

Patch

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 601da7a..b14ee64 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -296,7 +296,7 @@  struct dma_pool {
 	unsigned long *bitmap;
 	unsigned long nr_pages;
 	void *vaddr;
-	struct page *page;
+	struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
@@ -335,12 +335,16 @@  static int __init atomic_pool_init(void)
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
+	struct page **pages;
 	void *ptr;
 	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
+	size_t size = nr_pages * sizeof(struct page *);
 
-	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	size += bitmap_size;
+	bitmap = kzalloc(size, GFP_KERNEL);
 	if (!bitmap)
 		goto no_bitmap;
+	pages = (void *)bitmap + bitmap_size;
 
 	if (IS_ENABLED(CONFIG_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
@@ -348,9 +352,14 @@  static int __init atomic_pool_init(void)
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
 					   &page, NULL);
 	if (ptr) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++)
+			pages[i] = page + i;
+
 		spin_lock_init(&pool->lock);
 		pool->vaddr = ptr;
-		pool->page = page;
+		pool->pages = pages;
 		pool->bitmap = bitmap;
 		pool->nr_pages = nr_pages;
 		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
@@ -481,7 +490,7 @@  static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->page + pageno;
+		*ret_page = pool->pages[pageno];
 	} else {
 		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
 			    "Please increase it with coherent_pool= kernel parameter!\n",