[1/2] mm/cma: remove unsupported gfp_mask parameter from cma_alloc()

Message ID 20180709122019eucas1p2340da484acfcc932537e6014f4fd2c29~-sqTPJKij2939229392eucas1p2j@eucas1p2.samsung.com
State Accepted
Commit 6518202970c1052148daaef9a8096711775e43a2
Series CMA: remove unsupported gfp mask parameter

Commit Message

Marek Szyprowski July 9, 2018, 12:19 p.m. UTC
cma_alloc() function doesn't really support gfp flags other than
__GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.

This will help to avoid giving the false impression that this function supports
standard gfp flags and that callers can pass __GFP_ZERO to get a zeroed buffer,
which has already been an issue: see commit dd65a941f6ba ("arm64:
dma-mapping: clear buffers allocated with FORCE_CONTIGUOUS flag").
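To make the consequence concrete (this snippet is not part of the patch): a caller that needs zeroed CMA memory has to clear the pages itself after cma_alloc() succeeds. The helper below is a made-up illustration using the new boolean parameter, and it assumes lowmem pages so that page_address() is valid:

#include <linux/cma.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Illustrative sketch only: cma_alloc() never honored __GFP_ZERO. */
static struct page *example_alloc_zeroed_cma(struct cma *cma, size_t count,
					     unsigned int align)
{
	struct page *page = cma_alloc(cma, count, align, false);

	if (page)
		memset(page_address(page), 0, count << PAGE_SHIFT);

	return page;
}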

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

---
 arch/powerpc/kvm/book3s_hv_builtin.c       | 2 +-
 drivers/s390/char/vmcp.c                   | 2 +-
 drivers/staging/android/ion/ion_cma_heap.c | 2 +-
 include/linux/cma.h                        | 2 +-
 kernel/dma/contiguous.c                    | 3 ++-
 mm/cma.c                                   | 8 ++++----
 mm/cma_debug.c                             | 2 +-
 7 files changed, 11 insertions(+), 10 deletions(-)

-- 
2.17.1

Comments

Michal Hocko July 9, 2018, 1:09 p.m. UTC | #1
On Mon 09-07-18 14:19:55, Marek Szyprowski wrote:
> cma_alloc() function doesn't really support gfp flags other than
> __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
> 
> This will help to avoid giving the false impression that this function supports
> standard gfp flags and that callers can pass __GFP_ZERO to get a zeroed buffer,
> which has already been an issue: see commit dd65a941f6ba ("arm64:
> dma-mapping: clear buffers allocated with FORCE_CONTIGUOUS flag").
> 
> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

Thanks! This makes perfect sense to me. If there is a real need for the
gfp_mask then we should start by defining the semantics first.

Acked-by: Michal Hocko <mhocko@suse.com>


-- 
Michal Hocko
SUSE Labs
Laura Abbott July 9, 2018, 5:27 p.m. UTC | #2
On 07/09/2018 05:19 AM, Marek Szyprowski wrote:
> cma_alloc() function doesn't really support gfp flags other than
> __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
> 
> This will help to avoid giving the false impression that this function supports
> standard gfp flags and that callers can pass __GFP_ZERO to get a zeroed buffer,
> which has already been an issue: see commit dd65a941f6ba ("arm64:
> dma-mapping: clear buffers allocated with FORCE_CONTIGUOUS flag").
> 

For Ion,

Acked-by: Laura Abbott <labbott@redhat.com>


Joonsoo Kim July 10, 2018, 7:19 a.m. UTC | #3
Hello, Marek.

2018-07-09 21:19 GMT+09:00 Marek Szyprowski <m.szyprowski@samsung.com>:
> cma_alloc() function doesn't really support gfp flags other than
> __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.

Although gfp_mask isn't used in cma_alloc() except for no_warn, it can be used
in alloc_contig_range(). For example, if the passed gfp mask has no __GFP_FS,
compaction (isolation) would work differently. Have you considered such a case?

Thanks.
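For context on the concern above: the gfp mask that cma_alloc() used to forward reaches the page-isolation code via alloc_contig_range(), and kernels of this era skip filesystem-backed pages there when __GFP_FS is absent. The helper name below is invented, and the check is paraphrased from mm/compaction.c rather than quoted verbatim:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Paraphrased sketch of the isolation-time check the gfp mask could affect. */
static bool example_can_isolate(struct page *page, gfp_t gfp_mask)
{
	/*
	 * Without __GFP_FS, only pages that do not depend on filesystem
	 * locks (i.e. pages with no mapping) are migrated, so a stricter
	 * mask really can change compaction/isolation behaviour.
	 */
	if (!(gfp_mask & __GFP_FS) && page_mapping(page))
		return false;

	return true;
}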
Michal Hocko July 10, 2018, 9:50 a.m. UTC | #4
On Tue 10-07-18 16:19:32, Joonsoo Kim wrote:
> Hello, Marek.
> 
> 2018-07-09 21:19 GMT+09:00 Marek Szyprowski <m.szyprowski@samsung.com>:
> > cma_alloc() function doesn't really support gfp flags other than
> > __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
> 
> Although gfp_mask isn't used in cma_alloc() except for no_warn, it can be used
> in alloc_contig_range(). For example, if the passed gfp mask has no __GFP_FS,
> compaction (isolation) would work differently. Have you considered such a case?

Do any of the cma_alloc() users actually care about GFP_NO{FS,IO}?
-- 
Michal Hocko
SUSE Labs
Joonsoo Kim July 11, 2018, 7:35 a.m. UTC | #5
2018-07-10 18:50 GMT+09:00 Michal Hocko <mhocko@kernel.org>:
> On Tue 10-07-18 16:19:32, Joonsoo Kim wrote:
>> Hello, Marek.
>>
>> 2018-07-09 21:19 GMT+09:00 Marek Szyprowski <m.szyprowski@samsung.com>:
>> > cma_alloc() function doesn't really support gfp flags other than
>> > __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
>>
>> Although gfp_mask isn't used in cma_alloc() except for no_warn, it can be used
>> in alloc_contig_range(). For example, if the passed gfp mask has no __GFP_FS,
>> compaction (isolation) would work differently. Have you considered such a case?
>
> Do any of the cma_alloc() users actually care about GFP_NO{FS,IO}?

I don't know. My guess is that cma_alloc() is used for DMA allocation, so
block devices could use it, too. If the fs/block subsystem initiates the
request for the device, it would be possible for cma_alloc() to be called
with such a flag. Again, I don't know much about those subsystems, so I
could be wrong.

Thanks.
Michal Hocko July 11, 2018, 8:54 a.m. UTC | #6
On Wed 11-07-18 16:35:28, Joonsoo Kim wrote:
> 2018-07-10 18:50 GMT+09:00 Michal Hocko <mhocko@kernel.org>:
> > On Tue 10-07-18 16:19:32, Joonsoo Kim wrote:
> >> Hello, Marek.
> >>
> >> 2018-07-09 21:19 GMT+09:00 Marek Szyprowski <m.szyprowski@samsung.com>:
> >> > cma_alloc() function doesn't really support gfp flags other than
> >> > __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
> >>
> >> Although gfp_mask isn't used in cma_alloc() except for no_warn, it can be used
> >> in alloc_contig_range(). For example, if the passed gfp mask has no __GFP_FS,
> >> compaction (isolation) would work differently. Have you considered such a case?
> >
> > Do any of the cma_alloc() users actually care about GFP_NO{FS,IO}?
> 
> I don't know. My guess is that cma_alloc() is used for DMA allocation, so
> block devices could use it, too. If the fs/block subsystem initiates the
> request for the device, it would be possible for cma_alloc() to be called
> with such a flag. Again, I don't know much about those subsystems, so I
> could be wrong.

The patch converts existing users and none of them really tries to use
anything other than GFP_KERNEL [|__GFP_NOWARN], so this doesn't seem to
be the case. Should there be a new user requiring a more restricted
gfp_mask, we should carefully re-evaluate and think about how to support it.

Until then I would simply stick with the proposed approach, because my
experience tells me that wrong gfp mask usage is way too easy, so the
simpler the API is, the less likely we are to see abuse.
-- 
Michal Hocko
SUSE Labs
Joonsoo Kim July 12, 2018, 2:48 a.m. UTC | #7
2018-07-11 17:54 GMT+09:00 Michal Hocko <mhocko@kernel.org>:
> On Wed 11-07-18 16:35:28, Joonsoo Kim wrote:
>> 2018-07-10 18:50 GMT+09:00 Michal Hocko <mhocko@kernel.org>:
>> > On Tue 10-07-18 16:19:32, Joonsoo Kim wrote:
>> >> Hello, Marek.
>> >>
>> >> 2018-07-09 21:19 GMT+09:00 Marek Szyprowski <m.szyprowski@samsung.com>:
>> >> > cma_alloc() function doesn't really support gfp flags other than
>> >> > __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
>> >>
>> >> Although gfp_mask isn't used in cma_alloc() except for no_warn, it can be used
>> >> in alloc_contig_range(). For example, if the passed gfp mask has no __GFP_FS,
>> >> compaction (isolation) would work differently. Have you considered such a case?
>> >
>> > Do any of the cma_alloc() users actually care about GFP_NO{FS,IO}?
>>
>> I don't know. My guess is that cma_alloc() is used for DMA allocation, so
>> block devices could use it, too. If the fs/block subsystem initiates the
>> request for the device, it would be possible for cma_alloc() to be called
>> with such a flag. Again, I don't know much about those subsystems, so I
>> could be wrong.
>
> The patch converts existing users and none of them really tries to use
> anything other than GFP_KERNEL [|__GFP_NOWARN], so this doesn't seem to
> be the case. Should there be a new user requiring a more restricted
> gfp_mask, we should carefully re-evaluate and think about how to support it.

One of the existing users is the general DMA layer, and it takes gfp flags
provided by the user. I haven't checked all the DMA allocation sites, but how
can you be sure that none of them try to use anything other than
GFP_KERNEL [|__GFP_NOWARN]?

Thanks.
Christoph Hellwig July 12, 2018, 7:15 a.m. UTC | #8
On Thu, Jul 12, 2018 at 11:48:47AM +0900, Joonsoo Kim wrote:
> One of the existing users is the general DMA layer, and it takes gfp flags
> provided by the user. I haven't checked all the DMA allocation sites, but how
> can you be sure that none of them try to use anything other than
> GFP_KERNEL [|__GFP_NOWARN]?

They still use a few other things like __GFP_COMP, __GFP_DMA or
GFP_HUGEPAGE.  But all of these are bogus, as we have various implementations
that can't respect them.  I plan to get rid of the gfp_t argument
in the dma_map_ops alloc method in a few merge windows because of that,
but it needs further implementation consolidation first.
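For reference, the dma_map_ops allocation hook mentioned above still took a gfp_t at the time; the fragment below is a simplified illustration (most callbacks omitted) of include/linux/dma-mapping.h from that period, shown only to indicate which argument would go away:

/* Simplified fragment of struct dma_map_ops (circa v4.18); not complete. */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t dma_handle, unsigned long attrs);
	/* ... remaining callbacks omitted for brevity ... */
};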
Joonsoo Kim July 13, 2018, 6:29 a.m. UTC | #9
2018-07-12 16:15 GMT+09:00 Christoph Hellwig <hch@lst.de>:
> On Thu, Jul 12, 2018 at 11:48:47AM +0900, Joonsoo Kim wrote:
>> One of the existing users is the general DMA layer, and it takes gfp flags
>> provided by the user. I haven't checked all the DMA allocation sites, but how
>> can you be sure that none of them try to use anything other than
>> GFP_KERNEL [|__GFP_NOWARN]?
>
> They still use a few other things like __GFP_COMP, __GFP_DMA or
> GFP_HUGEPAGE.  But all of these are bogus, as we have various implementations
> that can't respect them.  I plan to get rid of the gfp_t argument
> in the dma_map_ops alloc method in a few merge windows because of that,
> but it needs further implementation consolidation first.

Okay. If those are the only flags in use, this change would be okay.

As a reminder of why the gfp flag was introduced in cma_alloc() in the first
place, see the following link.

https://marc.info/?l=linux-mm&m=148431452118407

Thanks.
Vlastimil Babka July 16, 2018, 7:45 a.m. UTC | #10
On 07/09/2018 02:19 PM, Marek Szyprowski wrote:
> cma_alloc() function doesn't really support gfp flags other than
> __GFP_NOWARN, so convert gfp_mask parameter to boolean no_warn parameter.
> 
> This will help to avoid giving the false impression that this function supports
> standard gfp flags and that callers can pass __GFP_ZERO to get a zeroed buffer,
> which has already been an issue: see commit dd65a941f6ba ("arm64:
> dma-mapping: clear buffers allocated with FORCE_CONTIGUOUS flag").
> 
> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>
Christoph Hellwig July 17, 2018, 3:08 p.m. UTC | #11
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index d4a3f4da409b..fc6bb9630a9c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -77,7 +77,7 @@  struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
 	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
-			 GFP_KERNEL);
+			 false);
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
 
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 948ce82a7725..0fa1b6b1491a 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -68,7 +68,7 @@  static void vmcp_response_alloc(struct vmcp_session *session)
 	 * anymore the system won't work anyway.
 	 */
 	if (order > 2)
-		page = cma_alloc(vmcp_cma, nr_pages, 0, GFP_KERNEL);
+		page = cma_alloc(vmcp_cma, nr_pages, 0, false);
 	if (page) {
 		session->response = (char *)page_to_phys(page);
 		session->cma_alloc = 1;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 49718c96bf9e..3fafd013d80a 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,7 +39,7 @@  static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
+	pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
 	if (!pages)
 		return -ENOMEM;
 
diff --git a/include/linux/cma.h b/include/linux/cma.h
index bf90f0bb42bd..190184b5ff32 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -33,7 +33,7 @@  extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					const char *name,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-			      gfp_t gfp_mask);
+			      bool no_warn);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index d987dcd1bd56..19ea5d70150c 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -191,7 +191,8 @@  struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
+	return cma_alloc(dev_get_cma_area(dev), count, align,
+			 gfp_mask & __GFP_NOWARN);
 }
 
 /**
diff --git a/mm/cma.c b/mm/cma.c
index 5809bbe360d7..4cb76121a3ab 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -395,13 +395,13 @@  static inline void cma_debug_show_areas(struct cma *cma) { }
  * @cma:   Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @gfp_mask:  GFP mask to use during compaction
+ * @no_warn: Avoid printing message about failed allocation
  *
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
 struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-		       gfp_t gfp_mask)
+		       bool no_warn)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
@@ -447,7 +447,7 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-					 gfp_mask);
+				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
 		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -466,7 +466,7 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
 	trace_cma_alloc(pfn, page, count, align);
 
-	if (ret && !(gfp_mask & __GFP_NOWARN)) {
+	if (ret && !no_warn) {
 		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
 			__func__, count, ret);
 		cma_debug_show_areas(cma);
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index f23467291cfb..ad6723e9d110 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -139,7 +139,7 @@  static int cma_alloc_mem(struct cma *cma, int count)
 	if (!mem)
 		return -ENOMEM;
 
-	p = cma_alloc(cma, count, 0, GFP_KERNEL);
+	p = cma_alloc(cma, count, 0, false);
 	if (!p) {
 		kfree(mem);
 		return -ENOMEM;
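
To summarize the API change for future callers, here is a minimal before/after sketch; my_cma, nr_pages and align are placeholder variables rather than names taken from the patch:

/* Before this patch: a gfp_mask of which only __GFP_NOWARN had any effect. */
page = cma_alloc(my_cma, nr_pages, align, GFP_KERNEL | __GFP_NOWARN);

/* After this patch: a plain boolean selects whether a failure is logged. */
page = cma_alloc(my_cma, nr_pages, align, true /* no_warn */);
if (!page)
	return -ENOMEM;

/* ... use the buffer ... */

cma_release(my_cma, page, nr_pages);

Note that the kernel/dma/contiguous.c hunk above passes gfp_mask & __GFP_NOWARN straight into the new bool parameter; in C any non-zero value converts to true, so only the __GFP_NOWARN bit of the caller's mask still has an effect.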