[v4,1/4] mm: cma: introduce gfp flag in cma_alloc instead of no_warn

Message ID 20210121175502.274391-2-minchan@kernel.org
State New
Series Chunk Heap Support on DMA-HEAP

Commit Message

Minchan Kim Jan. 21, 2021, 5:54 p.m. UTC
The upcoming patch will introduce __GFP_NORETRY semantics
in alloc_contig_range, which is a failfast mode of the API.
Instead of adding an additional gfp parameter, replace
no_warn with a gfp flag.

To keep the old behavior, the conversion follows the rules below.

  no_warn 			gfp_flags

  false         		GFP_KERNEL
  true          		GFP_KERNEL|__GFP_NOWARN
  gfp & __GFP_NOWARN		GFP_KERNEL | (gfp & __GFP_NOWARN)
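
For example, a call site that used to pass no_warn = true converts
like this (illustrative caller; the real conversions are in the diff
below):

	/* before: bool flag suppresses the failure warning */
	page = cma_alloc(cma, nr_pages, align, true);

	/* after: the same intent expressed via the gfp mask */
	page = cma_alloc(cma, nr_pages, align, GFP_KERNEL | __GFP_NOWARN);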

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 drivers/dma-buf/heaps/cma_heap.c |  2 +-
 drivers/s390/char/vmcp.c         |  2 +-
 include/linux/cma.h              |  2 +-
 kernel/dma/contiguous.c          |  3 ++-
 mm/cma.c                         | 12 ++++++------
 mm/cma_debug.c                   |  2 +-
 mm/hugetlb.c                     |  6 ++++--
 mm/secretmem.c                   |  3 ++-
 8 files changed, 18 insertions(+), 14 deletions(-)

Comments

Minchan Kim Jan. 21, 2021, 6:50 p.m. UTC | #1
On Thu, Jan 21, 2021 at 09:54:59AM -0800, Minchan Kim wrote:
> The upcoming patch will introduce __GFP_NORETRY semantics
> in alloc_contig_range, which is a failfast mode of the API.
> Instead of adding an additional gfp parameter, replace
> no_warn with a gfp flag.
> 
> To keep the old behavior, the conversion follows the rules below.
> 
>   no_warn 			gfp_flags
> 
>   false         		GFP_KERNEL
>   true          		GFP_KERNEL|__GFP_NOWARN
>   gfp & __GFP_NOWARN		GFP_KERNEL | (gfp & __GFP_NOWARN)
> 
> Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> Signed-off-by: Minchan Kim <minchan@kernel.org>

Found one missing piece: cma_alloc_aligned.

Resending with the fix.
Michal Hocko Jan. 26, 2021, 7:38 a.m. UTC | #2
On Mon 25-01-21 11:42:34, Minchan Kim wrote:
> On Mon, Jan 25, 2021 at 02:07:01PM +0100, Michal Hocko wrote:
> > On Thu 21-01-21 09:54:59, Minchan Kim wrote:
> > > The upcoming patch will introduce __GFP_NORETRY semantics
> > > in alloc_contig_range, which is a failfast mode of the API.
> > > Instead of adding an additional gfp parameter, replace
> > > no_warn with a gfp flag.
> > > 
> > > To keep the old behavior, the conversion follows the rules below.
> > > 
> > >   no_warn 			gfp_flags
> > > 
> > >   false         		GFP_KERNEL
> > >   true          		GFP_KERNEL|__GFP_NOWARN
> > >   gfp & __GFP_NOWARN		GFP_KERNEL | (gfp & __GFP_NOWARN)
> > > 
> > > Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> > > Signed-off-by: Minchan Kim <minchan@kernel.org>
> > [...]
> > > diff --git a/mm/cma.c b/mm/cma.c
> > > index 0ba69cd16aeb..d50627686fec 100644
> > > --- a/mm/cma.c
> > > +++ b/mm/cma.c
> > > @@ -419,13 +419,13 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
> > >   * @cma:   Contiguous memory region for which the allocation is performed.
> > >   * @count: Requested number of pages.
> > >   * @align: Requested alignment of pages (in PAGE_SIZE order).
> > > - * @no_warn: Avoid printing message about failed allocation
> > > + * @gfp_mask: GFP mask to use during the cma allocation.
> > 
> > Call out supported gfp flags explicitly. Have a look at kvmalloc_node
> > for guidance.
> 
> How about this?
> 
> diff --git a/mm/cma.c b/mm/cma.c
> index d50627686fec..b94727b694d6 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -423,6 +423,10 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
>   *
>   * This function allocates part of contiguous memory on specific
>   * contiguous memory area.
> + *
> + * For gfp_mask, GFP_KERNEL and __GFP_NORETRY are supported. __GFP_NORETRY
> + * will avoid costly functions (e.g., waiting on page writeback and locking)
> + * in the current implementation during the page migration.

Rather than explicitly mentioning what the flag implies, I think it would
be more useful to state the intended usecase. See how kvmalloc_node says
"__GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
preferable to the vmalloc fallback, due to visible performance
drawbacks.
__GFP_NOWARN is also supported to suppress allocation failure messages."

This would help people not familiar with internals to see whether this
flag is a good fit for them.

In this case I would go with
"
@flags: gfp mask. Must be compatible (superset) with GFP_KERNEL.
[...]
Reclaim modifiers (__GFP_RETRY_MAYFAIL, __GFP_NOFAIL) are not supported.
__GFP_NORETRY is supported, and it should be used for opportunistic
allocation attempts that should rather fail quickly when the caller has
a fallback strategy.
"

Obviously for this patch you will go with a simple statement that
Reclaim modifiers are not supported at all.

>   */
>  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
>                        gfp_t gfp_mask)
> 

-- 
Michal Hocko
SUSE Labs
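
Applied to cma_alloc(), the suggested wording could produce a kernel-doc
parameter description along these lines (a sketch combining both review
suggestions for the final state of the series, not the merged text):

 * @gfp_mask: GFP mask. Must be compatible with (a superset of) GFP_KERNEL.
 *            Reclaim modifiers (__GFP_RETRY_MAYFAIL, __GFP_NOFAIL) are not
 *            supported. __GFP_NORETRY is supported, and it should be used
 *            for opportunistic allocation attempts that should rather fail
 *            quickly when the caller has a fallback strategy.
 *            __GFP_NOWARN is also supported to suppress allocation failure
 *            messages.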

Patch

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 364fc2f3e499..0afc1907887a 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -298,7 +298,7 @@  static int cma_heap_allocate(struct dma_heap *heap,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
 	if (!cma_pages)
 		goto free_buffer;
 
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 9e066281e2d0..78f9adf56456 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -70,7 +70,7 @@  static void vmcp_response_alloc(struct vmcp_session *session)
 	 * anymore the system won't work anyway.
 	 */
 	if (order > 2)
-		page = cma_alloc(vmcp_cma, nr_pages, 0, false);
+		page = cma_alloc(vmcp_cma, nr_pages, 0, GFP_KERNEL);
 	if (page) {
 		session->response = (char *)page_to_phys(page);
 		session->cma_alloc = 1;
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 217999c8a762..d6c02d08ddbc 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -45,7 +45,7 @@  extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					const char *name,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-			      bool no_warn);
+			      gfp_t gfp_mask);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 3d63d91cba5c..552ed531c018 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -260,7 +260,8 @@  struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
+	return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL |
+			(no_warn ? __GFP_NOWARN : 0));
 }
 
 /**
diff --git a/mm/cma.c b/mm/cma.c
index 0ba69cd16aeb..d50627686fec 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -419,13 +419,13 @@  static inline void cma_debug_show_areas(struct cma *cma) { }
  * @cma:   Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @no_warn: Avoid printing message about failed allocation
+ * @gfp_mask: GFP mask to use during the cma allocation.
  *
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
 struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-		       bool no_warn)
+		       gfp_t gfp_mask)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
@@ -438,8 +438,8 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	if (!cma || !cma->count || !cma->bitmap)
 		return NULL;
 
-	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
-		 count, align);
+	pr_debug("%s(cma %p, count %zu, align %d gfp_mask 0x%x)\n", __func__,
+			(void *)cma, count, align, gfp_mask);
 
 	if (!count)
 		return NULL;
@@ -471,7 +471,7 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+						gfp_mask);
 
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -500,7 +500,7 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 			page_kasan_tag_reset(page + i);
 	}
 
-	if (ret && !no_warn) {
+	if (ret && !(gfp_mask & __GFP_NOWARN)) {
 		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
 			__func__, count, ret);
 		cma_debug_show_areas(cma);
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index d5bf8aa34fdc..00170c41cf81 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -137,7 +137,7 @@  static int cma_alloc_mem(struct cma *cma, int count)
 	if (!mem)
 		return -ENOMEM;
 
-	p = cma_alloc(cma, count, 0, false);
+	p = cma_alloc(cma, count, 0, GFP_KERNEL);
 	if (!p) {
 		kfree(mem);
 		return -ENOMEM;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a6bad1f686c5..4209a2ed1e1b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1266,7 +1266,8 @@  static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 
 		if (hugetlb_cma[nid]) {
 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
-					huge_page_order(h), true);
+					huge_page_order(h),
+					GFP_KERNEL | __GFP_NOWARN);
 			if (page)
 				return page;
 		}
@@ -1277,7 +1278,8 @@  static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 					continue;
 
 				page = cma_alloc(hugetlb_cma[node], nr_pages,
-						huge_page_order(h), true);
+						huge_page_order(h),
+						GFP_KERNEL | __GFP_NOWARN);
 				if (page)
 					return page;
 			}
diff --git a/mm/secretmem.c b/mm/secretmem.c
index b8a32954ac68..585d55b9f9d8 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -86,7 +86,8 @@  static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
 	struct page *page;
 	int err;
 
-	page = cma_alloc(secretmem_cma, nr_pages, PMD_SIZE, gfp & __GFP_NOWARN);
+	page = cma_alloc(secretmem_cma, nr_pages, PMD_SIZE,
+				GFP_KERNEL | (gfp & __GFP_NOWARN));
 	if (!page)
 		return -ENOMEM;
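
A later patch in this series makes alloc_contig_range() honor
__GFP_NORETRY, at which point an opportunistic caller with a fallback
path could use the new parameter roughly as follows (a hypothetical
sketch; cma, nr_pages, align, and order stand in for the caller's own
values, mirroring the common CMA-then-buddy fallback pattern):

	struct page *page;

	/* failfast attempt: avoid long blocking during page migration */
	page = cma_alloc(cma, nr_pages, align, GFP_KERNEL | __GFP_NORETRY);
	if (!page)
		page = alloc_pages(GFP_KERNEL, order);	/* fallback */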