
[v4,03/10] mm: apply per-task gfp constraints in fast path

Message ID 20201217185243.3288048-4-pasha.tatashin@soleen.com
State New
Series [v4,01/10] mm/gup: don't pin migrated cma pages in movable zone

Commit Message

Pasha Tatashin Dec. 17, 2020, 6:52 p.m. UTC
The function current_gfp_context() is currently called only after the fast
path. However, we will soon add more constraints that also limit the allowed
zones based on context. Move this call into the fast path, so the correct
constraints are applied to all allocations.

Also update .reclaim_idx based on the value returned by current_gfp_context(),
because it will soon modify the allowed zones.
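
For illustration (a sketch assuming the zone-limiting behavior added later in
this series, where current_gfp_context() may clear __GFP_MOVABLE for tasks
marked PF_MEMALLOC_NOMOVABLE), the raw and the constrained mask can map to
different zone indexes, so reclaim must be driven by the constrained mask:

	gfp_t raw = GFP_HIGHUSER_MOVABLE;	/* gfp_zone(raw) == ZONE_MOVABLE */
	gfp_t eff = current_gfp_context(raw);	/* may have __GFP_MOVABLE cleared */

	/* Reclaim only the zones the allocation is actually allowed to use. */
	sc.reclaim_idx = gfp_zone(eff);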

Note:
With this patch we do one extra current->flags load in the fast path.
However, we already load current->flags in the fast path here:

__alloc_pages_nodemask()
 prepare_alloc_pages()
  current_alloc_flags(gfp_mask, *alloc_flags);

Later, when the zone constraint logic is added to current_gfp_context(), the
current->flags load can be removed from current_alloc_flags(), returning the
fast path to its current performance level.
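
For reference, current_gfp_context() at this point looks roughly like the
helper below (from include/linux/sched/mm.h; exact details may vary between
kernel versions). It demotes the given mask according to the calling task's
scoped constraints:

	static inline gfp_t current_gfp_context(gfp_t flags)
	{
		unsigned int pflags = READ_ONCE(current->flags);

		if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
			/*
			 * NOIO implies both NOIO and NOFS, and it is a weaker
			 * context, so always make sure it takes precedence.
			 */
			if (pflags & PF_MEMALLOC_NOIO)
				flags &= ~(__GFP_IO | __GFP_FS);
			else if (pflags & PF_MEMALLOC_NOFS)
				flags &= ~__GFP_FS;
		}
		return flags;
	}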

Suggested-by: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Acked-by: Michal Hocko <mhocko@suse.com>
---
 mm/page_alloc.c | 15 ++++++++-------
 mm/vmscan.c     | 10 ++++++----
 2 files changed, 14 insertions(+), 11 deletions(-)

Comments

Michal Hocko Dec. 18, 2020, 9:36 a.m. UTC | #1
On Thu 17-12-20 13:52:36, Pavel Tatashin wrote:
[..]
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 469016222cdb..d9546f5897f4 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3234,11 +3234,12 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
>  unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
>  				gfp_t gfp_mask, nodemask_t *nodemask)
>  {
> +	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
>  	unsigned long nr_reclaimed;
>  	struct scan_control sc = {
>  		.nr_to_reclaim = SWAP_CLUSTER_MAX,
> -		.gfp_mask = current_gfp_context(gfp_mask),
> -		.reclaim_idx = gfp_zone(gfp_mask),
> +		.gfp_mask = current_gfp_mask,
> +		.reclaim_idx = gfp_zone(current_gfp_mask),
>  		.order = order,
>  		.nodemask = nodemask,
>  		.priority = DEF_PRIORITY,
> @@ -4158,17 +4159,18 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
>  {
>  	/* Minimum pages needed in order to stay on node */
>  	const unsigned long nr_pages = 1 << order;
> +	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
>  	struct task_struct *p = current;
>  	unsigned int noreclaim_flag;
>  	struct scan_control sc = {
>  		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
> -		.gfp_mask = current_gfp_context(gfp_mask),
> +		.gfp_mask = current_gfp_mask,
>  		.order = order,
>  		.priority = NODE_RECLAIM_PRIORITY,
>  		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
>  		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
>  		.may_swap = 1,
> -		.reclaim_idx = gfp_zone(gfp_mask),
> +		.reclaim_idx = gfp_zone(current_gfp_mask),
>  	};
> 
>  	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,

I was hoping we had agreed these are not necessary and they shouldn't be
touched in the patch.
-- 
Michal Hocko
SUSE Labs
Pasha Tatashin Dec. 18, 2020, 12:23 p.m. UTC | #2
On Fri, Dec 18, 2020 at 4:36 AM Michal Hocko <mhocko@suse.com> wrote:
>
> On Thu 17-12-20 13:52:36, Pavel Tatashin wrote:
> [..]
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 469016222cdb..d9546f5897f4 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -3234,11 +3234,12 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
> >  unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> >                               gfp_t gfp_mask, nodemask_t *nodemask)
> >  {
> > +     gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
> >       unsigned long nr_reclaimed;
> >       struct scan_control sc = {
> >               .nr_to_reclaim = SWAP_CLUSTER_MAX,
> > -             .gfp_mask = current_gfp_context(gfp_mask),
> > -             .reclaim_idx = gfp_zone(gfp_mask),
> > +             .gfp_mask = current_gfp_mask,
> > +             .reclaim_idx = gfp_zone(current_gfp_mask),
> >               .order = order,
> >               .nodemask = nodemask,
> >               .priority = DEF_PRIORITY,
> > @@ -4158,17 +4159,18 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
> >  {
> >       /* Minimum pages needed in order to stay on node */
> >       const unsigned long nr_pages = 1 << order;
> > +     gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
> >       struct task_struct *p = current;
> >       unsigned int noreclaim_flag;
> >       struct scan_control sc = {
> >               .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
> > -             .gfp_mask = current_gfp_context(gfp_mask),
> > +             .gfp_mask = current_gfp_mask,
> >               .order = order,
> >               .priority = NODE_RECLAIM_PRIORITY,
> >               .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
> >               .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
> >               .may_swap = 1,
> > -             .reclaim_idx = gfp_zone(gfp_mask),
> > +             .reclaim_idx = gfp_zone(current_gfp_mask),
> >       };
> >
> >       trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
>
> I was hoping we had agreed these are not necessary and they shouldn't be
> touched in the patch.

Thank you for noticing, I was sure I removed these changes, not sure
what happened :(
They will be gone in the next version.

Thank you,
Pasha

> --
> Michal Hocko
> SUSE Labs

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ec05396a597b..c2dea9ad0e98 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4976,6 +4976,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	}
 
 	gfp_mask &= gfp_allowed_mask;
+	/*
+	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
+	 * resp. GFP_NOIO which has to be inherited for all allocation requests
+	 * from a particular context which has been marked by
+	 * memalloc_no{fs,io}_{save,restore}.
+	 */
+	gfp_mask = current_gfp_context(gfp_mask);
 	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
 		return NULL;
@@ -4991,13 +4998,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	if (likely(page))
 		goto out;
 
-	/*
-	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
-	 * resp. GFP_NOIO which has to be inherited for all allocation requests
-	 * from a particular context which has been marked by
-	 * memalloc_no{fs,io}_{save,restore}.
-	 */
-	alloc_mask = current_gfp_context(gfp_mask);
+	alloc_mask = gfp_mask;
 	ac.spread_dirty_pages = false;
 
 	/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 469016222cdb..d9546f5897f4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3234,11 +3234,12 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 				gfp_t gfp_mask, nodemask_t *nodemask)
 {
+	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
 	unsigned long nr_reclaimed;
 	struct scan_control sc = {
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
-		.gfp_mask = current_gfp_context(gfp_mask),
-		.reclaim_idx = gfp_zone(gfp_mask),
+		.gfp_mask = current_gfp_mask,
+		.reclaim_idx = gfp_zone(current_gfp_mask),
 		.order = order,
 		.nodemask = nodemask,
 		.priority = DEF_PRIORITY,
@@ -4158,17 +4159,18 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 {
 	/* Minimum pages needed in order to stay on node */
 	const unsigned long nr_pages = 1 << order;
+	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
 	struct task_struct *p = current;
 	unsigned int noreclaim_flag;
 	struct scan_control sc = {
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
-		.gfp_mask = current_gfp_context(gfp_mask),
+		.gfp_mask = current_gfp_mask,
 		.order = order,
 		.priority = NODE_RECLAIM_PRIORITY,
 		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
 		.may_swap = 1,
-		.reclaim_idx = gfp_zone(gfp_mask),
+		.reclaim_idx = gfp_zone(current_gfp_mask),
 	};
 
 	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
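
A typical user of the scoped API named in the comment above looks roughly like
this (an illustrative sketch, not part of this patch); with this change the
constraint now also takes effect on the allocator fast path:

	unsigned int noio_flags;
	struct page *page;

	noio_flags = memalloc_noio_save();
	/* GFP_KERNEL is implicitly demoted: __GFP_IO and __GFP_FS are dropped. */
	page = alloc_page(GFP_KERNEL);
	memalloc_noio_restore(noio_flags);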