
[v12,07/10] secretmem: add memcg accounting

Message ID 20201125092208.12544-8-rppt@kernel.org
State Superseded
Series [v12,01/10] mm: add definition of PMD_PAGE_ORDER

Commit Message

Mike Rapoport Nov. 25, 2020, 9:22 a.m. UTC
From: Mike Rapoport <rppt@linux.ibm.com>

Account memory consumed by secretmem to memcg. The accounting is updated
when the memory is actually allocated and freed.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Roman Gushchin <guro@fb.com>
---
 mm/filemap.c   |  3 ++-
 mm/secretmem.c | 36 +++++++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 2 deletions(-)
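
For context, a minimal (hypothetical) user-space demonstration: with this
series applied, the charge shows up in the calling process's memcg once a
pool chunk is actually allocated, i.e. on the first fault. This assumes the
memfd_secret() syscall added earlier in the series is reachable as
__NR_memfd_secret:

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* assumed: __NR_memfd_secret is wired up by this series */
	int fd = syscall(__NR_memfd_secret, 0);
	if (fd < 0)
		return 1;

	/* size the area; the backing pool grows in PMD-sized chunks */
	if (ftruncate(fd, 2UL << 20) < 0)
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;	/* first fault allocates and charges a pool chunk */
	return 0;
}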

Comments

Shakeel Butt Nov. 29, 2020, 3:53 p.m. UTC | #1
On Wed, Nov 25, 2020 at 1:51 AM Mike Rapoport <rppt@kernel.org> wrote:
>
> From: Mike Rapoport <rppt@linux.ibm.com>
>
> Account memory consumed by secretmem to memcg. The accounting is updated
> when the memory is actually allocated and freed.
>
[...]
>
> +       /*
> +        * secretmem caches are unreclaimable kernel allocations, so treat
> +        * them as unreclaimable slab memory for VM statistics purposes
> +        */
> +       mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
> +                           PAGE_SIZE << order);

Please use mod_lruvec_page_state() instead, so we get the memcg stats too.

BTW I think secretmem deserves a vmstat entry instead of overloading
NR_SLAB_UNRECLAIMABLE_B.
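
For illustration, converted to mod_lruvec_page_state() the helpers would
look roughly like this (an untested sketch, not part of the posted patch;
mod_lruvec_page_state() updates the node counter and, when the page is
charged, the matching per-memcg counter):

static int secretmem_account_pages(struct page *page, gfp_t gfp, int order)
{
	int err;

	err = memcg_kmem_charge_page(page, gfp, order);
	if (err)
		return err;

	/* still reported as unreclaimable slab memory for VM statistics */
	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
			      PAGE_SIZE << order);

	return 0;
}

static void secretmem_unaccount_pages(struct page *page, int order)
{
	/* parenthesized so the whole amount is negated, not PAGE_SIZE alone */
	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	memcg_kmem_uncharge_page(page, order);
}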

Mike Rapoport Nov. 29, 2020, 5:26 p.m. UTC | #2
On Sun, Nov 29, 2020 at 07:53:45AM -0800, Shakeel Butt wrote:
> On Wed, Nov 25, 2020 at 1:51 AM Mike Rapoport <rppt@kernel.org> wrote:
> >
> > Account memory consumed by secretmem to memcg. The accounting is updated
> > when the memory is actually allocated and freed.
> >
[...]
> > +       mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
> > +                           PAGE_SIZE << order);
>
> Please use mod_lruvec_page_state() instead, so we get the memcg stats too.

Ok

> BTW I think secretmem deserves a vmstat entry instead of overloading
> NR_SLAB_UNRECLAIMABLE_B.

I'd prefer to wait with a dedicated vmstat for now. We can always add it
later, once we have a better picture of secretmem usage.
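
Should it turn out to be needed, a dedicated counter would be a small
change later on. Roughly (an illustrative sketch only; NR_SECRETMEM is a
made-up name, not in any tree):

	/* include/linux/mmzone.h: new item in enum node_stat_item */
	NR_SECRETMEM,		/* memory consumed by secretmem pools */

	/* mm/vmstat.c: matching string, same position, in vmstat_text[] */
	"nr_secretmem",

plus switching the account/unaccount helpers to the new item.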

-- 
Sincerely yours,
Mike.
Roman Gushchin Nov. 30, 2020, 8:15 p.m. UTC | #3
On Sun, Nov 29, 2020 at 07:26:25PM +0200, Mike Rapoport wrote:
> On Sun, Nov 29, 2020 at 07:53:45AM -0800, Shakeel Butt wrote:
> > On Wed, Nov 25, 2020 at 1:51 AM Mike Rapoport <rppt@kernel.org> wrote:
[...]
> > BTW I think secretmem deserves a vmstat entry instead of overloading
> > NR_SLAB_UNRECLAIMABLE_B.
>
> I'd prefer to wait with a dedicated vmstat for now. We can always add it
> later, once we have a better picture of secretmem usage.

+1 here.

From what I understand, it's not clear yet how big typical secret areas
will be. If there are just a few 2MB areas per container (e.g. for
storing keys), IMO that doesn't justify adding a separate counter. If
they turn out to be measured in GBs, we'll add one later.

Thanks!

Patch

diff --git a/mm/filemap.c b/mm/filemap.c
index 249cf489f5df..cf7f1dc9f4b8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -42,6 +42,7 @@ 
 #include <linux/psi.h>
 #include <linux/ramfs.h>
 #include <linux/page_idle.h>
+#include <linux/secretmem.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -844,7 +845,7 @@ static noinline int __add_to_page_cache_locked(struct page *page,
 	page->mapping = mapping;
 	page->index = offset;
 
-	if (!huge) {
+	if (!huge && !page_is_secretmem(page)) {
 		error = mem_cgroup_charge(page, current->mm, gfp);
 		if (error)
 			goto error;
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 52a900a135a5..eb6628390444 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -18,6 +18,7 @@ 
 #include <linux/memblock.h>
 #include <linux/pseudo_fs.h>
 #include <linux/secretmem.h>
+#include <linux/memcontrol.h>
 #include <linux/set_memory.h>
 #include <linux/sched/signal.h>
 
@@ -44,6 +45,32 @@ struct secretmem_ctx {
 
 static struct cma *secretmem_cma;
 
+static int secretmem_account_pages(struct page *page, gfp_t gfp, int order)
+{
+	int err;
+
+	err = memcg_kmem_charge_page(page, gfp, order);
+	if (err)
+		return err;
+
+	/*
+	 * secretmem caches are unreclaimable kernel allocations, so treat
+	 * them as unreclaimable slab memory for VM statistics purposes
+	 */
+	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+			    PAGE_SIZE << order);
+
+	return 0;
+}
+
+static void secretmem_unaccount_pages(struct page *page, int order)
+{
+
+	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+			    -PAGE_SIZE << order);
+	memcg_kmem_uncharge_page(page, order);
+}
+
 static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
 {
 	unsigned long nr_pages = (1 << PMD_PAGE_ORDER);
@@ -56,10 +83,14 @@ static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
 	if (!page)
 		return -ENOMEM;
 
-	err = set_direct_map_invalid_noflush(page, nr_pages);
+	err = secretmem_account_pages(page, gfp, PMD_PAGE_ORDER);
 	if (err)
 		goto err_cma_release;
 
+	err = set_direct_map_invalid_noflush(page, nr_pages);
+	if (err)
+		goto err_memcg_uncharge;
+
 	addr = (unsigned long)page_address(page);
 	err = gen_pool_add(pool, addr, PMD_SIZE, NUMA_NO_NODE);
 	if (err)
@@ -76,6 +107,8 @@ static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
 	 * won't fail
 	 */
 	set_direct_map_default_noflush(page, nr_pages);
+err_memcg_uncharge:
+	secretmem_unaccount_pages(page, PMD_PAGE_ORDER);
 err_cma_release:
 	cma_release(secretmem_cma, page, nr_pages);
 	return err;
@@ -302,6 +335,7 @@ static void secretmem_cleanup_chunk(struct gen_pool *pool,
 	int i;
 
 	set_direct_map_default_noflush(page, nr_pages);
+	secretmem_unaccount_pages(page, PMD_PAGE_ORDER);
 
 	for (i = 0; i < nr_pages; i++)
 		clear_highpage(page + i);