diff mbox series

[1/2] mempool: indicate the usages of multi memzones

Message ID 1512563473-19969-1-git-send-email-hemant.agrawal@nxp.com
State New
Headers show
Series [1/2] mempool: indicate the usages of multi memzones | expand

Commit Message

Hemant Agrawal Dec. 6, 2017, 12:31 p.m. UTC
This is required for the optimizations w.r.t hw mempools.
They will use different kind of optimizations if the buffers
are from single contiguous memzone.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

---
 lib/librte_mempool/rte_mempool.c | 7 +++++--
 lib/librte_mempool/rte_mempool.h | 5 +++++
 2 files changed, 10 insertions(+), 2 deletions(-)

-- 
2.7.4

Comments

Olivier Matz Dec. 19, 2017, 10:24 a.m. UTC | #1
Hi Hemant,

On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:
> This is required for the optimizations w.r.t hw mempools.

> They will use different kind of optimizations if the buffers

> are from single contiguous memzone.

> 

> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

> ---

>  lib/librte_mempool/rte_mempool.c | 7 +++++--

>  lib/librte_mempool/rte_mempool.h | 5 +++++

>  2 files changed, 10 insertions(+), 2 deletions(-)

> 

> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c

> index d50dba4..9d3737c 100644

> --- a/lib/librte_mempool/rte_mempool.c

> +++ b/lib/librte_mempool/rte_mempool.c

> @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,

>  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

>  

>  	/* Detect pool area has sufficient space for elements */

> -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

> -		if (len < total_elt_sz * mp->size) {

> +	if (len < total_elt_sz * mp->size) {

> +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

>  			RTE_LOG(ERR, MEMPOOL,

>  				"pool area %" PRIx64 " not enough\n",

>  				(uint64_t)len);

>  			return -ENOSPC;

>  		}

> +	} else {

> +		/* Memory will be allocated from multiple memzones */

> +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;

>  	}

>  

>  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);

> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h

> index 721227f..394a4fe 100644

> --- a/lib/librte_mempool/rte_mempool.h

> +++ b/lib/librte_mempool/rte_mempool.h

> @@ -292,6 +292,11 @@ struct rte_mempool {

>   */

>  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080

>  

> +/* Indicates that the mempool buffers are allocated from multiple memzones

> + * the buffer may or may not be physically contiguous.

> + */

> +#define MEMPOOL_F_MULTI_MEMZONE 0x0100

> +

>  /**

>   * @internal When debug is enabled, store some statistics.

>   *

> -- 

> 2.7.4

> 


I'm not comfortable with adding more and more flags, as I explained
here: http://dpdk.org/ml/archives/dev/2017-December/083909.html

It makes the generic code very complex, and probably buggy (many
flags are incompatible with other flags).

I'm thinking about moving the populate_* functions in the drivers
(this is described a bit more in the link above). What do you think
about this approach?

Thanks,
Olivier
Hemant Agrawal Dec. 19, 2017, 10:46 a.m. UTC | #2
Hi Olivier,

On 12/19/2017 3:54 PM, Olivier MATZ wrote:
> Hi Hemant,

>

> On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:

>> This is required for the optimizations w.r.t hw mempools.

>> They will use different kind of optimizations if the buffers

>> are from single contiguous memzone.

>>

>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

>> ---

>>  lib/librte_mempool/rte_mempool.c | 7 +++++--

>>  lib/librte_mempool/rte_mempool.h | 5 +++++

>>  2 files changed, 10 insertions(+), 2 deletions(-)

>>

>> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c

>> index d50dba4..9d3737c 100644

>> --- a/lib/librte_mempool/rte_mempool.c

>> +++ b/lib/librte_mempool/rte_mempool.c

>> @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,

>>  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

>>

>>  	/* Detect pool area has sufficient space for elements */

>> -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

>> -		if (len < total_elt_sz * mp->size) {

>> +	if (len < total_elt_sz * mp->size) {

>> +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

>>  			RTE_LOG(ERR, MEMPOOL,

>>  				"pool area %" PRIx64 " not enough\n",

>>  				(uint64_t)len);

>>  			return -ENOSPC;

>>  		}

>> +	} else {

>> +		/* Memory will be allocated from multiple memzones */

>> +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;

>>  	}

>>

>>  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);

>> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h

>> index 721227f..394a4fe 100644

>> --- a/lib/librte_mempool/rte_mempool.h

>> +++ b/lib/librte_mempool/rte_mempool.h

>> @@ -292,6 +292,11 @@ struct rte_mempool {

>>   */

>>  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080

>>

>> +/* Indicates that the mempool buffers are allocated from multiple memzones

>> + * the buffer may or may not be physically contiguous.

>> + */

>> +#define MEMPOOL_F_MULTI_MEMZONE 0x0100

>> +

>>  /**

>>   * @internal When debug is enabled, store some statistics.

>>   *

>> --

>> 2.7.4

>>

>

> I'm not confortable with adding more and more flags, as I explained

> here: http://dpdk.org/ml/archives/dev/2017-December/083909.html


This particular flag is not about how to populate the mempool. This is just 
indicating how the mempool was populated - a status flag. This 
information is just helpful for the PMDs.

At least I am not able to see how this particular flag is very 
driver specific.


>

> It makes the generic code very complex, and probably buggy (many

> flags are incompatible with other flags).

>

> I'm thinking about moving the populate_* functions in the drivers

> (this is described a bit more in the link above). What do you think

> about this approach?

>


The idea is good and it will give fine control to the individual 
mempools to populate the memory the way they want. However, on the 
downside, it will also lead to a lot of duplicate or similar code. It 
may also lead to a maintenance issue for the mempool PMD owner.





> Thanks,

> Olivier

>
Olivier Matz Dec. 19, 2017, 11:02 a.m. UTC | #3
On Tue, Dec 19, 2017 at 04:16:33PM +0530, Hemant Agrawal wrote:
> Hi Olivier,

> 

> On 12/19/2017 3:54 PM, Olivier MATZ wrote:

> > Hi Hemant,

> > 

> > On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:

> > > This is required for the optimizations w.r.t hw mempools.

> > > They will use different kind of optimizations if the buffers

> > > are from single contiguous memzone.

> > > 

> > > Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

> > > ---

> > >  lib/librte_mempool/rte_mempool.c | 7 +++++--

> > >  lib/librte_mempool/rte_mempool.h | 5 +++++

> > >  2 files changed, 10 insertions(+), 2 deletions(-)

> > > 

> > > diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c

> > > index d50dba4..9d3737c 100644

> > > --- a/lib/librte_mempool/rte_mempool.c

> > > +++ b/lib/librte_mempool/rte_mempool.c

> > > @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,

> > >  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

> > > 

> > >  	/* Detect pool area has sufficient space for elements */

> > > -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

> > > -		if (len < total_elt_sz * mp->size) {

> > > +	if (len < total_elt_sz * mp->size) {

> > > +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

> > >  			RTE_LOG(ERR, MEMPOOL,

> > >  				"pool area %" PRIx64 " not enough\n",

> > >  				(uint64_t)len);

> > >  			return -ENOSPC;

> > >  		}

> > > +	} else {

> > > +		/* Memory will be allocated from multiple memzones */

> > > +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;

> > >  	}

> > > 

> > >  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);

> > > diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h

> > > index 721227f..394a4fe 100644

> > > --- a/lib/librte_mempool/rte_mempool.h

> > > +++ b/lib/librte_mempool/rte_mempool.h

> > > @@ -292,6 +292,11 @@ struct rte_mempool {

> > >   */

> > >  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080

> > > 

> > > +/* Indicates that the mempool buffers are allocated from multiple memzones

> > > + * the buffer may or may not be physically contiguous.

> > > + */

> > > +#define MEMPOOL_F_MULTI_MEMZONE 0x0100

> > > +

> > >  /**

> > >   * @internal When debug is enabled, store some statistics.

> > >   *

> > > --

> > > 2.7.4

> > > 

> > 

> > I'm not confortable with adding more and more flags, as I explained

> > here: http://dpdk.org/ml/archives/dev/2017-December/083909.html

> 

> This particular flag is not about how to populate mempool. This is just

> indicating how the mempool was populated - a status flag. This information

> is just helpful for the PMDs.

> 

> At least I am not able to see that this particular flag is being very driver

> specific.


That's true, I commented too fast :)
And what about using mp->nb_mem_chunks instead? Would it do the job
in your use-case?


> > It makes the generic code very complex, and probably buggy (many

> > flags are incompatible with other flags).

> > 

> > I'm thinking about moving the populate_* functions in the drivers

> > (this is described a bit more in the link above). What do you think

> > about this approach?

> > 

> 

> The idea is good and it will give fine control to the individual mempools to

> populate the memory the way they want. However, on the downside, it will

> also lead to lot of duplicate code or similar code. It may also lead to a

> maintenance issue for the mempool PMD owner.


Yes, that will be the drawback. If we do this, we should try to keep some
common helpers in the mempool lib.
Hemant Agrawal Dec. 19, 2017, 1:08 p.m. UTC | #4
> That's true, I commented too fast :)

> And what about using mp->nb_mem_chunks instead? Would it do the job

> in your use-case?


It should work.  Let me check it out.

Thanks
Regards,
Hemant
Hemant Agrawal Dec. 20, 2017, 11:59 a.m. UTC | #5
On 12/19/2017 6:38 PM, Hemant Agrawal wrote:
>

>> That's true, I commented too fast :)

>> And what about using mp->nb_mem_chunks instead? Would it do the job

>> in your use-case?

>

> It should work.  Let me check it out.


There is a slight problem with nb_mem_chunks.

It is getting incremented at the end of "rte_mempool_populate_phys",
while the elements are getting populated before it in the call of 
mempool_add_elem.

I can use an nb_mem_chunks == '0' check. However, it can break in future if 
mempool_populate_phys changes.


>

> Thanks

> Regards,

> Hemant

>
Olivier Matz Dec. 22, 2017, 1:59 p.m. UTC | #6
On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:
> On 12/19/2017 6:38 PM, Hemant Agrawal wrote:

> > 

> > > That's true, I commented too fast :)

> > > And what about using mp->nb_mem_chunks instead? Would it do the job

> > > in your use-case?

> > 

> > It should work.  Let me check it out.

> 

> There is a slight problem with nb_mem_chunks.

> 

> It is getting incremented in the end of "rte_mempool_populate_phys",

> while the elements are getting populated before it in the call of

> mempool_add_elem.

> 

> I can use nb_mem_chunks are '0' check. However it can break in future if

> mempool_populate_phys changes.


Sorry, I'm not sure I'm getting what you say.

My question was about using mp->nb_mem_chunks instead of a new flag in the
dpaa driver. Am I missing something?
Hemant Agrawal Dec. 22, 2017, 4:18 p.m. UTC | #7
On 12/22/2017 7:29 PM, Olivier MATZ wrote:
> On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:

>> On 12/19/2017 6:38 PM, Hemant Agrawal wrote:

>>>

>>>> That's true, I commented too fast :)

>>>> And what about using mp->nb_mem_chunks instead? Would it do the job

>>>> in your use-case?

>>>

>>> It should work.  Let me check it out.

>>

>> There is a slight problem with nb_mem_chunks.

>>

>> It is getting incremented in the end of "rte_mempool_populate_phys",

>> while the elements are getting populated before it in the call of

>> mempool_add_elem.

>>

>> I can use nb_mem_chunks are '0' check. However it can break in future if

>> mempool_populate_phys changes.

>

> Sorry, I'm not sure I'm getting what you say.

>

> My question was about using mp->nb_mem_chunks instead of a new flag in the

> dppa driver. Am I missing something?

>


mp->nb_mem_chunks gets finalized when the mempool is fully created. Its 
value is transient before that, i.e. it will keep on changing on every 
call to rte_mempool_populate_phys.

However, we need this information on the very first element allocation. 
So, nb_mem_chunks will not work.
santosh Jan. 5, 2018, 10:52 a.m. UTC | #8
On Tuesday 19 December 2017 04:32 PM, Olivier MATZ wrote:
> On Tue, Dec 19, 2017 at 04:16:33PM +0530, Hemant Agrawal wrote:

>> Hi Olivier,

>>

>> On 12/19/2017 3:54 PM, Olivier MATZ wrote:

>>> Hi Hemant,

>>>

>>> On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:

>>>> This is required for the optimizations w.r.t hw mempools.

>>>> They will use different kind of optimizations if the buffers

>>>> are from single contiguous memzone.

>>>>

>>>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

>>>> ---

>>>>  lib/librte_mempool/rte_mempool.c | 7 +++++--

>>>>  lib/librte_mempool/rte_mempool.h | 5 +++++

>>>>  2 files changed, 10 insertions(+), 2 deletions(-)

>>>>

>>>> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c

>>>> index d50dba4..9d3737c 100644

>>>> --- a/lib/librte_mempool/rte_mempool.c

>>>> +++ b/lib/librte_mempool/rte_mempool.c

>>>> @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,

>>>>  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

>>>>

>>>>  	/* Detect pool area has sufficient space for elements */

>>>> -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

>>>> -		if (len < total_elt_sz * mp->size) {

>>>> +	if (len < total_elt_sz * mp->size) {

>>>> +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {

>>>>  			RTE_LOG(ERR, MEMPOOL,

>>>>  				"pool area %" PRIx64 " not enough\n",

>>>>  				(uint64_t)len);

>>>>  			return -ENOSPC;

>>>>  		}

>>>> +	} else {

>>>> +		/* Memory will be allocated from multiple memzones */

>>>> +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;

>>>>  	}

>>>>

>>>>  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);

>>>> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h

>>>> index 721227f..394a4fe 100644

>>>> --- a/lib/librte_mempool/rte_mempool.h

>>>> +++ b/lib/librte_mempool/rte_mempool.h

>>>> @@ -292,6 +292,11 @@ struct rte_mempool {

>>>>   */

>>>>  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080

>>>>

>>>> +/* Indicates that the mempool buffers are allocated from multiple memzones

>>>> + * the buffer may or may not be physically contiguous.

>>>> + */

>>>> +#define MEMPOOL_F_MULTI_MEMZONE 0x0100

>>>> +

>>>>  /**

>>>>   * @internal When debug is enabled, store some statistics.

>>>>   *

>>>> --

>>>> 2.7.4

>>>>

>>> I'm not confortable with adding more and more flags, as I explained

>>> here: http://dpdk.org/ml/archives/dev/2017-December/083909.html

>> This particular flag is not about how to populate mempool. This is just

>> indicating how the mempool was populated - a status flag. This information

>> is just helpful for the PMDs.

>>

>> At least I am not able to see that this particular flag is being very driver

>> specific.

> That's true, I commented too fast :)

> And what about using mp->nb_mem_chunks instead? Would it do the job

> in your use-case?

>

>

>>> It makes the generic code very complex, and probably buggy (many

>>> flags are incompatible with other flags).

>>>

>>> I'm thinking about moving the populate_* functions in the drivers

>>> (this is described a bit more in the link above). What do you think

>>> about this approach?

>>>

>> The idea is good and it will give fine control to the individual mempools to

>> populate the memory the way they want. However, on the downside, it will

>> also lead to lot of duplicate code or similar code. It may also lead to a

>> maintenance issue for the mempool PMD owner.

> Yes, that will be the drawback. If we do this, we should try to keep some

> common helpers in the mempool lib.


Sorry for jumping in late on this and for not responding in the other thread.

Olivier, we did in fact try the said approach for the ONA mempool driver, but never
proposed it ;) for the reason pointed out by Hemant - meaning more code duplication 
across mempool PMDs and thus more maintenance burden. 
However, I'm in favor of giving more control to the driver.
Olivier Matz Jan. 16, 2018, 1:51 p.m. UTC | #9
On Fri, Dec 22, 2017 at 09:48:01PM +0530, Hemant Agrawal wrote:
> On 12/22/2017 7:29 PM, Olivier MATZ wrote:

> > On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:

> > > On 12/19/2017 6:38 PM, Hemant Agrawal wrote:

> > > > 

> > > > > That's true, I commented too fast :)

> > > > > And what about using mp->nb_mem_chunks instead? Would it do the job

> > > > > in your use-case?

> > > > 

> > > > It should work.  Let me check it out.

> > > 

> > > There is a slight problem with nb_mem_chunks.

> > > 

> > > It is getting incremented in the end of "rte_mempool_populate_phys",

> > > while the elements are getting populated before it in the call of

> > > mempool_add_elem.

> > > 

> > > I can use nb_mem_chunks are '0' check. However it can break in future if

> > > mempool_populate_phys changes.

> > 

> > Sorry, I'm not sure I'm getting what you say.

> > 

> > My question was about using mp->nb_mem_chunks instead of a new flag in the

> > dppa driver. Am I missing something?

> > 

> 

> mp->nb_mem_chunks gets finalized when the mempool is fully created. It's

> value is transient before that i.e. it will keep on changing on the every

> call to rte_mempool_populate_phys.

> 

> However, we need this information on the very first element allocation. So,

> nb_mem_chunks will not work.


I see 2 other alternatives:

1/ in your driver, register a callback rte_mempool_ops_register_memory_area()
   that sets a private flag if (len < total_elt_sz * mp->size).

2/ Move
    STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
    mp->nb_mem_chunks++;
   before the calls to mempool_add_elem(), and in your driver check if
   STAILQ_FIRST(&mp->mem_list)->len < total_elt_sz * mp->size

If we can avoid to again create another flag, it is better.
Hemant Agrawal Jan. 17, 2018, 7:49 a.m. UTC | #10
Hi Olivier,

On 1/16/2018 7:21 PM, Olivier Matz wrote:
> On Fri, Dec 22, 2017 at 09:48:01PM +0530, Hemant Agrawal wrote:

>> On 12/22/2017 7:29 PM, Olivier MATZ wrote:

>>> On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:

>>>> On 12/19/2017 6:38 PM, Hemant Agrawal wrote:

>>>>>

>>>>>> That's true, I commented too fast :)

>>>>>> And what about using mp->nb_mem_chunks instead? Would it do the job

>>>>>> in your use-case?

>>>>>

>>>>> It should work.  Let me check it out.

>>>>

>>>> There is a slight problem with nb_mem_chunks.

>>>>

>>>> It is getting incremented in the end of "rte_mempool_populate_phys",

>>>> while the elements are getting populated before it in the call of

>>>> mempool_add_elem.

>>>>

>>>> I can use nb_mem_chunks are '0' check. However it can break in future if

>>>> mempool_populate_phys changes.

>>>

>>> Sorry, I'm not sure I'm getting what you say.

>>>

>>> My question was about using mp->nb_mem_chunks instead of a new flag in the

>>> dppa driver. Am I missing something?

>>>

>>

>> mp->nb_mem_chunks gets finalized when the mempool is fully created. It's

>> value is transient before that i.e. it will keep on changing on the every

>> call to rte_mempool_populate_phys.

>>

>> However, we need this information on the very first element allocation. So,

>> nb_mem_chunks will not work.

>

> I see 2 other alternatives:

>

> 1/ in your driver, register a callback rte_mempool_ops_register_memory_area()

>    that sets a private flag if (len < total_elt_sz * mp->size).

>


Thanks!
This one works. Now, the changes will be confined to dpaa code only. I 
will send a v2 for that.

> 2/ Move

>     STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);

>     mp->nb_mem_chunks++;

>    before the calls to mempool_add_elem(), and in your driver check if

>    SLIST_FIRST(&mp->mem_list)->len < total_elt_sz * mp->size

>

> If we can avoid to again create another flag, it is better.

>
diff mbox series

Patch

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d50dba4..9d3737c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -387,13 +387,16 @@  rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
 	/* Detect pool area has sufficient space for elements */
-	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
-		if (len < total_elt_sz * mp->size) {
+	if (len < total_elt_sz * mp->size) {
+		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
 			RTE_LOG(ERR, MEMPOOL,
 				"pool area %" PRIx64 " not enough\n",
 				(uint64_t)len);
 			return -ENOSPC;
 		}
+	} else {
+		/* Memory will be allocated from multiple memzones */
+		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;
 	}
 
 	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 721227f..394a4fe 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -292,6 +292,11 @@  struct rte_mempool {
  */
 #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
 
+/* Indicates that the mempool buffers are allocated from multiple memzones
+ * the buffer may or may not be physically contiguous.
+ */
+#define MEMPOOL_F_MULTI_MEMZONE 0x0100
+
 /**
  * @internal When debug is enabled, store some statistics.
  *