
[API-NEXT,PATCHv3] linux-generic: pool: defer ring allocation until pool creation

Message ID 1481747525-32051-1-git-send-email-bill.fischofer@linaro.org
State: New

Commit Message

Bill Fischofer Dec. 14, 2016, 8:32 p.m. UTC
To avoid excessive memory overhead for pools, defer the allocation of
the pool ring until odp_pool_create() is called. This keeps pool memory
overhead proportional to the number of pools actually in use rather
than the architected maximum number of pools.

This patch addresses Bug https://bugs.linaro.org/show_bug.cgi?id=2765

Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>

---
Changes for v3:
- Do not reference pool ring on buffer alloc/free if cache can satisfy request

Changes for v2:
- Reset reserved to 0 if ring allocation fails to recover properly

 platform/linux-generic/include/odp_pool_internal.h |  3 ++-
 platform/linux-generic/odp_pool.c                  | 31 +++++++++++++++++-----
 2 files changed, 26 insertions(+), 8 deletions(-)

-- 
2.7.4
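
Editor's note: in short, the pool ring moves from being embedded in every pool_t slot to an shm-backed pointer that is reserved only when a pool is actually created. A condensed before/after sketch (stand-in type names for illustration; the real definitions are in the diff at the bottom of this page):

#include <stdint.h>

typedef struct { uint32_t data[4096]; } pool_ring_t;   /* stand-in */
typedef int odp_shm_t;                                  /* stand-in */

/* Before: every one of the ODP_CONFIG_POOLS slots embeds a full ring,
 * whether or not the pool is ever created. */
typedef struct {
	/* ... other pool fields ... */
	pool_ring_t  ring;
} pool_before_t;

/* After: the slot holds only an shm handle and a pointer; the ring is
 * reserved in odp_pool_create() and freed in odp_pool_destroy(), so
 * pool memory scales with the pools actually in use. */
typedef struct {
	/* ... other pool fields ... */
	odp_shm_t    ring_shm;
	pool_ring_t *ring;
} pool_after_t;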

Comments

Savolainen, Petri (Nokia - FI/Espoo) Dec. 16, 2016, 8:06 a.m. UTC | #1
> 
> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>  {
>  	int i;
>  	pool_t *pool;
> +	char ring_name[ODP_POOL_NAME_LEN];
> 
>  	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>  		pool = pool_entry(i);
> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>  		if (pool->reserved == 0) {
>  			pool->reserved = 1;
>  			UNLOCK(&pool->lock);
> +			sprintf(ring_name, "_odp_pool_ring_%d", i);
> +			pool->ring_shm =
> +				odp_shm_reserve(ring_name,
> +						sizeof(pool_ring_t),
> +						ODP_CACHE_LINE_SIZE, 0);
> +			if (pool->ring_shm == ODP_SHM_INVALID) {
> +				ODP_ERR("Unable to alloc pool ring %d\n", i);
> +				pool->reserved = 0;

This must be modified inside the lock, otherwise there's a race condition on whether a pool is reserved or not.

Also, I'd like to have the performance impact of the extra indirection verified.

-Petri
Maxim Uvarov Dec. 16, 2016, 12:50 p.m. UTC | #2
On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:
>>
>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>>  {
>>  	int i;
>>  	pool_t *pool;
>> +	char ring_name[ODP_POOL_NAME_LEN];
>>
>>  	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>>  		pool = pool_entry(i);
>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>>  		if (pool->reserved == 0) {
>>  			pool->reserved = 1;
>>  			UNLOCK(&pool->lock);
>> +			sprintf(ring_name, "_odp_pool_ring_%d", i);
>> +			pool->ring_shm =
>> +				odp_shm_reserve(ring_name,
>> +						sizeof(pool_ring_t),
>> +						ODP_CACHE_LINE_SIZE, 0);
>> +			if (pool->ring_shm == ODP_SHM_INVALID) {
>> +				ODP_ERR("Unable to alloc pool ring %d\n", i);
>> +				pool->reserved = 0;
> 
> This must be modified inside the lock, otherwise there's a race condition if a pool is reserved or not.
> 
> Also, I'd like to have performance impact of the extra indirection verified.
> 
> -Petri
> 


Isn't setting pool->reserved = 1; under the lock enough?

Maxim.
Maxim Uvarov Dec. 16, 2016, 1:13 p.m. UTC | #3
On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:
>>
>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>>  {
>>  	int i;
>>  	pool_t *pool;
>> +	char ring_name[ODP_POOL_NAME_LEN];
>>
>>  	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>>  		pool = pool_entry(i);
>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>>  		if (pool->reserved == 0) {
>>  			pool->reserved = 1;
>>  			UNLOCK(&pool->lock);
>> +			sprintf(ring_name, "_odp_pool_ring_%d", i);

shm reserve will create /tmp/odp-pid-_odp_pool_ring_%d, which puts the odp prefix in the file name twice. I think the name pool_ring_%d is better here.

Maxim.


>> +			pool->ring_shm =
>> +				odp_shm_reserve(ring_name,
>> +						sizeof(pool_ring_t),
>> +						ODP_CACHE_LINE_SIZE, 0);
>> +			if (pool->ring_shm == ODP_SHM_INVALID) {
>> +				ODP_ERR("Unable to alloc pool ring %d\n", i);
>> +				pool->reserved = 0;
> 
> This must be modified inside the lock, otherwise there's a race condition if a pool is reserved or not.
> 
> Also, I'd like to have performance impact of the extra indirection verified.
> 
> -Petri
>
Bill Fischofer Dec. 16, 2016, 2:07 p.m. UTC | #4
On Fri, Dec 16, 2016 at 6:50 AM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
> On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:

>>>
>>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>>>  {
>>>      int i;
>>>      pool_t *pool;
>>> +    char ring_name[ODP_POOL_NAME_LEN];
>>>
>>>      for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>>>              pool = pool_entry(i);
>>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>>>              if (pool->reserved == 0) {
>>>                      pool->reserved = 1;
>>>                      UNLOCK(&pool->lock);
>>> +                    sprintf(ring_name, "_odp_pool_ring_%d", i);
>>> +                    pool->ring_shm =
>>> +                            odp_shm_reserve(ring_name,
>>> +                                            sizeof(pool_ring_t),
>>> +                                            ODP_CACHE_LINE_SIZE, 0);
>>> +                    if (pool->ring_shm == ODP_SHM_INVALID) {
>>> +                            ODP_ERR("Unable to alloc pool ring %d\n", i);
>>> +                            pool->reserved = 0;
>>
>> This must be modified inside the lock, otherwise there's a race condition if a pool is reserved or not.
>>
>> Also, I'd like to have performance impact of the extra indirection verified.


I'm unable to measure any difference on my systems here. You have more
precise measurement capabilities, but given that the global ring isn't
referenced at all if the requests can be satisfied from the local
cache (which should be the case for the bulk of the alloc/free calls)
I'd be surprised if there is anything measurable. What should be
immediately measurable is the memory footprint reduction for most ODP
apps, especially in NFV environments where a single system is going to
be running multiple ODP apps concurrently.

>>
>> -Petri
>>
>
> Isn't set pool->reserved = 1; under the lock is enough?


I'd like to understand the concern better as well since I agree with
Maxim that as long as pool->reserved is set to 1 under the lock
there's no possibility for another thread to try to allocate this pool
slot. The main scan locks the pool then checks the reserved flag and
skips if it sees reserved == 1. Once reserved is set the current
thread owns this pool so the lock is no longer needed. Otherwise you'd
have the same concern for normally operating pools that would need to
hold the pool lock for the life of the pool.

>
> Maxim.
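
Editor's note: to make the fast-path argument concrete, here is a minimal sketch (not the actual ODP implementation; local_cache_t and global_ring_t are simplified stand-ins) of how an allocation touches the shared ring behind pool->ring only when the thread-local cache cannot satisfy the request:

#include <stdint.h>

#define CACHE_SIZE 256
#define RING_SIZE  4096

typedef struct {
	uint32_t num;              /* buffers currently in the cache */
	uint32_t buf[CACHE_SIZE];
} local_cache_t;

typedef struct {
	uint32_t num;              /* stand-in for the shared lock-free ring */
	uint32_t buf[RING_SIZE];
} global_ring_t;

static uint32_t alloc_multi(local_cache_t *cache, global_ring_t *ring,
			    uint32_t out[], uint32_t num)
{
	uint32_t i, n = 0;

	if (cache->num >= num) {
		/* Fast path: served entirely from the thread-local cache,
		 * the shared ring is never dereferenced. */
		for (i = 0; i < num; i++)
			out[i] = cache->buf[--cache->num];
		return num;
	}

	/* Slow path: drain the cache, then take the rest from the shared
	 * ring; the extra pool->ring indirection only matters here. */
	while (cache->num > 0 && n < num)
		out[n++] = cache->buf[--cache->num];
	while (ring->num > 0 && n < num)
		out[n++] = ring->buf[--ring->num];

	return n;
}
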
Bill Fischofer Dec. 16, 2016, 2:08 p.m. UTC | #5
On Fri, Dec 16, 2016 at 7:13 AM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
> On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:

>>>
>>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>>>  {
>>>      int i;
>>>      pool_t *pool;
>>> +    char ring_name[ODP_POOL_NAME_LEN];
>>>
>>>      for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>>>              pool = pool_entry(i);
>>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>>>              if (pool->reserved == 0) {
>>>                      pool->reserved = 1;
>>>                      UNLOCK(&pool->lock);
>>> +                    sprintf(ring_name, "_odp_pool_ring_%d", i);
>
>
> shm reserve will create /tmp/odp-pid-_odp_pool_ring_%d
>
> which looks like more odp works there. I think name pool_ring_%d is
> better here.


I can certainly change that and will do so in the next rev. I'd like
to get Petri's feedback on the reserve question first, however, so
that this doesn't have to be two revisions.

>
> Maxim.
>
>
>>> +                    pool->ring_shm =
>>> +                            odp_shm_reserve(ring_name,
>>> +                                            sizeof(pool_ring_t),
>>> +                                            ODP_CACHE_LINE_SIZE, 0);
>>> +                    if (pool->ring_shm == ODP_SHM_INVALID) {
>>> +                            ODP_ERR("Unable to alloc pool ring %d\n", i);
>>> +                            pool->reserved = 0;
>>
>> This must be modified inside the lock, otherwise there's a race condition if a pool is reserved or not.
>>
>> Also, I'd like to have performance impact of the extra indirection verified.
>>
>> -Petri
>>
>
Maxim Uvarov Jan. 9, 2017, 4:56 p.m. UTC | #6
Petri, do you have objections to this patch regarding the locked area? If not, then Bill can do a v2 with cleaner shm names.

Maxim.

On 12/16/16 17:08, Bill Fischofer wrote:
> On Fri, Dec 16, 2016 at 7:13 AM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
>> On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:
>>>>
>>>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>>>>  {
>>>>      int i;
>>>>      pool_t *pool;
>>>> +    char ring_name[ODP_POOL_NAME_LEN];
>>>>
>>>>      for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>>>>              pool = pool_entry(i);
>>>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>>>>              if (pool->reserved == 0) {
>>>>                      pool->reserved = 1;
>>>>                      UNLOCK(&pool->lock);
>>>> +                    sprintf(ring_name, "_odp_pool_ring_%d", i);
>>
>>
>> shm reserve will create /tmp/odp-pid-_odp_pool_ring_%d
>>
>> which looks like more odp works there. I think name pool_ring_%d is
>> better here.
> 
> I can certainly change that and will do so in the next rev. I'd like
> to get Petri's feedback on the reserve question first, however, so
> that this doesn't have to be two revisions.
> 
>>
>> Maxim.
>>
>>
>>>> +                    pool->ring_shm =
>>>> +                            odp_shm_reserve(ring_name,
>>>> +                                            sizeof(pool_ring_t),
>>>> +                                            ODP_CACHE_LINE_SIZE, 0);
>>>> +                    if (pool->ring_shm == ODP_SHM_INVALID) {
>>>> +                            ODP_ERR("Unable to alloc pool ring %d\n", i);
>>>> +                            pool->reserved = 0;
>>>
>>> This must be modified inside the lock, otherwise there's a race condition if a pool is reserved or not.
>>>
>>> Also, I'd like to have performance impact of the extra indirection verified.
>>>
>>> -Petri
>>>
>>
Savolainen, Petri (Nokia - FI/Espoo) Jan. 10, 2017, 8:17 a.m. UTC | #7
The race condition should be avoided. That can be done easily by first doing the shm_reserve(). If that succeeds, a pool can be allocated normally behind the lock.

shm = shm_reserve();

if (shm == invalid)
	return -1;

LOCK();

// Search a free pool and reserve it (like today)

UNLOCK();

if (pool == NULL) {
	shm_free(shm);
	return -1;
}

pool->ring_shm = shm;
...


-Petri
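
Editor's note: spelled out against the helpers this patch already uses (pool_entry(), LOCK/UNLOCK, the odp_shm_* calls and the fields from odp_pool_internal.h), the proposed ordering might look roughly like the sketch below. It is only an illustration, not the committed code, and here the ring fields are attached while still under the lock. Note that the ring name can no longer be derived from the pool index, since the shm is reserved before a slot is chosen; a static C11 counter stands in for a unique-name scheme (this is the naming complication discussed later in the thread).

#include <stdatomic.h>
#include <stdio.h>

static pool_t *reserve_pool(void)
{
	static atomic_uint ring_seq;    /* illustrative unique-name source */
	char ring_name[ODP_POOL_NAME_LEN];
	odp_shm_t shm;
	pool_t *pool;
	int i;

	snprintf(ring_name, sizeof(ring_name), "pool_ring_%u",
		 atomic_fetch_add(&ring_seq, 1u));

	/* Reserve the ring first; if this fails, nothing was published. */
	shm = odp_shm_reserve(ring_name, sizeof(pool_ring_t),
			      ODP_CACHE_LINE_SIZE, 0);
	if (shm == ODP_SHM_INVALID)
		return NULL;

	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
		pool = pool_entry(i);

		LOCK(&pool->lock);
		if (pool->reserved == 0) {
			/* Claim the slot and attach the ring while still
			 * holding the lock, so other threads never see a
			 * half-initialized or stale slot. */
			pool->reserved = 1;
			pool->ring_shm = shm;
			pool->ring     = odp_shm_addr(shm);
			UNLOCK(&pool->lock);
			return pool;
		}
		UNLOCK(&pool->lock);
	}

	/* No free slot: give the pre-reserved ring back. */
	odp_shm_free(shm);
	return NULL;
}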


> -----Original Message-----
> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of Maxim
> Uvarov
> Sent: Monday, January 09, 2017 6:57 PM
> To: Bill Fischofer <bill.fischofer@linaro.org>
> Cc: lng-odp-forward <lng-odp@lists.linaro.org>
> Subject: Re: [lng-odp] [API-NEXT PATCHv3] linux-generic: pool: defer ring
> allocation until pool creation
> 
> Petri, do you have objections for this patch for locked area? If not,
> than Bill can do v2 with more clean shm names.
> 
> Maxim.
> 
> On 12/16/16 17:08, Bill Fischofer wrote:
> > On Fri, Dec 16, 2016 at 7:13 AM, Maxim Uvarov <maxim.uvarov@linaro.org>
> wrote:
> >> On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:
> >>>>
> >>>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
> >>>>  {
> >>>>      int i;
> >>>>      pool_t *pool;
> >>>> +    char ring_name[ODP_POOL_NAME_LEN];
> >>>>
> >>>>      for (i = 0; i < ODP_CONFIG_POOLS; i++) {
> >>>>              pool = pool_entry(i);
> >>>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
> >>>>              if (pool->reserved == 0) {
> >>>>                      pool->reserved = 1;
> >>>>                      UNLOCK(&pool->lock);
> >>>> +                    sprintf(ring_name, "_odp_pool_ring_%d", i);
> >>
> >>
> >> shm reserve will create /tmp/odp-pid-_odp_pool_ring_%d
> >>
> >> which looks like more odp works there. I think name pool_ring_%d is
> >> better here.
> >
> > I can certainly change that and will do so in the next rev. I'd like
> > to get Petri's feedback on the reserve question first, however, so
> > that this doesn't have to be two revisions.
> >
> >>
> >> Maxim.
> >>
> >>
> >>>> +                    pool->ring_shm =
> >>>> +                            odp_shm_reserve(ring_name,
> >>>> +                                            sizeof(pool_ring_t),
> >>>> +                                            ODP_CACHE_LINE_SIZE, 0);
> >>>> +                    if (pool->ring_shm == ODP_SHM_INVALID) {
> >>>> +                            ODP_ERR("Unable to alloc pool ring
> %d\n", i);
> >>>> +                            pool->reserved = 0;
> >>>
> >>> This must be modified inside the lock, otherwise there's a race
> condition if a pool is reserved or not.
> >>>
> >>> Also, I'd like to have performance impact of the extra indirection
> verified.
> >>>
> >>> -Petri
> >>>
> >>
Bill Fischofer Jan. 10, 2017, 1:19 p.m. UTC | #8
On Tue, Jan 10, 2017 at 2:17 AM, Savolainen, Petri (Nokia - FI/Espoo)
<petri.savolainen@nokia-bell-labs.com> wrote:
>
> The race condition should be avoided. That can be done easily by first doing the shm_reserve(). If that succeeds, a pool can be allocated normally behind the lock.
>
> shm = shm_reserve();
>
> if (shm == invalid)
>         return -1;
>
> LOCK();
>
> // Search a free pool and reserve it (like today)
>
> UNLOCK();
>
> if (pool == NULL) {
>         shm_free(shm);
>         return -1;
> }
>
> pool->ring_shm = shm;
> ...
>
>
> -Petri


There is no race condition, but I have no objection to reworking this
as described since they are equivalent. I'll post a v2 with this and
Maxim's changes.

>
>
>> -----Original Message-----
>> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of Maxim
>> Uvarov
>> Sent: Monday, January 09, 2017 6:57 PM
>> To: Bill Fischofer <bill.fischofer@linaro.org>
>> Cc: lng-odp-forward <lng-odp@lists.linaro.org>
>> Subject: Re: [lng-odp] [API-NEXT PATCHv3] linux-generic: pool: defer ring
>> allocation until pool creation
>>
>> Petri, do you have objections for this patch for locked area? If not,
>> than Bill can do v2 with more clean shm names.
>>
>> Maxim.
>>
>> On 12/16/16 17:08, Bill Fischofer wrote:
>> > On Fri, Dec 16, 2016 at 7:13 AM, Maxim Uvarov <maxim.uvarov@linaro.org>
>> wrote:
>> >> On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:
>> >>>>
>> >>>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>> >>>>  {
>> >>>>      int i;
>> >>>>      pool_t *pool;
>> >>>> +    char ring_name[ODP_POOL_NAME_LEN];
>> >>>>
>> >>>>      for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>> >>>>              pool = pool_entry(i);
>> >>>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>> >>>>              if (pool->reserved == 0) {
>> >>>>                      pool->reserved = 1;
>> >>>>                      UNLOCK(&pool->lock);
>> >>>> +                    sprintf(ring_name, "_odp_pool_ring_%d", i);
>> >>
>> >>
>> >> shm reserve will create /tmp/odp-pid-_odp_pool_ring_%d
>> >>
>> >> which looks like more odp works there. I think name pool_ring_%d is
>> >> better here.
>> >
>> > I can certainly change that and will do so in the next rev. I'd like
>> > to get Petri's feedback on the reserve question first, however, so
>> > that this doesn't have to be two revisions.
>> >
>> >>
>> >> Maxim.
>> >>
>> >>
>> >>>> +                    pool->ring_shm =
>> >>>> +                            odp_shm_reserve(ring_name,
>> >>>> +                                            sizeof(pool_ring_t),
>> >>>> +                                            ODP_CACHE_LINE_SIZE, 0);
>> >>>> +                    if (pool->ring_shm == ODP_SHM_INVALID) {
>> >>>> +                            ODP_ERR("Unable to alloc pool ring
>> %d\n", i);
>> >>>> +                            pool->reserved = 0;
>> >>>
>> >>> This must be modified inside the lock, otherwise there's a race
>> condition if a pool is reserved or not.
>> >>>
>> >>> Also, I'd like to have performance impact of the extra indirection
>> verified.
>> >>>
>> >>> -Petri
>> >>>
>> >>
>
Bill Fischofer Jan. 10, 2017, 2:52 p.m. UTC | #9
On Tue, Jan 10, 2017 at 7:19 AM, Bill Fischofer
<bill.fischofer@linaro.org> wrote:
> On Tue, Jan 10, 2017 at 2:17 AM, Savolainen, Petri (Nokia - FI/Espoo)
> <petri.savolainen@nokia-bell-labs.com> wrote:
>>
>> The race condition should be avoided. That can be done easily by first doing the shm_reserve(). If that succeeds, a pool can be allocated normally behind the lock.
>>
>> shm = shm_reserve();
>>
>> if (shm == invalid)
>>         return -1;
>>
>> LOCK();
>>
>> // Search a free pool and reserve it (like today)
>>
>> UNLOCK();
>>
>> if (pool == NULL) {
>>         shm_free(shm);
>>         return -1;
>> }
>>
>> pool->ring_shm = shm;
>> ...
>>
>>
>> -Petri
>
> There is no race condition, but I have no objection to reworking this
> as described since they are equivalent. I'll post a v2 with this and
> Maxim's changes.
>
>>
>>
>>> -----Original Message-----
>>> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of Maxim
>>> Uvarov
>>> Sent: Monday, January 09, 2017 6:57 PM
>>> To: Bill Fischofer <bill.fischofer@linaro.org>
>>> Cc: lng-odp-forward <lng-odp@lists.linaro.org>
>>> Subject: Re: [lng-odp] [API-NEXT PATCHv3] linux-generic: pool: defer ring
>>> allocation until pool creation
>>>
>>> Petri, do you have objections for this patch for locked area? If not,
>>> than Bill can do v2 with more clean shm names.
>>>
>>> Maxim.
>>>
>>> On 12/16/16 17:08, Bill Fischofer wrote:
>>> > On Fri, Dec 16, 2016 at 7:13 AM, Maxim Uvarov <maxim.uvarov@linaro.org>
>>> wrote:
>>> >> On 12/16/16 11:06, Savolainen, Petri (Nokia - FI/Espoo) wrote:
>>> >>>>
>>> >>>> @@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
>>> >>>>  {
>>> >>>>      int i;
>>> >>>>      pool_t *pool;
>>> >>>> +    char ring_name[ODP_POOL_NAME_LEN];
>>> >>>>
>>> >>>>      for (i = 0; i < ODP_CONFIG_POOLS; i++) {
>>> >>>>              pool = pool_entry(i);
>>> >>>> @@ -180,6 +181,17 @@ static pool_t *reserve_pool(void)
>>> >>>>              if (pool->reserved == 0) {
>>> >>>>                      pool->reserved = 1;
>>> >>>>                      UNLOCK(&pool->lock);
>>> >>>> +                    sprintf(ring_name, "_odp_pool_ring_%d", i);
>>> >>
>>> >>
>>> >> shm reserve will create /tmp/odp-pid-_odp_pool_ring_%d
>>> >>
>>> >> which looks like more odp works there. I think name pool_ring_%d is
>>> >> better here.
>>> >
>>> > I can certainly change that and will do so in the next rev. I'd like
>>> > to get Petri's feedback on the reserve question first, however, so
>>> > that this doesn't have to be two revisions.


Looking at this closer, the suggested rework greatly complicates the
generation of unique names for the reserved rings. I'd like to spend a
few minutes discussing this in today's ODP call and have added it to
the agenda.

>>> >
>>> >>
>>> >> Maxim.
>>> >>
>>> >>
>>> >>>> +                    pool->ring_shm =
>>> >>>> +                            odp_shm_reserve(ring_name,
>>> >>>> +                                            sizeof(pool_ring_t),
>>> >>>> +                                            ODP_CACHE_LINE_SIZE, 0);
>>> >>>> +                    if (pool->ring_shm == ODP_SHM_INVALID) {
>>> >>>> +                            ODP_ERR("Unable to alloc pool ring
>>> %d\n", i);
>>> >>>> +                            pool->reserved = 0;
>>> >>>
>>> >>> This must be modified inside the lock, otherwise there's a race
>>> condition if a pool is reserved or not.
>>> >>>
>>> >>> Also, I'd like to have performance impact of the extra indirection
>>> verified.
>>> >>>
>>> >>> -Petri
>>> >>>
>>> >>
>>
Savolainen, Petri (Nokia - FI/Espoo) Jan. 10, 2017, 2:55 p.m. UTC | #10
> -----Original Message-----
> From: Bill Fischofer [mailto:bill.fischofer@linaro.org]
> Sent: Tuesday, January 10, 2017 3:20 PM
> To: Savolainen, Petri (Nokia - FI/Espoo) <petri.savolainen@nokia-bell-
> labs.com>
> Cc: Maxim Uvarov <maxim.uvarov@linaro.org>; lng-odp-forward <lng-
> odp@lists.linaro.org>
> Subject: Re: [lng-odp] [API-NEXT PATCHv3] linux-generic: pool: defer ring
> allocation until pool creation
> 
> On Tue, Jan 10, 2017 at 2:17 AM, Savolainen, Petri (Nokia - FI/Espoo)
> <petri.savolainen@nokia-bell-labs.com> wrote:
> >
> > The race condition should be avoided. That can be done easily by first
> doing the shm_reserve(). If that succeeds, a pool can be allocated
> normally behind the lock.
> >
> > shm = shm_reserve();
> >
> > if (shm == invalid)
> >         return -1;
> >
> > LOCK();
> >
> > // Search a free pool and reserve it (like today)
> >
> > UNLOCK();
> >
> > if (pool == NULL) {
> >         shm_free(shm);
> >         return -1;
> > }
> >
> > pool->ring_shm = shm;
> > ...
> >
> >
> > -Petri
> 
> There is no race condition, but I have no objection to reworking this
> as described since they are equivalent. I'll post a v2 with this and
> Maxim's changes.


The race is of the type where the write of pool->reserved = 0 may happen only after a very long time (omitting the unlock omits the store-release fence). Other observers could see that all pools are reserved while some are actually free, because the write has not yet propagated through CPU write buffering / caches.

-Petri
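
Editor's note: a compressed illustration of Petri's point, using C11 atomics instead of the pool's LOCK/UNLOCK macros (not ODP code; the real lock internals may differ). The unlock acts as a store-release that publishes everything written inside the critical section; a plain store issued after the unlock carries no such ordering, so other threads scanning for a free slot may not observe the rollback promptly or in a consistent order with the rest of the pool state:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_flag lock;
	atomic_int  reserved;   /* atomic here so the post-unlock store is well-defined */
} slot_t;

static void slot_lock(slot_t *s)
{
	while (atomic_flag_test_and_set_explicit(&s->lock, memory_order_acquire))
		; /* spin */
}

static void slot_unlock(slot_t *s)
{
	/* Store-release: publishes every write made while the lock was held. */
	atomic_flag_clear_explicit(&s->lock, memory_order_release);
}

/* Problematic pattern: the rollback happens after the release. */
static void rollback_after_unlock(slot_t *s)
{
	slot_lock(s);
	atomic_store_explicit(&s->reserved, 1, memory_order_relaxed);
	slot_unlock(s);
	/* ... shm reservation fails here ... */
	atomic_store_explicit(&s->reserved, 0, memory_order_relaxed);
}

/* Safer pattern: the flag only changes while the lock is held, so every
 * write to it is covered by the release in slot_unlock(). */
static void reserve_if_ready(slot_t *s, bool shm_ok)
{
	slot_lock(s);
	if (shm_ok && atomic_load_explicit(&s->reserved, memory_order_relaxed) == 0)
		atomic_store_explicit(&s->reserved, 1, memory_order_relaxed);
	slot_unlock(s);
}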

Patch

diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index 5d7b817..4915bda 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -69,7 +69,8 @@  typedef struct pool_t {
 
 	pool_cache_t     local_cache[ODP_THREAD_COUNT_MAX];
 
-	pool_ring_t      ring;
+	odp_shm_t        ring_shm;
+	pool_ring_t     *ring;
 
 } pool_t;
 
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 4be3827..7113331 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -143,7 +143,7 @@  static void flush_cache(pool_cache_t *cache, pool_t *pool)
 	uint32_t mask;
 	uint32_t cache_num, i, data;
 
-	ring = &pool->ring.hdr;
+	ring = &pool->ring->hdr;
 	mask = pool->ring_mask;
 	cache_num = cache->num;
 
@@ -172,6 +172,7 @@  static pool_t *reserve_pool(void)
 {
 	int i;
 	pool_t *pool;
+	char ring_name[ODP_POOL_NAME_LEN];
 
 	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
 		pool = pool_entry(i);
@@ -180,6 +181,17 @@  static pool_t *reserve_pool(void)
 		if (pool->reserved == 0) {
 			pool->reserved = 1;
 			UNLOCK(&pool->lock);
+			sprintf(ring_name, "_odp_pool_ring_%d", i);
+			pool->ring_shm =
+				odp_shm_reserve(ring_name,
+						sizeof(pool_ring_t),
+						ODP_CACHE_LINE_SIZE, 0);
+			if (pool->ring_shm == ODP_SHM_INVALID) {
+				ODP_ERR("Unable to alloc pool ring %d\n", i);
+				pool->reserved = 0;
+				break;
+			}
+			pool->ring = odp_shm_addr(pool->ring_shm);
 			return pool;
 		}
 		UNLOCK(&pool->lock);
@@ -214,7 +226,7 @@  static void init_buffers(pool_t *pool)
 	int type;
 	uint32_t seg_size;
 
-	ring = &pool->ring.hdr;
+	ring = &pool->ring->hdr;
 	mask = pool->ring_mask;
 	type = pool->params.type;
 
@@ -411,7 +423,7 @@  static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
 		pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm);
 	}
 
-	ring_init(&pool->ring.hdr);
+	ring_init(&pool->ring->hdr);
 	init_buffers(pool);
 
 	return pool->pool_hdl;
@@ -533,6 +545,8 @@  int odp_pool_destroy(odp_pool_t pool_hdl)
 		odp_shm_free(pool->uarea_shm);
 
 	pool->reserved = 0;
+	odp_shm_free(pool->ring_shm);
+	pool->ring = NULL;
 	UNLOCK(&pool->lock);
 
 	return 0;
@@ -589,8 +603,6 @@  int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
 	pool_cache_t *cache;
 	uint32_t cache_num, num_ch, num_deq, burst;
 
-	ring  = &pool->ring.hdr;
-	mask  = pool->ring_mask;
 	cache = local.cache[pool->pool_idx];
 
 	cache_num = cache->num;
@@ -617,6 +629,8 @@  int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
 		 * and not uint32_t. */
 		uint32_t data[burst];
 
+		ring      = &pool->ring->hdr;
+		mask      = pool->ring_mask;
 		burst     = ring_deq_multi(ring, mask, data, burst);
 		cache_num = burst - num_deq;
 
@@ -668,12 +682,12 @@  static inline void buffer_free_to_pool(uint32_t pool_id,
 
 	cache = local.cache[pool_id];
 	pool  = pool_entry(pool_id);
-	ring  = &pool->ring.hdr;
-	mask  = pool->ring_mask;
 
 	/* Special case of a very large free. Move directly to
 	 * the global pool. */
 	if (odp_unlikely(num > CONFIG_POOL_CACHE_SIZE)) {
+		ring  = &pool->ring->hdr;
+		mask  = pool->ring_mask;
 		for (i = 0; i < num; i++)
 			ring_enq(ring, mask, (uint32_t)(uintptr_t)buf[i]);
 
@@ -688,6 +702,9 @@  static inline void buffer_free_to_pool(uint32_t pool_id,
 		uint32_t index;
 		int burst = CACHE_BURST;
 
+		ring  = &pool->ring->hdr;
+		mask  = pool->ring_mask;
+
 		if (odp_unlikely(num > CACHE_BURST))
 			burst = num;