@@ -135,6 +135,9 @@ struct page_pool {
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+struct page *__page_pool_alloc_pages(struct page_pool *pool,
+ struct pp_alloc_cache *alloc,
+ gfp_t gfp);
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
@@ -155,6 +158,13 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
return page_pool_alloc_frag(pool, offset, size, gfp);
}
+struct page *page_pool_drain_frag(struct page_pool *pool, struct page *page,
+ long drain_count);
+void page_pool_free_frag(struct page_pool *pool, struct page *page,
+ long drain_count);
+void page_pool_empty_alloc_cache_once(struct page_pool *pool,
+ struct pp_alloc_cache *alloc);
+
/* get the stored dma direction. A driver might decide to treat this locally and
* avoid the extra cache line from page_pool to determine the direction
*/
@@ -110,7 +110,8 @@ EXPORT_SYMBOL(page_pool_create);
static void page_pool_return_page(struct page_pool *pool, struct page *page);
noinline
-static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
+static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
+ struct pp_alloc_cache *alloc)
{
struct ptr_ring *r = &pool->ring;
struct page *page;
@@ -140,7 +141,7 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
break;
if (likely(page_to_nid(page) == pref_nid)) {
- pool->alloc.cache[pool->alloc.count++] = page;
+ alloc->cache[alloc->count++] = page;
} else {
/* NUMA mismatch;
* (1) release 1 page to page-allocator and
@@ -151,27 +152,28 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
page = NULL;
break;
}
- } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
+ } while (alloc->count < PP_ALLOC_CACHE_REFILL);
/* Return last page */
- if (likely(pool->alloc.count > 0))
- page = pool->alloc.cache[--pool->alloc.count];
+ if (likely(alloc->count > 0))
+ page = alloc->cache[--alloc->count];
spin_unlock(&r->consumer_lock);
return page;
}
/* fast path */
-static struct page *__page_pool_get_cached(struct page_pool *pool)
+static struct page *__page_pool_get_cached(struct page_pool *pool,
+ struct pp_alloc_cache *alloc)
{
struct page *page;
/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
- if (likely(pool->alloc.count)) {
+ if (likely(alloc->count)) {
/* Fast-path */
- page = pool->alloc.cache[--pool->alloc.count];
+ page = alloc->cache[--alloc->count];
} else {
- page = page_pool_refill_alloc_cache(pool);
+ page = page_pool_refill_alloc_cache(pool, alloc);
}
return page;
@@ -252,6 +254,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
+ struct pp_alloc_cache *alloc,
gfp_t gfp)
{
const int bulk = PP_ALLOC_CACHE_REFILL;
@@ -265,13 +268,13 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
return __page_pool_alloc_page_order(pool, gfp);
/* Unnecessary as alloc cache is empty, but guarantees zero count */
- if (unlikely(pool->alloc.count > 0))
- return pool->alloc.cache[--pool->alloc.count];
+ if (unlikely(alloc->count > 0))
+ return alloc->cache[--alloc->count];
/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
- memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
+ memset(alloc->cache, 0, sizeof(void *) * bulk);
- nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
+ nr_pages = alloc_pages_bulk_array(gfp, bulk, alloc->cache);
if (unlikely(!nr_pages))
return NULL;
@@ -279,7 +282,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
* page element have not been (possibly) DMA mapped.
*/
for (i = 0; i < nr_pages; i++) {
- page = pool->alloc.cache[i];
+ page = alloc->cache[i];
if ((pp_flags & PP_FLAG_DMA_MAP) &&
unlikely(!page_pool_dma_map(pool, page))) {
put_page(page);
@@ -287,7 +290,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
}
page_pool_set_pp_info(pool, page);
- pool->alloc.cache[pool->alloc.count++] = page;
+ alloc->cache[alloc->count++] = page;
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
trace_page_pool_state_hold(pool, page,
@@ -295,8 +298,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
}
/* Return last page */
- if (likely(pool->alloc.count > 0))
- page = pool->alloc.cache[--pool->alloc.count];
+ if (likely(alloc->count > 0))
+ page = alloc->cache[--alloc->count];
else
page = NULL;
@@ -307,19 +310,27 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
/* For using page_pool replace: alloc_pages() API calls, but provide
* synchronization guarantee for allocation side.
*/
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+struct page *__page_pool_alloc_pages(struct page_pool *pool,
+ struct pp_alloc_cache *alloc,
+ gfp_t gfp)
{
struct page *page;
/* Fast-path: Get a page from cache */
- page = __page_pool_get_cached(pool);
+ page = __page_pool_get_cached(pool, alloc);
if (page)
return page;
/* Slow-path: cache empty, do real allocation */
- page = __page_pool_alloc_pages_slow(pool, gfp);
+ page = __page_pool_alloc_pages_slow(pool, alloc, gfp);
return page;
}
+EXPORT_SYMBOL(__page_pool_alloc_pages);
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+{
+ return __page_pool_alloc_pages(pool, &pool->alloc, gfp);
+}
EXPORT_SYMBOL(page_pool_alloc_pages);
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
@@ -522,11 +533,9 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
-static struct page *page_pool_drain_frag(struct page_pool *pool,
- struct page *page)
+struct page *page_pool_drain_frag(struct page_pool *pool, struct page *page,
+ long drain_count)
{
- long drain_count = BIAS_MAX - pool->frag_users;
-
/* Some user is still using the page frag */
if (likely(page_pool_atomic_sub_frag_count_return(page,
drain_count)))
@@ -543,13 +552,9 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
return NULL;
}
-static void page_pool_free_frag(struct page_pool *pool)
+void page_pool_free_frag(struct page_pool *pool, struct page *page,
+ long drain_count)
{
- long drain_count = BIAS_MAX - pool->frag_users;
- struct page *page = pool->frag_page;
-
- pool->frag_page = NULL;
-
if (!page ||
page_pool_atomic_sub_frag_count_return(page, drain_count))
return;
@@ -572,7 +577,8 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
*offset = pool->frag_offset;
if (page && *offset + size > max_size) {
- page = page_pool_drain_frag(pool, page);
+ page = page_pool_drain_frag(pool, page,
+ BIAS_MAX - pool->frag_users);
if (page)
goto frag_reset;
}
@@ -628,26 +634,26 @@ static void page_pool_free(struct page_pool *pool)
kfree(pool);
}
-static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
+void page_pool_empty_alloc_cache_once(struct page_pool *pool,
+ struct pp_alloc_cache *alloc)
{
struct page *page;
- if (pool->destroy_cnt)
- return;
-
/* Empty alloc cache, assume caller made sure this is
* no-longer in use, and page_pool_alloc_pages() cannot be
* call concurrently.
*/
- while (pool->alloc.count) {
- page = pool->alloc.cache[--pool->alloc.count];
+ while (alloc->count) {
+ page = alloc->cache[--alloc->count];
page_pool_return_page(pool, page);
}
}
static void page_pool_scrub(struct page_pool *pool)
{
- page_pool_empty_alloc_cache_once(pool);
+ if (!pool->destroy_cnt)
+ page_pool_empty_alloc_cache_once(pool, &pool->alloc);
+
pool->destroy_cnt++;
/* No more consumers should exist, but producers could still
@@ -705,7 +711,9 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_put(pool))
return;
- page_pool_free_frag(pool);
+ page_pool_free_frag(pool, pool->frag_page,
+ BIAS_MAX - pool->frag_users);
+ pool->frag_page = NULL;
if (!page_pool_release(pool))
return;
Currently the page pool assumes the caller MUST guarantee safe non-concurrent access, e.g. softirq for rx. This patch refactors the page pool to support multiple allocation contexts, in order to support tx recycling in the page pool (tx means 'socket to netdev' here). Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com> --- include/net/page_pool.h | 10 ++++++ net/core/page_pool.c | 86 +++++++++++++++++++++++++++---------------------- 2 files changed, 57 insertions(+), 39 deletions(-)