
[net-next,2/2] page_pool: optimize the cpu sync operation when DMA mapping

Message ID 1629425195-10130-3-git-send-email-linyunsheng@huawei.com
State New
Series Some minor optimization for page pool

Commit Message

Yunsheng Lin Aug. 20, 2021, 2:06 a.m. UTC
If DMA_ATTR_SKIP_CPU_SYNC is not set, CPU syncing is also done
in dma_map_page_attrs(), so set the attrs according to
pool->p.flags to avoid calling the DMA sync function again.

Also mark the DMA mapping error as the unlikely case while we
are at it.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 net/core/page_pool.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

Comments

Heiner Kallweit Aug. 20, 2021, 6:10 a.m. UTC | #1
On 20.08.2021 04:06, Yunsheng Lin wrote:
> If DMA_ATTR_SKIP_CPU_SYNC is not set, CPU syncing is also done
> in dma_map_page_attrs(), so set the attrs according to
> pool->p.flags to avoid calling the DMA sync function again.
> 
> Also mark the DMA mapping error as the unlikely case while we
> are at it.
> 
This shouldn't be needed: dma_mapping_error() will most likely be
inlined by the compiler, and it already includes the unlikely() hint.
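
For reference, the helper reads roughly as below (paraphrased from
include/linux/dma-mapping.h around this kernel version):

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	/* the branch hint is already here, so wrapping the caller's
	 * check in unlikely() again buys nothing
	 */
	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}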

> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> ---
>  net/core/page_pool.c | 11 ++++++-----
>  1 file changed, 6 insertions(+), 5 deletions(-)
> 
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 1a69784..8172045 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -191,8 +191,12 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
>  
>  static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
>  {
> +	unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
>  	dma_addr_t dma;
>  
> +	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> +		attrs = 0;
> +
>  	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
>  	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
>  	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
> @@ -200,15 +204,12 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
>  	 */
>  	dma = dma_map_page_attrs(pool->p.dev, page, 0,
>  				 (PAGE_SIZE << pool->p.order),
> -				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
> -	if (dma_mapping_error(pool->p.dev, dma))
> +				 pool->p.dma_dir, attrs);
> +	if (unlikely(dma_mapping_error(pool->p.dev, dma)))
>  		return false;
>  
>  	page_pool_set_dma_addr(page, dma);
>  
> -	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> -		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
> -
>  	return true;
>  }
>  
>

Patch

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 1a69784..8172045 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -191,8 +191,12 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 
 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 {
+	unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
 	dma_addr_t dma;
 
+	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+		attrs = 0;
+
 	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
 	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
 	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
@@ -200,15 +204,12 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	 */
 	dma = dma_map_page_attrs(pool->p.dev, page, 0,
 				 (PAGE_SIZE << pool->p.order),
-				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (dma_mapping_error(pool->p.dev, dma))
+				 pool->p.dma_dir, attrs);
+	if (unlikely(dma_mapping_error(pool->p.dev, dma)))
 		return false;
 
 	page_pool_set_dma_addr(page, dma);
 
-	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
-		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
-
 	return true;
 }
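
Usage note: whether attrs stays DMA_ATTR_SKIP_CPU_SYNC or becomes 0 is
decided by the driver when it creates the pool. A minimal, hypothetical
driver-side sketch (function name and sizes are illustrative only):

#include <net/page_pool.h>

static struct page_pool *rx_ring_create_pool(struct device *dev,
					     unsigned int ring_size)
{
	struct page_pool_params pp_params = {
		/* pool maps pages and syncs them for the device; with
		 * this patch the sync happens inside dma_map_page_attrs()
		 */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= 0,
		.max_len	= PAGE_SIZE,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}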