Page pool tried to cache the NAPI ID in page pool info to avoid
having a dependency on the life cycle of the NAPI instance.
Since the commit under Fixes the NAPI ID is not populated until
napi_enable(), and there's a good chance that the page pool is
created before NAPI gets enabled.
Protect the NAPI pointer with the existing page pool mutex; the
reading path already holds it. napi_id itself we need to
READ_ONCE(), since it's protected by netdev_lock(), which we are
not holding in page pool.
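As a rough sketch of that scheme (the helper names
page_pool_set_napi() and page_pool_napi_id() are illustrative
here, not the actual patch): the writer publishes and clears the
NAPI pointer under page_pools_lock, while the netlink reader,
which already holds that mutex, dereferences the pointer and
fetches the ID with READ_ONCE():

/* Illustrative only. Writer side: publish/clear the NAPI pointer
 * under the existing page_pools_lock so readers never see it
 * dangling.
 */
static void page_pool_set_napi(struct page_pool *pool,
			       struct napi_struct *napi)
{
	mutex_lock(&page_pools_lock);
	pool->p.napi = napi;	/* cleared again before NAPI goes away */
	mutex_unlock(&page_pools_lock);
}

/* Reader side (the netlink dump path already holds
 * page_pools_lock): the mutex keeps the pointer stable, but
 * napi_id is written under netdev_lock(), which we do not hold
 * here, hence READ_ONCE().
 */
static unsigned int page_pool_napi_id(const struct page_pool *pool)
{
	struct napi_struct *napi = pool->p.napi;

	lockdep_assert_held(&page_pools_lock);
	return napi ? READ_ONCE(napi->napi_id) : 0;
}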
Before this patch, NAPI IDs were missing for mlx5:
 # ./cli.py --spec netlink/specs/netdev.yaml --dump page-pool-get
 [{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912},
  {'id': 143, 'ifindex': 2, 'inflight': 5568, 'inflight-mem': 22806528},
  {'id': 142, 'ifindex': 2, 'inflight': 5120, 'inflight-mem': 20971520},
  {'id': 141, 'ifindex': 2, 'inflight': 4992, 'inflight-mem': 20447232},
  ...
After:
 [{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912,
   'napi-id': 565},
  {'id': 143, 'ifindex': 2, 'inflight': 4224, 'inflight-mem': 17301504,
   'napi-id': 525},
  {'id': 142, 'ifindex': 2, 'inflight': 4288, 'inflight-mem': 17563648,
   'napi-id': 524},
  ...
Fixes: 86e25f40aa ("net: napi: Add napi_config")
Reviewed-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20250123231620.1086401-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
net/core/page_pool_priv.h (60 lines, 1.5 KiB, C):
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __PAGE_POOL_PRIV_H
#define __PAGE_POOL_PRIV_H

#include <net/page_pool/helpers.h>

#include "netmem_priv.h"

extern struct mutex page_pools_lock;

s32 page_pool_inflight(const struct page_pool *pool, bool strict);

int page_pool_list(struct page_pool *pool);
void page_pool_detached(struct page_pool *pool);
void page_pool_unlist(struct page_pool *pool);

static inline bool
page_pool_set_dma_addr_netmem(netmem_ref netmem, dma_addr_t addr)
{
	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
		netmem_set_dma_addr(netmem, addr >> PAGE_SHIFT);

		/* We assume page alignment to shave off bottom bits,
		 * if this "compression" doesn't work we need to drop.
		 */
		return addr != (dma_addr_t)netmem_get_dma_addr(netmem)
				       << PAGE_SHIFT;
	}

	netmem_set_dma_addr(netmem, addr);
	return false;
}

static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
}

#if defined(CONFIG_PAGE_POOL)
void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem);
void page_pool_clear_pp_info(netmem_ref netmem);
int page_pool_check_memory_provider(struct net_device *dev,
				    struct netdev_rx_queue *rxq);
#else
static inline void page_pool_set_pp_info(struct page_pool *pool,
					 netmem_ref netmem)
{
}
static inline void page_pool_clear_pp_info(netmem_ref netmem)
{
}
static inline int page_pool_check_memory_provider(struct net_device *dev,
						  struct netdev_rx_queue *rxq)
{
	return 0;
}
#endif

#endif
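The PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA branch above stores
addr >> PAGE_SHIFT because the storage slot is too narrow for a
full 64-bit DMA address; page alignment makes the shift lossless,
and the round-trip comparison catches the cases where it is not.
A stand-alone userspace sketch of that check follows (the field
width and page shift are hypothetical stand-ins for the kernel's
32-bit-arch, 64-bit-DMA case, not kernel code):

/* Demo of the round-trip check in page_pool_set_dma_addr_netmem():
 * a 32-bit storage slot plus a 12-bit shift can only represent
 * page-aligned addresses below 2^44.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	uint64_t addrs[] = {
		0x0000000000002000ULL,	/* aligned, fits: compression ok */
		0x0000000000002080ULL,	/* misaligned: low bits would be lost */
		0x0000300000000000ULL,	/* aligned but > 44 bits: high bits lost */
	};

	for (unsigned int i = 0; i < 3; i++) {
		uint64_t addr = addrs[i];
		/* What a 32-bit dma_addr slot keeps after the shift. */
		uint32_t stored = (uint32_t)(addr >> DEMO_PAGE_SHIFT);
		/* What readers recover by shifting back up. */
		uint64_t back = (uint64_t)stored << DEMO_PAGE_SHIFT;

		printf("addr=%#014llx round-trip=%#014llx -> %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)back,
		       addr != back ? "drop page" : "ok");
	}
	return 0;
}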