mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 10:40:15 +02:00

xsk: Bring back busy polling support
Commit 86e25f40aa ("net: napi: Add napi_config") moved the napi->napi_id
assignment to a later point in time (napi_hash_add_with_id). This breaks
__xdp_rxq_info_reg, which copies napi_id at an earlier time and now stores
a napi_id of 0. It also makes sk_mark_napi_id_once_xdp and
__sk_mark_napi_id_once useless because they now work against a 0 napi_id.
Since sk_busy_loop requires a valid napi_id to busy-poll on, there is no
way to busy-poll AF_XDP sockets anymore.

Bring back the ability to busy-poll on XSK by resolving the socket's
napi_id at bind time. This relies on the relatively recent
netif_queue_set_napi, but (assuming) at this point most popular drivers
should have been converted. This also removes the per-tx/rx-cycle work
that used to check and/or set the napi_id value.

Confirmed by running a busy-polling AF_XDP socket
(github.com/fomichev/xskrtt) on mlx5 and looking at BusyPollRxPackets
from /proc/net/netstat.

Fixes: 86e25f40aa ("net: napi: Add napi_config")
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20250109003436.2829560-1-sdf@fomichev.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
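For context, busy polling is opted into per socket from userspace; with the
bind-time napi_id resolution restored below, sk_busy_loop once again has a
valid NAPI queue to spin on. A minimal userspace sketch, assuming an
already-created and bound AF_XDP socket fd (set up elsewhere, e.g. via
libxdp); the fallback #define values are copied from <asm-generic/socket.h>
for older libc headers, and the chosen timeout/budget numbers are
illustrative:

#include <sys/socket.h>

#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL	69
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET	70
#endif

/* Enable busy polling on an already-bound AF_XDP socket fd. */
static int xsk_enable_busy_poll(int xsk_fd)
{
	int prefer = 1;		/* prefer busy polling over IRQ-driven NAPI */
	int timeout_us = 20;	/* how long sk_busy_loop() may spin per call */
	int budget = 64;	/* packets processed per busy-poll iteration */

	if (setsockopt(xsk_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       &prefer, sizeof(prefer)) < 0)
		return -1;
	if (setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL,
		       &timeout_us, sizeof(timeout_us)) < 0)
		return -1;
	return setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
			  &budget, sizeof(budget));
}

After this, recvfrom()/sendto() on the socket drive the busy-poll loop, and
BusyPollRxPackets in /proc/net/netstat should increase.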
This commit is contained in:

parent f0aa6a37a3
commit 5ef44b3cb4

5 changed files with 9 additions and 29 deletions
include/net/busy_poll.h
@@ -174,12 +174,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
 #endif
 }
 
-static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
-					    const struct xdp_buff *xdp)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
-#endif
-}
-
 #endif /* _LINUX_NET_BUSY_POLL_H */
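For reference, the bind-time path added later in this patch keeps using
__sk_mark_napi_id_once; the "once" matters because the helper only records
a napi_id when the socket has none yet, so the value resolved at bind time
sticks. An approximate sketch of its (unchanged) shape in
include/net/busy_poll.h:

static inline void __sk_mark_napi_id_once(struct sock *sk,
					  unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Only record a napi_id the first time; later calls are no-ops. */
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}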
include/net/xdp.h
@@ -62,7 +62,6 @@ struct xdp_rxq_info {
 	u32 queue_index;
 	u32 reg_state;
 	struct xdp_mem_info mem;
-	unsigned int napi_id;
 	u32 frag_size;
 } ____cacheline_aligned; /* perf critical, avoid false-sharing */
 
include/net/xdp_sock_drv.h
@@ -59,15 +59,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
 	xp_fill_cb(pool, desc);
 }
 
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	return pool->heads[0].xdp.rxq->napi_id;
-#else
-	return 0;
-#endif
-}
-
 static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
 				      unsigned long attrs)
 {
@@ -306,11 +297,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
 {
 }
 
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
-	return 0;
-}
-
 static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
 				      unsigned long attrs)
 {
net/core/xdp.c
@@ -186,7 +186,6 @@ int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
 	xdp_rxq_info_init(xdp_rxq);
 	xdp_rxq->dev = dev;
 	xdp_rxq->queue_index = queue_index;
-	xdp_rxq->napi_id = napi_id;
 	xdp_rxq->frag_size = frag_size;
 
 	xdp_rxq->reg_state = REG_STATE_REGISTERED;
net/xdp/xsk.c
@@ -322,7 +322,6 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 		return -ENOSPC;
 	}
 
-	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
 	return 0;
 }
 
@@ -908,11 +907,8 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
 	if (unlikely(!xs->tx))
 		return -ENOBUFS;
 
-	if (sk_can_busy_loop(sk)) {
-		if (xs->zc)
-			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
+	if (sk_can_busy_loop(sk))
 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
-	}
 
 	if (xs->zc && xsk_no_wakeup(sk))
 		return 0;
@@ -1298,6 +1294,14 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	xs->queue_id = qid;
 	xp_add_xsk(xs->pool, xs);
 
+	if (xs->zc && qid < dev->real_num_rx_queues) {
+		struct netdev_rx_queue *rxq;
+
+		rxq = __netif_get_rx_queue(dev, qid);
+		if (rxq->napi)
+			__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
+	}
+
 out_unlock:
 	if (err) {
 		dev_put(dev);
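Note that the rxq->napi lookup in the xsk_bind hunk above only finds a NAPI
instance for drivers that have linked their queues to their NAPI contexts,
which is the netif_queue_set_napi dependency the commit message mentions. A
hedged driver-side sketch of that prerequisite; netif_queue_set_napi and
NETDEV_QUEUE_TYPE_RX are the real kernel API, while the example_* function
names are illustrative:

#include <linux/netdevice.h>

/* Illustrative driver helper: tell the core which NAPI instance services
 * RX queue 'qid', so that __netif_get_rx_queue(dev, qid)->napi is
 * non-NULL at xsk_bind time. Drivers typically call this from their
 * queue setup path.
 */
static void example_set_rx_queue_napi(struct net_device *dev, u32 qid,
				      struct napi_struct *napi)
{
	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, napi);
}

/* ... and clear the association (napi = NULL) on queue teardown, before
 * the NAPI instance is deleted. */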