forked from mirrors/linux
		
	net: add helpers for setting a memory provider on an rx queue
Add helpers that properly prep or remove a memory provider for an rx queue, then restart the queue. Reviewed-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: David Wei <dw@davidwei.uk> Link: https://patch.msgid.link/20250204215622.695511-11-dw@davidwei.uk Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
		
							parent
							
								
									56102c013f
								
							
						
					
					
						commit
						6e18ed929d
					
				
					 2 changed files with 74 additions and 0 deletions
				
			
		|  | @ -22,6 +22,11 @@ bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr); | |||
| void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov); | ||||
| void net_mp_niov_clear_page_pool(struct net_iov *niov); | ||||
| 
 | ||||
| int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, | ||||
| 		    struct pp_memory_provider_params *p); | ||||
| void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx, | ||||
| 		      struct pp_memory_provider_params *old_p); | ||||
| 
 | ||||
| /**
 | ||||
|   * net_mp_netmem_place_in_cache() - give a netmem to a page pool | ||||
|   * @pool:      the page pool to place the netmem into | ||||
|  |  | |||
|  | @ -3,6 +3,7 @@ | |||
| #include <linux/netdevice.h> | ||||
| #include <net/netdev_queues.h> | ||||
| #include <net/netdev_rx_queue.h> | ||||
| #include <net/page_pool/memory_provider.h> | ||||
| 
 | ||||
| #include "page_pool_priv.h" | ||||
| 
 | ||||
|  | @ -80,3 +81,71 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) | |||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL"); | ||||
| 
 | ||||
| static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, | ||||
| 			     struct pp_memory_provider_params *p) | ||||
| { | ||||
| 	struct netdev_rx_queue *rxq; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	if (ifq_idx >= dev->real_num_rx_queues) | ||||
| 		return -EINVAL; | ||||
| 	ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues); | ||||
| 
 | ||||
| 	rxq = __netif_get_rx_queue(dev, ifq_idx); | ||||
| 	if (rxq->mp_params.mp_ops) | ||||
| 		return -EEXIST; | ||||
| 
 | ||||
| 	rxq->mp_params = *p; | ||||
| 	ret = netdev_rx_queue_restart(dev, ifq_idx); | ||||
| 	if (ret) { | ||||
| 		rxq->mp_params.mp_ops = NULL; | ||||
| 		rxq->mp_params.mp_priv = NULL; | ||||
| 	} | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
/* Locked wrapper around __net_mp_open_rxq(): installs memory provider
 * @p on rx queue @ifq_idx under rtnl_lock and restarts the queue.
 * Returns 0 on success or a negative errno.
 */
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int err;

	rtnl_lock();
	err = __net_mp_open_rxq(dev, ifq_idx, p);
	rtnl_unlock();

	return err;
}
| 
 | ||||
/* Remove memory provider @old_p from rx queue @ifq_idx of @dev and
 * restart the queue without it.
 *
 * Caller must hold rtnl_lock (enforced by the net_mp_close_rxq()
 * wrapper).  Mismatched state WARNs and returns rather than tearing
 * down someone else's provider.
 */
static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
			      struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;

	/* Close is driven by the installer, so an OOB index is a caller bug. */
	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went thru shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	/* The queue must still carry exactly the provider being removed. */
	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	/* Restart provider-free; a failure here has no recovery path. */
	WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
}
| 
 | ||||
/* Locked wrapper around __net_mp_close_rxq(): removes memory provider
 * @old_p from rx queue @ifq_idx under rtnl_lock and restarts the queue.
 */
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	rtnl_lock();
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	rtnl_unlock();
}
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 David Wei
						David Wei