memory-provider: dmabuf devmem memory provider

Implement a memory provider that allocates dmabuf devmem in the form of
net_iov.

The provider receives a reference to the struct netdev_dmabuf_binding
via the pool->mp_priv pointer. The driver needs to set this pointer for
the provider in the net_iov.

The provider obtains a reference on the netdev_dmabuf_binding, which
guarantees the binding and the underlying mapping remain alive until
the provider is destroyed.

Usage of PP_FLAG_DMA_MAP is required for this memory provider such that
the page_pool can provide the driver with the dma-addrs of the devmem.

Support for PP_FLAG_DMA_SYNC_DEV and for p.order != 0 is omitted for
simplicity.

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Kaiyuan Zhang <kaiyuanz@google.com>
Signed-off-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20240910171458.219195-7-almasrymina@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8ab79ed50c
commit 0f92140468

7 changed files with 255 additions and 29 deletions
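For context on how a driver opts in (this sketch is not part of the patch): the pool must be created under rtnl_lock() with PP_FLAG_DMA_MAP and PP_FLAG_ALLOW_UNREADABLE_NETMEM set, and with the slow-path netdev and queue_idx params filled in so page_pool_init() can look up the queue's dmabuf binding. A minimal sketch, assuming hypothetical driver state "netdev", "rxq_idx" and "ring_size":

/* Illustrative only, not part of this patch: driver-side page_pool
 * setup for an rx queue that may be bound to dmabuf devmem. Must run
 * under rtnl_lock(); page_pool_init() calls ASSERT_RTNL() when
 * PP_FLAG_ALLOW_UNREADABLE_NETMEM is set.
 */
struct page_pool_params pp_params = {
	.order		= 0,		/* mp_dmabuf_devmem_init() rejects p.order != 0 */
	.pool_size	= ring_size,
	.nid		= NUMA_NO_NODE,
	.dev		= netdev->dev.parent,
	.dma_dir	= DMA_FROM_DEVICE,
	.netdev		= netdev,
	.queue_idx	= rxq_idx,	/* lets page_pool_init() find the binding */
	/* PP_FLAG_DMA_MAP is required; PP_FLAG_DMA_SYNC_DEV must be off */
	.flags		= PP_FLAG_DMA_MAP | PP_FLAG_ALLOW_UNREADABLE_NETMEM,
};
struct page_pool *pool = page_pool_create(&pp_params);

if (IS_ERR(pool))
	return PTR_ERR(pool);

If no binding is installed on the queue, pool->mp_priv stays NULL and the pool falls back to the normal page allocator path.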
				
			
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -20,8 +20,18 @@
  * device driver responsibility
  */
 #define PP_FLAG_SYSTEM_POOL	BIT(2) /* Global system page_pool */
+
+/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
+ * this must be able to support unreadable netmem, where netmem_address() would
+ * return NULL. This flag should not be set for header page_pools.
+ *
+ * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
+ * page_pool_params.slow.queue_idx.
+ */
+#define PP_FLAG_ALLOW_UNREADABLE_NETMEM	BIT(3)
+
 #define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
-				 PP_FLAG_SYSTEM_POOL)
+				 PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
 
 /*
  * Fast allocation side cache array/stack
@@ -57,7 +67,9 @@ struct pp_alloc_cache {
  * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
  * @slow:	params with slowpath access only (initialization and Netlink)
  * @netdev:	netdev this pool will serve (leave as NULL if none or multiple)
- * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
+ * @queue_idx:	queue idx this page_pool is being created for.
+ * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
+ *		PP_FLAG_ALLOW_UNREADABLE_NETMEM.
  */
 struct page_pool_params {
 	struct_group_tagged(page_pool_params_fast, fast,
@@ -72,6 +84,7 @@ struct page_pool_params {
 	);
 	struct_group_tagged(page_pool_params_slow, slow,
 		struct net_device *netdev;
+		unsigned int queue_idx;
 		unsigned int	flags;
 /* private: used by test code only */
 		void (*init_callback)(netmem_ref netmem, void *arg);
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -18,6 +18,7 @@
 #include <trace/events/page_pool.h>
 
 #include "devmem.h"
+#include "mp_dmabuf_devmem.h"
 #include "page_pool_priv.h"
 
 /* Device memory support */
@@ -320,3 +321,69 @@ void dev_dmabuf_uninstall(struct net_device *dev)
 			}
 	}
 }
+
+/*** "Dmabuf devmem memory provider" ***/
+
+int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+	if (!binding)
+		return -EINVAL;
+
+	if (!pool->dma_map)
+		return -EOPNOTSUPP;
+
+	if (pool->dma_sync)
+		return -EOPNOTSUPP;
+
+	if (pool->p.order != 0)
+		return -E2BIG;
+
+	net_devmem_dmabuf_binding_get(binding);
+	return 0;
+}
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+	struct net_iov *niov;
+	netmem_ref netmem;
+
+	niov = net_devmem_alloc_dmabuf(binding);
+	if (!niov)
+		return 0;
+
+	netmem = net_iov_to_netmem(niov);
+
+	page_pool_set_pp_info(pool, netmem);
+
+	pool->pages_state_hold_cnt++;
+	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
+	return netmem;
+}
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+	net_devmem_dmabuf_binding_put(binding);
+}
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));
+
+	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+		return false;
+
+	if (WARN_ON_ONCE(refcount != 1))
+		return false;
+
+	page_pool_clear_pp_info(netmem);
+
+	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));
+
+	/* We don't want the page pool put_page()ing our net_iovs. */
+	return false;
+}
--- /dev/null
+++ b/net/core/mp_dmabuf_devmem.h (new file, 44 lines)
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Dmabuf device memory provider.
+ *
+ * Authors:	Mina Almasry <almasrymina@google.com>
+ *
+ */
+#ifndef _NET_MP_DMABUF_DEVMEM_H
+#define _NET_MP_DMABUF_DEVMEM_H
+
+#include <net/netmem.h>
+
+#if defined(CONFIG_NET_DEVMEM)
+int mp_dmabuf_devmem_init(struct page_pool *pool);
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp);
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool);
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem);
+#else
+static inline int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline netmem_ref
+mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+	return 0;
+}
+
+static inline void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+}
+
+static inline bool
+mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+	return false;
+}
+#endif
+
+#endif /* _NET_MP_DMABUF_DEVMEM_H */
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -4,8 +4,11 @@
 #include <net/netdev_queues.h>
 #include <net/netdev_rx_queue.h>
 
+#include "page_pool_priv.h"
+
 int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 {
+	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
 	void *new_mem, *old_mem;
 	int err;
 
@@ -31,6 +34,10 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 	if (err)
 		goto err_free_old_mem;
 
+	err = page_pool_check_memory_provider(dev, rxq);
+	if (err)
+		goto err_free_new_queue_mem;
+
 	err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx);
 	if (err)
 		goto err_free_new_queue_mem;
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 
+#include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
 #include <net/xdp.h>
 
@@ -24,6 +25,7 @@
 
 #include <trace/events/page_pool.h>
 
+#include "mp_dmabuf_devmem.h"
 #include "netmem_priv.h"
 #include "page_pool_priv.h"
 
@@ -190,6 +192,8 @@ static int page_pool_init(struct page_pool *pool,
 			  int cpuid)
 {
 	unsigned int ring_qsize = 1024; /* Default */
+	struct netdev_rx_queue *rxq;
+	int err;
 
 	page_pool_struct_check();
 
@@ -271,7 +275,37 @@ static int page_pool_init(struct page_pool *pool,
 	if (pool->dma_map)
 		get_device(pool->p.dev);
 
+	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
+		/* We rely on rtnl_lock()ing to make sure netdev_rx_queue
+		 * configuration doesn't change while we're initializing
+		 * the page_pool.
+		 */
+		ASSERT_RTNL();
+		rxq = __netif_get_rx_queue(pool->slow.netdev,
+					   pool->slow.queue_idx);
+		pool->mp_priv = rxq->mp_params.mp_priv;
+	}
+
+	if (pool->mp_priv) {
+		err = mp_dmabuf_devmem_init(pool);
+		if (err) {
+			pr_warn("%s() mem-provider init failed %d\n", __func__,
+				err);
+			goto free_ptr_ring;
+		}
+
+		static_branch_inc(&page_pool_mem_providers);
+	}
+
 	return 0;
+
+free_ptr_ring:
+	ptr_ring_cleanup(&pool->ring, NULL);
+#ifdef CONFIG_PAGE_POOL_STATS
+	if (!pool->system)
+		free_percpu(pool->recycle_stats);
+#endif
+	return err;
 }
 
 static void page_pool_uninit(struct page_pool *pool)
@@ -455,28 +489,6 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
 	return false;
 }
 
-static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
-{
-	netmem_set_pp(netmem, pool);
-	netmem_or_pp_magic(netmem, PP_SIGNATURE);
-
-	/* Ensuring all pages have been split into one fragment initially:
-	 * page_pool_set_pp_info() is only called once for every page when it
-	 * is allocated from the page allocator and page_pool_fragment_page()
-	 * is dirtying the same cache line as the page->pp_magic above, so
-	 * the overhead is negligible.
-	 */
-	page_pool_fragment_netmem(netmem, 1);
-	if (pool->has_init_callback)
-		pool->slow.init_callback(netmem, pool->slow.init_arg);
-}
-
-static void page_pool_clear_pp_info(netmem_ref netmem)
-{
-	netmem_clear_pp_magic(netmem);
-	netmem_set_pp(netmem, NULL);
-}
-
 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 						 gfp_t gfp)
 {
@@ -572,7 +584,10 @@ netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
 		return netmem;
 
 	/* Slow-path: cache empty, do real allocation */
-	netmem = __page_pool_alloc_pages_slow(pool, gfp);
+	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+		netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
+	else
+		netmem = __page_pool_alloc_pages_slow(pool, gfp);
 	return netmem;
 }
 EXPORT_SYMBOL(page_pool_alloc_netmem);
@@ -608,6 +623,28 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict)
 	return inflight;
 }
 
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
+{
+	netmem_set_pp(netmem, pool);
+	netmem_or_pp_magic(netmem, PP_SIGNATURE);
+
+	/* Ensuring all pages have been split into one fragment initially:
+	 * page_pool_set_pp_info() is only called once for every page when it
+	 * is allocated from the page allocator and page_pool_fragment_page()
+	 * is dirtying the same cache line as the page->pp_magic above, so
+	 * the overhead is negligible.
+	 */
+	page_pool_fragment_netmem(netmem, 1);
+	if (pool->has_init_callback)
+		pool->slow.init_callback(netmem, pool->slow.init_arg);
+}
+
+void page_pool_clear_pp_info(netmem_ref netmem)
+{
+	netmem_clear_pp_magic(netmem);
+	netmem_set_pp(netmem, NULL);
+}
+
 static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
 							 netmem_ref netmem)
 {
@@ -636,8 +673,13 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
 void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
 {
 	int count;
+	bool put;
 
-	__page_pool_release_page_dma(pool, netmem);
+	put = true;
+	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+		put = mp_dmabuf_devmem_release_page(pool, netmem);
+	else
+		__page_pool_release_page_dma(pool, netmem);
 
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
@@ -645,8 +687,10 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
 	trace_page_pool_state_release(pool, netmem, count);
 
-	page_pool_clear_pp_info(netmem);
-	put_page(netmem_to_page(netmem));
+	if (put) {
+		page_pool_clear_pp_info(netmem);
+		put_page(netmem_to_page(netmem));
+	}
 	/* An optimization would be to call __free_pages(page, pool->p.order)
 	 * knowing page is not part of page-cache (thus avoiding a
 	 * __page_cache_release() call).
@@ -965,6 +1009,12 @@ static void __page_pool_destroy(struct page_pool *pool)
 
 	page_pool_unlist(pool);
 	page_pool_uninit(pool);
+
+	if (pool->mp_priv) {
+		mp_dmabuf_devmem_destroy(pool);
+		static_branch_dec(&page_pool_mem_providers);
+	}
+
 	kfree(pool);
 }
 
--- a/net/core/page_pool_priv.h
+++ b/net/core/page_pool_priv.h
@@ -35,4 +35,24 @@ static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 	return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
 }
 
+#if defined(CONFIG_PAGE_POOL)
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem);
+void page_pool_clear_pp_info(netmem_ref netmem);
+int page_pool_check_memory_provider(struct net_device *dev,
+				    struct netdev_rx_queue *rxq);
+#else
+static inline void page_pool_set_pp_info(struct page_pool *pool,
+					 netmem_ref netmem)
+{
+}
+static inline void page_pool_clear_pp_info(netmem_ref netmem)
+{
+}
+static inline int page_pool_check_memory_provider(struct net_device *dev,
+						  struct netdev_rx_queue *rxq)
+{
+	return 0;
+}
+#endif
+
 #endif
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -4,8 +4,9 @@
 #include <linux/netdevice.h>
 #include <linux/xarray.h>
 #include <net/net_debug.h>
-#include <net/page_pool/types.h>
+#include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
+#include <net/page_pool/types.h>
 #include <net/sock.h>
 
 #include "page_pool_priv.h"
@@ -344,6 +345,30 @@ void page_pool_unlist(struct page_pool *pool)
 	mutex_unlock(&page_pools_lock);
 }
 
+int page_pool_check_memory_provider(struct net_device *dev,
+				    struct netdev_rx_queue *rxq)
+{
+	struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
+	struct page_pool *pool;
+	struct hlist_node *n;
+
+	if (!binding)
+		return 0;
+
+	mutex_lock(&page_pools_lock);
+	hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
+		if (pool->mp_priv != binding)
+			continue;
+
+		if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
+			mutex_unlock(&page_pools_lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&page_pools_lock);
+	return -ENODATA;
+}
+
 static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
 {
 	struct page_pool *pool;