mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	net: add get_netmem/put_netmem support
Currently net_iovs support only pp ref counts, and do not support a page ref equivalent. This is fine for the RX path as net_iovs are used exclusively with the pp and only pp refcounting is needed there. The TX path however does not use pp ref counts, thus, support for get_page/put_page equivalent is needed for netmem. Support get_netmem/put_netmem. Check the type of the netmem before passing it to page or net_iov specific code to obtain a page ref equivalent. For dmabuf net_iovs, we obtain a ref on the underlying binding. This ensures the entire binding doesn't disappear until all the net_iovs have been put_netmem'ed. We do not need to track the refcount of individual dmabuf net_iovs as we don't allocate/free them from a pool similar to what the buddy allocator does for pages. This code is written to be extensible by other net_iov implementers. get_netmem/put_netmem will check the type of the netmem and route it to the correct helper: pages -> [get|put]_page() dmabuf net_iovs -> net_devmem_[get|put]_net_iov() new net_iovs -> new helpers Signed-off-by: Mina Almasry <almasrymina@google.com> Acked-by: Stanislav Fomichev <sdf@fomichev.me> Link: https://patch.msgid.link/20250508004830.4100853-3-almasrymina@google.com Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
		
							parent
							
								
									03e96b8c11
								
							
						
					
					
						commit
						e9f3d61db5
					
				
					 5 changed files with 65 additions and 2 deletions
				
			
		| 
						 | 
				
			
			@ -17,7 +17,7 @@
 | 
			
		|||
 */
 | 
			
		||||
/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag. The frag may
 * be backed by a page or by a net_iov; get_netmem() routes to the correct
 * ref-taking helper for either backing type, so no page-specific call is
 * needed here.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	/* NOTE: the scraped diff showed both the old get_page() line and the
	 * new get_netmem() line; only the netmem variant belongs in the
	 * post-image — calling both would take a double reference.
	 */
	get_netmem(skb_frag_netmem(frag));
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
| 
						 | 
				
			
			@ -40,7 +40,7 @@ static inline void skb_page_unref(netmem_ref netmem, bool recycle)
 | 
			
		|||
	if (recycle && napi_pp_put_page(netmem))
 | 
			
		||||
		return;
 | 
			
		||||
#endif
 | 
			
		||||
	put_page(netmem_to_page(netmem));
 | 
			
		||||
	put_netmem(netmem);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -273,4 +273,7 @@ static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
 | 
			
		|||
	return __netmem_clear_lsb(netmem)->dma_addr;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void get_netmem(netmem_ref netmem);
 | 
			
		||||
void put_netmem(netmem_ref netmem);
 | 
			
		||||
 | 
			
		||||
#endif /* _NET_NETMEM_H */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -295,6 +295,16 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
 | 
			
		|||
	return ERR_PTR(err);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* Take a reference on the dmabuf binding backing @niov. Dmabuf net_iovs
 * are not individually refcounted; holding the binding ref keeps the
 * binding (and the underlying dmabuf mapping) alive until the matching
 * net_devmem_put_net_iov() — the page-ref equivalent for the TX path.
 */
void net_devmem_get_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
}
 | 
			
		||||
 | 
			
		||||
/* Drop the binding reference taken by net_devmem_get_net_iov(). Once the
 * last reference is gone the binding releases itself and unmaps the
 * underlying dmabuf (see net_devmem_dmabuf_binding_put()).
 */
void net_devmem_put_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
}
 | 
			
		||||
 | 
			
		||||
/*** "Dmabuf devmem memory provider" ***/
 | 
			
		||||
 | 
			
		||||
int mp_dmabuf_devmem_init(struct page_pool *pool)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -29,6 +29,10 @@ struct net_devmem_dmabuf_binding {
 | 
			
		|||
	 * The binding undoes itself and unmaps the underlying dmabuf once all
 | 
			
		||||
	 * those refs are dropped and the binding is no longer desired or in
 | 
			
		||||
	 * use.
 | 
			
		||||
	 *
 | 
			
		||||
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
 | 
			
		||||
	 * reference, making sure that the binding remains alive until all the
 | 
			
		||||
	 * net_iovs are no longer used.
 | 
			
		||||
	 */
 | 
			
		||||
	refcount_t ref;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -111,6 +115,9 @@ net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
 | 
			
		|||
	__net_devmem_dmabuf_binding_free(binding);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void net_devmem_get_net_iov(struct net_iov *niov);
 | 
			
		||||
void net_devmem_put_net_iov(struct net_iov *niov);
 | 
			
		||||
 | 
			
		||||
struct net_iov *
 | 
			
		||||
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
 | 
			
		||||
void net_devmem_free_dmabuf(struct net_iov *ppiov);
 | 
			
		||||
| 
						 | 
				
			
			@ -120,6 +127,19 @@ bool net_is_devmem_iov(struct net_iov *niov);
 | 
			
		|||
#else
 | 
			
		||||
struct net_devmem_dmabuf_binding;
 | 
			
		||||
 | 
			
		||||
/* Stub for builds without devmem support (the #else branch of the devmem
 * config guard): there is no binding to release, so this is a no-op.
 */
static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}
 | 
			
		||||
 | 
			
		||||
/* Stub for builds without devmem support: no dmabuf binding exists, so
 * taking a net_iov reference is a no-op.
 */
static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}
 | 
			
		||||
 | 
			
		||||
/* Stub for builds without devmem support: nothing to release; no-op. */
static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}
 | 
			
		||||
 | 
			
		||||
static inline void
 | 
			
		||||
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -89,6 +89,7 @@
 | 
			
		|||
#include <linux/textsearch.h>
 | 
			
		||||
 | 
			
		||||
#include "dev.h"
 | 
			
		||||
#include "devmem.h"
 | 
			
		||||
#include "netmem_priv.h"
 | 
			
		||||
#include "sock_destructor.h"
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -7313,3 +7314,32 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
 | 
			
		|||
	return false;
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
 | 
			
		||||
 | 
			
		||||
/**
 * get_netmem - obtain a reference on a netmem
 * @netmem: netmem to take a reference on
 *
 * Page-backed netmem takes a page ref via get_page(). Dmabuf net_iov
 * netmem takes a reference on the underlying binding instead, keeping the
 * whole binding alive until every net_iov has been put_netmem()'ed —
 * dmabuf net_iovs are not refcounted individually. Other net_iov types
 * take no reference here (extensible: new implementers add their own
 * helper in the net_iov branch).
 */
void get_netmem(netmem_ref netmem)
{
	struct net_iov *niov;

	if (netmem_is_net_iov(netmem)) {
		niov = netmem_to_net_iov(netmem);
		/* Reuse niov instead of converting netmem a second time. */
		if (net_is_devmem_iov(niov))
			net_devmem_get_net_iov(niov);
		return;
	}

	get_page(netmem_to_page(netmem));
}
EXPORT_SYMBOL(get_netmem);
 | 
			
		||||
 | 
			
		||||
/**
 * put_netmem - release a reference on a netmem
 * @netmem: netmem to release the reference on
 *
 * Counterpart of get_netmem(): page-backed netmem drops a page ref via
 * put_page(); dmabuf net_iov netmem drops a reference on the underlying
 * binding, which frees/unmaps itself once the last reference is gone.
 * Other net_iov types are no-ops here.
 */
void put_netmem(netmem_ref netmem)
{
	struct net_iov *niov;

	if (netmem_is_net_iov(netmem)) {
		niov = netmem_to_net_iov(netmem);
		/* Reuse niov instead of converting netmem a second time. */
		if (net_is_devmem_iov(niov))
			net_devmem_put_net_iov(niov);
		return;
	}

	put_page(netmem_to_page(netmem));
}
EXPORT_SYMBOL(put_netmem);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue