linux/net/core/devmem.h
Mina Almasry e9f3d61db5 net: add get_netmem/put_netmem support
Currently net_iovs support only page pool (pp) ref counts, and do not
have a page ref equivalent.

This is fine for the RX path, where net_iovs are used exclusively with
the pp and only pp refcounting is needed. The TX path, however, does not
use pp ref counts, so netmem needs a get_page/put_page equivalent.

Add get_netmem/put_netmem. They check the type of the netmem before
passing it to page- or net_iov-specific code to obtain a page ref
equivalent.

For dmabuf net_iovs, we obtain a ref on the underlying binding. This
ensures the entire binding doesn't disappear until all the net_iovs have
been put_netmem'ed. We do not need to track the refcount of individual
dmabuf net_iovs, as we do not allocate/free them from a pool the way the
buddy allocator does for pages.
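
Under this scheme the per-iov helpers reduce to taking and dropping a
ref on the iov's binding. A minimal sketch, built only from the helpers
declared in this header (the bodies in the applied patch may differ
slightly):

	void net_devmem_get_net_iov(struct net_iov *niov)
	{
		net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
	}

	void net_devmem_put_net_iov(struct net_iov *niov)
	{
		net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
	}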

This code is written to be extensible by other net_iov implementers.
get_netmem/put_netmem will check the type of the netmem and route it to
the correct helper:

pages           -> [get|put]_page()
dmabuf net_iovs -> net_devmem_[get|put]_net_iov()
new net_iovs    -> new helpers
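
A sketch of that dispatch, assuming the netmem_is_net_iov() /
netmem_to_page() / netmem_to_net_iov() casting helpers from
<net/netmem.h>:

	void get_netmem(netmem_ref netmem)
	{
		if (netmem_is_net_iov(netmem))
			net_devmem_get_net_iov(netmem_to_net_iov(netmem));
		else
			get_page(netmem_to_page(netmem));
	}

	void put_netmem(netmem_ref netmem)
	{
		if (netmem_is_net_iov(netmem))
			net_devmem_put_net_iov(netmem_to_net_iov(netmem));
		else
			put_page(netmem_to_page(netmem));
	}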

Signed-off-by: Mina Almasry <almasrymina@google.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250508004830.4100853-3-almasrymina@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
2025-05-13 11:12:48 +02:00

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxq's this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;
};
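
/*
 * Illustrative lifecycle sketch (not part of the original header), tying
 * together the ref holders described in the struct comment above:
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *						// user ref, held via netlink
 *	net_devmem_dmabuf_binding_get(binding);	// pp / net_iov ref
 *	...
 *	net_devmem_dmabuf_binding_put(binding);	// drop pp / net_iov ref
 *	net_devmem_unbind_dmabuf(binding);	// drop the user ref; the last
 *						// put frees the binding via
 *						// __net_devmem_dmabuf_binding_free()
 */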
#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}

void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);

#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}
#endif

#endif /* _NET_DEVMEM_H */