Mirror of https://github.com/torvalds/linux.git, synced 2025-11-01 17:18:25 +02:00
virtio_net: xsk: rx: support recv merge mode
Support AF-XDP for merge mode.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: https://patch.msgid.link/20240708112537.96291-11-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a4e7ba7027
commit 99c861b44e
1 changed file with 144 additions and 0 deletions
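For orientation before the diff: in mergeable-rx-buffer mode a single packet may arrive split across `hdr->num_buffers` receive buffers, and the new code copies each follow-on XSK buffer into a page fragment appended to the head skb, so the XSK buffer itself can go straight back to the pool. The standalone sketch below models only that append-and-release loop; every name in it (`struct frag`, `append_merge`, `MAX_PKT`) is illustrative, not a kernel API.

/* Simplified model of the append loop the patch adds; illustrative only,
 * none of these names are kernel APIs. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PKT 4096

struct frag {                     /* stands in for one received XSK buffer */
        uint8_t data[256];
        uint32_t len;
};

/* Mirrors the idea of xsk_append_merge_buffer(): copy each follow-on
 * fragment out of the (hypothetical) pool buffer, release the buffer, and
 * on failure drop the rest of this packet's buffers. */
static int append_merge(uint8_t *pkt, uint32_t *pkt_len,
                        struct frag *frags, uint32_t num_buf)
{
        for (uint32_t i = 1; i < num_buf; i++) {
                struct frag *f = &frags[i];

                if (*pkt_len + f->len > MAX_PKT) {
                        /* analogous to xsk_drop_follow_bufs(): consume and
                         * free the remaining buffers of this packet */
                        fprintf(stderr, "dropping %u follow-on buffers\n",
                                num_buf - i);
                        return -1;
                }
                memcpy(pkt + *pkt_len, f->data, f->len);
                *pkt_len += f->len;
                /* the kernel returns the XSK buffer to the pool here */
        }
        return 0;
}

int main(void)
{
        struct frag frags[3] = {
                { .data = "head-", .len = 5 },
                { .data = "mid-",  .len = 4 },
                { .data = "tail",  .len = 4 },
        };
        uint8_t pkt[MAX_PKT];
        uint32_t pkt_len = frags[0].len;

        memcpy(pkt, frags[0].data, frags[0].len);
        if (!append_merge(pkt, &pkt_len, frags, 3))
                printf("reassembled %u bytes: %.*s\n", pkt_len,
                       (int)pkt_len, pkt);
        return 0;
}

The point of the sketch is only the ownership pattern the patch uses: data is copied into a freshly allocated fragment so the XSK buffer can be recycled immediately rather than staying pinned under the skb.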
drivers/net/virtio_net.c

@@ -504,6 +504,10 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
                                struct virtnet_rq_stats *stats);
 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
                                  struct sk_buff *skb, u8 flags);
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+                                               struct sk_buff *curr_skb,
+                                               struct page *page, void *buf,
+                                               int len, int truesize);
 
 static bool is_xdp_frame(void *ptr)
 {
@@ -984,6 +988,11 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
 
         rq = &vi->rq[i];
 
+        if (rq->xsk_pool) {
+                xsk_buff_free((struct xdp_buff *)buf);
+                return;
+        }
+
         if (!vi->big_packets || vi->mergeable_rx_bufs)
                 virtnet_rq_unmap(rq, buf, 0);
 
@@ -1152,6 +1161,139 @@ static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct
         }
 }
 
+static void xsk_drop_follow_bufs(struct net_device *dev,
+                                 struct receive_queue *rq,
+                                 u32 num_buf,
+                                 struct virtnet_rq_stats *stats)
+{
+        struct xdp_buff *xdp;
+        u32 len;
+
+        while (num_buf-- > 1) {
+                xdp = virtqueue_get_buf(rq->vq, &len);
+                if (unlikely(!xdp)) {
+                        pr_debug("%s: rx error: %d buffers missing\n",
+                                 dev->name, num_buf);
+                        DEV_STATS_INC(dev, rx_length_errors);
+                        break;
+                }
+                u64_stats_add(&stats->bytes, len);
+                xsk_buff_free(xdp);
+        }
+}
+
+static int xsk_append_merge_buffer(struct virtnet_info *vi,
+                                   struct receive_queue *rq,
+                                   struct sk_buff *head_skb,
+                                   u32 num_buf,
+                                   struct virtio_net_hdr_mrg_rxbuf *hdr,
+                                   struct virtnet_rq_stats *stats)
+{
+        struct sk_buff *curr_skb;
+        struct xdp_buff *xdp;
+        u32 len, truesize;
+        struct page *page;
+        void *buf;
+
+        curr_skb = head_skb;
+
+        while (--num_buf) {
+                buf = virtqueue_get_buf(rq->vq, &len);
+                if (unlikely(!buf)) {
+                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
+                                 vi->dev->name, num_buf,
+                                 virtio16_to_cpu(vi->vdev,
+                                                 hdr->num_buffers));
+                        DEV_STATS_INC(vi->dev, rx_length_errors);
+                        return -EINVAL;
+                }
+
+                u64_stats_add(&stats->bytes, len);
+
+                xdp = buf_to_xdp(vi, rq, buf, len);
+                if (!xdp)
+                        goto err;
+
+                buf = napi_alloc_frag(len);
+                if (!buf) {
+                        xsk_buff_free(xdp);
+                        goto err;
+                }
+
+                memcpy(buf, xdp->data - vi->hdr_len, len);
+
+                xsk_buff_free(xdp);
+
+                page = virt_to_page(buf);
+
+                truesize = len;
+
+                curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+                                                   buf, len, truesize);
+                if (!curr_skb) {
+                        put_page(page);
+                        goto err;
+                }
+        }
+
+        return 0;
+
+err:
+        xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
+        return -EINVAL;
+}
+
+static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
+                                                 struct receive_queue *rq, struct xdp_buff *xdp,
+                                                 unsigned int *xdp_xmit,
+                                                 struct virtnet_rq_stats *stats)
+{
+        struct virtio_net_hdr_mrg_rxbuf *hdr;
+        struct bpf_prog *prog;
+        struct sk_buff *skb;
+        u32 ret, num_buf;
+
+        hdr = xdp->data - vi->hdr_len;
+        num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+
+        ret = XDP_PASS;
+        rcu_read_lock();
+        prog = rcu_dereference(rq->xdp_prog);
+        /* TODO: support multi buffer. */
+        if (prog && num_buf == 1)
+                ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+        rcu_read_unlock();
+
+        switch (ret) {
+        case XDP_PASS:
+                skb = xsk_construct_skb(rq, xdp);
+                if (!skb)
+                        goto drop_bufs;
+
+                if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
+                        dev_kfree_skb(skb);
+                        goto drop;
+                }
+
+                return skb;
+
+        case XDP_TX:
+        case XDP_REDIRECT:
+                return NULL;
+
+        default:
+                /* drop packet */
+                xsk_buff_free(xdp);
+        }
+
+drop_bufs:
+        xsk_drop_follow_bufs(dev, rq, num_buf, stats);
+
+drop:
+        u64_stats_inc(&stats->drops);
+        return NULL;
+}
+
 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
                                     void *buf, u32 len,
                                     unsigned int *xdp_xmit,
@@ -1181,6 +1323,8 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu
 
         if (!vi->mergeable_rx_bufs)
                 skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+        else
+                skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
 
         if (skb)
                 virtnet_receive_done(vi, rq, skb, flags);
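As background for the `hdr->num_buffers` handling above: with VIRTIO_NET_F_MRG_RXBUF the device places a `virtio_net_hdr_mrg_rxbuf` in front of the packet data, and `num_buffers` counts every receive buffer the packet occupies, head included, which is why the loops above consume `num_buf - 1` follow-on buffers. Below is a small standalone sketch of the dispatch decision; the struct layout is simplified (plain host-endian fields rather than `__virtio16`) and the helper names are made up.

#include <stdint.h>
#include <stdio.h>

struct vnet_hdr {                 /* simplified stand-in for virtio_net_hdr */
        uint8_t  flags;
        uint8_t  gso_type;
        uint16_t hdr_len;
        uint16_t gso_size;
        uint16_t csum_start;
        uint16_t csum_offset;
};

struct vnet_hdr_mrg {             /* stand-in for virtio_net_hdr_mrg_rxbuf */
        struct vnet_hdr hdr;
        uint16_t num_buffers;     /* buffers used by this packet, head included */
};

/* Mirrors the dispatch in the patch: XDP is only run when the packet fits
 * in a single buffer; otherwise the skb path appends the follow-on buffers. */
static const char *rx_path(const struct vnet_hdr_mrg *h, int have_xdp_prog)
{
        if (have_xdp_prog && h->num_buffers == 1)
                return "run XDP, then pass/tx/redirect/drop";
        return h->num_buffers == 1 ? "build skb from the head buffer"
                                   : "build skb, then append follow-on buffers";
}

int main(void)
{
        struct vnet_hdr_mrg h = { .num_buffers = 3 };

        printf("num_buffers=%u -> %s\n", (unsigned)h.num_buffers, rx_path(&h, 1));
        h.num_buffers = 1;
        printf("num_buffers=%u -> %s\n", (unsigned)h.num_buffers, rx_path(&h, 1));
        return 0;
}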