mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	xsk: Add API to check for available entries in FQ
Add a function that checks whether the Fill Ring has the specified number of descriptors available. It will be useful for mlx5e, which wants to check in advance whether it can allocate a bulk of RX descriptors, to get the best performance. Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com> Signed-off-by: Tariq Toukan <tariqt@mellanox.com> Acked-by: Saeed Mahameed <saeedm@mellanox.com> Acked-by: Björn Töpel <bjorn.topel@intel.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
		
							parent
							
								
									e18953240d
								
							
						
					
					
						commit
						d57d76428a
					
				
					 3 changed files with 41 additions and 0 deletions
				
			
		| 
						 | 
				
			
			@ -77,6 +77,7 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 | 
			
		|||
void xsk_flush(struct xdp_sock *xs);
 | 
			
		||||
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
 | 
			
		||||
/* Used from netdev driver */
 | 
			
		||||
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
 | 
			
		||||
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
 | 
			
		||||
void xsk_umem_discard_addr(struct xdp_umem *umem);
 | 
			
		||||
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 | 
			
		||||
| 
						 | 
				
			
			@ -99,6 +100,16 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 | 
			
		|||
}
 | 
			
		||||
 | 
			
		||||
/* Reuse-queue aware version of FILL queue helpers */
 | 
			
		||||
/* Reuse-queue aware availability check: report whether @cnt addresses can
 * be obtained, counting the reuse queue first and falling back to the
 * FILL queue for the remainder.
 */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	u32 reusable = umem->fq_reuse->length;

	/* Enough reused addresses alone, or top up from the FILL queue. */
	return reusable >= cnt || xsk_umem_has_addrs(umem, cnt - reusable);
}
 | 
			
		||||
 | 
			
		||||
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
 | 
			
		||||
{
 | 
			
		||||
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
 | 
			
		||||
| 
						 | 
				
			
			@ -146,6 +157,11 @@ static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 | 
			
		|||
	return false;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* NOTE(review): presumably the !CONFIG_XDP_SOCKETS stub — the enclosing
 * #ifdef is outside this view; confirm.  Always reports no addresses
 * available so callers take their no-XSK path.
 */
static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}
 | 
			
		||||
 | 
			
		||||
static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
 | 
			
		||||
{
 | 
			
		||||
	return NULL;
 | 
			
		||||
| 
						 | 
				
			
			@ -200,6 +216,11 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
 | 
			
		|||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* NOTE(review): presumably the !CONFIG_XDP_SOCKETS stub of the
 * reuse-queue aware check — enclosing #ifdef not visible here; confirm.
 * Always reports no addresses available.
 */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}
 | 
			
		||||
 | 
			
		||||
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
 | 
			
		||||
{
 | 
			
		||||
	return NULL;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -37,6 +37,12 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 | 
			
		|||
		READ_ONCE(xs->umem->fq);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* Driver-facing wrapper: check whether the umem's FILL queue currently
 * holds at least @cnt addresses.  Lets drivers (e.g. mlx5e) verify up
 * front that a bulk RX allocation can succeed.
 */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_has_addrs(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);
 | 
			
		||||
 | 
			
		||||
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
 | 
			
		||||
{
 | 
			
		||||
	return xskq_peek_addr(umem->fq, addr);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -117,6 +117,20 @@ static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
 | 
			
		|||
	return q->nentries - (producer - q->cons_tail);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* Return true if at least @cnt entries are available for consumption on
 * @q.  Checks the locally cached producer index first; only on a miss is
 * the shared ring's producer index re-read (READ_ONCE guards the lockless
 * read of the producer-updated index).  Unsigned subtraction handles
 * index wraparound.
 */
static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries >= cnt)
		return true;

	/* Refresh the local pointer. */
	q->prod_tail = READ_ONCE(q->ring->producer);
	entries = q->prod_tail - q->cons_tail;

	return entries >= cnt;
}
 | 
			
		||||
 | 
			
		||||
/* UMEM queue */
 | 
			
		||||
 | 
			
		||||
static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue