	ixgbevf: add support for using order 1 pages to receive large frames
Based on commit 8649aaef40 ("igb: Add support for using order 1 pages to receive large frames")
Add support for using 3K buffers in order 1 pages. We reserve 1K for now
so that space is available for tailroom and headroom once build_skb
support is enabled.
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
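The arithmetic behind the 1K reservation, as a minimal user-space sketch (assuming 4 KiB base pages; not driver code): an order 1 page is split into two halves, and a 3K buffer in a 4K half leaves 1K spare.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;          /* assumed base PAGE_SIZE */
	unsigned int pg_size = page_size << 1;  /* order 1 page: 8192 bytes */
	unsigned int truesize = pg_size / 2;    /* each half backs one buffer */
	unsigned int bufsz = 3072;              /* the new 3K Rx buffer */

	printf("order 1 page: %u bytes, %u per half\n", pg_size, truesize);
	printf("3K buffer leaves %u bytes for head/tail room\n",
	       truesize - bufsz);
	return 0;
}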
This commit is contained in:

parent bc04347f5b
commit f15c5ba5b6

2 changed files with 92 additions and 24 deletions

drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -89,17 +89,11 @@ struct ixgbevf_rx_queue_stats {
 };
 
 enum ixgbevf_ring_state_t {
+	__IXGBEVF_RX_3K_BUFFER,
 	__IXGBEVF_TX_DETECT_HANG,
 	__IXGBEVF_HANG_CHECK_ARMED,
 };
 
-#define check_for_tx_hang(ring) \
-	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
@@ -156,12 +150,20 @@ struct ixgbevf_ring {
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256	256    /* Used for packet split */
 #define IXGBEVF_RXBUFFER_2048	2048
+#define IXGBEVF_RXBUFFER_3072	3072
 
 #define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ	IXGBEVF_RXBUFFER_2048
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
+#define IXGBEVF_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBEVF_MAX_FRAME_BUILD_SKB \
+	(SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
+#else
+#define IXGBEVF_MAX_FRAME_BUILD_SKB	IXGBEVF_RXBUFFER_2048
+#endif
+
 #define IXGBE_TX_FLAGS_CSUM		BIT(0)
 #define IXGBE_TX_FLAGS_VLAN		BIT(1)
 #define IXGBE_TX_FLAGS_TSO		BIT(2)
@@ -170,6 +172,40 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
+#define ring_uses_large_buffer(ring) \
+	test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define set_ring_uses_large_buffer(ring) \
+	set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define clear_ring_uses_large_buffer(ring) \
+	clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+
+static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return IXGBEVF_RXBUFFER_3072;
+#endif
+	return IXGBEVF_RXBUFFER_2048;
+}
+
+static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return 1;
+#endif
+	return 0;
+}
+
+#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
+
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
 struct ixgbevf_ring_container {
 	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
 	unsigned int total_bytes;	/* total bytes processed this int */
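For illustration, the new helpers collapse to a simple two-way choice when PAGE_SIZE is 4 KiB. A stand-alone sketch follows; the "large" flag stands in for the __IXGBEVF_RX_3K_BUFFER ring state bit, and all names here are hypothetical mirrors, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_ASSUMED 4096u  /* assumption: 4 KiB base pages */

static unsigned int rx_bufsz(bool large)    { return large ? 3072u : 2048u; }
static unsigned int rx_pg_order(bool large) { return large ? 1u : 0u; }
static unsigned int rx_pg_size(bool large)
{
	return PAGE_SIZE_ASSUMED << rx_pg_order(large);
}

int main(void)
{
	for (int large = 0; large <= 1; large++)
		printf("large=%d: bufsz=%u order=%u pg_size=%u\n",
		       large, rx_bufsz(large), rx_pg_order(large),
		       rx_pg_size(large));
	return 0;
}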
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

@@ -565,21 +565,22 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 		return true;
 
 	/* alloc new page for storage */
-	page = dev_alloc_page();
+	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 ixgbevf_rx_pg_size(rx_ring),
 				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_page(page);
+		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
@@ -621,7 +622,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 						 bi->page_offset,
-						 IXGBEVF_RX_BUFSZ,
+						 ixgbevf_rx_bufsz(rx_ring),
 						 DMA_FROM_DEVICE);
 
 		/* Refresh the desc even if pkt_addr didn't change
@@ -750,13 +751,16 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
 		return false;
 
 	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+	rx_buffer->page_offset ^= truesize;
 
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
-	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+#define IXGBEVF_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
+
+	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
 		return false;
 
 #endif
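The XOR flip above ping-pongs page_offset between the two buffer halves of the page, so each half is reused alternately. A toy demonstration with assumed sizes (truesize = 4096 for the 3K/order-1 case on 4 KiB pages):

#include <stdio.h>

int main(void)
{
	unsigned int truesize = 4096;  /* assumed: pg_size(8192) / 2 */
	unsigned int offset = 0;

	for (int i = 0; i < 4; i++) {
		printf("buffer %d at page offset %u\n", i, offset);
		offset ^= truesize;    /* flip to the other half */
	}
	return 0;
}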
@@ -797,7 +801,7 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 	struct page *page = rx_buffer->page;
 	void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = IXGBEVF_RX_BUFSZ;
+	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
 #else
 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
@@ -888,8 +892,8 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 		 * any references we are holding to it
 		 */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     PAGE_SIZE, DMA_FROM_DEVICE,
-				     IXGBEVF_RX_DMA_ATTR);
+				     ixgbevf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
 
 | 
			
		|||
 | 
			
		||||
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
 | 
			
		||||
 | 
			
		||||
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 | 
			
		||||
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
 | 
			
		||||
				     struct ixgbevf_ring *ring, int index)
 | 
			
		||||
{
 | 
			
		||||
	struct ixgbe_hw *hw = &adapter->hw;
 | 
			
		||||
	u32 srrctl;
 | 
			
		||||
| 
						 | 
				
			
@@ -1594,7 +1599,10 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 	srrctl = IXGBE_SRRCTL_DROP_EN;
 
 	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	if (ring_uses_large_buffer(ring))
+		srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
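The SRRCTL packet-buffer field is programmed in 1 KiB units, so the right shift converts a byte count to that granularity. A quick check of the two encodings; the shift value of 10 is an assumption taken from the ixgbe register definitions, so treat the constant here as illustrative:

#include <stdio.h>

#define BSIZEPKT_SHIFT 10  /* assumed value of IXGBE_SRRCTL_BSIZEPKT_SHIFT */

int main(void)
{
	unsigned int sizes[] = { 2048, 3072 };

	for (int i = 0; i < 2; i++)
		printf("%u-byte buffer -> BSIZEPKT = %u (KiB)\n",
		       sizes[i], sizes[i] >> BSIZEPKT_SHIFT);
	return 0;
}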
@@ -1766,7 +1774,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ring->next_to_use = 0;
 	ring->next_to_alloc = 0;
 
-	ixgbevf_configure_srrctl(adapter, reg_idx);
+	ixgbevf_configure_srrctl(adapter, ring, reg_idx);
 
 	/* allow any size packet since we can handle overflow */
 	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
@@ -1778,6 +1786,26 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *rx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	/* set build_skb and buffer size flags */
+	clear_ring_uses_large_buffer(rx_ring);
+
+	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+		return;
+
+#if (PAGE_SIZE < 8192)
+	if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
+		return;
+
+	set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
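The decision above compares the worst-case frame against what a 2K buffer can hold once skb overheads are subtracted. A rough sketch with assumed overhead values; the skb_shared_info size and padding vary by kernel config and architecture, so the numbers are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int max_frame = mtu + 14 + 4; /* + ETH_HLEN + ETH_FCS_LEN */
	unsigned int shinfo = 320;             /* assumed skb_shared_info size */
	unsigned int skb_pad = 64;             /* assumed NET_SKB_PAD + align */
	unsigned int limit = 2048 - shinfo - skb_pad;

	printf("max_frame=%u vs build_skb limit~%u -> %s\n",
	       max_frame, limit,
	       max_frame <= limit ? "2K buffers" : "3K buffers, order 1 pages");
	return 0;
}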
@@ -1805,8 +1833,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		ixgbevf_set_rx_buffer_len(adapter, rx_ring);
+		ixgbevf_configure_rx_ring(adapter, rx_ring);
+	}
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -2135,13 +2167,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      rx_buffer->dma,
 					      rx_buffer->page_offset,
-					      IXGBEVF_RX_BUFSZ,
+					      ixgbevf_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
 
 		/* free resources associated with mapping */
 		dma_unmap_page_attrs(rx_ring->dev,
 				     rx_buffer->dma,
-				     PAGE_SIZE,
+				     ixgbevf_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IXGBEVF_RX_DMA_ATTR);
 