mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	ixgbe: Improve performance and reduce size of ixgbe_tx_map
This change is meant to both improve the performance and reduce the size of ixgbe_tx_map. To do this I have expanded the work done in the main loop by pushing first into tx_buffer. This allows us to pull in the dma_mapping_error check, the tx_buffer value assignment, and the initial DMA value assignment to the Tx descriptor. The net result is that the function reduces in size by a little over 100 bytes and is about 1% or 2% faster. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
This commit is contained in:
		
							parent
							
								
									472148c320
								
							
						
					
					
						commit
						ec718254cb
					
				
					 1 changed file with 19 additions and 23 deletions
				
			
		| 
						 | 
				
			
			@ -6091,21 +6091,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 | 
			
		|||
			 struct ixgbe_tx_buffer *first,
 | 
			
		||||
			 const u8 hdr_len)
 | 
			
		||||
{
 | 
			
		||||
	dma_addr_t dma;
 | 
			
		||||
	struct sk_buff *skb = first->skb;
 | 
			
		||||
	struct ixgbe_tx_buffer *tx_buffer;
 | 
			
		||||
	union ixgbe_adv_tx_desc *tx_desc;
 | 
			
		||||
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 | 
			
		||||
	unsigned int data_len = skb->data_len;
 | 
			
		||||
	unsigned int size = skb_headlen(skb);
 | 
			
		||||
	unsigned int paylen = skb->len - hdr_len;
 | 
			
		||||
	struct skb_frag_struct *frag;
 | 
			
		||||
	dma_addr_t dma;
 | 
			
		||||
	unsigned int data_len, size;
 | 
			
		||||
	u32 tx_flags = first->tx_flags;
 | 
			
		||||
	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
 | 
			
		||||
	u16 i = tx_ring->next_to_use;
 | 
			
		||||
 | 
			
		||||
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
 | 
			
		||||
 | 
			
		||||
	ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
 | 
			
		||||
	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
 | 
			
		||||
 | 
			
		||||
	size = skb_headlen(skb);
 | 
			
		||||
	data_len = skb->data_len;
 | 
			
		||||
 | 
			
		||||
#ifdef IXGBE_FCOE
 | 
			
		||||
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 | 
			
		||||
| 
						 | 
				
			
			@ -6119,16 +6120,19 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 | 
			
		|||
 | 
			
		||||
#endif
 | 
			
		||||
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
 | 
			
		||||
	if (dma_mapping_error(tx_ring->dev, dma))
 | 
			
		||||
		goto dma_error;
 | 
			
		||||
 | 
			
		||||
	/* record length, and DMA address */
 | 
			
		||||
	dma_unmap_len_set(first, len, size);
 | 
			
		||||
	dma_unmap_addr_set(first, dma, dma);
 | 
			
		||||
	tx_buffer = first;
 | 
			
		||||
 | 
			
		||||
	tx_desc->read.buffer_addr = cpu_to_le64(dma);
 | 
			
		||||
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
 | 
			
		||||
		if (dma_mapping_error(tx_ring->dev, dma))
 | 
			
		||||
			goto dma_error;
 | 
			
		||||
 | 
			
		||||
		/* record length, and DMA address */
 | 
			
		||||
		dma_unmap_len_set(tx_buffer, len, size);
 | 
			
		||||
		dma_unmap_addr_set(tx_buffer, dma, dma);
 | 
			
		||||
 | 
			
		||||
		tx_desc->read.buffer_addr = cpu_to_le64(dma);
 | 
			
		||||
 | 
			
		||||
	for (;;) {
 | 
			
		||||
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
 | 
			
		||||
			tx_desc->read.cmd_type_len =
 | 
			
		||||
				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
 | 
			
		||||
| 
						 | 
				
			
			@ -6139,12 +6143,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 | 
			
		|||
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 | 
			
		||||
				i = 0;
 | 
			
		||||
			}
 | 
			
		||||
			tx_desc->read.olinfo_status = 0;
 | 
			
		||||
 | 
			
		||||
			dma += IXGBE_MAX_DATA_PER_TXD;
 | 
			
		||||
			size -= IXGBE_MAX_DATA_PER_TXD;
 | 
			
		||||
 | 
			
		||||
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
 | 
			
		||||
			tx_desc->read.olinfo_status = 0;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if (likely(!data_len))
 | 
			
		||||
| 
						 | 
				
			
			@ -6158,6 +6162,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 | 
			
		|||
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 | 
			
		||||
			i = 0;
 | 
			
		||||
		}
 | 
			
		||||
		tx_desc->read.olinfo_status = 0;
 | 
			
		||||
 | 
			
		||||
#ifdef IXGBE_FCOE
 | 
			
		||||
		size = min_t(unsigned int, data_len, skb_frag_size(frag));
 | 
			
		||||
| 
						 | 
				
			
			@ -6168,17 +6173,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 | 
			
		|||
 | 
			
		||||
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
 | 
			
		||||
				       DMA_TO_DEVICE);
 | 
			
		||||
		if (dma_mapping_error(tx_ring->dev, dma))
 | 
			
		||||
			goto dma_error;
 | 
			
		||||
 | 
			
		||||
		tx_buffer = &tx_ring->tx_buffer_info[i];
 | 
			
		||||
		dma_unmap_len_set(tx_buffer, len, size);
 | 
			
		||||
		dma_unmap_addr_set(tx_buffer, dma, dma);
 | 
			
		||||
 | 
			
		||||
		tx_desc->read.buffer_addr = cpu_to_le64(dma);
 | 
			
		||||
		tx_desc->read.olinfo_status = 0;
 | 
			
		||||
 | 
			
		||||
		frag++;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	/* write last descriptor with RS and EOP bits */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue