forked from mirrors/linux
		
	net_sched: more precise pkt_len computation
One long standing problem with TSO/GSO/GRO packets is that skb->len
doesn't represent a precise amount of bytes on wire.
Headers are only accounted for the first segment.
For TCP, that's typically 66 bytes per 1448-byte segment missing,
an error of 4.5 % for normal MSS value.
As consequences:
1) TBF/CBQ/HTB/NETEM/... can send more bytes than the assigned limits.
2) Device stats are slightly underestimated as well.
Fix this by taking account of headers in qdisc_skb_cb(skb)->pkt_len
computation.
Packet schedulers should use the qdisc pkt_len instead of skb->len for their
bandwidth limitations, and TSO-enabled device drivers could use pkt_len
if their statistics are not hardware assisted, and if they don't scratch
the first word of skb->cb[].
Both egress and ingress paths work, thanks to commit fda55eca5a
(net: introduce skb_transport_header_was_set()) : If GRO built
a GSO packet, it also set the transport header for us.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Paolo Valente <paolo.valente@unimore.it>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
This commit is contained in:
		
							parent
							
								
									3d55b32370
								
							
						
					
					
						commit
						1def9238d4
					
				
					 1 changed files with 21 additions and 1 deletions
				
			
		| 
						 | 
					@ -2532,6 +2532,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 | 
				
			||||||
	return netdev_get_tx_queue(dev, queue_index);
 | 
						return netdev_get_tx_queue(dev, queue_index);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
/* qdisc_pkt_len_init - initialize qdisc_skb_cb(skb)->pkt_len
 *
 * skb->len only counts the protocol headers once for a GSO/TSO packet,
 * even though each segment goes out on the wire with its own copy of
 * the headers.  Start from skb->len and, for segmented packets, add the
 * per-segment header size for the remaining (gso_segs - 1) segments so
 * that pkt_len better reflects bytes actually transmitted.
 *
 * NOTE(review): skb_transport_offset() is only meaningful here if the
 * transport header has been set (per the commit message, GRO does this
 * when it builds a GSO packet) — confirm for other ingress paths.
 */
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size)  {
		/* Offset to the transport header = size of the link +
		 * network headers repeated in every segment.
		 */
		unsigned int hdr_len = skb_transport_offset(skb);

		/* TCP segments each carry a full TCP header; for any
		 * other GSO type assume a UDP header per segment.
		 */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);
		/* First segment's headers are already in skb->len. */
		qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
	}
}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 | 
					static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 | 
				
			||||||
				 struct net_device *dev,
 | 
									 struct net_device *dev,
 | 
				
			||||||
				 struct netdev_queue *txq)
 | 
									 struct netdev_queue *txq)
 | 
				
			||||||
| 
						 | 
					@ -2540,7 +2560,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 | 
				
			||||||
	bool contended;
 | 
						bool contended;
 | 
				
			||||||
	int rc;
 | 
						int rc;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	qdisc_skb_cb(skb)->pkt_len = skb->len;
 | 
						qdisc_pkt_len_init(skb);
 | 
				
			||||||
	qdisc_calculate_pkt_len(skb, q);
 | 
						qdisc_calculate_pkt_len(skb, q);
 | 
				
			||||||
	/*
 | 
						/*
 | 
				
			||||||
	 * Heuristic to force contended enqueues to serialize on a
 | 
						 * Heuristic to force contended enqueues to serialize on a
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue