	net: inline dev_queue_xmit()
Inline dev_queue_xmit() and dev_queue_xmit_accel(); both are small proxy functions that do nothing but redirect control flow to __dev_queue_xmit().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
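For context, a minimal sketch of a call site (the caller below is hypothetical, not part of this patch): with dev_queue_xmit() now a static inline wrapper, the call compiles down to a direct call to __dev_queue_xmit(skb, NULL), so the out-of-line proxy hop disappears at every call site.

	/* Hypothetical caller, e.g. a stacked device retransmitting an skb
	 * on its lower device. After this patch the wrapper is inlined, so
	 * this becomes a direct call to __dev_queue_xmit(skb, NULL).
	 */
	static int example_forward(struct sk_buff *skb, struct net_device *lower)
	{
		skb->dev = lower;
		return dev_queue_xmit(skb);
	}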
parent 657dd5f97b
commit c526fd8f9f

2 changed files with 14 additions and 15 deletions
include/linux/netdevice.h

@@ -2940,10 +2940,20 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 		       struct net_device *sb_dev);
 
-int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
+
+static inline int dev_queue_xmit_accel(struct sk_buff *skb,
+				       struct net_device *sb_dev)
+{
+	return __dev_queue_xmit(skb, sb_dev);
+}
+
 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
 	int ret;
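The hunk above is source-compatible for callers: both wrappers keep their signatures, only their bodies move into the header. As an illustrative (not verbatim) sketch of the kind of call site the accel variant serves, a driver transmitting on behalf of a subordinate channel device passes sb_dev through so queue selection can pick from the subordinate's queue set:

	/* Illustrative only: sb_dev steers tx queue selection; the wrapper
	 * now expands inline to __dev_queue_xmit(skb, sb_dev).
	 */
	static int example_accel_xmit(struct sk_buff *skb,
				      struct net_device *lower,
				      struct net_device *sb_dev)
	{
		skb->dev = lower;
		return dev_queue_xmit_accel(skb, sb_dev);
	}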
net/core/dev.c

@@ -4111,7 +4111,7 @@ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq = NULL;
@@ -4235,18 +4235,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 	rcu_read_unlock_bh();
 	return rc;
 }
-
-int dev_queue_xmit(struct sk_buff *skb)
-{
-	return __dev_queue_xmit(skb, NULL);
-}
-EXPORT_SYMBOL(dev_queue_xmit);
-
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
-{
-	return __dev_queue_xmit(skb, sb_dev);
-}
-EXPORT_SYMBOL(dev_queue_xmit_accel);
+EXPORT_SYMBOL(__dev_queue_xmit);
 
 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
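Note the export change that makes the inlining work for modules: since the wrappers now expand in every translation unit that includes netdevice.h, modular callers end up referencing __dev_queue_xmit() directly, so it loses its static qualifier and is exported in place of the two old symbols.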