net: netif_rx() must disable preemption

Eric Paris reported netif_rx() is calling smp_processor_id() from
preemptible context, in particular when the caller is
ip_dev_loopback_xmit().

The RPS commit added this smp_processor_id() call; this patch makes sure
preemption is disabled. get_rps_cpu() wants rcu_read_lock() anyway, so we
can take it a bit earlier.

Reported-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b0e28f1eff
parent fea0691526

1 changed file with 16 additions and 11 deletions
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2206,6 +2206,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
  */
 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 {
@@ -2217,8 +2218,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 	u8 ip_proto;
 	u32 addr1, addr2, ports, ihl;
 
-	rcu_read_lock();
-
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
@@ -2296,7 +2295,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 	}
 
 done:
-	rcu_read_unlock();
 	return cpu;
 }
 
@@ -2392,7 +2390,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
 
 int netif_rx(struct sk_buff *skb)
 {
-	int cpu;
+	int ret;
 
 	/* if netpoll wants it, pretend we never saw it */
 	if (netpoll_rx(skb))
@@ -2402,14 +2400,21 @@ int netif_rx(struct sk_buff *skb)
 		net_timestamp(skb);
 
 #ifdef CONFIG_RPS
-	cpu = get_rps_cpu(skb->dev, skb);
-	if (cpu < 0)
-		cpu = smp_processor_id();
-#else
-	cpu = smp_processor_id();
-#endif
+	{
+		int cpu;
 
-	return enqueue_to_backlog(skb, cpu);
+		rcu_read_lock();
+		cpu = get_rps_cpu(skb->dev, skb);
+		if (cpu < 0)
+			cpu = smp_processor_id();
+		ret = enqueue_to_backlog(skb, cpu);
+		rcu_read_unlock();
+	}
+#else
+	ret = enqueue_to_backlog(skb, get_cpu());
+	put_cpu();
+#endif
+	return ret;
 }
 EXPORT_SYMBOL(netif_rx);
 
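The #else branch above replaces the bare smp_processor_id() with get_cpu()/put_cpu(), which disable and re-enable preemption around the window in which the CPU number is used; that is what silences the "BUG: using smp_processor_id() in preemptible" warning Eric Paris reported. Below is a minimal, stand-alone sketch of that pattern, written as a trivial kernel module purely for illustration; it is not part of this commit, and the module name and messages are made up.

/*
 * Illustrative only, not part of the commit: a minimal module showing the
 * get_cpu()/put_cpu() pattern used in the new #else branch of netif_rx().
 * With CONFIG_DEBUG_PREEMPT, a bare smp_processor_id() here (preemptible
 * process context) would trigger the "using smp_processor_id() in
 * preemptible" warning; get_cpu() disables preemption and returns the
 * current CPU, put_cpu() re-enables preemption.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>

static int __init cpu_pin_demo_init(void)
{
	int cpu;

	cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */

	/*
	 * Per-CPU work goes here; the task cannot migrate off "cpu"
	 * until put_cpu(), just as netif_rx() must stay on one CPU
	 * while it enqueues the skb to that CPU's backlog.
	 */
	pr_info("cpu_pin_demo: running on CPU %d\n", cpu);

	put_cpu();		/* re-enable preemption */
	return 0;
}

static void __exit cpu_pin_demo_exit(void)
{
}

module_init(cpu_pin_demo_init);
module_exit(cpu_pin_demo_exit);
MODULE_LICENSE("GPL");

In the CONFIG_RPS branch, by contrast, the patch holds rcu_read_lock() across both get_rps_cpu() and enqueue_to_backlog(), keeping the RPS map valid for the whole lookup-and-enqueue sequence; that is why the lock/unlock pair moves out of get_rps_cpu() and into its caller, with the new comment documenting that rcu_read_lock must be held on entry.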