forked from mirrors/linux
		
	net: rps: protect last_qtail with rps_input_queue_tail_save() helper
Remove one unnecessary reader protection and add another writer protection to finish the lockless protection job. Note: the removed READ_ONCE() is not needed because we only have to protect the lockless reader in a different context (rps_may_expire_flow()). Signed-off-by: Jason Xing <kernelxing@tencent.com> Reviewed-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									00ac0dc347
								
							
						
					
					
						commit
						84b6823cd9
					
				
					 1 changed file with 4 additions and 4 deletions
				
			
		| 
						 | 
					@ -4507,7 +4507,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 | 
				
			||||||
		struct netdev_rx_queue *rxqueue;
 | 
							struct netdev_rx_queue *rxqueue;
 | 
				
			||||||
		struct rps_dev_flow_table *flow_table;
 | 
							struct rps_dev_flow_table *flow_table;
 | 
				
			||||||
		struct rps_dev_flow *old_rflow;
 | 
							struct rps_dev_flow *old_rflow;
 | 
				
			||||||
		u32 flow_id;
 | 
							u32 flow_id, head;
 | 
				
			||||||
		u16 rxq_index;
 | 
							u16 rxq_index;
 | 
				
			||||||
		int rc;
 | 
							int rc;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -4535,8 +4535,8 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 | 
				
			||||||
			old_rflow->filter = RPS_NO_FILTER;
 | 
								old_rflow->filter = RPS_NO_FILTER;
 | 
				
			||||||
	out:
 | 
						out:
 | 
				
			||||||
#endif
 | 
					#endif
 | 
				
			||||||
		rflow->last_qtail =
 | 
							head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
 | 
				
			||||||
			READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
 | 
							rps_input_queue_tail_save(&rflow->last_qtail, head);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	rflow->cpu = next_cpu;
 | 
						rflow->cpu = next_cpu;
 | 
				
			||||||
| 
						 | 
					@ -4619,7 +4619,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 | 
				
			||||||
		if (unlikely(tcpu != next_cpu) &&
 | 
							if (unlikely(tcpu != next_cpu) &&
 | 
				
			||||||
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
 | 
							    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
 | 
				
			||||||
		     ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
 | 
							     ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
 | 
				
			||||||
		      READ_ONCE(rflow->last_qtail))) >= 0)) {
 | 
							      rflow->last_qtail)) >= 0)) {
 | 
				
			||||||
			tcpu = next_cpu;
 | 
								tcpu = next_cpu;
 | 
				
			||||||
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
 | 
								rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue