	net: provide generic busy polling to all NAPI drivers
NAPI drivers no longer need to observe a particular protocol to benefit from busy polling (CONFIG_NET_RX_BUSY_POLL=y).

napi_hash_add() and napi_hash_del() are automatically called from the core networking stack, from netif_napi_add() and netif_napi_del() respectively.

This patch depends on free_netdev() and netif_napi_del() being called from process context, which seems to be the norm.

Drivers might still prefer to call napi_hash_del() on their own, since they can combine all the RCU grace periods into a single one, knowing their NAPI structures' lifetime, while the core networking stack has no idea of a possible combining.

Once this patch proves not to bring serious regressions, we will clean up drivers to either remove napi_hash_del() or provide appropriate RCU grace period combining.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 34cbe27e81
commit 93d05d4a32

14 changed files with 16 additions and 20 deletions
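Concretely, every driver's NAPI setup path loses one call. A minimal sketch of the driver-side change (hypothetical driver; mydrv_poll and priv are placeholder names, not part of this patch):

	/* Before: a driver opted in to busy polling by hashing the
	 * NAPI context itself: */
	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
	napi_hash_add(&priv->napi);

	/* After: netif_napi_add() hashes the context internally,
	 * so one call is enough: */
	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);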
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -46,7 +46,6 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
 	for_each_rx_queue_cnic(bp, i) {
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, NAPI_POLL_WEIGHT);
-		napi_hash_add(&bnx2x_fp(bp, i, napi));
 	}
 }
 
@@ -58,7 +57,6 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
 	for_each_eth_queue(bp, i) {
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, NAPI_POLL_WEIGHT);
-		napi_hash_add(&bnx2x_fp(bp, i, napi));
 	}
 }
 
drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4227,12 +4227,10 @@ static void bnxt_init_napi(struct bnxt *bp)
 			bnapi = bp->bnapi[i];
 			netif_napi_add(bp->dev, &bnapi->napi,
 				       bnxt_poll, 64);
-			napi_hash_add(&bnapi->napi);
 		}
 	} else {
 		bnapi = bp->bnapi[0];
 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
-		napi_hash_add(&bnapi->napi);
 	}
 }
 
drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2527,7 +2527,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		goto err;
 
 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
-	napi_hash_add(&iq->napi);
 	iq->cur_desc = iq->desc;
 	iq->cidx = 0;
 	iq->gen = 1;
drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2458,13 +2458,11 @@ static int enic_dev_init(struct enic *enic)
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	default:
 		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
-		napi_hash_add(&enic->napi[0]);
 		break;
 	case VNIC_DEV_INTR_MODE_MSIX:
 		for (i = 0; i < enic->rq_count; i++) {
 			netif_napi_add(netdev, &enic->napi[i],
 				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
-			napi_hash_add(&enic->napi[i]);
 		}
 		for (i = 0; i < enic->wq_count; i++)
 			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
drivers/net/ethernet/emulex/benet/be_main.c
@@ -2630,7 +2630,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 				eqo->affinity_mask);
 		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
 			       BE_NAPI_WEIGHT);
-		napi_hash_add(&eqo->napi);
 	}
 	return 0;
 }
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -844,7 +844,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 	/* initialize NAPI */
 	netif_napi_add(adapter->netdev, &q_vector->napi,
 		       ixgbe_poll, 64);
-	napi_hash_add(&q_vector->napi);
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	/* initialize busy poll */
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2483,9 +2483,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 		q_vector->v_idx = q_idx;
 		netif_napi_add(adapter->netdev, &q_vector->napi,
 			       ixgbevf_poll, 64);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-		napi_hash_add(&q_vector->napi);
-#endif
 		adapter->q_vector[q_idx] = q_vector;
 	}
 
drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -155,13 +155,11 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (cq->is_tx) {
+	if (cq->is_tx)
 		netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
 				  NAPI_POLL_WEIGHT);
-	} else {
+	else
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-		napi_hash_add(&cq->napi);
-	}
 
 	napi_enable(&cq->napi);
 
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -982,7 +982,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	mlx5e_build_channeltc_to_txq_map(priv, ix);
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
-	napi_hash_add(&c->napi);
 
 	err = mlx5e_open_tx_cqs(c, cparam);
 	if (err)
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3814,7 +3814,6 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
 		ss->dev = mgp->dev;
 		netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
 			       myri10ge_napi_weight);
-		napi_hash_add(&ss->napi);
 	}
 	return 0;
 abort:
drivers/net/ethernet/sfc/efx.c
@@ -2059,7 +2059,6 @@ static void efx_init_napi_channel(struct efx_channel *channel)
 	channel->napi_dev = efx->net_dev;
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
-	napi_hash_add(&channel->napi_str);
 	efx_channel_busy_poll_init(channel);
 }
 
drivers/net/virtio_net.c
@@ -1610,7 +1610,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 		vi->rq[i].pages = NULL;
 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
 			       napi_weight);
-		napi_hash_add(&vi->rq[i].napi);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
include/linux/netdevice.h
@@ -466,6 +466,9 @@ static inline void napi_complete(struct napi_struct *n)
  *	@napi: napi context
  *
  * generate a new napi_id and store a @napi under it in napi_hash
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Note: This is normally automatically done from netif_napi_add(),
+ * so might disappear in a future linux version.
  */
 void napi_hash_add(struct napi_struct *napi);
 
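For context (not part of this patch): the napi_id generated here is what per-socket busy polling keys on. A userspace socket can request busy polling with the SO_BUSY_POLL socket option; the 50 microsecond budget below is an arbitrary example value, and setting it may require CAP_NET_ADMIN:

	#include <sys/socket.h>

	/* Spin up to 50 usec waiting for packets on this socket's
	 * receive queue instead of sleeping immediately. */
	int usecs = 50;
	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));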
@@ -476,6 +479,10 @@ void napi_hash_add(struct napi_struct *napi);
  * Warning: caller must observe rcu grace period
  * before freeing memory containing @napi, if
  * this function returns true.
+ * Note: core networking stack automatically calls it
+ * from netif_napi_del()
+ * Drivers might want to call this helper to combine all
+ * the needed rcu grace periods into a single one.
  */
 bool napi_hash_del(struct napi_struct *napi);
 
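The grace-period combining mentioned above could look like this in a multiqueue driver's teardown path (a hedged sketch; mydrv_priv, num_queues and queue[] are made-up names). Because napi_hash_del() returns true only when it actually unhashed the context, the later netif_napi_del() calls find the contexts already unhashed and skip their own synchronize_net():

	/* Unhash every NAPI context first, then pay for a single RCU
	 * grace period instead of one per queue. */
	static void mydrv_del_all_napi(struct mydrv_priv *priv)
	{
		bool unhashed = false;
		int i;

		for (i = 0; i < priv->num_queues; i++)
			unhashed |= napi_hash_del(&priv->queue[i].napi);

		if (unhashed)
			synchronize_net();	/* one combined grace period */

		for (i = 0; i < priv->num_queues; i++)
			netif_napi_del(&priv->queue[i].napi);
	}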
net/core/dev.c
@@ -4807,6 +4807,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	napi->poll_owner = -1;
 #endif
 	set_bit(NAPI_STATE_SCHED, &napi->state);
+	napi_hash_add(napi);
 }
 EXPORT_SYMBOL(netif_napi_add);
 
@@ -4826,8 +4827,12 @@
 }
 EXPORT_SYMBOL(napi_disable);
 
+/* Must be called in process context */
 void netif_napi_del(struct napi_struct *napi)
 {
+	might_sleep();
+	if (napi_hash_del(napi))
+		synchronize_net();
 	list_del_init(&napi->dev_list);
 	napi_free_frags(napi);
 
@@ -7227,11 +7232,13 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
  *	This function does the last stage of destroying an allocated device
  * 	interface. The reference to the device object is released.
  *	If this is the last reference then it will be freed.
+ *	Must be called in process context.
  */
 void free_netdev(struct net_device *dev)
 {
 	struct napi_struct *p, *n;
 
+	might_sleep();
 	netif_free_tx_queues(dev);
 #ifdef CONFIG_SYSFS
 	kvfree(dev->_rx);