net/sched: flower: Move filter handle initialization earlier

To support miss to action during hardware offload, the filter's handle is
needed when setting up the actions (tcf_exts_init()) and before offloading.
Move filter handle initialization earlier.

Signed-off-by: Paul Blakey <paulb@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
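Why the move matters: tcf_exts_init() sets up the filter's actions, and with hardware miss-to-action those actions must be traceable back to the filter's handle, so the handle has to exist first. Below is a minimal userspace sketch of this "reserve the identifier first, then initialize what depends on it" pattern. reserve_handle(), init_actions(), and the flat table standing in for head->handle_idr are hypothetical stand-ins modeling the kernel's idr_alloc_u32()/tcf_exts_init() sequence, not kernel API:

/* Userspace model of the new ordering in fl_change(): reserve the
 * filter's handle first, then initialize the actions that need it.
 * All names here are hypothetical stand-ins, not real kernel API.
 */
#include <stdio.h>

#define MAX_HANDLES 16

static void *table[MAX_HANDLES];	/* stands in for head->handle_idr */

/* Reserve the requested slot, or the first free one when *handle == 0. */
static int reserve_handle(void *filter, unsigned int *handle)
{
	unsigned int h = *handle ? *handle : 1;

	if (*handle) {
		if (h >= MAX_HANDLES || table[h])
			return -1;	/* slot taken; the kernel maps this to -EAGAIN */
	} else {
		while (h < MAX_HANDLES && table[h])
			h++;
		if (h == MAX_HANDLES)
			return -1;	/* table full, like -ENOSPC */
	}
	table[h] = filter;
	*handle = h;
	return 0;
}

/* Action setup now runs with a valid handle, which is what the patch needs. */
static int init_actions(unsigned int handle)
{
	printf("actions bound to filter handle %u\n", handle);
	return 0;
}

int main(void)
{
	int filter = 42;
	unsigned int handle = 0;	/* 0 means: pick any free handle */

	if (reserve_handle(&filter, &handle))
		return 1;
	if (init_actions(handle)) {
		table[handle] = NULL;	/* undo the reservation, like errout_idr */
		return 1;
	}
	return 0;
}

On any failure after the reservation, the slot must be released again, which is exactly what the new errout_idr label in the diff below does.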
commit 08a0063df3 (parent 80cd22c35c)
1 changed file with 35 additions and 27 deletions: net/sched/cls_flower.c
@@ -2187,10 +2187,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	INIT_LIST_HEAD(&fnew->hw_list);
 	refcount_set(&fnew->refcnt, 1);
 
-	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
-	if (err < 0)
-		goto errout;
-
 	if (tb[TCA_FLOWER_FLAGS]) {
 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 
@@ -2200,15 +2196,45 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		}
 	}
 
+	if (!fold) {
+		spin_lock(&tp->lock);
+		if (!handle) {
+			handle = 1;
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    INT_MAX, GFP_ATOMIC);
+		} else {
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    handle, GFP_ATOMIC);
+
+			/* Filter with specified handle was concurrently
+			 * inserted after initial check in cls_api. This is not
+			 * necessarily an error if NLM_F_EXCL is not set in
+			 * message flags. Returning EAGAIN will cause cls_api to
+			 * try to update concurrently inserted rule.
+			 */
+			if (err == -ENOSPC)
+				err = -EAGAIN;
+		}
+		spin_unlock(&tp->lock);
+
+		if (err)
+			goto errout;
+	}
+	fnew->handle = handle;
+
+	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
+	if (err < 0)
+		goto errout_idr;
+
 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
 			   tp->chain->tmplt_priv, flags, fnew->flags,
 			   extack);
 	if (err)
-		goto errout;
+		goto errout_idr;
 
 	err = fl_check_assign_mask(head, fnew, fold, mask);
 	if (err)
-		goto errout;
+		goto errout_idr;
 
 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
 	if (err)
@@ -2274,29 +2300,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		refcount_dec(&fold->refcnt);
 		__fl_put(fold);
 	} else {
-		if (handle) {
-			/* user specifies a handle and it doesn't exist */
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    handle, GFP_ATOMIC);
-
-			/* Filter with specified handle was concurrently
-			 * inserted after initial check in cls_api. This is not
-			 * necessarily an error if NLM_F_EXCL is not set in
-			 * message flags. Returning EAGAIN will cause cls_api to
-			 * try to update concurrently inserted rule.
-			 */
-			if (err == -ENOSPC)
-				err = -EAGAIN;
-		} else {
-			handle = 1;
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    INT_MAX, GFP_ATOMIC);
-		}
-		if (err)
-			goto errout_hw;
+		idr_replace(&head->handle_idr, fnew, fnew->handle);
 
 		refcount_inc(&fnew->refcnt);
-		fnew->handle = handle;
 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
 		spin_unlock(&tp->lock);
 	}
@@ -2319,6 +2325,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 			       fnew->mask->filter_ht_params);
 errout_mask:
 	fl_mask_put(head, fnew->mask);
+errout_idr:
+	idr_remove(&head->handle_idr, fnew->handle);
 errout:
 	__fl_put(fnew);
 errout_tb:
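The last hunk is the other half of the move: because the handle is now reserved before tcf_exts_init(), fl_set_parms(), and fl_check_assign_mask() run, their failures must release it again, hence the new errout_idr label slotted between errout_mask and errout. A minimal sketch of that goto-ladder unwind pattern follows; the grab/drop helpers are hypothetical stand-ins for the kernel calls named in the diff, with a forced failure to show the ladder running:

/* Sketch of the goto-ladder error handling this patch extends: each
 * errout label releases one resource, in reverse order of acquisition,
 * so a failure at any step undoes exactly what was taken so far.
 */
#include <stdio.h>

static int grab_handle(void) { puts("handle reserved"); return 0; }
static void drop_handle(void) { puts("handle removed"); }

static int grab_mask(void) { puts("mask assigned"); return 0; }
static void drop_mask(void) { puts("mask put"); }

static int insert_ht(void) { puts("ht insert fails"); return -1; }

static int change(void)
{
	int err;

	err = grab_handle();		/* like idr_alloc_u32() */
	if (err)
		goto errout;

	err = grab_mask();		/* like fl_check_assign_mask() */
	if (err)
		goto errout_idr;

	err = insert_ht();		/* like fl_ht_insert_unique() */
	if (err)
		goto errout_mask;
	return 0;

errout_mask:
	drop_mask();			/* like fl_mask_put() */
errout_idr:
	drop_handle();			/* the errout_idr step this patch adds */
errout:
	return err;
}

int main(void)
{
	/* The forced insert_ht() failure walks the full unwind ladder. */
	return change() ? 1 : 0;
}

Falling through the labels keeps teardown in strict reverse order of setup, which is why the new label's position in the ladder matters as much as its body.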