forked from mirrors/linux
		
	net/tc: introduce TC_ACT_REINSERT.
This is similar to TC_ACT_REDIRECT, but with a slightly different semantic: - on ingress the mirred skbs are passed to the target device's network stack without any additional check and without scrubbing. - the rcu-protected stats provided via the tcf_result struct are updated on error conditions. This new tcfa_action value is not exposed to user space and can be used only internally by clsact. v1 -> v2: do not touch the TC_ACT_REDIRECT code path; introduce a new action type instead v2 -> v3: - rename the new action value TC_ACT_REINJECT, update the helper accordingly - take care of uncloned reinjected packets in the XDP generic hook v3 -> v4: - renamed the new action value again (JiriP) v4 -> v5: - fix build error with !NET_CLS_ACT (kbuild bot) Signed-off-by: Paolo Abeni <pabeni@redhat.com> Acked-by: Jiri Pirko <jiri@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									7fd4b288ea
								
							
						
					
					
						commit
						cd11b16407
					
				
					 3 changed files with 36 additions and 1 deletions
				
			
		|  | @ -7,6 +7,9 @@ | ||||||
| #include <net/sch_generic.h> | #include <net/sch_generic.h> | ||||||
| #include <net/act_api.h> | #include <net/act_api.h> | ||||||
| 
 | 
 | ||||||
/* TC action not accessible from user space; used only internally by clsact.
 * Placed one past TC_ACT_VALUE_MAX so it can never collide with a
 * user-visible action value.
 */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)
|  | 
 | ||||||
| /* Basic packet classifier frontend definitions. */ | /* Basic packet classifier frontend definitions. */ | ||||||
| 
 | 
 | ||||||
| struct tcf_walker { | struct tcf_walker { | ||||||
|  |  | ||||||
|  | @ -235,6 +235,12 @@ struct tcf_result { | ||||||
| 			u32		classid; | 			u32		classid; | ||||||
| 		}; | 		}; | ||||||
| 		const struct tcf_proto *goto_tp; | 		const struct tcf_proto *goto_tp; | ||||||
|  | 
 | ||||||
|  | 		/* used by the TC_ACT_REINSERT action */ | ||||||
|  | 		struct { | ||||||
|  | 			bool		ingress; | ||||||
|  | 			struct gnet_stats_queue *qstats; | ||||||
|  | 		}; | ||||||
| 	}; | 	}; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | @ -569,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb) | ||||||
| #endif | #endif | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
/* Return true if this skb was redirected/reinserted by a TC action
 * (presumably set by act_mirred — confirm against the tc_redirected
 * writers).  Always false when built without CONFIG_NET_CLS_ACT, since
 * the flag does not exist in that configuration.
 */
static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_redirected;
#else
	return false;
#endif
}
|  | 
 | ||||||
| static inline bool skb_at_tc_ingress(const struct sk_buff *skb) | static inline bool skb_at_tc_ingress(const struct sk_buff *skb) | ||||||
| { | { | ||||||
| #ifdef CONFIG_NET_CLS_ACT | #ifdef CONFIG_NET_CLS_ACT | ||||||
|  | @ -1108,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, | ||||||
| void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, | void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, | ||||||
| 			  struct mini_Qdisc __rcu **p_miniq); | 			  struct mini_Qdisc __rcu **p_miniq); | ||||||
| 
 | 
 | ||||||
|  | static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res) | ||||||
|  | { | ||||||
|  | 	struct gnet_stats_queue *stats = res->qstats; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	if (res->ingress) | ||||||
|  | 		ret = netif_receive_skb(skb); | ||||||
|  | 	else | ||||||
|  | 		ret = dev_queue_xmit(skb); | ||||||
|  | 	if (ret && stats) | ||||||
|  | 		qstats_overlimit_inc(res->qstats); | ||||||
|  | } | ||||||
|  | 
 | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
|  | @ -4252,7 +4252,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, | ||||||
| 	/* Reinjected packets coming from act_mirred or similar should
 | 	/* Reinjected packets coming from act_mirred or similar should
 | ||||||
| 	 * not get XDP generic processing. | 	 * not get XDP generic processing. | ||||||
| 	 */ | 	 */ | ||||||
| 	if (skb_cloned(skb)) | 	if (skb_cloned(skb) || skb_is_tc_redirected(skb)) | ||||||
| 		return XDP_PASS; | 		return XDP_PASS; | ||||||
| 
 | 
 | ||||||
| 	/* XDP packets must be linear and must have sufficient headroom
 | 	/* XDP packets must be linear and must have sufficient headroom
 | ||||||
|  | @ -4602,6 +4602,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, | ||||||
| 		__skb_push(skb, skb->mac_len); | 		__skb_push(skb, skb->mac_len); | ||||||
| 		skb_do_redirect(skb); | 		skb_do_redirect(skb); | ||||||
| 		return NULL; | 		return NULL; | ||||||
|  | 	case TC_ACT_REINSERT: | ||||||
|  | 		/* this does not scrub the packet, and updates stats on error */ | ||||||
|  | 		skb_tc_reinsert(skb, &cl_res); | ||||||
|  | 		return NULL; | ||||||
| 	default: | 	default: | ||||||
| 		break; | 		break; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Paolo Abeni
						Paolo Abeni