net/sched: Add match-all classifier hw offloading.

Following the work that has been done on offloading classifiers like u32
and flower, hw offloading of the match-all classifier is now possible,
provided the interface supports tc offloading. To control the offloading,
two tc flags have been introduced: skip_sw and skip_hw.

Typical usage:

tc filter add dev eth25 parent ffff: \
	matchall skip_sw \
	action mirred egress mirror \
	dev eth27

Signed-off-by: Yotam Gigi <yotamg@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b87f7936a9
parent bf3994d2ed
4 changed files with 87 additions and 3 deletions
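For context, a NIC driver consumes this new offload type through its ndo_setup_tc callback, the same hook already used for the cls_u32 and cls_flower offloads. The snippet below is only a minimal sketch of what such a handler could look like: the foo_* names, foo_priv and the two helpers are hypothetical, and only the tc_to_netdev / tc_cls_matchall_offload types and the TC_CLSMATCHALL_* commands come from this patch (see the hunks below).

/* Minimal, hypothetical driver hook for the new TC_SETUP_MATCHALL type.
 * "foo", foo_priv and the foo_mall_* helpers do not exist in the tree;
 * they stand in for a real NIC driver.
 */
static int foo_setup_tc(struct net_device *dev, u32 handle,
			__be16 protocol, struct tc_to_netdev *tc)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (tc->type != TC_SETUP_MATCHALL)
		return -EOPNOTSUPP;

	switch (tc->cls_mall->command) {
	case TC_CLSMATCHALL_REPLACE:
		/* Walk tc->cls_mall->exts, program the action in hardware,
		 * and remember tc->cls_mall->cookie so the rule can be
		 * found again on destroy.
		 */
		return foo_mall_replace(priv, tc->cls_mall);
	case TC_CLSMATCHALL_DESTROY:
		foo_mall_destroy(priv, tc->cls_mall->cookie);
		return 0;
	}

	return -EOPNOTSUPP;
}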
				
			
include/linux/netdevice.h

@@ -787,6 +787,7 @@ enum {
 	TC_SETUP_MQPRIO,
 	TC_SETUP_CLSU32,
 	TC_SETUP_CLSFLOWER,
+	TC_SETUP_MATCHALL,
 };
 
 struct tc_cls_u32_offload;
@@ -797,6 +798,7 @@ struct tc_to_netdev {
 		u8 tc;
 		struct tc_cls_u32_offload *cls_u32;
 		struct tc_cls_flower_offload *cls_flower;
+		struct tc_cls_matchall_offload *cls_mall;
 	};
 };
 
include/net/pkt_cls.h

@@ -442,4 +442,15 @@ struct tc_cls_flower_offload {
 	struct tcf_exts *exts;
 };
 
+enum tc_matchall_command {
+	TC_CLSMATCHALL_REPLACE,
+	TC_CLSMATCHALL_DESTROY,
+};
+
+struct tc_cls_matchall_offload {
+	enum tc_matchall_command command;
+	struct tcf_exts *exts;
+	unsigned long cookie;
+};
+
 #endif
include/uapi/linux/pkt_cls.h

@@ -439,6 +439,7 @@ enum {
 	TCA_MATCHALL_UNSPEC,
 	TCA_MATCHALL_CLASSID,
 	TCA_MATCHALL_ACT,
+	TCA_MATCHALL_FLAGS,
 	__TCA_MATCHALL_MAX,
 };
 
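The new TCA_MATCHALL_FLAGS attribute carries the same TCA_CLS_FLAGS_SKIP_HW / TCA_CLS_FLAGS_SKIP_SW bits already used by the u32 and flower offloads, and cls_matchall checks them through tc_flags_valid(), tc_skip_sw() and tc_should_offload(). Those helpers are defined elsewhere in include/net/pkt_cls.h and are not part of this diff; the sketch below only paraphrases their intent and is not a verbatim copy of the kernel source.

/* Paraphrase of the helpers used by the cls_matchall hunks below;
 * not the actual kernel definitions.
 */
static inline bool tc_flags_valid(u32 flags)
{
	/* Only the skip bits are accepted, and skipping both software
	 * and hardware would leave nowhere to classify.
	 */
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;
	return true;
}

static inline bool tc_skip_sw(u32 flags)
{
	return flags & TCA_CLS_FLAGS_SKIP_SW;
}

static inline bool tc_should_offload(struct net_device *dev,
				     struct tcf_proto *tp, u32 flags)
{
	/* Offload only when the user did not ask skip_hw and the device
	 * advertises TC offload with an ndo_setup_tc callback.
	 */
	return !(flags & TCA_CLS_FLAGS_SKIP_HW) &&
	       (dev->features & NETIF_F_HW_TC) &&
	       dev->netdev_ops->ndo_setup_tc;
}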
net/sched/cls_matchall.c

@@ -21,6 +21,7 @@ struct cls_mall_filter {
 	struct tcf_result res;
 	u32 handle;
 	struct rcu_head	rcu;
+	u32 flags;
 };
 
 struct cls_mall_head {
@@ -34,6 +35,9 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	struct cls_mall_head *head = rcu_dereference_bh(tp->root);
 	struct cls_mall_filter *f = head->filter;
 
+	if (tc_skip_sw(f->flags))
+		return -1;
+
 	return tcf_exts_exec(skb, &f->exts, res);
 }
 
@@ -55,18 +59,61 @@ static void mall_destroy_filter(struct rcu_head *head)
 	struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
 
 	tcf_exts_destroy(&f->exts);
 
 	kfree(f);
 }
 
+static int mall_replace_hw_filter(struct tcf_proto *tp,
+				  struct cls_mall_filter *f,
+				  unsigned long cookie)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_to_netdev offload;
+	struct tc_cls_matchall_offload mall_offload = {0};
+
+	offload.type = TC_SETUP_MATCHALL;
+	offload.cls_mall = &mall_offload;
+	offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
+	offload.cls_mall->exts = &f->exts;
+	offload.cls_mall->cookie = cookie;
+
+	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+					     &offload);
+}
+
+static void mall_destroy_hw_filter(struct tcf_proto *tp,
+				   struct cls_mall_filter *f,
+				   unsigned long cookie)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_to_netdev offload;
+	struct tc_cls_matchall_offload mall_offload = {0};
+
+	offload.type = TC_SETUP_MATCHALL;
+	offload.cls_mall = &mall_offload;
+	offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
+	offload.cls_mall->exts = NULL;
+	offload.cls_mall->cookie = cookie;
+
+	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+				      &offload);
+}
+
 static bool mall_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct cls_mall_filter *f = head->filter;
 
-	if (!force && head->filter)
+	if (!force && f)
 		return false;
 
-	if (head->filter)
-		call_rcu(&head->filter->rcu, mall_destroy_filter);
+	if (f) {
+		if (tc_should_offload(dev, tp, f->flags))
+			mall_destroy_hw_filter(tp, f, (unsigned long) f);
+
+		call_rcu(&f->rcu, mall_destroy_filter);
+	}
 	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
@@ -117,8 +164,10 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
 	struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
+	struct net_device *dev = tp->q->dev_queue->dev;
 	struct cls_mall_filter *f;
 	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+	u32 flags = 0;
 	int err;
 
 	if (!tca[TCA_OPTIONS])
@@ -135,6 +184,12 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 	if (err < 0)
 		return err;
 
+	if (tb[TCA_MATCHALL_FLAGS]) {
+		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
+		if (!tc_flags_valid(flags))
+			return -EINVAL;
+	}
+
 	f = kzalloc(sizeof(*f), GFP_KERNEL);
 	if (!f)
 		return -ENOBUFS;
@@ -144,11 +199,22 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 	if (!handle)
 		handle = 1;
 	f->handle = handle;
+	f->flags = flags;
 
 	err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
 	if (err)
 		goto errout;
 
+	if (tc_should_offload(dev, tp, flags)) {
+		err = mall_replace_hw_filter(tp, f, (unsigned long) f);
+		if (err) {
+			if (tc_skip_sw(flags))
+				goto errout;
+			else
+				err = 0;
+		}
+	}
+
 	*arg = (unsigned long) f;
 	rcu_assign_pointer(head->filter, f);
 
@@ -163,6 +229,10 @@ static int mall_delete(struct tcf_proto *tp, unsigned long arg)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
 	struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
+	struct net_device *dev = tp->q->dev_queue->dev;
 
+	if (tc_should_offload(dev, tp, f->flags))
+		mall_destroy_hw_filter(tp, f, (unsigned long) f);
+
 	RCU_INIT_POINTER(head->filter, NULL);
 	tcf_unbind_filter(tp, &f->res);
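With no flag at all, the filter is installed in software and, when tc_should_offload() allows it, also pushed to hardware; a hardware failure is then tolerated (err is reset to 0 in mall_change above). The skip_sw example from the commit message has a skip_hw counterpart; reusing the same eth25/eth27 devices from that example:

tc filter add dev eth25 parent ffff: \
	matchall skip_hw \
	action mirred egress mirror \
	dev eth27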