	net/flower: Introduce hardware offload support
This patch is based on a patch made by John Fastabend.
It adds support for offloading cls_flower.
When NETIF_F_HW_TC is on:
  flags = 0       => Rule will be processed twice - by hardware, and if
                     still relevant, by software.
  flags = SKIP_HW => Rule will be processed by software only.
If the hardware fails or is not capable of applying the rule, the
operation will NOT fail; the filter will be processed by software only.
Acked-by: Jiri Pirko <jiri@mellanox.com>
Suggested-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Amir Vadai <amir@vadai.me>
Signed-off-by: David S. Miller <davem@davemloft.net>
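
Editor's note: for orientation, the consumer of this interface is a driver's
ndo_setup_tc callback. A device that advertises NETIF_F_HW_TC (so that
tc_should_offload() allows the call) receives the new TC_SETUP_CLSFLOWER
requests and can program the rule into hardware. The sketch below is a
minimal, hypothetical dispatch; the foo_* names are illustrative only and
are not part of this patch. Note that cls_flower ignores the return value,
which is what makes a failed or unsupported offload fall back to software
processing without failing the tc operation.

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

/* Hypothetical driver-side dispatch, for illustration only.
 * cls_flower does not check the return value of ndo_setup_tc here,
 * so returning an error simply leaves the rule software-only.
 */
static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			struct tc_to_netdev *tc)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical priv */

	if (tc->type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	switch (tc->cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return foo_flower_replace(priv, tc->cls_flower);	/* hypothetical */
	case TC_CLSFLOWER_DESTROY:
		return foo_flower_destroy(priv, tc->cls_flower);	/* hypothetical */
	}
	return -EOPNOTSUPP;
}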
			
			
This commit is contained in:

	parent 10f79037b4
	commit 5b33f48842

 4 changed files with 81 additions and 1 deletion
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -786,6 +786,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
 enum {
 	TC_SETUP_MQPRIO,
 	TC_SETUP_CLSU32,
+	TC_SETUP_CLSFLOWER,
 };
 
 struct tc_cls_u32_offload;
@@ -795,6 +796,7 @@ struct tc_to_netdev {
 	union {
 		u8 tc;
 		struct tc_cls_u32_offload *cls_u32;
+		struct tc_cls_flower_offload *cls_flower;
 	};
 };
 
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -409,4 +409,18 @@ static inline bool tc_should_offload(struct net_device *dev, u32 flags)
 	return true;
 }
 
+enum tc_fl_command {
+	TC_CLSFLOWER_REPLACE,
+	TC_CLSFLOWER_DESTROY,
+};
+
+struct tc_cls_flower_offload {
+	enum tc_fl_command command;
+	u64 cookie;
+	struct flow_dissector *dissector;
+	struct fl_flow_key *mask;
+	struct fl_flow_key *key;
+	struct tcf_exts *exts;
+};
+
 #endif
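Editor's note: the struct added above is the whole driver-facing contract:
command selects replace or destroy, cookie identifies the rule, dissector/key/mask
describe the match, and exts carries the actions. Below is a minimal, hypothetical
sketch of how a driver's replace handler might read it; the foo_* names are
illustrative and not part of this patch, while dissector_uses_key() and
skb_flow_dissector_target() are existing flow-dissector helpers.

#include <net/pkt_cls.h>
#include <net/flow_dissector.h>

/* Hypothetical TC_CLSFLOWER_REPLACE handler, for illustration only. */
static int foo_flower_replace(struct foo_priv *priv,
			      struct tc_cls_flower_offload *f)
{
	struct foo_hw_rule rule = { .cookie = f->cookie };	/* hypothetical */

	/* The dissector says which keys cls_flower populated; the key and
	 * mask values are read at the same offsets via
	 * skb_flow_dissector_target().
	 */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		rule.dst_ip = key->dst;
		rule.dst_ip_mask = mask->dst;
	}

	/* Translate f->exts (tc actions) and program the rule, keyed by
	 * f->cookie so a later TC_CLSFLOWER_DESTROY can remove it.
	 */
	return foo_hw_install_rule(priv, &rule);	/* hypothetical */
}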
			
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -417,6 +417,8 @@ enum {
 	TCA_FLOWER_KEY_TCP_DST,		/* be16 */
 	TCA_FLOWER_KEY_UDP_SRC,		/* be16 */
 	TCA_FLOWER_KEY_UDP_DST,		/* be16 */
+
+	TCA_FLOWER_FLAGS,
 	__TCA_FLOWER_MAX,
 };
 
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -165,6 +165,51 @@ static void fl_destroy_filter(struct rcu_head *head)
 	kfree(f);
 }
 
+static void fl_hw_destroy_filter(struct tcf_proto *tp, u64 cookie)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_flower_offload offload = {0};
+	struct tc_to_netdev tc;
+
+	if (!tc_should_offload(dev, 0))
+		return;
+
+	offload.command = TC_CLSFLOWER_DESTROY;
+	offload.cookie = cookie;
+
+	tc.type = TC_SETUP_CLSFLOWER;
+	tc.cls_flower = &offload;
+
+	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+}
+
+static void fl_hw_replace_filter(struct tcf_proto *tp,
+				 struct flow_dissector *dissector,
+				 struct fl_flow_key *mask,
+				 struct fl_flow_key *key,
+				 struct tcf_exts *actions,
+				 u64 cookie, u32 flags)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_flower_offload offload = {0};
+	struct tc_to_netdev tc;
+
+	if (!tc_should_offload(dev, flags))
+		return;
+
+	offload.command = TC_CLSFLOWER_REPLACE;
+	offload.cookie = cookie;
+	offload.dissector = dissector;
+	offload.mask = mask;
+	offload.key = key;
+	offload.exts = actions;
+
+	tc.type = TC_SETUP_CLSFLOWER;
+	tc.cls_flower = &offload;
+
+	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+}
+
 static bool fl_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -174,6 +219,7 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
 		return false;
 
 	list_for_each_entry_safe(f, next, &head->filters, list) {
+		fl_hw_destroy_filter(tp, (u64)f);
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, fl_destroy_filter);
 	}
@@ -459,6 +505,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	struct cls_fl_filter *fnew;
 	struct nlattr *tb[TCA_FLOWER_MAX + 1];
 	struct fl_flow_mask mask = {};
+	u32 flags = 0;
 	int err;
 
 	if (!tca[TCA_OPTIONS])
@@ -486,6 +533,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	}
 	fnew->handle = handle;
 
+	if (tb[TCA_FLOWER_FLAGS])
+		flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
 	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
 	if (err)
 		goto errout;
@@ -498,9 +548,20 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 				     head->ht_params);
 	if (err)
 		goto errout;
-	if (fold)
+
+	fl_hw_replace_filter(tp,
+			     &head->dissector,
+			     &mask.key,
+			     &fnew->key,
+			     &fnew->exts,
+			     (u64)fnew,
+			     flags);
+
+	if (fold) {
 		rhashtable_remove_fast(&head->ht, &fold->ht_node,
 				       head->ht_params);
+		fl_hw_destroy_filter(tp, (u64)fold);
+	}
 
 	*arg = (unsigned long) fnew;
 
@@ -527,6 +588,7 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg)
 	rhashtable_remove_fast(&head->ht, &f->ht_node,
 			       head->ht_params);
 	list_del_rcu(&f->list);
+	fl_hw_destroy_filter(tp, (u64)f);
 	tcf_unbind_filter(tp, &f->res);
 	call_rcu(&f->rcu, fl_destroy_filter);
 	return 0;