forked from mirrors/linux
		
	net: use indirect call wrappers at GRO network layer
This avoids an indirect call for the L3 GRO receive path, both for ipv4 and ipv6, if the latter is not compiled as a module. Note that when IPv6 is compiled as builtin, it will be checked first, so we have a single additional compare for the more common path. v1 -> v2: - adapted to INDIRECT_CALL_ changes Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									283c16a2df
								
							
						
					
					
						commit
						aaa5d90b39
					
				
					 3 changed files with 18 additions and 5 deletions
				
			
		|  | @ -2,6 +2,8 @@ | |||
| #ifndef _INET_COMMON_H | ||||
| #define _INET_COMMON_H | ||||
| 
 | ||||
| #include <linux/indirect_call_wrapper.h> | ||||
| 
 | ||||
| extern const struct proto_ops inet_stream_ops; | ||||
| extern const struct proto_ops inet_dgram_ops; | ||||
| 
 | ||||
|  |  | |||
|  | @ -145,6 +145,7 @@ | |||
| #include <linux/sctp.h> | ||||
| #include <net/udp_tunnel.h> | ||||
| #include <linux/net_namespace.h> | ||||
| #include <linux/indirect_call_wrapper.h> | ||||
| 
 | ||||
| #include "net-sysfs.h" | ||||
| 
 | ||||
|  | @ -5338,6 +5339,8 @@ static void flush_all_backlogs(void) | |||
| 	put_online_cpus(); | ||||
| } | ||||
| 
 | ||||
| INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int)); | ||||
| INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); | ||||
| static int napi_gro_complete(struct sk_buff *skb) | ||||
| { | ||||
| 	struct packet_offload *ptype; | ||||
|  | @ -5357,7 +5360,9 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
| 		if (ptype->type != type || !ptype->callbacks.gro_complete) | ||||
| 			continue; | ||||
| 
 | ||||
| 		err = ptype->callbacks.gro_complete(skb, 0); | ||||
| 		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, | ||||
| 					 ipv6_gro_complete, inet_gro_complete, | ||||
| 					 skb, 0); | ||||
| 		break; | ||||
| 	} | ||||
| 	rcu_read_unlock(); | ||||
|  | @ -5504,6 +5509,10 @@ static void gro_flush_oldest(struct list_head *head) | |||
| 	napi_gro_complete(oldest); | ||||
| } | ||||
| 
 | ||||
| INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *, | ||||
| 							   struct sk_buff *)); | ||||
| INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *, | ||||
| 							   struct sk_buff *)); | ||||
| static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||||
| { | ||||
| 	u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); | ||||
|  | @ -5553,7 +5562,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff | |||
| 			NAPI_GRO_CB(skb)->csum_valid = 0; | ||||
| 		} | ||||
| 
 | ||||
| 		pp = ptype->callbacks.gro_receive(gro_head, skb); | ||||
| 		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, | ||||
| 					ipv6_gro_receive, inet_gro_receive, | ||||
| 					gro_head, skb); | ||||
| 		break; | ||||
| 	} | ||||
| 	rcu_read_unlock(); | ||||
|  |  | |||
|  | @ -164,7 +164,7 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, | |||
| 	return len; | ||||
| } | ||||
| 
 | ||||
| static struct sk_buff *ipv6_gro_receive(struct list_head *head, | ||||
| INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, | ||||
| 							 struct sk_buff *skb) | ||||
| { | ||||
| 	const struct net_offload *ops; | ||||
|  | @ -301,7 +301,7 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, | |||
| 	return inet_gro_receive(head, skb); | ||||
| } | ||||
| 
 | ||||
| static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) | ||||
| INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) | ||||
| { | ||||
| 	const struct net_offload *ops; | ||||
| 	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Paolo Abeni
						Paolo Abeni