mirror of https://github.com/torvalds/linux.git
	udp: Increment UDP_MIB_IGNOREDMULTI for arriving unmatched multicasts
As NIC multicast filtering isn't perfect, and some platforms are quite content to spew broadcasts, we should not trigger an event for skb:kfree_skb when we do not have a match for such an incoming datagram. We do though want to avoid sweeping the matter under the rug entirely, so increment a suitable statistic.

This incorporates feedback from David L. Stevens, Karl Neiss and Eric Dumazet.

V3 - use bool per David Miller

Signed-off-by: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f46ad73ac6
commit 36cbb2452c

5 changed files with 20 additions and 6 deletions
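The commit message above says that an unmatched multicast or broadcast datagram should be accounted rather than reported as a drop. As a rough illustration of how that accounting becomes visible, here is a minimal userspace sketch (not part of the commit) that reads the new "IgnoredMulti" field out of /proc/net/snmp once a kernel carrying this patch exposes it via the snmp4_udp_list entry added below.

/* ignoredmulti.c - print the UDP "IgnoredMulti" counter from /proc/net/snmp.
 * /proc/net/snmp lists each protocol as a line of field names followed by a
 * line of values, both prefixed with "Udp:" for UDP. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char hdr[1024], val[1024];
	FILE *f = fopen("/proc/net/snmp", "r");

	if (!f) {
		perror("/proc/net/snmp");
		return 1;
	}
	while (fgets(hdr, sizeof(hdr), f)) {
		if (strncmp(hdr, "Udp:", 4) != 0)
			continue;
		if (!fgets(val, sizeof(val), f))
			break;
		char *hs, *vs;
		/* Walk the name line and the value line in lockstep. */
		char *h = strtok_r(hdr, " \n", &hs);
		char *v = strtok_r(val, " \n", &vs);
		while ((h = strtok_r(NULL, " \n", &hs)) &&
		       (v = strtok_r(NULL, " \n", &vs))) {
			if (strcmp(h, "IgnoredMulti") == 0) {
				printf("IgnoredMulti = %s\n", v);
				fclose(f);
				return 0;
			}
		}
		break;	/* only the first Udp: header/value pair is relevant */
	}
	fclose(f);
	fprintf(stderr, "IgnoredMulti not found (kernel without this patch?)\n");
	return 1;
}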
				
			
include/uapi/linux/snmp.h

@@ -156,6 +156,7 @@ enum
 	UDP_MIB_RCVBUFERRORS,			/* RcvbufErrors */
 	UDP_MIB_SNDBUFERRORS,			/* SndbufErrors */
 	UDP_MIB_CSUMERRORS,			/* InCsumErrors */
+	UDP_MIB_IGNOREDMULTI,			/* IgnoredMulti */
 	__UDP_MIB_MAX
 };
 
net/ipv4/proc.c

@@ -181,6 +181,7 @@ static const struct snmp_mib snmp4_udp_list[] = {
 	SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS),
 	SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS),
 	SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS),
+	SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI),
 	SNMP_MIB_SENTINEL
 };
 
net/ipv4/udp.c

@@ -1647,7 +1647,8 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 				    struct udphdr  *uh,
 				    __be32 saddr, __be32 daddr,
-				    struct udp_table *udptable)
+				    struct udp_table *udptable,
+				    int proto)
 {
 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
 	struct hlist_nulls_node *node;
@@ -1656,6 +1657,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif = skb->dev->ifindex;
 	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+	bool inner_flushed = false;
 
 	if (use_hash2) {
 		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
@@ -1674,6 +1676,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 					dif, hnum)) {
 			if (unlikely(count == ARRAY_SIZE(stack))) {
 				flush_stack(stack, count, skb, ~0);
+				inner_flushed = true;
 				count = 0;
 			}
 			stack[count++] = sk;
@@ -1695,7 +1698,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	if (count) {
 		flush_stack(stack, count, skb, count - 1);
 	} else {
-		kfree_skb(skb);
+		if (!inner_flushed)
+			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+					 proto == IPPROTO_UDPLITE);
+		consume_skb(skb);
 	}
 	return 0;
 }
@@ -1781,7 +1787,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
 		return __udp4_lib_mcast_deliver(net, skb, uh,
-				saddr, daddr, udptable);
+						saddr, daddr, udptable, proto);
 
 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 	if (sk != NULL) {
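To exercise the path changed above, one can aim a datagram at an address that matches no local socket. The following sender is an illustrative sketch only, not part of the commit; the broadcast address 192.168.1.255 and port 50000 are placeholders to adjust for the local subnet. On a receiving host with nothing bound to the port, a patched kernel should bump IgnoredMulti and quietly consume_skb() the datagram instead of signalling skb:kfree_skb.

/* bcast_probe.c - send a single UDP datagram to a broadcast address. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	int one = 1;
	struct sockaddr_in dst;
	const char payload[] = "ignoredmulti probe";
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* SO_BROADCAST must be set before sending to a broadcast address. */
	if (setsockopt(fd, SOL_SOCKET, SO_BROADCAST, &one, sizeof(one)) < 0) {
		perror("setsockopt(SO_BROADCAST)");
		return 1;
	}
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(50000);			/* assumed-unused port */
	inet_pton(AF_INET, "192.168.1.255", &dst.sin_addr); /* placeholder subnet broadcast */

	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("sendto");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}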
net/ipv6/proc.c

@@ -136,6 +136,7 @@ static const struct snmp_mib snmp6_udp6_list[] = {
 	SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
 	SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
 	SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS),
+	SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI),
 	SNMP_MIB_SENTINEL
 };
 
net/ipv6/udp.c

@@ -771,7 +771,7 @@ static void udp6_csum_zero_error(struct sk_buff *skb)
  */
 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		const struct in6_addr *saddr, const struct in6_addr *daddr,
-		struct udp_table *udptable)
+		struct udp_table *udptable, int proto)
 {
 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
 	const struct udphdr *uh = udp_hdr(skb);
@@ -781,6 +781,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif = inet6_iif(skb);
 	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+	bool inner_flushed = false;
 
 	if (use_hash2) {
 		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
@@ -803,6 +804,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		    (uh->check || udp_sk(sk)->no_check6_rx)) {
 			if (unlikely(count == ARRAY_SIZE(stack))) {
 				flush_stack(stack, count, skb, ~0);
+				inner_flushed = true;
 				count = 0;
 			}
 			stack[count++] = sk;
@@ -821,7 +823,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	if (count) {
 		flush_stack(stack, count, skb, count - 1);
 	} else {
-		kfree_skb(skb);
+		if (!inner_flushed)
+			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+					 proto == IPPROTO_UDPLITE);
+		consume_skb(skb);
 	}
 	return 0;
 }
@@ -873,7 +878,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	 */
 	if (ipv6_addr_is_multicast(daddr))
 		return __udp6_lib_mcast_deliver(net, skb,
-				saddr, daddr, udptable);
+				saddr, daddr, udptable, proto);
 
 	/* Unicast */
 
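The IPv6 side registers the counter as "Udp6IgnoredMulti" in /proc/net/snmp6, which uses a one-counter-per-line "Name value" format. A correspondingly small sketch (again, not part of the commit) to read it:

/* snmp6_ignoredmulti.c - print Udp6IgnoredMulti from /proc/net/snmp6. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[128];
	unsigned long long value;
	FILE *f = fopen("/proc/net/snmp6", "r");

	if (!f) {
		perror("/proc/net/snmp6");
		return 1;
	}
	/* Each line is "CounterName <whitespace> value". */
	while (fscanf(f, "%127s %llu", name, &value) == 2) {
		if (strcmp(name, "Udp6IgnoredMulti") == 0) {
			printf("Udp6IgnoredMulti = %llu\n", value);
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	fprintf(stderr, "Udp6IgnoredMulti not found (kernel without this patch?)\n");
	return 1;
}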