	ip: Router Alert RCU conversion
Straightforward conversion to RCU. One rwlock becomes a spinlock, and is static.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:

parent 8b37ef0a1f
commit 66018506e1

3 changed files with 18 additions and 18 deletions
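Before the diff itself, a rough sketch of the pattern the commit applies (illustrative kernel-style C, not part of the commit; ra_node, ra_head, ra_lock and ra_add are hypothetical stand-ins for ip_ra_chain, ip_ra_lock and the insert path in ip_ra_control()): the single writer path serializes on a private spinlock and publishes new nodes with rcu_assign_pointer(), readers traverse the chain with no lock at all, and unlinked nodes are only freed after an RCU grace period.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for struct ip_ra_chain. */
struct ra_node {
	struct ra_node	*next;
	struct rcu_head	rcu;	/* lets call_rcu() defer the kfree() */
};

static struct ra_node *ra_head;		/* readers use rcu_dereference() */
static DEFINE_SPINLOCK(ra_lock);	/* taken by writers only */

/* Writer side: insert a node at the head of the chain. */
static int ra_add(void)
{
	struct ra_node *new = kmalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	spin_lock_bh(&ra_lock);
	new->next = ra_head;			/* initialize fully before publishing */
	rcu_assign_pointer(ra_head, new);	/* publish with the required barrier */
	spin_unlock_bh(&ra_lock);
	return 0;
}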
include/net/ip.h

@@ -62,10 +62,10 @@ struct ip_ra_chain {
 	struct ip_ra_chain	*next;
 	struct sock		*sk;
 	void			(*destructor)(struct sock *);
+	struct rcu_head		rcu;
 };
 
 extern struct ip_ra_chain *ip_ra_chain;
-extern rwlock_t ip_ra_lock;
 
 /* IP flags. */
 #define IP_CE		0x8000		/* Flag: "Congestion"		*/
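Two things happen in this header (editorial note, not part of the commit): the lock disappears from the public header because readers no longer take it, so it can become static in net/ipv4/ip_sockglue.c; and the new rcu field is the hook call_rcu() needs to defer freeing an unlinked entry. A sketch of how such a callback recovers the enclosing object (the real callback added by this commit is ip_ra_free_rcu(), shown in the last file below):

/* Sketch only, assuming the struct ip_ra_chain definition above. */
static void ra_free_rcu(struct rcu_head *head)
{
	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);

	kfree(ra);	/* runs after a grace period, so no reader still holds ra */
}
/* ...later, instead of kfree(ra):  call_rcu(&ra->rcu, ra_free_rcu); */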
				
			
net/ipv4/ip_input.c

@@ -146,7 +146,7 @@
 #include <linux/netlink.h>
 
 /*
- *	Process Router Attention IP option
+ *	Process Router Attention IP option (RFC 2113)
  */
 int ip_call_ra_chain(struct sk_buff *skb)
 {
@@ -155,8 +155,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
 	struct sock *last = NULL;
 	struct net_device *dev = skb->dev;
 
-	read_lock(&ip_ra_lock);
-	for (ra = ip_ra_chain; ra; ra = ra->next) {
+	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
 		struct sock *sk = ra->sk;
 
 		/* If socket is bound to an interface, only report
@@ -167,10 +166,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
 		     sk->sk_bound_dev_if == dev->ifindex) &&
 		    net_eq(sock_net(sk), dev_net(dev))) {
 			if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
-				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
-					read_unlock(&ip_ra_lock);
+				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
 					return 1;
-				}
 			}
 			if (last) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -183,10 +180,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
 
 	if (last) {
 		raw_rcv(last, skb);
-		read_unlock(&ip_ra_lock);
 		return 1;
 	}
-	read_unlock(&ip_ra_lock);
 	return 0;
 }
 
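Note how the reader changes: read_lock()/read_unlock() simply disappear rather than being replaced by rcu_read_lock(). The commit message does not spell this out, but the likely reasoning is that ip_call_ra_chain() runs in the IPv4 receive path, which already executes inside an RCU read-side critical section (the core receive code takes rcu_read_lock() around protocol handlers), so rcu_dereference() is safe as written. A caller outside such a context would have to bracket the walk itself; a hypothetical helper, not part of the patch:

/* Hypothetical reader, assuming the ip_ra_chain declarations above. */
static int ra_chain_has_sk(struct sock *sk)
{
	struct ip_ra_chain *ra;
	int found = 0;

	rcu_read_lock();
	for (ra = rcu_dereference(ip_ra_chain); ra;
	     ra = rcu_dereference(ra->next)) {
		if (ra->sk == sk) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();	/* after this, nodes seen above may be freed */
	return found;
}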
				
			
net/ipv4/ip_sockglue.c

@@ -239,7 +239,12 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
    sent to multicast group to reach destination designated router.
  */
 struct ip_ra_chain *ip_ra_chain;
-DEFINE_RWLOCK(ip_ra_lock);
+static DEFINE_SPINLOCK(ip_ra_lock);
+
+static void ip_ra_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ip_ra_chain, rcu));
+}
 
 int ip_ra_control(struct sock *sk, unsigned char on,
 		  void (*destructor)(struct sock *))
@@ -251,35 +256,35 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 
 	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
-	write_lock_bh(&ip_ra_lock);
+	spin_lock_bh(&ip_ra_lock);
 	for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
 		if (ra->sk == sk) {
 			if (on) {
-				write_unlock_bh(&ip_ra_lock);
+				spin_unlock_bh(&ip_ra_lock);
 				kfree(new_ra);
 				return -EADDRINUSE;
 			}
-			*rap = ra->next;
-			write_unlock_bh(&ip_ra_lock);
+			rcu_assign_pointer(*rap, ra->next);
+			spin_unlock_bh(&ip_ra_lock);
 
 			if (ra->destructor)
 				ra->destructor(sk);
 			sock_put(sk);
-			kfree(ra);
+			call_rcu(&ra->rcu, ip_ra_free_rcu);
 			return 0;
 		}
 	}
 	if (new_ra == NULL) {
-		write_unlock_bh(&ip_ra_lock);
+		spin_unlock_bh(&ip_ra_lock);
 		return -ENOBUFS;
 	}
 	new_ra->sk = sk;
 	new_ra->destructor = destructor;
 
 	new_ra->next = ra;
-	*rap = new_ra;
+	rcu_assign_pointer(*rap, new_ra);
 	sock_hold(sk);
-	write_unlock_bh(&ip_ra_lock);
+	spin_unlock_bh(&ip_ra_lock);
 
 	return 0;
 }
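The removal path in ip_ra_control() shows the writer-side ordering that keeps the lockless readers safe: unlink with rcu_assign_pointer() while holding the spinlock, drop the lock, and reclaim the node only after a grace period, here via call_rcu() and ip_ra_free_rcu(). Since ip_ra_control() may sleep anyway (it already allocates with GFP_KERNEL), a blocking variant using synchronize_rcu() would also have been correct; call_rcu() just avoids stalling the caller for a full grace period. For comparison, a hypothetical blocking sketch of the same removal, not what the commit does:

/* Hypothetical alternative, assuming the definitions from this patch. */
static void ra_del_sync(struct ip_ra_chain **rap, struct ip_ra_chain *ra,
			struct sock *sk)
{
	spin_lock_bh(&ip_ra_lock);
	rcu_assign_pointer(*rap, ra->next);	/* new readers skip ra */
	spin_unlock_bh(&ip_ra_lock);

	if (ra->destructor)
		ra->destructor(sk);
	sock_put(sk);

	synchronize_rcu();	/* wait out readers that may still see ra */
	kfree(ra);		/* safe: no reader can reference ra now */
}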