forked from mirrors/linux
		
	tipc: enqueue arrived buffers in socket in separate function
The code for enqueuing arriving buffers in the function tipc_sk_rcv() contains long code lines and currently goes to two indentation levels. As a cosmetic preparation for the next commits, we break it out into a separate function.

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1186adf7df
commit d570d86497

1 changed file with 31 additions and 15 deletions
@@ -1764,6 +1764,35 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
+/**
+ * tipc_sk_enqueue_skb - enqueue buffer to socket or backlog queue
+ * @sk: socket
+ * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @dnode: if buffer should be forwarded/returned, send to this node
+ *
+ * Caller must hold socket lock
+ *
+ * Returns TIPC_OK (0) or -tipc error code
+ */
+static int tipc_sk_enqueue_skb(struct sock *sk, struct sk_buff **skb)
+{
+	unsigned int lim;
+	atomic_t *dcnt;
+
+	if (unlikely(!*skb))
+		return TIPC_OK;
+	if (!sock_owned_by_user(sk))
+		return filter_rcv(sk, skb);
+	dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+	if (sk->sk_backlog.len)
+		atomic_set(dcnt, 0);
+	lim = rcvbuf_limit(sk, *skb) + atomic_read(dcnt);
+	if (unlikely(sk_add_backlog(sk, *skb, lim)))
+		return -TIPC_ERR_OVERLOAD;
+	*skb = NULL;
+	return TIPC_OK;
+}
+
 /**
  * tipc_sk_rcv - handle incoming message
  * @skb: buffer containing arriving message
@@ -1776,8 +1805,7 @@ int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
 	struct tipc_net *tn;
 	struct sock *sk;
 	u32 dport = msg_destport(buf_msg(skb));
-	int err = TIPC_OK;
-	uint limit;
+	int err;
 	u32 dnode;
 
 	/* Validate destination and message */
@@ -1788,20 +1816,8 @@ int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
 	}
 	sk = &tsk->sk;
-
-	/* Queue message */
 	spin_lock_bh(&sk->sk_lock.slock);
-	if (!sock_owned_by_user(sk)) {
-		err = filter_rcv(sk, &skb);
-	} else {
-		if (sk->sk_backlog.len == 0)
-			atomic_set(&tsk->dupl_rcvcnt, 0);
-		limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
-		if (likely(!sk_add_backlog(sk, skb, limit)))
-			skb = NULL;
-		else
-			err = -TIPC_ERR_OVERLOAD;
-	}
+	err = tipc_sk_enqueue_skb(sk, &skb);
 	spin_unlock_bh(&sk->sk_lock.slock);
 	sock_put(sk);
 exit:
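For quick reference, here is a minimal sketch of the call site as it reads after this patch, assembled from the hunks above; the surrounding lookup, validation and rejection code in tipc_sk_rcv() is unchanged and omitted, so this is a shape-of-the-flow illustration rather than a buildable excerpt:

	/* Queue or deliver the buffer under the socket spinlock; the new
	 * helper consumes the buffer (sets skb to NULL) on success and
	 * returns -TIPC_ERR_OVERLOAD if the backlog limit is exceeded.
	 */
	spin_lock_bh(&sk->sk_lock.slock);
	err = tipc_sk_enqueue_skb(sk, &skb);
	spin_unlock_bh(&sk->sk_lock.slock);
	sock_put(sk);

Inside the helper, filter_rcv() still handles the case where the socket is not owned by a user context, while owned sockets fall back to sk_add_backlog() with a limit of rcvbuf_limit() plus the dupl_rcvcnt counter.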