	mptcp: avoid lock_fast usage in accept path
Once event support is added this may need to allocate memory while msk lock is held with softirqs disabled.

Not using lock_fast also allows to do the allocation with GFP_KERNEL.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
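The locking rationale can be illustrated with a small sketch: in the lock_sock_fast() fast path the socket spinlock stays held with bottom halves disabled, so any allocation made there would have to use GFP_ATOMIC, whereas after a plain lock_sock() the caller owns the socket in process context and may sleep, which makes GFP_KERNEL usable. The function name and buffer size below are made up for illustration and are not part of this patch:

#include <linux/slab.h>
#include <net/sock.h>

/* Illustrative only: contrast the two locking schemes mentioned in the
 * commit message. The helper name and 128-byte buffer are hypothetical.
 */
static int mptcp_event_alloc_sketch(struct sock *sk)
{
	void *buf;

	/* With lock_sock_fast(), the fast path keeps the socket spinlock
	 * held with softirqs disabled, so a sleeping allocation is not
	 * allowed there and GFP_ATOMIC would be required:
	 *
	 *	bool slowpath = lock_sock_fast(sk);
	 *	buf = kmalloc(128, GFP_ATOMIC);
	 *	...
	 *	unlock_sock_fast(sk, slowpath);
	 */

	/* With plain lock_sock() the caller owns the socket in process
	 * context and may sleep, so GFP_KERNEL is fine:
	 */
	lock_sock(sk);
	buf = kmalloc(128, GFP_KERNEL);
	if (!buf) {
		release_sock(sk);
		return -ENOMEM;
	}

	/* ... build and deliver the event while the socket is owned ... */

	kfree(buf);
	release_sock(sk);
	return 0;
}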
parent 6c714f1b54
commit 4d54cc3211

3 changed files with 35 additions and 3 deletions
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -14,6 +14,7 @@
  */
 struct genl_multicast_group {
 	char			name[GENL_NAMSIZ];
+	u8			flags;
 };
 
 struct genl_ops;

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3260,9 +3260,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
 		struct mptcp_subflow_context *subflow;
 		struct sock *newsk = newsock->sk;
-		bool slowpath;
 
-		slowpath = lock_sock_fast(newsk);
+		lock_sock(newsk);
 
 		/* PM/worker can now acquire the first subflow socket
 		 * lock without racing with listener queue cleanup,
@@ -3288,7 +3287,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 			if (!ssk->sk_socket)
 				mptcp_sock_graft(ssk, newsock);
 		}
-		unlock_sock_fast(newsk, slowpath);
+		release_sock(newsk);
 	}
 
 	if (inet_csk_listen_poll(ssock->sk))

diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1360,11 +1360,43 @@ static struct genl_family genl_ctrl __ro_after_init = {
 	.netnsok = true,
 };
 
+static int genl_bind(struct net *net, int group)
+{
+	const struct genl_family *family;
+	unsigned int id;
+	int ret = 0;
+
+	genl_lock_all();
+
+	idr_for_each_entry(&genl_fam_idr, family, id) {
+		const struct genl_multicast_group *grp;
+		int i;
+
+		if (family->n_mcgrps == 0)
+			continue;
+
+		i = group - family->mcgrp_offset;
+		if (i < 0 || i >= family->n_mcgrps)
+			continue;
+
+		grp = &family->mcgrps[i];
+		if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
+		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
+			ret = -EPERM;
+
+		break;
+	}
+
+	genl_unlock_all();
+	return ret;
+}
+
 static int __net_init genl_pernet_init(struct net *net)
 {
 	struct netlink_kernel_cfg cfg = {
 		.input		= genl_rcv,
 		.flags		= NL_CFG_F_NONROOT_RECV,
+		.bind		= genl_bind,
 	};
 
 	/* we'll bump the group number right afterwards */
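A usage note: the new flags field in struct genl_multicast_group is what the genl_bind() hook above consults. A family could mark one of its multicast groups as admin-only roughly like this (the group array and names are hypothetical, not taken from this commit):

#include <net/genetlink.h>

/* Hypothetical group list: binding to "events" fails with -EPERM for
 * tasks lacking CAP_NET_ADMIN in the socket's network namespace.
 */
static const struct genl_multicast_group example_mcgrps[] = {
	{ .name = "notify", },					/* unrestricted */
	{ .name = "events", .flags = GENL_UNS_ADMIN_PERM, },	/* admin-only */
};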