mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-03 18:20:25 +02:00 
			
		
		
		
	net: memcontrol: defer call to mem_cgroup_sk_alloc()
Instead of calling mem_cgroup_sk_alloc() from BH context,
it is better to call it from inet_csk_accept() in process context.
Not only does this remove code in mem_cgroup_sk_alloc(), it also
fixes a bug, since the listener might have been dismantled and css_get()
might cause a use-after-free.
Fixes: e994b2f0fb ("tcp: do not lock listener to process SYN packets")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
This commit is contained in:
		
							parent
							
								
									529a86e063
								
							
						
					
					
						commit
						9f1c2674b3
					
				
					 3 changed files with 5 additions and 16 deletions
				
			
		| 
						 | 
					@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
 | 
				
			||||||
	if (!mem_cgroup_sockets_enabled)
 | 
						if (!mem_cgroup_sockets_enabled)
 | 
				
			||||||
		return;
 | 
							return;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
					 | 
				
			||||||
	 * Socket cloning can throw us here with sk_memcg already
 | 
					 | 
				
			||||||
	 * filled. It won't however, necessarily happen from
 | 
					 | 
				
			||||||
	 * process context. So the test for root memcg given
 | 
					 | 
				
			||||||
	 * the current task's memcg won't help us in this case.
 | 
					 | 
				
			||||||
	 *
 | 
					 | 
				
			||||||
	 * Respecting the original socket's memcg is a better
 | 
					 | 
				
			||||||
	 * decision in this case.
 | 
					 | 
				
			||||||
	 */
 | 
					 | 
				
			||||||
	if (sk->sk_memcg) {
 | 
					 | 
				
			||||||
		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
 | 
					 | 
				
			||||||
		css_get(&sk->sk_memcg->css);
 | 
					 | 
				
			||||||
		return;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	rcu_read_lock();
 | 
						rcu_read_lock();
 | 
				
			||||||
	memcg = mem_cgroup_from_task(current);
 | 
						memcg = mem_cgroup_from_task(current);
 | 
				
			||||||
	if (memcg == root_mem_cgroup)
 | 
						if (memcg == root_mem_cgroup)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1677,6 +1677,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 | 
				
			||||||
		newsk->sk_dst_pending_confirm = 0;
 | 
							newsk->sk_dst_pending_confirm = 0;
 | 
				
			||||||
		newsk->sk_wmem_queued	= 0;
 | 
							newsk->sk_wmem_queued	= 0;
 | 
				
			||||||
		newsk->sk_forward_alloc = 0;
 | 
							newsk->sk_forward_alloc = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							/* sk->sk_memcg will be populated at accept() time */
 | 
				
			||||||
 | 
							newsk->sk_memcg = NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		atomic_set(&newsk->sk_drops, 0);
 | 
							atomic_set(&newsk->sk_drops, 0);
 | 
				
			||||||
		newsk->sk_send_head	= NULL;
 | 
							newsk->sk_send_head	= NULL;
 | 
				
			||||||
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 | 
							newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 | 
				
			||||||
| 
						 | 
					@ -1714,7 +1718,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 | 
				
			||||||
		newsk->sk_incoming_cpu = raw_smp_processor_id();
 | 
							newsk->sk_incoming_cpu = raw_smp_processor_id();
 | 
				
			||||||
		atomic64_set(&newsk->sk_cookie, 0);
 | 
							atomic64_set(&newsk->sk_cookie, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		mem_cgroup_sk_alloc(newsk);
 | 
					 | 
				
			||||||
		cgroup_sk_alloc(&newsk->sk_cgrp_data);
 | 
							cgroup_sk_alloc(&newsk->sk_cgrp_data);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		/*
 | 
							/*
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		spin_unlock_bh(&queue->fastopenq.lock);
 | 
							spin_unlock_bh(&queue->fastopenq.lock);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
						mem_cgroup_sk_alloc(newsk);
 | 
				
			||||||
out:
 | 
					out:
 | 
				
			||||||
	release_sock(sk);
 | 
						release_sock(sk);
 | 
				
			||||||
	if (req)
 | 
						if (req)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue