forked from mirrors/linux
		
	tcp: remove max_qlen_log
This control variable was set at the first listen(fd, backlog) call, but not updated if the application later tried to increase or decrease the backlog. It made sense at the time the listener had a non-resizable hash table. Also, rounding to powers of two was not very friendly. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									10cbc8f179
								
							
						
					
					
						commit
						ef547f2ac1
					
				
					 4 changed files with 6 additions and 18 deletions
				
			
		|  | @ -295,7 +295,7 @@ static inline int inet_csk_reqsk_queue_young(const struct sock *sk) | ||||||
| 
 | 
 | ||||||
| static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) | static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) | ||||||
| { | { | ||||||
| 	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); | 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); | void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); | ||||||
|  |  | ||||||
|  | @ -157,7 +157,7 @@ struct fastopen_queue { | ||||||
| struct request_sock_queue { | struct request_sock_queue { | ||||||
| 	spinlock_t		rskq_lock; | 	spinlock_t		rskq_lock; | ||||||
| 	u8			rskq_defer_accept; | 	u8			rskq_defer_accept; | ||||||
| 	u8			max_qlen_log; | 
 | ||||||
| 	u32			synflood_warned; | 	u32			synflood_warned; | ||||||
| 	atomic_t		qlen; | 	atomic_t		qlen; | ||||||
| 	atomic_t		young; | 	atomic_t		young; | ||||||
|  | @ -169,8 +169,7 @@ struct request_sock_queue { | ||||||
| 					     */ | 					     */ | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| void reqsk_queue_alloc(struct request_sock_queue *queue, | void reqsk_queue_alloc(struct request_sock_queue *queue); | ||||||
| 		       unsigned int nr_table_entries); |  | ||||||
| 
 | 
 | ||||||
| void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, | void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, | ||||||
| 			   bool reset); | 			   bool reset); | ||||||
|  | @ -240,9 +239,4 @@ static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) | ||||||
| 	return atomic_read(&queue->young); | 	return atomic_read(&queue->young); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) |  | ||||||
| { |  | ||||||
| 	return reqsk_queue_len(queue) >> queue->max_qlen_log; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| #endif /* _REQUEST_SOCK_H */ | #endif /* _REQUEST_SOCK_H */ | ||||||
|  |  | ||||||
|  | @ -37,13 +37,8 @@ | ||||||
| int sysctl_max_syn_backlog = 256; | int sysctl_max_syn_backlog = 256; | ||||||
| EXPORT_SYMBOL(sysctl_max_syn_backlog); | EXPORT_SYMBOL(sysctl_max_syn_backlog); | ||||||
| 
 | 
 | ||||||
| void reqsk_queue_alloc(struct request_sock_queue *queue, | void reqsk_queue_alloc(struct request_sock_queue *queue) | ||||||
| 		       unsigned int nr_table_entries) |  | ||||||
| { | { | ||||||
| 	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog); |  | ||||||
| 	nr_table_entries = max_t(u32, nr_table_entries, 8); |  | ||||||
| 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); |  | ||||||
| 
 |  | ||||||
| 	spin_lock_init(&queue->rskq_lock); | 	spin_lock_init(&queue->rskq_lock); | ||||||
| 
 | 
 | ||||||
| 	spin_lock_init(&queue->fastopenq.lock); | 	spin_lock_init(&queue->fastopenq.lock); | ||||||
|  | @ -53,7 +48,6 @@ void reqsk_queue_alloc(struct request_sock_queue *queue, | ||||||
| 	queue->fastopenq.max_qlen = 0; | 	queue->fastopenq.max_qlen = 0; | ||||||
| 
 | 
 | ||||||
| 	queue->rskq_accept_head = NULL; | 	queue->rskq_accept_head = NULL; | ||||||
| 	queue->max_qlen_log = ilog2(nr_table_entries); |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  |  | ||||||
|  | @ -579,7 +579,7 @@ static void reqsk_timer_handler(unsigned long data) | ||||||
| 	 * ones are about to clog our table. | 	 * ones are about to clog our table. | ||||||
| 	 */ | 	 */ | ||||||
| 	qlen = reqsk_queue_len(queue); | 	qlen = reqsk_queue_len(queue); | ||||||
| 	if (qlen >> (queue->max_qlen_log - 1)) { | 	if ((qlen << 1) > sk_listener->sk_max_ack_backlog) { | ||||||
| 		int young = reqsk_queue_len_young(queue) << 1; | 		int young = reqsk_queue_len_young(queue) << 1; | ||||||
| 
 | 
 | ||||||
| 		while (thresh > 2) { | 		while (thresh > 2) { | ||||||
|  | @ -732,7 +732,7 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) | ||||||
| 	struct inet_connection_sock *icsk = inet_csk(sk); | 	struct inet_connection_sock *icsk = inet_csk(sk); | ||||||
| 	struct inet_sock *inet = inet_sk(sk); | 	struct inet_sock *inet = inet_sk(sk); | ||||||
| 
 | 
 | ||||||
| 	reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries); | 	reqsk_queue_alloc(&icsk->icsk_accept_queue); | ||||||
| 
 | 
 | ||||||
| 	sk->sk_max_ack_backlog = 0; | 	sk->sk_max_ack_backlog = 0; | ||||||
| 	sk->sk_ack_backlog = 0; | 	sk->sk_ack_backlog = 0; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Eric Dumazet
						Eric Dumazet