commit 2c7d3daceb
parent 984652dd8b
Author: Christoph Hellwig <hch@lst.de>

    net/tcp: convert to ->poll_mask

    Signed-off-by: Christoph Hellwig <hch@lst.de>

 4 changed files with 9 additions and 21 deletions
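For context: with ->poll_mask, the socket core performs the wait-queue registration itself, so the per-protocol callback no longer takes a struct file and a poll_table; it only reports which of the requested events are currently ready. A minimal sketch of the two callback shapes this conversion moves between (proto_ops_sketch is a hypothetical stand-in for illustration, not the real struct proto_ops, and it compiles only against kernel headers):

	/* Sketch only: hypothetical struct, member subset assumed from the
	 * diff below; needs kernel headers for __poll_t, struct socket, etc.
	 */
	#include <linux/poll.h>
	#include <linux/net.h>

	struct proto_ops_sketch {
		/* classic entry point: receives the file and a poll_table so
		 * it can register on the socket's wait queue itself */
		__poll_t (*poll)(struct file *file, struct socket *sock,
				 struct poll_table_struct *wait);
		/* ->poll_mask style: the caller handles waiting; the callback
		 * only returns the subset of 'events' that is ready now */
		__poll_t (*poll_mask)(struct socket *sock, __poll_t events);
	};

The diff below applies exactly that change to TCP: tcp_poll() becomes tcp_poll_mask(), and the two stream proto_ops tables switch which member they fill in.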
				
			
diff --git a/include/net/tcp.h b/include/net/tcp.h
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -388,8 +388,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
 void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll(struct file *file, struct socket *sock,
-		      struct poll_table_struct *wait);
+__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
 		   char __user *optval, int __user *optlen);
 int tcp_setsockopt(struct sock *sk, int level, int optname,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = {
 	.socketpair	   = sock_no_socketpair,
 	.accept		   = inet_accept,
 	.getname	   = inet_getname,
-	.poll		   = tcp_poll,
+	.poll_mask	   = tcp_poll_mask,
 	.ioctl		   = inet_ioctl,
 	.listen		   = inet_listen,
 	.shutdown	   = inet_shutdown,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -494,32 +494,21 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- *	Wait for a TCP event.
- *
- *	Note that we don't need to lock the socket, as the upper poll layers
- *	take care of normal races (between the test and the event) and we don't
- *	go look at any of the socket buffers directly.
+ * Socket is not locked. We are protected from async events by poll logic and
+ * correct handling of state changes made by other threads is impossible in
+ * any case.
  */
-__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 {
-	__poll_t mask;
 	struct sock *sk = sock->sk;
 	const struct tcp_sock *tp = tcp_sk(sk);
+	__poll_t mask = 0;
 	int state;
 
-	sock_poll_wait(file, sk_sleep(sk), wait);
-
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
-	/* Socket is not locked. We are protected from async events
-	 * by poll logic and correct handling of state changes
-	 * made by other threads is impossible in any case.
-	 */
-
-	mask = 0;
-
 	/*
 	 * EPOLLHUP is certainly not done right. But poll() doesn't
 	 * have a notion of HUP in just one direction, and for a
@@ -600,7 +589,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
 	return mask;
 }
-EXPORT_SYMBOL(tcp_poll);
+EXPORT_SYMBOL(tcp_poll_mask);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -571,7 +571,7 @@ const struct proto_ops inet6_stream_ops = {
 	.socketpair	   = sock_no_socketpair,	/* a do nothing	*/
 	.accept		   = inet_accept,		/* ok		*/
 	.getname	   = inet6_getname,
-	.poll		   = tcp_poll,			/* ok		*/
+	.poll_mask	   = tcp_poll_mask,		/* ok		*/
 	.ioctl		   = inet6_ioctl,		/* must change  */
 	.listen		   = inet_listen,		/* ok		*/
 	.shutdown	   = inet_shutdown,		/* ok		*/