path: root/net/ipv4/tcp.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 17:34:03 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 17:34:03 -0500
commit     a9a08845e9acbd224e4ee466f5c1275ed50054e8 (patch)
tree       415d6e6a82e001c65e6b161539411f54ba5fe8ce /net/ipv4/tcp.c
parent     ee5daa1361fceb6f482c005bcc9ba8d01b92ea5c (diff)
vfs: do bulk POLL* -> EPOLL* replacement
This is the mindless scripted replacement of kernel use of POLL*
variables as described by Al, done by this script:

    for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
        L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
        for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
    done

with de-mangling cleanups yet to come.

NOTE! On almost all architectures, the EPOLL* constants have the same
values as the POLL* constants do. But the keyword here is "almost". For
various bad reasons they aren't the same, and epoll() doesn't actually
work quite correctly in some cases due to this on Sparc et al.

The next patch from Al will sort out the final differences, and we
should be all done.

Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
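[Editor's note: the "almost" above is the interesting part. As a rough
illustration only (not part of this commit, and assuming nothing beyond
standard glibc headers), the invariant that the bulk replacement relies on
could be checked at compile time in userspace like this; on the exceptional
architectures mentioned above some of these asserts would fail:

    /* illustrative sketch, not from the patch */
    #define _GNU_SOURCE          /* for POLLRDHUP in <poll.h> */
    #include <assert.h>
    #include <poll.h>
    #include <sys/epoll.h>

    /* On most architectures the EPOLL* values mirror the POLL* values,
     * which is why a mechanical POLL* -> EPOLL* substitution is safe. */
    static_assert(POLLIN    == EPOLLIN,    "POLLIN/EPOLLIN differ");
    static_assert(POLLOUT   == EPOLLOUT,   "POLLOUT/EPOLLOUT differ");
    static_assert(POLLPRI   == EPOLLPRI,   "POLLPRI/EPOLLPRI differ");
    static_assert(POLLERR   == EPOLLERR,   "POLLERR/EPOLLERR differ");
    static_assert(POLLHUP   == EPOLLHUP,   "POLLHUP/EPOLLHUP differ");
    static_assert(POLLRDHUP == EPOLLRDHUP, "POLLRDHUP/EPOLLRDHUP differ");
]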
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c  34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c059aa7df0a9..48636aee23c3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -512,36 +512,36 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	mask = 0;
 
 	/*
-	 * POLLHUP is certainly not done right. But poll() doesn't
+	 * EPOLLHUP is certainly not done right. But poll() doesn't
 	 * have a notion of HUP in just one direction, and for a
 	 * socket the read side is more interesting.
 	 *
-	 * Some poll() documentation says that POLLHUP is incompatible
-	 * with the POLLOUT/POLLWR flags, so somebody should check this
+	 * Some poll() documentation says that EPOLLHUP is incompatible
+	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
 	 * all. But careful, it tends to be safer to return too many
 	 * bits than too few, and you can easily break real applications
 	 * if you don't tell them that something has hung up!
 	 *
 	 * Check-me.
 	 *
-	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
+	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
 	 * our fs/select.c). It means that after we received EOF,
 	 * poll always returns immediately, making impossible poll() on write()
-	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
+	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
 	 * if and only if shutdown has been made in both directions.
 	 * Actually, it is interesting to look how Solaris and DUX
-	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
+	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
 	 * then we could set it on SND_SHUTDOWN. BTW examples given
 	 * in Stevens' books assume exactly this behaviour, it explains
-	 * why POLLHUP is incompatible with POLLOUT. --ANK
+	 * why EPOLLHUP is incompatible with EPOLLOUT. --ANK
 	 *
 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 	 * blocking on fresh not-connected or disconnected socket. --ANK
 	 */
 	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
 	/* Connected or passive Fast Open socket? */
 	if (state != TCP_SYN_SENT &&
@@ -554,11 +554,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 			target++;
 
 		if (tp->rcv_nxt - tp->copied_seq >= target)
-			mask |= POLLIN | POLLRDNORM;
+			mask |= EPOLLIN | EPOLLRDNORM;
 
 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 			if (sk_stream_is_writeable(sk)) {
-				mask |= POLLOUT | POLLWRNORM;
+				mask |= EPOLLOUT | EPOLLWRNORM;
 			} else { /* send SIGIO later */
 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@@ -570,24 +570,24 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 				 */
 				smp_mb__after_atomic();
 				if (sk_stream_is_writeable(sk))
-					mask |= POLLOUT | POLLWRNORM;
+					mask |= EPOLLOUT | EPOLLWRNORM;
 			}
 		} else
-			mask |= POLLOUT | POLLWRNORM;
+			mask |= EPOLLOUT | EPOLLWRNORM;
 
 		if (tp->urg_data & TCP_URG_VALID)
-			mask |= POLLPRI;
+			mask |= EPOLLPRI;
 	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
 		/* Active TCP fastopen socket with defer_connect
-		 * Return POLLOUT so application can call write()
+		 * Return EPOLLOUT so application can call write()
 		 * in order for kernel to generate SYN+data
 		 */
-		mask |= POLLOUT | POLLWRNORM;
+		mask |= EPOLLOUT | EPOLLWRNORM;
 	}
 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 	smp_rmb();
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR;
+		mask |= EPOLLERR;
 
 	return mask;
 }
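[Editor's note: for context, a minimal userspace sketch (not from this
commit) of how the mask computed by tcp_poll() above surfaces through
epoll. "connfd" is assumed to be an already-connected TCP socket; error
handling is omitted for brevity:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/epoll.h>

    static void watch_tcp_fd(int connfd)
    {
    	int epfd = epoll_create1(0);
    	struct epoll_event ev = {
    		/* EPOLLRDHUP requests the "peer shut down its write side"
    		 * bit that tcp_poll() sets on RCV_SHUTDOWN; EPOLLHUP and
    		 * EPOLLERR are always reported, even when not requested. */
    		.events = EPOLLIN | EPOLLOUT | EPOLLRDHUP,
    		.data.fd = connfd,
    	};
    	epoll_ctl(epfd, EPOLL_CTL_ADD, connfd, &ev);

    	struct epoll_event out;
    	if (epoll_wait(epfd, &out, 1, -1) == 1) {
    		if (out.events & EPOLLHUP)
    			printf("both directions shut down (or TCP_CLOSE)\n");
    		else if (out.events & EPOLLRDHUP)
    			printf("peer sent FIN; reads will return EOF\n");
    		if (out.events & EPOLLIN)
    			printf("data (or EOF) is readable\n");
    		if (out.events & EPOLLOUT)
    			printf("socket is writeable\n");
    	}
    	close(epfd);
    }
]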