author    Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 17:34:03 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 17:34:03 -0500
commit    a9a08845e9acbd224e4ee466f5c1275ed50054e8
tree      415d6e6a82e001c65e6b161539411f54ba5fe8ce /net
parent    ee5daa1361fceb6f482c005bcc9ba8d01b92ea5c
vfs: do bulk POLL* -> EPOLL* replacement
This is the mindless scripted replacement of kernel use of POLL*
variables as described by Al, done by this script:

    for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
        L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
        for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
    done

with de-mangling cleanups yet to come.

NOTE! On almost all architectures, the EPOLL* constants have the same
values as the POLL* constants do. But the keyword here is "almost".
For various bad reasons they aren't the same, and epoll() doesn't
actually work quite correctly in some cases due to this on Sparc et al.

The next patch from Al will sort out the final differences, and we
should be all done.

Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
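For context, the pattern the script rewrites looks like the sketch below: a poll method returning a __poll_t mask built from EPOLL* constants. This is an illustration only; the device structure and field names are hypothetical, not code from this patch, but the shape mirrors the converted handlers in the diff (batadv_socket_poll, rfkill_fop_poll, and friends):

    /* Hypothetical post-conversion poll method. The EPOLL* constants come
     * from <uapi/linux/eventpoll.h> and have the same values on every
     * architecture, unlike the arch-dependent POLL* constants.
     */
    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    struct example_dev {                    /* hypothetical device state */
        wait_queue_head_t waitq;
        bool have_data;
        bool hung_up;
    };

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
        struct example_dev *dev = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &dev->waitq, wait); /* register for wakeups */

        if (dev->have_data)                 /* readable */
            mask |= EPOLLIN | EPOLLRDNORM;
        if (dev->hung_up)                   /* peer went away */
            mask |= EPOLLHUP;

        return mask;
    }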
Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_fd.c             |  26
-rw-r--r--  net/atm/common.c              |   8
-rw-r--r--  net/batman-adv/icmp_socket.c  |   2
-rw-r--r--  net/batman-adv/log.c          |   2
-rw-r--r--  net/bluetooth/af_bluetooth.c  |  16
-rw-r--r--  net/caif/caif_socket.c        |  12
-rw-r--r--  net/core/datagram.c           |  16
-rw-r--r--  net/core/sock.c               |  10
-rw-r--r--  net/core/stream.c             |   4
-rw-r--r--  net/dccp/proto.c              |  12
-rw-r--r--  net/decnet/af_decnet.c        |   2
-rw-r--r--  net/ipv4/af_inet.c            |   2
-rw-r--r--  net/ipv4/tcp.c                |  34
-rw-r--r--  net/ipv4/tcp_input.c          |   2
-rw-r--r--  net/ipv4/udp.c                |   6
-rw-r--r--  net/iucv/af_iucv.c            |  18
-rw-r--r--  net/kcm/kcmsock.c             |   6
-rw-r--r--  net/nfc/llcp_sock.c           |  16
-rw-r--r--  net/packet/af_packet.c        |   4
-rw-r--r--  net/phonet/socket.c           |  10
-rw-r--r--  net/rds/af_rds.c              |  16
-rw-r--r--  net/rfkill/core.c             |   4
-rw-r--r--  net/rxrpc/af_rxrpc.c          |   4
-rw-r--r--  net/sctp/socket.c             |  20
-rw-r--r--  net/smc/af_smc.c              |  24
-rw-r--r--  net/smc/smc_rx.c              |   4
-rw-r--r--  net/smc/smc_tx.c              |   4
-rw-r--r--  net/sunrpc/cache.c            |   4
-rw-r--r--  net/sunrpc/rpc_pipe.c         |   6
-rw-r--r--  net/tipc/socket.c             |  22
-rw-r--r--  net/unix/af_unix.c            |  40
-rw-r--r--  net/vmw_vsock/af_vsock.c      |  30
32 files changed, 193 insertions, 193 deletions
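The NOTE in the commit message is also why the scripted replacement is safe almost everywhere: on most architectures the EPOLL* and POLL* values are bit-for-bit identical, so a compile-time check along the lines of the sketch below would pass there. This is an illustration under that assumption, not code from the patch; on the holdouts (Sparc et al., per the message) one of the later checks could trip, which is what Al's follow-up patch sorts out:

    /* Sketch: EPOLL* values are fixed, while POLL* come from <asm/poll.h>
     * and may differ per architecture. The __force cast strips the
     * __poll_t bitwise annotation so the values can be compared.
     */
    #include <linux/build_bug.h>
    #include <linux/poll.h>

    static inline void check_poll_epoll_match(void)
    {
        BUILD_BUG_ON((__force unsigned int)EPOLLIN  != POLLIN);
        BUILD_BUG_ON((__force unsigned int)EPOLLOUT != POLLOUT);
        BUILD_BUG_ON((__force unsigned int)EPOLLERR != POLLERR);
        BUILD_BUG_ON((__force unsigned int)EPOLLHUP != POLLHUP);
        /* The "almost": bits like POLLWRNORM/POLLWRBAND are where some
         * architectures historically diverge, so this check is the one
         * that could trip there.
         */
        BUILD_BUG_ON((__force unsigned int)EPOLLWRNORM != POLLWRNORM);
    }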
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index d6f7f7cb79c4..0cfba919d167 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -240,7 +240,7 @@ p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
 	if (!ts) {
 		if (err)
 			*err = -EREMOTEIO;
-		return POLLERR;
+		return EPOLLERR;
 	}
 
 	if (!ts->rd->f_op->poll)
@@ -253,7 +253,7 @@ p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
 			n = DEFAULT_POLLMASK;
 		else
 			n = ts->wr->f_op->poll(ts->wr, pt);
-		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
+		ret = (ret & ~EPOLLOUT) | (n & ~EPOLLIN);
 	}
 
 	return ret;
@@ -396,11 +396,11 @@ end_clear:
 
 	if (!list_empty(&m->req_list)) {
 		if (test_and_clear_bit(Rpending, &m->wsched))
-			n = POLLIN;
+			n = EPOLLIN;
 		else
 			n = p9_fd_poll(m->client, NULL, NULL);
 
-		if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
+		if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
 			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 			schedule_work(&m->rq);
 		}
@@ -505,11 +505,11 @@ end_clear:
 
 	if (m->wsize || !list_empty(&m->unsent_req_list)) {
 		if (test_and_clear_bit(Wpending, &m->wsched))
-			n = POLLOUT;
+			n = EPOLLOUT;
 		else
 			n = p9_fd_poll(m->client, NULL, NULL);
 
-		if ((n & POLLOUT) &&
+		if ((n & EPOLLOUT) &&
 		    !test_and_set_bit(Wworksched, &m->wsched)) {
 			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 			schedule_work(&m->wq);
@@ -599,12 +599,12 @@ static void p9_conn_create(struct p9_client *client)
 	init_poll_funcptr(&m->pt, p9_pollwait);
 
 	n = p9_fd_poll(client, &m->pt, NULL);
-	if (n & POLLIN) {
+	if (n & EPOLLIN) {
 		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 		set_bit(Rpending, &m->wsched);
 	}
 
-	if (n & POLLOUT) {
+	if (n & EPOLLOUT) {
 		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 		set_bit(Wpending, &m->wsched);
 	}
@@ -625,12 +625,12 @@ static void p9_poll_mux(struct p9_conn *m)
 		return;
 
 	n = p9_fd_poll(m->client, NULL, &err);
-	if (n & (POLLERR | POLLHUP | POLLNVAL)) {
+	if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
 		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
 		p9_conn_cancel(m, err);
 	}
 
-	if (n & POLLIN) {
+	if (n & EPOLLIN) {
 		set_bit(Rpending, &m->wsched);
 		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
 		if (!test_and_set_bit(Rworksched, &m->wsched)) {
@@ -639,7 +639,7 @@ static void p9_poll_mux(struct p9_conn *m)
 		}
 	}
 
-	if (n & POLLOUT) {
+	if (n & EPOLLOUT) {
 		set_bit(Wpending, &m->wsched);
 		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
 		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
@@ -678,11 +678,11 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 	spin_unlock(&client->lock);
 
 	if (test_and_clear_bit(Wpending, &m->wsched))
-		n = POLLOUT;
+		n = EPOLLOUT;
 	else
 		n = p9_fd_poll(m->client, NULL, NULL);
 
-	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
+	if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
 		schedule_work(&m->wq);
 
 	return 0;
diff --git a/net/atm/common.c b/net/atm/common.c
index 6523f38c4957..fc78a0508ae1 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -661,15 +661,15 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 
 	/* exceptional events */
 	if (sk->sk_err)
-		mask = POLLERR;
+		mask = EPOLLERR;
 
 	if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
 	    test_bit(ATM_VF_CLOSE, &vcc->flags))
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	/* readable? */
 	if (!skb_queue_empty(&sk->sk_receive_queue))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* writable? */
 	if (sock->state == SS_CONNECTING &&
@@ -678,7 +678,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 
 	if (vcc->qos.txtp.traffic_class != ATM_NONE &&
 	    vcc_writable(sk))
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
 	return mask;
 }
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 581375d0eed2..e91f29c7c638 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -304,7 +304,7 @@ static __poll_t batadv_socket_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &socket_client->queue_wait, wait);
 
 	if (socket_client->queue_len > 0)
-		return POLLIN | POLLRDNORM;
+		return EPOLLIN | EPOLLRDNORM;
 
 	return 0;
 }
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 9be74a44e99d..dc9fa37ddd14 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -193,7 +193,7 @@ static __poll_t batadv_log_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &debug_log->queue_wait, wait);
 
 	if (!batadv_log_empty(debug_log))
-		return POLLIN | POLLRDNORM;
+		return EPOLLIN | EPOLLRDNORM;
 
 	return 0;
 }
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index f897681780db..84d92a077834 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -431,7 +431,7 @@ static inline __poll_t bt_accept_poll(struct sock *parent)
 		if (sk->sk_state == BT_CONNECTED ||
 		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
 		     sk->sk_state == BT_CONNECT2))
-			return POLLIN | POLLRDNORM;
+			return EPOLLIN | EPOLLRDNORM;
 	}
 
 	return 0;
@@ -451,20 +451,20 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
 		return bt_accept_poll(sk);
 
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR |
-			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+		mask |= EPOLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	if (!skb_queue_empty(&sk->sk_receive_queue))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_state == BT_CLOSED)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	if (sk->sk_state == BT_CONNECT ||
 	    sk->sk_state == BT_CONNECT2 ||
@@ -472,7 +472,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
 		return mask;
 
 	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 	else
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index b109445a1df9..a6fb1b3bcad9 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -924,7 +924,7 @@ static int caif_release(struct socket *sock)
 
 	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
 	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
-	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
+	wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR|EPOLLHUP);
 
 	sock_orphan(sk);
 	sk_stream_kill_queues(&cf_sk->sk);
@@ -946,23 +946,23 @@ static __poll_t caif_poll(struct file *file,
 
 	/* exceptional events? */
 	if (sk->sk_err)
-		mask |= POLLERR;
+		mask |= EPOLLERR;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= EPOLLRDHUP;
 
 	/* readable? */
 	if (!skb_queue_empty(&sk->sk_receive_queue) ||
 	    (sk->sk_shutdown & RCV_SHUTDOWN))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/*
 	 * we set writable also when the other side has shut down the
 	 * connection. This prevents stuck sockets.
 	 */
 	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
 	return mask;
 }
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b7d9293940b5..9938952c5c78 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -75,7 +75,7 @@ static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, i
 	/*
 	 * Avoid a wakeup if event not interesting for us
 	 */
-	if (key && !(key_to_poll(key) & (POLLIN | POLLERR)))
+	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
 		return 0;
 	return autoremove_wake_function(wait, mode, sync, key);
 }
@@ -842,22 +842,22 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
 
 	/* exceptional events? */
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR |
-			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+		mask |= EPOLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	/* readable? */
 	if (!skb_queue_empty(&sk->sk_receive_queue))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
 	if (connection_based(sk)) {
 		if (sk->sk_state == TCP_CLOSE)
-			mask |= POLLHUP;
+			mask |= EPOLLHUP;
 		/* connection hasn't started yet? */
 		if (sk->sk_state == TCP_SYN_SENT)
 			return mask;
@@ -865,7 +865,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
 
 	/* writable? */
 	if (sock_writeable(sk))
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 	else
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index b026e1717df4..c501499a04fe 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2619,7 +2619,7 @@ static void sock_def_error_report(struct sock *sk)
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_poll(&wq->wait, POLLERR);
+		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	rcu_read_unlock();
 }
@@ -2631,8 +2631,8 @@ static void sock_def_readable(struct sock *sk)
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
-						POLLRDNORM | POLLRDBAND);
+		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
+						EPOLLRDNORM | EPOLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 	rcu_read_unlock();
 }
@@ -2649,8 +2649,8 @@ static void sock_def_write_space(struct sock *sk)
 	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (skwq_has_sleeper(wq))
-			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
-						POLLWRNORM | POLLWRBAND);
+			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+						EPOLLWRNORM | EPOLLWRBAND);
 
 		/* Should agree with poll, otherwise some programs break */
 		if (sock_writeable(sk))
diff --git a/net/core/stream.c b/net/core/stream.c
index 1cff9c6270c6..7d329fb1f553 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -38,8 +38,8 @@ void sk_stream_write_space(struct sock *sk)
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_poll(&wq->wait, POLLOUT |
-					POLLWRNORM | POLLWRBAND);
+		wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
+					EPOLLWRNORM | EPOLLWRBAND);
 	if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
 		sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
 	rcu_read_unlock();
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 74685fecfdb9..15bdc002d90c 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -338,21 +338,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
 
 	mask = 0;
 	if (sk->sk_err)
-		mask = POLLERR;
+		mask = EPOLLERR;
 
 	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
 	/* Connected? */
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
 		if (atomic_read(&sk->sk_rmem_alloc) > 0)
-			mask |= POLLIN | POLLRDNORM;
+			mask |= EPOLLIN | EPOLLRDNORM;
 
 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 			if (sk_stream_is_writeable(sk)) {
-				mask |= POLLOUT | POLLWRNORM;
+				mask |= EPOLLOUT | EPOLLWRNORM;
 			} else { /* send SIGIO later */
 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@@ -362,7 +362,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
 				 * IO signal will be lost.
 				 */
 				if (sk_stream_is_writeable(sk))
-					mask |= POLLOUT | POLLWRNORM;
+					mask |= EPOLLOUT | EPOLLWRNORM;
 			}
 		}
 	}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index cc1b505453a8..91dd09f79808 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1216,7 +1216,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wai
 	__poll_t mask = datagram_poll(file, sock, wait);
 
 	if (!skb_queue_empty(&scp->other_receive_queue))
-		mask |= POLLRDBAND;
+		mask |= EPOLLRDBAND;
 
 	return mask;
 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c24008daa3d8..e4329e161943 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -828,7 +828,7 @@ int inet_shutdown(struct socket *sock, int how)
 	case TCP_CLOSE:
 		err = -ENOTCONN;
 		/* Hack to wake up other listeners, who can poll for
-		   POLLHUP, even on eg. unconnected UDP sockets -- RR */
+		   EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
 		/* fall through */
 	default:
 		sk->sk_shutdown |= how;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c059aa7df0a9..48636aee23c3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -512,36 +512,36 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	mask = 0;
 
 	/*
-	 * POLLHUP is certainly not done right. But poll() doesn't
+	 * EPOLLHUP is certainly not done right. But poll() doesn't
 	 * have a notion of HUP in just one direction, and for a
 	 * socket the read side is more interesting.
 	 *
-	 * Some poll() documentation says that POLLHUP is incompatible
-	 * with the POLLOUT/POLLWR flags, so somebody should check this
+	 * Some poll() documentation says that EPOLLHUP is incompatible
+	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
 	 * all. But careful, it tends to be safer to return too many
 	 * bits than too few, and you can easily break real applications
 	 * if you don't tell them that something has hung up!
 	 *
 	 * Check-me.
 	 *
-	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
+	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
 	 * our fs/select.c). It means that after we received EOF,
 	 * poll always returns immediately, making impossible poll() on write()
-	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
+	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
 	 * if and only if shutdown has been made in both directions.
 	 * Actually, it is interesting to look how Solaris and DUX
-	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
+	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
 	 * then we could set it on SND_SHUTDOWN. BTW examples given
 	 * in Stevens' books assume exactly this behaviour, it explains
-	 * why POLLHUP is incompatible with POLLOUT. --ANK
+	 * why EPOLLHUP is incompatible with EPOLLOUT. --ANK
 	 *
 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 	 * blocking on fresh not-connected or disconnected socket. --ANK
 	 */
 	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
 	/* Connected or passive Fast Open socket? */
 	if (state != TCP_SYN_SENT &&
@@ -554,11 +554,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 			target++;
 
 		if (tp->rcv_nxt - tp->copied_seq >= target)
-			mask |= POLLIN | POLLRDNORM;
+			mask |= EPOLLIN | EPOLLRDNORM;
 
 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 			if (sk_stream_is_writeable(sk)) {
-				mask |= POLLOUT | POLLWRNORM;
+				mask |= EPOLLOUT | EPOLLWRNORM;
 			} else { /* send SIGIO later */
 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@@ -570,24 +570,24 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 			 */
 			smp_mb__after_atomic();
 			if (sk_stream_is_writeable(sk))
-				mask |= POLLOUT | POLLWRNORM;
+				mask |= EPOLLOUT | EPOLLWRNORM;
 			}
 		} else
-			mask |= POLLOUT | POLLWRNORM;
+			mask |= EPOLLOUT | EPOLLWRNORM;
 
 		if (tp->urg_data & TCP_URG_VALID)
-			mask |= POLLPRI;
+			mask |= EPOLLPRI;
 	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
 		/* Active TCP fastopen socket with defer_connect
-		 * Return POLLOUT so application can call write()
+		 * Return EPOLLOUT so application can call write()
 		 * in order for kernel to generate SYN+data
 		 */
-		mask |= POLLOUT | POLLWRNORM;
+		mask |= EPOLLOUT | EPOLLWRNORM;
 	}
 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 	smp_rmb();
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR;
+		mask |= EPOLLERR;
 
 	return mask;
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cfa51cfd2d99..575d3c1fb6e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -315,7 +315,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 
 	/* Fast Recovery (RFC 5681 3.2) :
 	 * Cubic needs 1.7 factor, rounded to 2 to include
-	 * extra cushion (application might react slowly to POLLOUT)
+	 * extra cushion (application might react slowly to EPOLLOUT)
 	 */
 	sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
 	sndmem *= nr_segs * per_mss;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f81f969f9c06..bfaefe560b5c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2501,12 +2501,12 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sock *sk = sock->sk;
 
 	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Check for false positives due to checksum errors */
-	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
+	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
 	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
-		mask &= ~(POLLIN | POLLRDNORM);
+		mask &= ~(EPOLLIN | EPOLLRDNORM);
 
 	return mask;
 
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 64331158d693..1e8cc7bcbca3 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1483,7 +1483,7 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
 		sk = (struct sock *) isk;
 
 		if (sk->sk_state == IUCV_CONNECTED)
-			return POLLIN | POLLRDNORM;
+			return EPOLLIN | EPOLLRDNORM;
 	}
 
 	return 0;
@@ -1501,27 +1501,27 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
 		return iucv_accept_poll(sk);
 
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR |
-			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+		mask |= EPOLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= EPOLLRDHUP;
 
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	if (!skb_queue_empty(&sk->sk_receive_queue) ||
 	    (sk->sk_shutdown & RCV_SHUTDOWN))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_state == IUCV_CLOSED)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	if (sk->sk_state == IUCV_DISCONN)
-		mask |= POLLIN;
+		mask |= EPOLLIN;
 
 	if (sock_writeable(sk) && iucv_below_msglim(sk))
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 	else
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 4a8d407f8902..f297d53a11aa 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -396,8 +396,8 @@ static int kcm_read_sock_done(struct strparser *strp, int err)
 
 static void psock_state_change(struct sock *sk)
 {
-	/* TCP only does a POLLIN for a half close. Do a POLLHUP here
-	 * since application will normally not poll with POLLIN
+	/* TCP only does a EPOLLIN for a half close. Do a EPOLLHUP here
+	 * since application will normally not poll with EPOLLIN
 	 * on the TCP sockets.
 	 */
 
@@ -1338,7 +1338,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
 
 	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
 	 * we set sk_state, otherwise epoll_wait always returns right away with
-	 * POLLHUP
+	 * EPOLLHUP
 	 */
 	kcm->sk.sk_state = TCP_ESTABLISHED;
 
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 985909f105eb..376040092142 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -543,7 +543,7 @@ static inline __poll_t llcp_accept_poll(struct sock *parent)
 		sk = &llcp_sock->sk;
 
 		if (sk->sk_state == LLCP_CONNECTED)
-			return POLLIN | POLLRDNORM;
+			return EPOLLIN | EPOLLRDNORM;
 	}
 
 	return 0;
@@ -563,23 +563,23 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
 		return llcp_accept_poll(sk);
 
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR |
-			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+		mask |= EPOLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
 	if (!skb_queue_empty(&sk->sk_receive_queue))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_state == LLCP_CLOSED)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 	else
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1d1483007e46..e0f3f4aeeb4f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4085,7 +4085,7 @@ static __poll_t packet_poll(struct file *file, struct socket *sock,
 	if (po->rx_ring.pg_vec) {
 		if (!packet_previous_rx_frame(po, &po->rx_ring,
 			TP_STATUS_KERNEL))
-			mask |= POLLIN | POLLRDNORM;
+			mask |= EPOLLIN | EPOLLRDNORM;
 	}
 	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
 		po->pressure = 0;
@@ -4093,7 +4093,7 @@ static __poll_t packet_poll(struct file *file, struct socket *sock,
 	spin_lock_bh(&sk->sk_write_queue.lock);
 	if (po->tx_ring.pg_vec) {
 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
-			mask |= POLLOUT | POLLWRNORM;
+			mask |= EPOLLOUT | EPOLLWRNORM;
 	}
 	spin_unlock_bh(&sk->sk_write_queue.lock);
 	return mask;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 08f6751d2030..fffcd69f63ff 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -351,18 +351,18 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
 	poll_wait(file, sk_sleep(sk), wait);
 
 	if (sk->sk_state == TCP_CLOSE)
-		return POLLERR;
+		return EPOLLERR;
 	if (!skb_queue_empty(&sk->sk_receive_queue))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 	if (!skb_queue_empty(&pn->ctrlreq_queue))
-		mask |= POLLPRI;
+		mask |= EPOLLPRI;
 	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
-		return POLLHUP;
+		return EPOLLHUP;
 
 	if (sk->sk_state == TCP_ESTABLISHED &&
 	    refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
 	    atomic_read(&pn->tx_credits))
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
 	return mask;
 }
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 88aa8ad0f5b6..744c637c86b0 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -137,17 +137,17 @@ static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
 
 /*
  * RDS' poll is without a doubt the least intuitive part of the interface,
- * as POLLIN and POLLOUT do not behave entirely as you would expect from
+ * as EPOLLIN and EPOLLOUT do not behave entirely as you would expect from
  * a network protocol.
  *
- * POLLIN is asserted if
+ * EPOLLIN is asserted if
  *  - there is data on the receive queue.
  *  - to signal that a previously congested destination may have become
  *    uncongested
  *  - A notification has been queued to the socket (this can be a congestion
  *    update, or a RDMA completion).
  *
- * POLLOUT is asserted if there is room on the send queue. This does not mean
+ * EPOLLOUT is asserted if there is room on the send queue. This does not mean
  * however, that the next sendmsg() call will succeed. If the application tries
  * to send to a congested destination, the system call may still fail (and
  * return ENOBUFS).
@@ -167,22 +167,22 @@ static __poll_t rds_poll(struct file *file, struct socket *sock,
 
 	read_lock_irqsave(&rs->rs_recv_lock, flags);
 	if (!rs->rs_cong_monitor) {
-		/* When a congestion map was updated, we signal POLLIN for
+		/* When a congestion map was updated, we signal EPOLLIN for
 		 * "historical" reasons. Applications can also poll for
 		 * WRBAND instead. */
 		if (rds_cong_updated_since(&rs->rs_cong_track))
-			mask |= (POLLIN | POLLRDNORM | POLLWRBAND);
+			mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND);
 	} else {
 		spin_lock(&rs->rs_lock);
 		if (rs->rs_cong_notify)
-			mask |= (POLLIN | POLLRDNORM);
+			mask |= (EPOLLIN | EPOLLRDNORM);
 		spin_unlock(&rs->rs_lock);
 	}
 	if (!list_empty(&rs->rs_recv_queue) ||
 	    !list_empty(&rs->rs_notify_queue))
-		mask |= (POLLIN | POLLRDNORM);
+		mask |= (EPOLLIN | EPOLLRDNORM);
 	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
-		mask |= (POLLOUT | POLLWRNORM);
+		mask |= (EPOLLOUT | EPOLLWRNORM);
 	read_unlock_irqrestore(&rs->rs_recv_lock, flags);
 
 	/* clear state any time we wake a seen-congested socket */
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 124c77e9d058..59d0eb960275 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1142,13 +1142,13 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
 static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
 {
 	struct rfkill_data *data = file->private_data;
-	__poll_t res = POLLOUT | POLLWRNORM;
+	__poll_t res = EPOLLOUT | EPOLLWRNORM;
 
 	poll_wait(file, &data->read_wait, wait);
 
 	mutex_lock(&data->mtx);
 	if (!list_empty(&data->events))
-		res = POLLIN | POLLRDNORM;
+		res = EPOLLIN | EPOLLRDNORM;
 	mutex_unlock(&data->mtx);
 
 	return res;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 21ad6a3a465c..0c9c18aa7c77 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -742,13 +742,13 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
 	/* the socket is readable if there are any messages waiting on the Rx
 	 * queue */
 	if (!list_empty(&rx->recvmsg_q))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* the socket is writable if there is space to add new data to the
 	 * socket; there is no guarantee that any particular call in progress
 	 * on the socket may have space in the Tx ACK window */
 	if (rxrpc_writable(sk))
-		mask |= POLLOUT | POLLWRNORM;
+		mask |= EPOLLOUT | EPOLLWRNORM;
 
 	return mask;
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ebb8cb9eb0bd..bf271f8c2dc9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7602,22 +7602,22 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	 */
 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
 		return (!list_empty(&sp->ep->asocs)) ?
-			(POLLIN | POLLRDNORM) : 0;
+			(EPOLLIN | EPOLLRDNORM) : 0;
 
 	mask = 0;
 
 	/* Is there any exceptional events? */
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR |
-			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+		mask |= EPOLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
-		mask |= POLLHUP;
+		mask |= EPOLLHUP;
 
 	/* Is it readable? Reconsider this code with TCP-style support. */
 	if (!skb_queue_empty(&sk->sk_receive_queue))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* The association is either gone or not ready. */
 	if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
@@ -7625,7 +7625,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
 	/* Is it writable? */
 	if (sctp_writeable(sk)) {
-		mask |= POLLOUT | POLLWRNORM;
+		mask |= EPOLLOUT | EPOLLWRNORM;
 	} else {
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 		/*
@@ -7637,7 +7637,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		 * in the following code to cover it as well.
 		 */
 		if (sctp_writeable(sk))
-			mask |= POLLOUT | POLLWRNORM;
+			mask |= EPOLLOUT | EPOLLWRNORM;
 	}
 	return mask;
 }
@@ -8161,8 +8161,8 @@ void sctp_data_ready(struct sock *sk)
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
-				POLLRDNORM | POLLRDBAND);
+		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+				EPOLLRDNORM | EPOLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 	rcu_read_unlock();
 }
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index ba4b84debc5a..da1a5cdefd13 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1145,7 +1145,7 @@ static __poll_t smc_accept_poll(struct sock *parent)
 
 	spin_lock(&isk->accept_q_lock);
 	if (!list_empty(&isk->accept_q))
-		mask = POLLIN | POLLRDNORM;
+		mask = EPOLLIN | EPOLLRDNORM;
 	spin_unlock(&isk->accept_q_lock);
 
 	return mask;
@@ -1160,7 +1160,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 	int rc;
 
 	if (!sk)
-		return POLLNVAL;
+		return EPOLLNVAL;
 
 	smc = smc_sk(sock->sk);
 	sock_hold(sk);
@@ -1171,16 +1171,16 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
 		/* if non-blocking connect finished ... */
 		lock_sock(sk);
-		if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) {
+		if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
 			sk->sk_err = smc->clcsock->sk->sk_err;
 			if (sk->sk_err) {
-				mask |= POLLERR;
+				mask |= EPOLLERR;
 			} else {
 				rc = smc_connect_rdma(smc);
 				if (rc < 0)
-					mask |= POLLERR;
+					mask |= EPOLLERR;
 				/* success cases including fallback */
-				mask |= POLLOUT | POLLWRNORM;
+				mask |= EPOLLOUT | EPOLLWRNORM;
 			}
 		}
 	} else {
@@ -1190,27 +1190,27 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 			lock_sock(sk);
 		}
 		if (sk->sk_err)
-			mask |= POLLERR;
+			mask |= EPOLLERR;
 		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
 		    (sk->sk_state == SMC_CLOSED))
-			mask |= POLLHUP;
+			mask |= EPOLLHUP;
 		if (sk->sk_state == SMC_LISTEN) {
 			/* woken up by sk_data_ready in smc_listen_work() */
 			mask = smc_accept_poll(sk);
 		} else {
 			if (atomic_read(&smc->conn.sndbuf_space) ||
 			    sk->sk_shutdown & SEND_SHUTDOWN) {
-				mask |= POLLOUT | POLLWRNORM;
+				mask |= EPOLLOUT | EPOLLWRNORM;
 			} else {
 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 			}
 			if (atomic_read(&smc->conn.bytes_to_rcv))
-				mask |= POLLIN | POLLRDNORM;
+				mask |= EPOLLIN | EPOLLRDNORM;
 			if (sk->sk_shutdown & RCV_SHUTDOWN)
-				mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 			if (sk->sk_state == SMC_APPCLOSEWAIT1)
-				mask |= POLLIN;
+				mask |= EPOLLIN;
 		}
 
 	}
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 9dc392ca06bf..eff4e0d0bb31 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -35,8 +35,8 @@ static void smc_rx_data_ready(struct sock *sk)
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
-						POLLRDNORM | POLLRDBAND);
+		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
+						EPOLLRDNORM | EPOLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
 	    (sk->sk_state == SMC_CLOSED))
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 838bce20c361..72f004c9c9b1 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -46,8 +46,8 @@ static void smc_tx_write_space(struct sock *sk)
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_poll(&wq->wait,
-					   POLLOUT | POLLWRNORM |
-					   POLLWRBAND);
+					   EPOLLOUT | EPOLLWRNORM |
+					   EPOLLWRBAND);
 	if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
 		sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
 	rcu_read_unlock();
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index aa36dad32db1..8a7e1c774f9c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -940,7 +940,7 @@ static __poll_t cache_poll(struct file *filp, poll_table *wait,
 	poll_wait(filp, &queue_wait, wait);
 
 	/* alway allow write */
-	mask = POLLOUT | POLLWRNORM;
+	mask = EPOLLOUT | EPOLLWRNORM;
 
 	if (!rp)
 		return mask;
@@ -950,7 +950,7 @@ static __poll_t cache_poll(struct file *filp, poll_table *wait,
 	for (cq= &rp->q; &cq->list != &cd->queue;
 	     cq = list_entry(cq->list.next, struct cache_queue, list))
 		if (!cq->reader) {
-			mask |= POLLIN | POLLRDNORM;
+			mask |= EPOLLIN | EPOLLRDNORM;
 			break;
 		}
 	spin_unlock(&queue_lock);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 5c4330325787..fc97fc3ed637 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -345,15 +345,15 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
 {
 	struct inode *inode = file_inode(filp);
 	struct rpc_inode *rpci = RPC_I(inode);
-	__poll_t mask = POLLOUT | POLLWRNORM;
+	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
 
 	poll_wait(filp, &rpci->waitq, wait);
 
 	inode_lock(inode);
 	if (rpci->pipe == NULL)
-		mask |= POLLERR | POLLHUP;
+		mask |= EPOLLERR | EPOLLHUP;
 	else if (filp->private_data || !list_empty(&rpci->pipe->pipe))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 	inode_unlock(inode);
 	return mask;
 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 163f3a547501..b0323ec7971e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -721,31 +721,31 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 	sock_poll_wait(file, sk_sleep(sk), wait);
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		revents |= POLLRDHUP | POLLIN | POLLRDNORM;
+		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
-		revents |= POLLHUP;
+		revents |= EPOLLHUP;
 
 	switch (sk->sk_state) {
 	case TIPC_ESTABLISHED:
 	case TIPC_CONNECTING:
 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
-			revents |= POLLOUT;
+			revents |= EPOLLOUT;
 		/* fall thru' */
 	case TIPC_LISTEN:
 		if (!skb_queue_empty(&sk->sk_receive_queue))
-			revents |= POLLIN | POLLRDNORM;
+			revents |= EPOLLIN | EPOLLRDNORM;
 		break;
 	case TIPC_OPEN:
 		if (tsk->group_is_open && !tsk->cong_link_cnt)
-			revents |= POLLOUT;
+			revents |= EPOLLOUT;
 		if (!tipc_sk_type_connectionless(sk))
 			break;
 		if (skb_queue_empty(&sk->sk_receive_queue))
 			break;
-		revents |= POLLIN | POLLRDNORM;
+		revents |= EPOLLIN | EPOLLRDNORM;
 		break;
 	case TIPC_DISCONNECTING:
-		revents = POLLIN | POLLRDNORM | POLLHUP;
+		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
 		break;
 	}
 	return revents;
@@ -1897,8 +1897,8 @@ static void tipc_write_space(struct sock *sk)
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
-						POLLWRNORM | POLLWRBAND);
+		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+						EPOLLWRNORM | EPOLLWRBAND);
 	rcu_read_unlock();
 }
 
@@ -1914,8 +1914,8 @@ static void tipc_data_ready(struct sock *sk)
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
-						POLLRDNORM | POLLRDBAND);
+		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+						EPOLLRDNORM | EPOLLRDBAND);
 	rcu_read_unlock();
 }
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0214acbd6bff..d545e1d0dea2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -415,9 +415,9 @@ static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
415{ 415{
416 unix_dgram_peer_wake_disconnect(sk, other); 416 unix_dgram_peer_wake_disconnect(sk, other);
417 wake_up_interruptible_poll(sk_sleep(sk), 417 wake_up_interruptible_poll(sk_sleep(sk),
418 POLLOUT | 418 EPOLLOUT |
419 POLLWRNORM | 419 EPOLLWRNORM |
420 POLLWRBAND); 420 EPOLLWRBAND);
421} 421}
422 422
423/* preconditions: 423/* preconditions:
@@ -454,7 +454,7 @@ static void unix_write_space(struct sock *sk)
454 wq = rcu_dereference(sk->sk_wq); 454 wq = rcu_dereference(sk->sk_wq);
455 if (skwq_has_sleeper(wq)) 455 if (skwq_has_sleeper(wq))
456 wake_up_interruptible_sync_poll(&wq->wait, 456 wake_up_interruptible_sync_poll(&wq->wait,
457 POLLOUT | POLLWRNORM | POLLWRBAND); 457 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
458 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 458 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
459 } 459 }
460 rcu_read_unlock(); 460 rcu_read_unlock();
@@ -2129,8 +2129,8 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2129 2129
2130 if (wq_has_sleeper(&u->peer_wait)) 2130 if (wq_has_sleeper(&u->peer_wait))
2131 wake_up_interruptible_sync_poll(&u->peer_wait, 2131 wake_up_interruptible_sync_poll(&u->peer_wait,
2132 POLLOUT | POLLWRNORM | 2132 EPOLLOUT | EPOLLWRNORM |
2133 POLLWRBAND); 2133 EPOLLWRBAND);
2134 2134
2135 if (msg->msg_name) 2135 if (msg->msg_name)
2136 unix_copy_addr(msg, skb->sk); 2136 unix_copy_addr(msg, skb->sk);
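
Both wakeups above, unix_write_space() when a queued skb is freed and the peer_wait wakeup in unix_dgram_recvmsg(), carry EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND so that only writers blocked on output are woken once space returns. The transition they signal can be observed from userspace. A minimal sketch, assuming an AF_UNIX datagram socketpair; the file name dgram_writable.c and the 512-byte payload are arbitrary:

    /* dgram_writable.c: fill an AF_UNIX datagram socket until EAGAIN, then
     * drain the peer until POLLOUT reappears.  Each freed skb is what fires
     * the EPOLLOUT wakeups added above.
     */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <poll.h>
    #include <sys/socket.h>

    int main(void)
    {
            int sv[2], sent = 0, drained = 0;
            char buf[512] = { 0 };
            struct pollfd pfd = { .events = POLLOUT };

            if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sv) < 0)
                    return 1;
            pfd.fd = sv[0];

            while (send(sv[0], buf, sizeof(buf), 0) > 0)
                    sent++;                 /* stop once write memory is gone */
            printf("queued %d datagrams, POLLOUT ready: %d\n",
                   sent, poll(&pfd, 1, 0));

            while (poll(&pfd, 1, 0) == 0 &&
                   recv(sv[1], buf, sizeof(buf), 0) > 0)
                    drained++;              /* each recv() frees sender memory */
            printf("POLLOUT back after draining %d datagrams\n", drained);
            return 0;
    }
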
@@ -2650,27 +2650,27 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
2650 2650
2651 /* exceptional events? */ 2651 /* exceptional events? */
2652 if (sk->sk_err) 2652 if (sk->sk_err)
2653 mask |= POLLERR; 2653 mask |= EPOLLERR;
2654 if (sk->sk_shutdown == SHUTDOWN_MASK) 2654 if (sk->sk_shutdown == SHUTDOWN_MASK)
2655 mask |= POLLHUP; 2655 mask |= EPOLLHUP;
2656 if (sk->sk_shutdown & RCV_SHUTDOWN) 2656 if (sk->sk_shutdown & RCV_SHUTDOWN)
2657 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 2657 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2658 2658
2659 /* readable? */ 2659 /* readable? */
2660 if (!skb_queue_empty(&sk->sk_receive_queue)) 2660 if (!skb_queue_empty(&sk->sk_receive_queue))
2661 mask |= POLLIN | POLLRDNORM; 2661 mask |= EPOLLIN | EPOLLRDNORM;
2662 2662
2663 /* Connection-based need to check for termination and startup */ 2663 /* Connection-based need to check for termination and startup */
2664 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && 2664 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2665 sk->sk_state == TCP_CLOSE) 2665 sk->sk_state == TCP_CLOSE)
2666 mask |= POLLHUP; 2666 mask |= EPOLLHUP;
2667 2667
2668 /* 2668 /*
2669 * we set writable also when the other side has shut down the 2669 * we set writable also when the other side has shut down the
2670 * connection. This prevents stuck sockets. 2670 * connection. This prevents stuck sockets.
2671 */ 2671 */
2672 if (unix_writable(sk)) 2672 if (unix_writable(sk))
2673 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2673 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2674 2674
2675 return mask; 2675 return mask;
2676} 2676}
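
unix_poll() above encodes the usual shutdown convention: RCV_SHUTDOWN alone yields EPOLLRDHUP plus the readability bits, while SHUTDOWN_MASK, both directions down, adds EPOLLHUP. A hypothetical userspace view of those two cases (halfclose_demo.c is an assumed name; glibc guards POLLRDHUP behind _GNU_SOURCE):

    /* halfclose_demo.c: distinguish a peer half-close (EPOLLRDHUP) from a
     * full hangup (EPOLLHUP) on an AF_UNIX stream pair, mirroring the
     * shutdown tests in unix_poll() above.
     */
    #define _GNU_SOURCE             /* POLLRDHUP */
    #include <stdio.h>
    #include <poll.h>
    #include <unistd.h>
    #include <sys/socket.h>

    static void report(int fd, const char *when)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };

            poll(&pfd, 1, 0);
            printf("%-11s IN=%d RDHUP=%d HUP=%d\n", when,
                   !!(pfd.revents & POLLIN),
                   !!(pfd.revents & POLLRDHUP),
                   !!(pfd.revents & POLLHUP));
    }

    int main(void)
    {
            int sv[2];

            if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
                    return 1;

            report(sv[0], "connected:");
            shutdown(sv[1], SHUT_WR);       /* RCV_SHUTDOWN here: RDHUP */
            report(sv[0], "half-close:");
            close(sv[1]);                   /* SHUTDOWN_MASK here: HUP too */
            report(sv[0], "closed:");
            return 0;
    }
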
@@ -2687,29 +2687,29 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
2687 2687
2688 /* exceptional events? */ 2688 /* exceptional events? */
2689 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 2689 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2690 mask |= POLLERR | 2690 mask |= EPOLLERR |
2691 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 2691 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
2692 2692
2693 if (sk->sk_shutdown & RCV_SHUTDOWN) 2693 if (sk->sk_shutdown & RCV_SHUTDOWN)
2694 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 2694 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2695 if (sk->sk_shutdown == SHUTDOWN_MASK) 2695 if (sk->sk_shutdown == SHUTDOWN_MASK)
2696 mask |= POLLHUP; 2696 mask |= EPOLLHUP;
2697 2697
2698 /* readable? */ 2698 /* readable? */
2699 if (!skb_queue_empty(&sk->sk_receive_queue)) 2699 if (!skb_queue_empty(&sk->sk_receive_queue))
2700 mask |= POLLIN | POLLRDNORM; 2700 mask |= EPOLLIN | EPOLLRDNORM;
2701 2701
2702 /* Connection-based need to check for termination and startup */ 2702 /* Connection-based need to check for termination and startup */
2703 if (sk->sk_type == SOCK_SEQPACKET) { 2703 if (sk->sk_type == SOCK_SEQPACKET) {
2704 if (sk->sk_state == TCP_CLOSE) 2704 if (sk->sk_state == TCP_CLOSE)
2705 mask |= POLLHUP; 2705 mask |= EPOLLHUP;
2706 /* connection hasn't started yet? */ 2706 /* connection hasn't started yet? */
2707 if (sk->sk_state == TCP_SYN_SENT) 2707 if (sk->sk_state == TCP_SYN_SENT)
2708 return mask; 2708 return mask;
2709 } 2709 }
2710 2710
2711 /* No write status requested, avoid expensive OUT tests. */ 2711 /* No write status requested, avoid expensive OUT tests. */
2712 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT))) 2712 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
2713 return mask; 2713 return mask;
2714 2714
2715 writable = unix_writable(sk); 2715 writable = unix_writable(sk);
@@ -2726,7 +2726,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
2726 } 2726 }
2727 2727
2728 if (writable) 2728 if (writable)
2729 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2729 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2730 else 2730 else
2731 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2731 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2732 2732
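
The poll_requested_events() test in unix_dgram_poll() above skips the more expensive write-side checks whenever the waiter never asked for output events. epoll propagates each entry's registered mask down to ->poll as that key, so a read-only watcher avoids the cost on every readiness check. A small sketch under that assumption; watch_read_only() is a hypothetical helper:

    /* watch_read_only(): registering EPOLLIN alone means
     * poll_requested_events() in the handler above will not include the
     * OUT bits, so the write-side tests are skipped.
     */
    #include <sys/epoll.h>

    int watch_read_only(int epfd, int fd)
    {
            struct epoll_event ev = {
                    .events  = EPOLLIN,     /* no EPOLLOUT/EPOLLWRNORM/EPOLLWRBAND */
                    .data.fd = fd,
            };

            return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
    }

EPOLLERR and EPOLLHUP are reported regardless of the registered mask, which is why the exceptional-event tests sit above the shortcut in the handler.
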
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9d95e773f4c8..e0fc84daed94 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -865,20 +865,20 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
865 865
866 if (sk->sk_err) 866 if (sk->sk_err)
867 /* Signify that there has been an error on this socket. */ 867 /* Signify that there has been an error on this socket. */
868 mask |= POLLERR; 868 mask |= EPOLLERR;
869 869
870 /* INET sockets treat local write shutdown and peer write shutdown as a 870 /* INET sockets treat local write shutdown and peer write shutdown as a
871 * case of POLLHUP set. 871 * case of EPOLLHUP set.
872 */ 872 */
873 if ((sk->sk_shutdown == SHUTDOWN_MASK) || 873 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
874 ((sk->sk_shutdown & SEND_SHUTDOWN) && 874 ((sk->sk_shutdown & SEND_SHUTDOWN) &&
875 (vsk->peer_shutdown & SEND_SHUTDOWN))) { 875 (vsk->peer_shutdown & SEND_SHUTDOWN))) {
876 mask |= POLLHUP; 876 mask |= EPOLLHUP;
877 } 877 }
878 878
879 if (sk->sk_shutdown & RCV_SHUTDOWN || 879 if (sk->sk_shutdown & RCV_SHUTDOWN ||
880 vsk->peer_shutdown & SEND_SHUTDOWN) { 880 vsk->peer_shutdown & SEND_SHUTDOWN) {
881 mask |= POLLRDHUP; 881 mask |= EPOLLRDHUP;
882 } 882 }
883 883
884 if (sock->type == SOCK_DGRAM) { 884 if (sock->type == SOCK_DGRAM) {
@@ -888,11 +888,11 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
888 */ 888 */
889 if (!skb_queue_empty(&sk->sk_receive_queue) || 889 if (!skb_queue_empty(&sk->sk_receive_queue) ||
890 (sk->sk_shutdown & RCV_SHUTDOWN)) { 890 (sk->sk_shutdown & RCV_SHUTDOWN)) {
891 mask |= POLLIN | POLLRDNORM; 891 mask |= EPOLLIN | EPOLLRDNORM;
892 } 892 }
893 893
894 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 894 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
895 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 895 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
896 896
897 } else if (sock->type == SOCK_STREAM) { 897 } else if (sock->type == SOCK_STREAM) {
898 lock_sock(sk); 898 lock_sock(sk);
@@ -902,7 +902,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
902 */ 902 */
903 if (sk->sk_state == TCP_LISTEN 903 if (sk->sk_state == TCP_LISTEN
904 && !vsock_is_accept_queue_empty(sk)) 904 && !vsock_is_accept_queue_empty(sk))
905 mask |= POLLIN | POLLRDNORM; 905 mask |= EPOLLIN | EPOLLRDNORM;
906 906
907 /* If there is something in the queue then we can read. */ 907 /* If there is something in the queue then we can read. */
908 if (transport->stream_is_active(vsk) && 908 if (transport->stream_is_active(vsk) &&
@@ -911,10 +911,10 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
911 int ret = transport->notify_poll_in( 911 int ret = transport->notify_poll_in(
912 vsk, 1, &data_ready_now); 912 vsk, 1, &data_ready_now);
913 if (ret < 0) { 913 if (ret < 0) {
914 mask |= POLLERR; 914 mask |= EPOLLERR;
915 } else { 915 } else {
916 if (data_ready_now) 916 if (data_ready_now)
917 mask |= POLLIN | POLLRDNORM; 917 mask |= EPOLLIN | EPOLLRDNORM;
918 918
919 } 919 }
920 } 920 }
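
In the vsock hunks above, a failing transport->notify_poll_in() or notify_poll_out() callback folds into the mask as EPOLLERR rather than surfacing an errno. The common userspace reaction to EPOLLERR, fetching and clearing the socket's pending error, is sketched below; note it reads sk_err via SO_ERROR, which a transport-callback failure does not necessarily populate (handle_epollerr() is a hypothetical helper):

    /* handle_epollerr(): retrieve and clear the pending socket error once
     * poll reports EPOLLERR; returns the error, 0 if none was recorded.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int handle_epollerr(int fd)
    {
            int err = 0;
            socklen_t len = sizeof(err);

            if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
                    return -1;
            if (err)
                    fprintf(stderr, "fd %d: pending error: %s\n",
                            fd, strerror(err));
            return err;
    }
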
@@ -925,7 +925,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
925 */ 925 */
926 if (sk->sk_shutdown & RCV_SHUTDOWN || 926 if (sk->sk_shutdown & RCV_SHUTDOWN ||
927 vsk->peer_shutdown & SEND_SHUTDOWN) { 927 vsk->peer_shutdown & SEND_SHUTDOWN) {
928 mask |= POLLIN | POLLRDNORM; 928 mask |= EPOLLIN | EPOLLRDNORM;
929 } 929 }
930 930
931 /* Connected sockets that can produce data can be written. */ 931 /* Connected sockets that can produce data can be written. */
@@ -935,25 +935,25 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
935 int ret = transport->notify_poll_out( 935 int ret = transport->notify_poll_out(
936 vsk, 1, &space_avail_now); 936 vsk, 1, &space_avail_now);
937 if (ret < 0) { 937 if (ret < 0) {
938 mask |= POLLERR; 938 mask |= EPOLLERR;
939 } else { 939 } else {
940 if (space_avail_now) 940 if (space_avail_now)
941 /* Remove POLLWRBAND since INET 941 /* Remove EPOLLWRBAND since INET
942 * sockets are not setting it. 942 * sockets are not setting it.
943 */ 943 */
944 mask |= POLLOUT | POLLWRNORM; 944 mask |= EPOLLOUT | EPOLLWRNORM;
945 945
946 } 946 }
947 } 947 }
948 } 948 }
949 949
950 /* Simulate INET socket poll behaviors, which sets 950 /* Simulate INET socket poll behaviors, which sets
951 * POLLOUT|POLLWRNORM when peer is closed and nothing to read, 951 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
952 * but local send is not shutdown. 952 * but local send is not shutdown.
953 */ 953 */
954 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) { 954 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
955 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 955 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
956 mask |= POLLOUT | POLLWRNORM; 956 mask |= EPOLLOUT | EPOLLWRNORM;
957 957
958 } 958 }
959 959