author     David S. Miller <davem@davemloft.net>	2018-07-02 21:26:50 -0400
committer  David S. Miller <davem@davemloft.net>	2018-07-02 21:29:26 -0400
commit     5cd3da4ba2397ef07226ca2aa5094ed21ff8198f (patch)
tree       caa3c5968df02c296e9644a98c00e759d011b44c /net/ipv4/tcp.c
parent     f6779e4e53b6177c319f05b61a5c447137d0ab70 (diff)
parent     d0fbad0aec1df29717fab736eb24c8a49cf2c70b (diff)
Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
Simple overlapping changes in stmmac driver.
Adjust the skb_gro_flush_final_remcsum() function signature to match the
GRO list changes in net-next, as per Stephen Rothwell's example merge
resolution.
Signed-off-by: David S. Miller <davem@davemloft.net>
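For context, the skb_gro_flush_final_remcsum() adjustment is roughly of the
following shape (a sketch assuming the net-next GRO list conversion dropped
one level of skb chain indirection from the helper's second parameter; this
is illustrative, not the verbatim merge resolution):

-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
-					       struct sk_buff **pp,
-					       int flush,
-					       struct gro_remcsum *grc)
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+					       struct sk_buff *pp,
+					       int flush,
+					       struct gro_remcsum *grc)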
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 47c45d5be9f9..bf461fa77ed6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ * Wait for a TCP event.
+ *
+ * Note that we don't need to lock the socket, as the upper poll layers
+ * take care of normal races (between the test and the event) and we don't
+ * go look at any of the socket buffers directly.
  */
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
+	__poll_t mask;
 	struct sock *sk = sock->sk;
 	const struct tcp_sock *tp = tcp_sk(sk);
-	__poll_t mask = 0;
 	int state;
 
+	sock_poll_wait(file, sk_sleep(sk), wait);
+
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
+	/* Socket is not locked. We are protected from async events
+	 * by poll logic and correct handling of state changes
+	 * made by other threads is impossible in any case.
+	 */
+
+	mask = 0;
+
 	/*
 	 * EPOLLHUP is certainly not done right. But poll() doesn't
 	 * have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 
 	return mask;
 }
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
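For readers unfamiliar with the ->poll plumbing restored above: the
tcp_poll() signature taking a poll_table is what the VFS hands to the
socket layer on behalf of a user-space poll()/select()/epoll call. A
minimal user-space sketch of the calling side (hypothetical example, not
part of the commit):

	/* poll_tcp.c - what ultimately reaches tcp_poll() above: the VFS
	 * calls the socket's ->poll op, which registers the waiter via
	 * sock_poll_wait() and returns the current event mask. */
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <poll.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = htons(80),	/* assumed local server */
			.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		};
		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

		if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			perror("connect");

		/* Kernel side: tcp_poll() loads the socket state, handles
		 * TCP_LISTEN specially, and builds the EPOLL* mask. */
		if (poll(&pfd, 1, 1000) > 0)
			printf("revents:%s%s%s\n",
			       (pfd.revents & POLLIN)  ? " POLLIN"  : "",
			       (pfd.revents & POLLOUT) ? " POLLOUT" : "",
			       (pfd.revents & POLLHUP) ? " POLLHUP" : "");

		close(fd);
		return 0;
	}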