Diffstat (limited to 'include/net/tcp.h')
 include/net/tcp.h | 59 +++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 37 insertions(+), 22 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 646dbe3962ea..19f4150f4d4d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -266,6 +266,19 @@ static inline int tcp_too_many_orphans(struct sock *sk, int num)
 		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
 }
 
+/* syncookies: remember time of last synqueue overflow */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
+}
+
 extern struct proto tcp_prot;
 
 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
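The two helpers added above track SYN-queue overflows per listening socket: tcp_synq_overflow() stamps rx_opt.ts_recent_stamp (a field a listening socket does not otherwise use) with the current jiffies, and tcp_synq_no_recent_overflow() reports whether more than TCP_TIMEOUT_INIT ticks have elapsed since the last overflow. The comparison stays correct across jiffies wraparound because the kernel's time_after() macro reduces to a signed subtraction. Below is a minimal userspace sketch of that idiom, not kernel code: the jiffies counter is faked, and HZ and TIMEOUT_INIT are stand-in values.

/* Sketch: wrap-safe "recent overflow" check in the style of
 * tcp_synq_overflow()/tcp_synq_no_recent_overflow(). */
#include <stdio.h>

#define HZ		1000UL
#define TIMEOUT_INIT	(3UL * HZ)	/* stand-in for TCP_TIMEOUT_INIT */

/* Same trick as the kernel's time_after(a, b): the signed view of the
 * unsigned subtraction gives the right answer even after wraparound. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

static unsigned long jiffies;		/* fake tick counter */
static unsigned long last_overflow;	/* plays rx_opt.ts_recent_stamp */

static void synq_overflow(void)
{
	last_overflow = jiffies;
}

static int synq_no_recent_overflow(void)
{
	return time_after(jiffies, last_overflow + TIMEOUT_INIT);
}

int main(void)
{
	jiffies = -100UL;		/* start just before the wrap point */
	synq_overflow();
	jiffies += 200;			/* counter wrapped; overflow still recent */
	printf("no recent overflow? %d\n", synq_no_recent_overflow());
	jiffies += TIMEOUT_INIT;	/* timeout elapsed */
	printf("no recent overflow? %d\n", synq_no_recent_overflow());
	return 0;
}

Run as written this prints 0 then 1: the first check happens after the counter has wrapped past zero, yet the overflow is still correctly treated as recent.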
@@ -889,30 +902,32 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
-		__skb_queue_tail(&tp->ucopy.prequeue, skb);
-		tp->ucopy.memory += skb->truesize;
-		if (tp->ucopy.memory > sk->sk_rcvbuf) {
-			struct sk_buff *skb1;
-
-			BUG_ON(sock_owned_by_user(sk));
-
-			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-				sk_backlog_rcv(sk, skb1);
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
-			}
-
-			tp->ucopy.memory = 0;
-		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-			wake_up_interruptible(sk->sk_sleep);
-			if (!inet_csk_ack_scheduled(sk))
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  (3 * tcp_rto_min(sk)) / 4,
-							  TCP_RTO_MAX);
+	if (sysctl_tcp_low_latency || !tp->ucopy.task)
+		return 0;
+
+	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+	tp->ucopy.memory += skb->truesize;
+	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+		struct sk_buff *skb1;
+
+		BUG_ON(sock_owned_by_user(sk));
+
+		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+			sk_backlog_rcv(sk, skb1);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPPREQUEUEDROPPED);
 		}
-		return 1;
+
+		tp->ucopy.memory = 0;
+	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+		wake_up_interruptible_poll(sk->sk_sleep,
+					   POLLIN | POLLRDNORM | POLLRDBAND);
+		if (!inet_csk_ack_scheduled(sk))
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+						  (3 * tcp_rto_min(sk)) / 4,
+						  TCP_RTO_MAX);
 	}
-	return 0;
+	return 1;
 }
 
 
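The tcp_prequeue() hunk above makes two changes. First, the sysctl_tcp_low_latency test is inverted into an early return, removing one nesting level without altering behaviour. Second, the bare wake_up_interruptible() becomes wake_up_interruptible_poll(sk->sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND), so that with keyed wakeups, waiters not polling for read events stay asleep. A compilable toy sketch of the same guard-clause shape follows; struct ctx, wake_readers_poll() and the other names are hypothetical stand-ins for the socket machinery, not kernel APIs.

/* Sketch: guard-clause refactor in the style of tcp_prequeue(). */
#include <poll.h>
#include <stdio.h>

struct ctx {
	int low_latency;	/* plays sysctl_tcp_low_latency */
	int consumer;		/* plays tp->ucopy.task */
	int qlen;		/* plays skb_queue_len(&tp->ucopy.prequeue) */
};

static void wake_readers_poll(struct ctx *c, unsigned events)
{
	/* With keyed wakeups, only waiters polling for `events` would be
	 * woken; here we just report what would happen. */
	(void)c;
	printf("wake readers, events=%#x\n", events);
}

/* Inverted test plus early return: one indentation level less than the
 * old nested form, identical behaviour. */
static int enqueue(struct ctx *c)
{
	if (c->low_latency || !c->consumer)
		return 0;		/* caller handles the packet directly */

	c->qlen++;
	if (c->qlen == 1)		/* queue was empty: wake a reader */
		wake_readers_poll(c, POLLIN | POLLRDNORM | POLLRDBAND);
	return 1;			/* queued for the consumer */
}

int main(void)
{
	struct ctx c = { .low_latency = 0, .consumer = 1, .qlen = 0 };

	printf("queued=%d\n", enqueue(&c));	/* first packet: wakes readers */
	printf("queued=%d\n", enqueue(&c));	/* queue non-empty: no wakeup */
	return 0;
}

The refactor is behaviour-preserving: the same condition guards the same work; only the nesting and the wakeup key change.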