path: root/include/net/tcp.h
author     Eric Dumazet <dada1@cosmosbay.com>       2009-05-07 03:08:38 -0400
committer  David S. Miller <davem@davemloft.net>    2009-05-07 17:52:26 -0400
commit     f5f8d86b231e0489c33542c42afbb14d32411ee8 (patch)
tree       c78033e5ca7a340b4f709b29b2727d68536ea834 /include/net/tcp.h
parent     4d5b78c055c76bb563c4a43d2adb92735b3b9405 (diff)
tcp: tcp_prequeue() cleanup
Small cleanup patch to reduce line lengths, before a change in tcp_prequeue().

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h | 45
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b55b4891029e..ac37228b7001 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -890,30 +890,31 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
-        if (!sysctl_tcp_low_latency && tp->ucopy.task) {
-                __skb_queue_tail(&tp->ucopy.prequeue, skb);
-                tp->ucopy.memory += skb->truesize;
-                if (tp->ucopy.memory > sk->sk_rcvbuf) {
-                        struct sk_buff *skb1;
-
-                        BUG_ON(sock_owned_by_user(sk));
+        if (sysctl_tcp_low_latency || !tp->ucopy.task)
+                return 0;
+
+        __skb_queue_tail(&tp->ucopy.prequeue, skb);
+        tp->ucopy.memory += skb->truesize;
+        if (tp->ucopy.memory > sk->sk_rcvbuf) {
+                struct sk_buff *skb1;
 
-                        while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-                                sk_backlog_rcv(sk, skb1);
-                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
-                        }
-
-                        tp->ucopy.memory = 0;
-                } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-                        wake_up_interruptible(sk->sk_sleep);
-                        if (!inet_csk_ack_scheduled(sk))
-                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-                                                          (3 * TCP_RTO_MIN) / 4,
-                                                          TCP_RTO_MAX);
-                }
-                return 1;
+                BUG_ON(sock_owned_by_user(sk));
+
+                while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+                        sk_backlog_rcv(sk, skb1);
+                        NET_INC_STATS_BH(sock_net(sk),
+                                         LINUX_MIB_TCPPREQUEUEDROPPED);
+                }
+
+                tp->ucopy.memory = 0;
+        } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+                wake_up_interruptible(sk->sk_sleep);
+                if (!inet_csk_ack_scheduled(sk))
+                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+                                                  (3 * TCP_RTO_MIN) / 4,
+                                                  TCP_RTO_MAX);
         }
-        return 0;
+        return 1;
 }
 
 
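The patch is a pure control-flow flattening: the wrapping "if (...) { ... return 1; } return 0;" becomes a guard clause that returns 0 early, so every statement below it loses one indentation level and the long NET_INC_STATS_BH and inet_csk_reset_xmit_timer lines fit more comfortably. A minimal, self-contained userspace sketch of the same before/after transformation follows; the names queue_packet_before/queue_packet_after and struct ctx are hypothetical stand-ins, not kernel code.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for the kernel state; purely illustrative. */
struct ctx {
        bool low_latency;
        bool has_task;
        int queued;
};

/* Before the cleanup: the whole body nests inside one big if. */
static int queue_packet_before(struct ctx *c)
{
        if (!c->low_latency && c->has_task) {
                c->queued++;
                /* ... deeply indented work would go here ... */
                return 1;
        }
        return 0;
}

/* After the cleanup: invert the condition and return early,
 * which removes one indentation level from everything below. */
static int queue_packet_after(struct ctx *c)
{
        if (c->low_latency || !c->has_task)
                return 0;

        c->queued++;
        /* ... same work, one tab shallower ... */
        return 1;
}

int main(void)
{
        struct ctx c = { .low_latency = false, .has_task = true, .queued = 0 };

        /* Both versions behave identically. */
        printf("before: %d, after: %d, queued: %d\n",
               queue_packet_before(&c), queue_packet_after(&c), c.queued);
        return 0;
}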