path: root/include/net/tcp.h
author	Eric Dumazet <edumazet@google.com>	2013-03-06 07:58:01 -0500
committer	David S. Miller <davem@davemloft.net>	2013-03-07 16:22:39 -0500
commit	b2fb4f54ecd47c42413d54b4666b06cf93c05abf (patch)
tree	572853a2579c8472c31991bf7c61f11bc613e18b /include/net/tcp.h
parent	f3564b2bb5f86f42b8a068751551b6bd01325d9c (diff)
tcp: uninline tcp_prequeue()
tcp_prequeue() became too big to be inlined.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	45
1 file changed, 1 insertion(+), 44 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf0694d4ad60..a2baa5e4ba31 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1030,50 +1030,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 #endif
 }
 
-/* Packet is added to VJ-style prequeue for processing in process
- * context, if a reader task is waiting. Apparently, this exciting
- * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
- * failed somewhere. Latency? Burstiness? Well, at least now we will
- * see, why it failed. 8)8) --ANK
- *
- * NOTE: is this not too big to inline?
- */
-static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return false;
-
-	if (skb->len <= tcp_hdrlen(skb) &&
-	    skb_queue_len(&tp->ucopy.prequeue) == 0)
-		return false;
-
-	__skb_queue_tail(&tp->ucopy.prequeue, skb);
-	tp->ucopy.memory += skb->truesize;
-	if (tp->ucopy.memory > sk->sk_rcvbuf) {
-		struct sk_buff *skb1;
-
-		BUG_ON(sock_owned_by_user(sk));
-
-		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-			sk_backlog_rcv(sk, skb1);
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPPREQUEUEDROPPED);
-		}
-
-		tp->ucopy.memory = 0;
-	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-		wake_up_interruptible_sync_poll(sk_sleep(sk),
-						POLLIN | POLLRDNORM | POLLRDBAND);
-		if (!inet_csk_ack_scheduled(sk))
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-						  (3 * tcp_rto_min(sk)) / 4,
-						  TCP_RTO_MAX);
-	}
-	return true;
-}
-
+extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE
 
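The diffstat is limited to include/net/tcp.h, so the other half of this commit is not visible here. As a rough sketch under that caveat: the body presumably moved, essentially unchanged, into net/ipv4/tcp_ipv4.c as an ordinary external definition and was exported so that the IPv6 receive path can still call it. The destination file and the EXPORT_SYMBOL below are assumptions, not part of the diff shown above:

/* Sketch of the assumed companion change in net/ipv4/tcp_ipv4.c.
 * The function loses "static inline"; its logic is exactly the
 * body deleted from tcp.h in the hunk above.
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	/* ... remainder identical to the inline body removed from tcp.h ... */

	return true;
}
EXPORT_SYMBOL(tcp_prequeue);	/* assumed, so modular callers can still link */

The trade-off is the usual one for uninlining: one extra function call per incoming segment in exchange for a single shared copy of a roughly forty-line function, rather than a duplicate expansion at every call site in the IPv4 and IPv6 receive paths.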