diff options
author | Eric Dumazet <edumazet@google.com> | 2013-03-06 07:58:01 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-03-07 16:22:39 -0500 |
commit | b2fb4f54ecd47c42413d54b4666b06cf93c05abf (patch) | |
tree | 572853a2579c8472c31991bf7c61f11bc613e18b | |
parent | f3564b2bb5f86f42b8a068751551b6bd01325d9c (diff) |
tcp: uninline tcp_prequeue()
tcp_prequeue() became too big to be inlined.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/net/tcp.h | 45 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 44 |
2 files changed, 45 insertions(+), 44 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h index cf0694d4ad60..a2baa5e4ba31 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1030,50 +1030,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) | |||
1030 | #endif | 1030 | #endif |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | /* Packet is added to VJ-style prequeue for processing in process | 1033 | extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); |
1034 | * context, if a reader task is waiting. Apparently, this exciting | ||
1035 | * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) | ||
1036 | * failed somewhere. Latency? Burstiness? Well, at least now we will | ||
1037 | * see, why it failed. 8)8) --ANK | ||
1038 | * | ||
1039 | * NOTE: is this not too big to inline? | ||
1040 | */ | ||
1041 | static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) | ||
1042 | { | ||
1043 | struct tcp_sock *tp = tcp_sk(sk); | ||
1044 | |||
1045 | if (sysctl_tcp_low_latency || !tp->ucopy.task) | ||
1046 | return false; | ||
1047 | |||
1048 | if (skb->len <= tcp_hdrlen(skb) && | ||
1049 | skb_queue_len(&tp->ucopy.prequeue) == 0) | ||
1050 | return false; | ||
1051 | |||
1052 | __skb_queue_tail(&tp->ucopy.prequeue, skb); | ||
1053 | tp->ucopy.memory += skb->truesize; | ||
1054 | if (tp->ucopy.memory > sk->sk_rcvbuf) { | ||
1055 | struct sk_buff *skb1; | ||
1056 | |||
1057 | BUG_ON(sock_owned_by_user(sk)); | ||
1058 | |||
1059 | while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { | ||
1060 | sk_backlog_rcv(sk, skb1); | ||
1061 | NET_INC_STATS_BH(sock_net(sk), | ||
1062 | LINUX_MIB_TCPPREQUEUEDROPPED); | ||
1063 | } | ||
1064 | |||
1065 | tp->ucopy.memory = 0; | ||
1066 | } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { | ||
1067 | wake_up_interruptible_sync_poll(sk_sleep(sk), | ||
1068 | POLLIN | POLLRDNORM | POLLRDBAND); | ||
1069 | if (!inet_csk_ack_scheduled(sk)) | ||
1070 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | ||
1071 | (3 * tcp_rto_min(sk)) / 4, | ||
1072 | TCP_RTO_MAX); | ||
1073 | } | ||
1074 | return true; | ||
1075 | } | ||
1076 | |||
1077 | 1034 | ||
1078 | #undef STATE_TRACE | 1035 | #undef STATE_TRACE |
1079 | 1036 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4a8ec457310f..8cdee120a50c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1950,6 +1950,50 @@ void tcp_v4_early_demux(struct sk_buff *skb) | |||
1950 | } | 1950 | } |
1951 | } | 1951 | } |
1952 | 1952 | ||
1953 | /* Packet is added to VJ-style prequeue for processing in process | ||
1954 | * context, if a reader task is waiting. Apparently, this exciting | ||
1955 | * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) | ||
1956 | * failed somewhere. Latency? Burstiness? Well, at least now we will | ||
1957 | * see, why it failed. 8)8) --ANK | ||
1958 | * | ||
1959 | */ | ||
1960 | bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) | ||
1961 | { | ||
1962 | struct tcp_sock *tp = tcp_sk(sk); | ||
1963 | |||
1964 | if (sysctl_tcp_low_latency || !tp->ucopy.task) | ||
1965 | return false; | ||
1966 | |||
1967 | if (skb->len <= tcp_hdrlen(skb) && | ||
1968 | skb_queue_len(&tp->ucopy.prequeue) == 0) | ||
1969 | return false; | ||
1970 | |||
1971 | __skb_queue_tail(&tp->ucopy.prequeue, skb); | ||
1972 | tp->ucopy.memory += skb->truesize; | ||
1973 | if (tp->ucopy.memory > sk->sk_rcvbuf) { | ||
1974 | struct sk_buff *skb1; | ||
1975 | |||
1976 | BUG_ON(sock_owned_by_user(sk)); | ||
1977 | |||
1978 | while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { | ||
1979 | sk_backlog_rcv(sk, skb1); | ||
1980 | NET_INC_STATS_BH(sock_net(sk), | ||
1981 | LINUX_MIB_TCPPREQUEUEDROPPED); | ||
1982 | } | ||
1983 | |||
1984 | tp->ucopy.memory = 0; | ||
1985 | } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { | ||
1986 | wake_up_interruptible_sync_poll(sk_sleep(sk), | ||
1987 | POLLIN | POLLRDNORM | POLLRDBAND); | ||
1988 | if (!inet_csk_ack_scheduled(sk)) | ||
1989 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | ||
1990 | (3 * tcp_rto_min(sk)) / 4, | ||
1991 | TCP_RTO_MAX); | ||
1992 | } | ||
1993 | return true; | ||
1994 | } | ||
1995 | EXPORT_SYMBOL(tcp_prequeue); | ||
1996 | |||
1953 | /* | 1997 | /* |
1954 | * From tcp_input.c | 1998 | * From tcp_input.c |
1955 | */ | 1999 | */ |