diff options
author     Eric Dumazet <edumazet@google.com>      2012-05-02 05:58:29 -0400
committer  David S. Miller <davem@davemloft.net>   2012-05-02 21:11:11 -0400
commit     b081f85c2977b1cbb6e635d53d9512f1ef985972 (patch)
tree       3b41dfe3ee282c175907e8bcf9614385115fa2e6 /net/ipv4/tcp.c
parent     923dd347b8904c24bcac89bf038ed4da87f8aa90 (diff)
net: implement tcp coalescing in tcp_queue_rcv()
Extend tcp coalescing implementing it from tcp_queue_rcv(), the main
receiver function when application is not blocked in recvmsg().
Function tcp_queue_rcv() is moved a bit to allow its call from
tcp_data_queue()
This gives good results especially if GRO could not kick, and if skb
head is a fragment.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
 net/ipv4/tcp.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6802c89bc44d..c2cff8b62772 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -981,8 +981,8 @@ static inline int select_size(const struct sock *sk, bool sg)
 static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	struct sk_buff *skb;
-	struct tcp_skb_cb *cb;
 	struct tcphdr *th;
+	bool fragstolen;
 
 	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
 	if (!skb)
@@ -995,14 +995,14 @@ static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 	if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
 		goto err_free;
 
-	cb = TCP_SKB_CB(skb);
-
 	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
 	TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
 
-	tcp_queue_rcv(sk, skb, sizeof(*th));
-
+	if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
+		WARN_ON_ONCE(fragstolen); /* should not happen */
+		__kfree_skb(skb);
+	}
 	return size;
 
 err_free: