about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2011-12-04 02:05:17 -0500
committerDavid S. Miller <davem@davemloft.net>2011-12-04 13:20:40 -0500
commit761965eab38d2cbc59c36e355c59609e3a04705a (patch)
tree53bc45ee752f8d31323962e5af2e0451376c3b35 /net
parent117632e64d2a5f464e491fe221d7169a3814a77b (diff)
tcp: tcp_sendmsg() page recycling
If our TCP_PAGE(sk) is not shared (page_count() == 1), we can set page offset to 0. This permits better filling of the pages on small to medium tcp writes. "tbench 16" results on my dev server (2x4x2 machine) : Before : 3072 MB/s After : 3146 MB/s (2.4 % gain) Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/tcp.c7
1 file changed, 6 insertions, 1 deletion
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 45156be3abfd..a09fe253b917 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1009,7 +1009,12 @@ new_segment:
1009 int merge = 0; 1009 int merge = 0;
1010 int i = skb_shinfo(skb)->nr_frags; 1010 int i = skb_shinfo(skb)->nr_frags;
1011 struct page *page = TCP_PAGE(sk); 1011 struct page *page = TCP_PAGE(sk);
1012 int off = TCP_OFF(sk); 1012 int off;
1013
1014 if (page && page_count(page) == 1)
1015 TCP_OFF(sk) = 0;
1016
1017 off = TCP_OFF(sk);
1013 1018
1014 if (skb_can_coalesce(skb, i, page, off) && 1019 if (skb_can_coalesce(skb, i, page, off) &&
1015 off != PAGE_SIZE) { 1020 off != PAGE_SIZE) {