diff options
author | Eric Dumazet <edumazet@google.com> | 2014-09-24 07:11:22 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-09-28 16:36:48 -0400 |
commit | cd7d8498c9a5d510c64db38d9f4f4fbc41790f09 (patch) | |
tree | 4057e715ca8227a081db71f1ec1359011c5c1a00 /net/ipv4/tcp_input.c | |
parent | dc83d4d8f6c897022c974a00769b7a6efee6aed8 (diff) |
tcp: change tcp_skb_pcount() location
Our goal is to access no more than one cache line per skb in
a write or receive queue when doing the various walks.
After recent TCP_SKB_CB() reorganizations, it is almost done.
Last part is tcp_skb_pcount() which currently uses
skb_shinfo(skb)->gso_segs, which is a terrible choice, because it needs
3 cache lines in current kernel (skb->head, skb->end, and
shinfo->gso_segs are all in 3 different cache lines, far from skb->cb)
This very simple patch reuses space currently taken by tcp_tw_isn
only in input path, as tcp_skb_pcount is only needed for skb stored in
write queue.
This considerably speeds up tcp_ack(), granted we avoid shinfo->tx_flags
to get SKBTX_ACK_TSTAMP, which seems possible.
This also speeds up all sack processing in general.
This speeds up tcp_sendmsg() because it no longer has to access/dirty
shinfo.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f3f016a15c5a..2c0af90231cf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1295,9 +1295,9 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, | |||
1295 | TCP_SKB_CB(prev)->end_seq += shifted; | 1295 | TCP_SKB_CB(prev)->end_seq += shifted; |
1296 | TCP_SKB_CB(skb)->seq += shifted; | 1296 | TCP_SKB_CB(skb)->seq += shifted; |
1297 | 1297 | ||
1298 | skb_shinfo(prev)->gso_segs += pcount; | 1298 | tcp_skb_pcount_add(prev, pcount); |
1299 | BUG_ON(skb_shinfo(skb)->gso_segs < pcount); | 1299 | BUG_ON(tcp_skb_pcount(skb) < pcount); |
1300 | skb_shinfo(skb)->gso_segs -= pcount; | 1300 | tcp_skb_pcount_add(skb, -pcount); |
1301 | 1301 | ||
1302 | /* When we're adding to gso_segs == 1, gso_size will be zero, | 1302 | /* When we're adding to gso_segs == 1, gso_size will be zero, |
1303 | * in theory this shouldn't be necessary but as long as DSACK | 1303 | * in theory this shouldn't be necessary but as long as DSACK |
@@ -1310,7 +1310,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, | |||
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | /* CHECKME: To clear or not to clear? Mimics normal skb currently */ | 1312 | /* CHECKME: To clear or not to clear? Mimics normal skb currently */ |
1313 | if (skb_shinfo(skb)->gso_segs <= 1) { | 1313 | if (tcp_skb_pcount(skb) <= 1) { |
1314 | skb_shinfo(skb)->gso_size = 0; | 1314 | skb_shinfo(skb)->gso_size = 0; |
1315 | skb_shinfo(skb)->gso_type = 0; | 1315 | skb_shinfo(skb)->gso_type = 0; |
1316 | } | 1316 | } |