author	Eric Dumazet <edumazet@google.com>	2017-10-06 01:21:26 -0400
committer	David S. Miller <davem@davemloft.net>	2017-10-06 19:28:54 -0400
commit	f33198163a0fbb03766444253edf6ea50685d725 (patch)
tree	21a25680db4f42f3d65294ca1018c6ec9436f72a /net/ipv4/tcp_input.c
parent	8ba6ddaaf86c4c6814774e4e4ef158b732bd9f9f (diff)
tcp: pass previous skb to tcp_shifted_skb()
No need to recompute the previous skb, as doing so will be a bit more
expensive once the rtx queue is converted to an RB tree.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
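To see why recomputing prev would get more expensive, compare predecessor
lookup in the two queue layouts. The sketch below is illustrative only, not
the kernel's implementation: the struct and function names (list_prev_sketch,
rb_prev_sketch) are invented for this example. On a doubly linked list the
previous element is a single pointer dereference, while the in-order
predecessor in a tree ordered by sequence number can take an O(log n) walk,
so it is cheaper for the caller, which already holds prev, to pass it down.

	/* Predecessor on a doubly linked list: one O(1) dereference. */
	struct list_node {
		struct list_node *prev, *next;
	};

	static struct list_node *list_prev_sketch(struct list_node *n)
	{
		return n->prev;
	}

	/* Predecessor in a binary search tree (e.g. an RB tree):
	 * an O(log n) walk in the worst case.
	 */
	struct rbt_node {
		struct rbt_node *left, *right, *parent;
	};

	static struct rbt_node *rb_prev_sketch(struct rbt_node *n)
	{
		if (n->left) {
			/* Predecessor is the rightmost node of the
			 * left subtree.
			 */
			n = n->left;
			while (n->right)
				n = n->right;
			return n;
		}
		/* Otherwise climb until we arrive from a right child. */
		while (n->parent && n == n->parent->left)
			n = n->parent;
		return n->parent;
	}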
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index be7644204cd4..72c4732ae2da 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1288,13 +1288,13 @@ static u8 tcp_sacktag_one(struct sock *sk,
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
-static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
+			    struct sk_buff *skb,
 			    struct tcp_sacktag_state *state,
 			    unsigned int pcount, int shifted, int mss,
 			    bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
 	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
 	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
 
@@ -1495,7 +1495,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
+	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1514,7 +1514,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	len = skb->len;
 	if (skb_shift(prev, skb, len)) {
 		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+		tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
+				len, mss, 0);
 	}
 
 out: