Diffstat (limited to 'net/ipv4')

 net/ipv4/tcp_input.c    | 9 +++++----
 net/ipv4/tcp_output.c   | 1 -
 net/ipv4/tcp_scalable.c | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
-			   unsigned int pcount, int shifted, int mss)
+			   unsigned int pcount, int shifted, int mss,
+			   int dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	}
 
 	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, 0, pcount);
+	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
+	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	len = skb->len;
 	if (skb_shift(prev, skb, len)) {
 		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
+		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
 	}
 
 out:
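Note on the tcp_input.c change above: tcp_shifted_skb() previously hard-coded the dup_sack argument to 0 when re-tagging the shifted skb, so a D-SACK that covered data being merged never reached tcp_sacktag_one(). That flag is what lets the sacktag code credit back undo state for a retransmission that turned out to be unnecessary. A minimal sketch of that accounting, assuming the usual struct tcp_sock fields of this era (simplified; not the verbatim kernel code):

	/* Inside tcp_sacktag_one(), roughly: a D-SACK covering a segment
	 * we retransmitted means the retransmission was likely spurious,
	 * so credit undo_retrans back for a possible later cwnd undo.
	 * Hard-coding dup_sack = 0 on the shift path silently lost this.
	 */
	if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
	    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
		tp->undo_retrans--;

With dup_sack forwarded, the skb-shifting fast path and the ordinary sacktag path now account D-SACKs identically.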
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		last_lost = tp->snd_una;
 	}
 
-	/* First pass: retransmit lost packets. */
 	tcp_for_write_queue_from(skb, sk) {
 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
 
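The comment dropped from tcp_xmit_retransmit_queue() appears to be a leftover: the function no longer makes separate "first pass / second pass" sweeps over the write queue, but decides per skb, inside the single tcp_for_write_queue_from() loop, whether it needs retransmitting, so a "First pass:" label was misleading.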
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@
 /* Tom Kelly's Scalable TCP
  *
- * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/
+ * See http://www.deneholme.net/tom/scalable/
  *
  * John Heffner <jheffner@sc.edu>
  */
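For context on the file whose reference URL was fixed: Scalable TCP replaces standard AIMD with response constants that are independent of the window size, so recovery after a loss takes a roughly fixed number of RTTs even on long fat networks. A hedged sketch of its multiplicative-decrease rule, assuming the constants the mainline module uses (an additive-increase count of 50 ACKs and a decrease of cwnd/8; a sketch, not this file's exact code):

	/* On loss, Scalable TCP cuts cwnd by 1/8 (vs. Reno's 1/2),
	 * never going below 2 segments.
	 */
	static unsigned int scalable_ssthresh(unsigned int cwnd)
	{
		unsigned int reduced = cwnd - (cwnd >> 3);	/* cwnd * 7/8 */

		return reduced > 2U ? reduced : 2U;
	}

The fixed back-off is what makes the scheme "scalable": the time to regrow the window after a loss depends only on these constants, not on how large cwnd had become.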