Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  198
1 file changed, 94 insertions(+), 104 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c28976a7e596..2bc8e27a163d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -64,6 +64,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/kernel.h>
 #include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
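The swap() calls introduced below are the generic helper from <linux/kernel.h>, which is why the include is added here. As a rough sketch (recalled from kernel headers of this era, not shown in this diff), the helper is:

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

Because it swaps whole values of any type, the patch can drop both the open-coded tcp_sack_block swap in tcp_sacktag_write_queue() and the tcp_sack_swap() helper further down.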
@@ -1178,10 +1179,18 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
 			continue;
 
-		if (after(received_upto, ack_seq) &&
-		    (tcp_is_fack(tp) ||
-		     !before(received_upto,
-			     ack_seq + tp->reordering * tp->mss_cache))) {
+		/* TODO: We would like to get rid of tcp_is_fack(tp) only
+		 * constraint here (see above) but figuring out that at
+		 * least tp->reordering SACK blocks reside between ack_seq
+		 * and received_upto is not easy task to do cheaply with
+		 * the available datastructures.
+		 *
+		 * Whether FACK should check here for tp->reordering segs
+		 * in-between one could argue for either way (it would be
+		 * rather simple to implement as we could count fack_count
+		 * during the walk and do tp->fackets_out - fack_count).
+		 */
+		if (after(received_upto, ack_seq)) {
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 			tp->retrans_out -= tcp_skb_pcount(skb);
 
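The TODO in this hunk sketches an alternative: count FACKed segments during the walk so a reordering-distance check could be kept even for FACK. A purely hypothetical illustration of that idea (not part of this patch; fack_count would have to be maintained by the loop):

	/* hypothetical: require roughly tp->reordering SACKed segments above this skb */
	if (after(received_upto, ack_seq) &&
	    tp->fackets_out - fack_count >= tp->reordering) {
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
	}
	fack_count += tcp_skb_pcount(skb);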
@@ -1794,11 +1803,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 	for (i = used_sacks - 1; i > 0; i--) {
 		for (j = 0; j < i; j++) {
 			if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
-				struct tcp_sack_block tmp;
-
-				tmp = sp[j];
-				sp[j] = sp[j + 1];
-				sp[j + 1] = tmp;
+				swap(sp[j], sp[j + 1]);
 
 				/* Track where the first SACK block goes to */
 				if (j == first_sack_index)
@@ -2453,6 +2458,44 @@ static int tcp_time_to_recover(struct sock *sk)
 	return 0;
 }
 
+/* New heuristics: it is possible only after we switched to restart timer
+ * each time when something is ACKed. Hence, we can detect timed out packets
+ * during fast retransmit without falling to slow start.
+ *
+ * Usefulness of this as is very questionable, since we should know which of
+ * the segments is the next to timeout which is relatively expensive to find
+ * in general case unless we add some data structure just for that. The
+ * current approach certainly won't find the right one too often and when it
+ * finally does find _something_ it usually marks large part of the window
+ * right away (because a retransmission with a larger timestamp blocks the
+ * loop from advancing). -ij
+ */
+static void tcp_timeout_skbs(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+
+	if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
+		return;
+
+	skb = tp->scoreboard_skb_hint;
+	if (tp->scoreboard_skb_hint == NULL)
+		skb = tcp_write_queue_head(sk);
+
+	tcp_for_write_queue_from(skb, sk) {
+		if (skb == tcp_send_head(sk))
+			break;
+		if (!tcp_skb_timedout(sk, skb))
+			break;
+
+		tcp_skb_mark_lost(tp, skb);
+	}
+
+	tp->scoreboard_skb_hint = skb;
+
+	tcp_verify_left_out(tp);
+}
+
 /* Mark head of queue up as lost. With RFC3517 SACK, the packets is
  * is against sacked "cnt", otherwise it's against facked "cnt"
  */
@@ -2525,30 +2568,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 		tcp_mark_head_lost(sk, sacked_upto);
 	}
 
-	/* New heuristics: it is possible only after we switched
-	 * to restart timer each time when something is ACKed.
-	 * Hence, we can detect timed out packets during fast
-	 * retransmit without falling to slow start.
-	 */
-	if (tcp_is_fack(tp) && tcp_head_timedout(sk)) {
-		struct sk_buff *skb;
-
-		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-			: tcp_write_queue_head(sk);
-
-		tcp_for_write_queue_from(skb, sk) {
-			if (skb == tcp_send_head(sk))
-				break;
-			if (!tcp_skb_timedout(sk, skb))
-				break;
-
-			tcp_skb_mark_lost(tp, skb);
-		}
-
-		tp->scoreboard_skb_hint = skb;
-
-		tcp_verify_left_out(tp);
-	}
+	tcp_timeout_skbs(sk);
 }
 
 /* CWND moderation, preventing bursts due to too big ACKs
@@ -2813,7 +2833,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 	icsk->icsk_mtup.probe_size = 0;
 }
 
-static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
+static void tcp_mtup_probe_success(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2841,7 +2861,7 @@ void tcp_simple_retransmit(struct sock *sk)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	unsigned int mss = tcp_current_mss(sk, 0);
+	unsigned int mss = tcp_current_mss(sk);
 	u32 prior_lost = tp->lost_out;
 
 	tcp_for_write_queue(skb, sk) {
@@ -3178,7 +3198,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-		u32 end_seq;
 		u32 acked_pcount;
 		u8 sacked = scb->sacked;
 
@@ -3193,16 +3212,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 				break;
 
 			fully_acked = 0;
-			end_seq = tp->snd_una;
 		} else {
 			acked_pcount = tcp_skb_pcount(skb);
-			end_seq = scb->end_seq;
-		}
-
-		/* MTU probing checks */
-		if (fully_acked && icsk->icsk_mtup.probe_size &&
-		    !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) {
-			tcp_mtup_probe_success(sk, skb);
 		}
 
 		if (sacked & TCPCB_RETRANS) {
@@ -3267,24 +3278,26 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		const struct tcp_congestion_ops *ca_ops
 			= inet_csk(sk)->icsk_ca_ops;
 
+		if (unlikely(icsk->icsk_mtup.probe_size &&
+			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
+			tcp_mtup_probe_success(sk);
+		}
+
 		tcp_ack_update_rtt(sk, flag, seq_rtt);
 		tcp_rearm_rto(sk);
 
 		if (tcp_is_reno(tp)) {
 			tcp_remove_reno_sacks(sk, pkts_acked);
 		} else {
+			int delta;
+
 			/* Non-retransmitted hole got filled? That's reordering */
 			if (reord < prior_fackets)
 				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
-			/* No need to care for underflows here because
-			 * the lost_skb_hint gets NULLed if we're past it
-			 * (or something non-trivial happened)
-			 */
-			if (tcp_is_fack(tp))
-				tp->lost_cnt_hint -= pkts_acked;
-			else
-				tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
+			delta = tcp_is_fack(tp) ? pkts_acked :
+						  prior_sacked - tp->sacked_out;
+			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
 		}
 
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
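The consolidated lost_cnt_hint update above is equivalent to the removed branches, plus a clamp so the hint can no longer underflow; expanded, it reads roughly:

	if (tcp_is_fack(tp))
		delta = pkts_acked;
	else
		delta = prior_sacked - tp->sacked_out;
	if (delta > tp->lost_cnt_hint)	/* the min() clamp */
		delta = tp->lost_cnt_hint;
	tp->lost_cnt_hint -= delta;

The old code instead relied on lost_skb_hint being cleared before an underflow could matter.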
@@ -3396,7 +3409,7 @@ static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
 
 	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
 		flag |= FLAG_WIN_UPDATE;
-		tcp_update_wl(tp, ack, ack_seq);
+		tcp_update_wl(tp, ack_seq);
 
 		if (tp->snd_wnd != nwin) {
 			tp->snd_wnd = nwin;
@@ -3572,15 +3585,18 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	int prior_packets;
 	int frto_cwnd = 0;
 
-	/* If the ack is newer than sent or older than previous acks
+	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
 	 */
-	if (after(ack, tp->snd_nxt))
-		goto uninteresting_ack;
-
 	if (before(ack, prior_snd_una))
 		goto old_ack;
 
+	/* If the ack includes data we haven't sent yet, discard
+	 * this segment (RFC793 Section 3.9).
+	 */
+	if (after(ack, tp->snd_nxt))
+		goto invalid_ack;
+
 	if (after(ack, prior_snd_una))
 		flag |= FLAG_SND_UNA_ADVANCED;
 
@@ -3601,7 +3617,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		 * No more checks are required.
 		 * Note, we use the fact that SND.UNA>=SND.WL2.
 		 */
-		tcp_update_wl(tp, ack, ack_seq);
+		tcp_update_wl(tp, ack_seq);
 		tp->snd_una = ack;
 		flag |= FLAG_WIN_UPDATE;
 
@@ -3670,6 +3686,10 @@ no_queue:
 		tcp_ack_probe(sk);
 	return 1;
 
+invalid_ack:
+	SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+	return -1;
+
 old_ack:
 	if (TCP_SKB_CB(skb)->sacked) {
 		tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3677,8 +3697,7 @@ old_ack:
 		tcp_try_keep_open(sk);
 	}
 
-uninteresting_ack:
-	SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+	SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
 	return 0;
 }
 
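With the invalid_ack path added above, tcp_ack()'s return value carries three cases that the later hunks rely on: positive for an ACK that was processed, zero for an old ACK that was ignored, and negative for an ACK above snd_nxt that the caller should drop. Roughly, the callers changed further down in this patch do:

	/* slow path in tcp_rcv_established() */
	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
		goto discard;

	/* tcp_rcv_state_process(): only a positive return is acceptable */
	int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;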
@@ -3866,8 +3885,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 		 * Not only, also it occurs for expired timestamps.
 		 */
 
-		if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-		    get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+		if (tcp_paws_check(&tp->rx_opt, 0))
 			tcp_store_ts_recent(tp);
 	}
 }
@@ -3919,9 +3937,9 @@ static inline int tcp_paws_discard(const struct sock *sk,
 				   const struct sk_buff *skb)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-		get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
-		!tcp_disordered_ack(sk, skb));
+
+	return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
+	       !tcp_disordered_ack(sk, skb);
 }
 
 /* Check segment sequence number for validity.
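tcp_paws_check() and tcp_paws_reject() replace the open-coded timestamp tests removed here and in tcp_replace_ts_recent(). Judging only from the removed expressions, the check is roughly the following (an approximation of the new helpers, which live outside this file and are not shown in this diff):

	static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
					 int paws_win)
	{
		/* timestamp did not go backwards by more than paws_win,
		 * or ts_recent is older than 24 days and thus stale
		 */
		return (s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win ||
		       get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS;
	}

With that reading, tcp_paws_discard() keeps its old meaning: discard only when the timestamp went backwards by more than TCP_PAWS_WINDOW, ts_recent is not yet stale, and the ACK is not merely disordered.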
@@ -4079,7 +4097,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
 	}
 }
 
@@ -4134,8 +4151,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 			 * Decrease num_sacks.
 			 */
 			tp->rx_opt.num_sacks--;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-					       tp->rx_opt.dsack;
 			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
 				sp[i] = sp[i + 1];
 			continue;
@@ -4144,20 +4159,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 	}
 }
 
-static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
-				 struct tcp_sack_block *sack2)
-{
-	__u32 tmp;
-
-	tmp = sack1->start_seq;
-	sack1->start_seq = sack2->start_seq;
-	sack2->start_seq = tmp;
-
-	tmp = sack1->end_seq;
-	sack1->end_seq = sack2->end_seq;
-	sack2->end_seq = tmp;
-}
-
 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -4172,7 +4173,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 		if (tcp_sack_extend(sp, seq, end_seq)) {
 			/* Rotate this_sack to the first one. */
 			for (; this_sack > 0; this_sack--, sp--)
-				tcp_sack_swap(sp, sp - 1);
+				swap(*sp, *(sp - 1));
 			if (cur_sacks > 1)
 				tcp_sack_maybe_coalesce(tp);
 			return;
@@ -4198,7 +4199,6 @@ new_sack:
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->rx_opt.num_sacks++;
-	tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -4212,7 +4212,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
 	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
 	}
 
@@ -4233,11 +4232,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 		this_sack++;
 		sp++;
 	}
-	if (num_sacks != tp->rx_opt.num_sacks) {
-		tp->rx_opt.num_sacks = num_sacks;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-				       tp->rx_opt.dsack;
-	}
+	tp->rx_opt.num_sacks = num_sacks;
 }
 
 /* This one checks to see if we can put data from the
@@ -4313,10 +4308,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_accept_cwr(tp, skb);
 
-	if (tp->rx_opt.dsack) {
-		tp->rx_opt.dsack = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-	}
+	tp->rx_opt.dsack = 0;
 
 	/* Queue data for delivery to the user.
 	 * Packets in sequence go to the receive queue.
@@ -4435,8 +4427,6 @@ drop:
 		/* Initial out of order segment, build 1 SACK. */
 		if (tcp_is_sack(tp)) {
 			tp->rx_opt.num_sacks = 1;
-			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = 1;
 			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
 			tp->selective_acks[0].end_seq =
 						TCP_SKB_CB(skb)->end_seq;
@@ -5157,7 +5147,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	 */
 
 	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
-	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
+	    !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
 		int tcp_header_len = tp->tcp_header_len;
 
 		/* Timestamp header prediction: tcp_header_len
@@ -5310,8 +5301,8 @@ slow_path:
 		return -res;
 
 step5:
-	if (th->ack)
-		tcp_ack(sk, skb, FLAG_SLOWPATH);
+	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+		goto discard;
 
 	tcp_rcv_rtt_measure_ts(sk, skb);
 
@@ -5409,7 +5400,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * never scaled.
 		 */
 		tp->snd_wnd = ntohs(th->window);
-		tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
+		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
 		if (!tp->rx_opt.wscale_ok) {
 			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5510,7 +5501,7 @@ discard:
 
 	/* PAWS check. */
 	if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
-	    tcp_paws_check(&tp->rx_opt, 0))
+	    tcp_paws_reject(&tp->rx_opt, 0))
 		goto discard_and_undo;
 
 	if (th->syn) {
@@ -5648,7 +5639,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	/* step 5: check the ACK field */
 	if (th->ack) {
-		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
+		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
 		switch (sk->sk_state) {
 		case TCP_SYN_RECV:
@@ -5670,8 +5661,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
 				tp->snd_wnd = ntohs(th->window) <<
 					      tp->rx_opt.snd_wscale;
-				tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
-					    TCP_SKB_CB(skb)->seq);
+				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
 				/* tcp_ack considers this ACK as duplicate
 				 * and does not calculate rtt.