Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	236
1 file changed, 149 insertions, 87 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cad73b7dfef0..1f5e6049883e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,8 +5,6 @@
  *
  * Implementation of the Transmission Control Protocol(TCP).
  *
- * Version: $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
- *
  * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -604,7 +602,7 @@ static u32 tcp_rto_min(struct sock *sk)
 	u32 rto_min = TCP_RTO_MIN;
 
 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-		rto_min = dst_metric(dst, RTAX_RTO_MIN);
+		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 	return rto_min;
 }
 
@@ -731,6 +729,7 @@ void tcp_update_metrics(struct sock *sk)
 	if (dst && (dst->flags & DST_HOST)) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 		int m;
+		unsigned long rtt;
 
 		if (icsk->icsk_backoff || !tp->srtt) {
 			/* This session failed to estimate rtt. Why?
@@ -742,7 +741,8 @@ void tcp_update_metrics(struct sock *sk)
 			return;
 		}
 
-		m = dst_metric(dst, RTAX_RTT) - tp->srtt;
+		rtt = dst_metric_rtt(dst, RTAX_RTT);
+		m = rtt - tp->srtt;
 
 		/* If newly calculated rtt larger than stored one,
 		 * store new one. Otherwise, use EWMA. Remember,
@@ -750,12 +750,13 @@ void tcp_update_metrics(struct sock *sk)
 		 */
 		if (!(dst_metric_locked(dst, RTAX_RTT))) {
 			if (m <= 0)
-				dst->metrics[RTAX_RTT - 1] = tp->srtt;
+				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
 			else
-				dst->metrics[RTAX_RTT - 1] -= (m >> 3);
+				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
 		}
 
 		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
+			unsigned long var;
 			if (m < 0)
 				m = -m;
 
@@ -764,11 +765,13 @@ void tcp_update_metrics(struct sock *sk)
 			if (m < tp->mdev)
 				m = tp->mdev;
 
-			if (m >= dst_metric(dst, RTAX_RTTVAR))
-				dst->metrics[RTAX_RTTVAR - 1] = m;
+			var = dst_metric_rtt(dst, RTAX_RTTVAR);
+			if (m >= var)
+				var = m;
 			else
-				dst->metrics[RTAX_RTTVAR-1] -=
-					(dst_metric(dst, RTAX_RTTVAR) - m)>>2;
+				var -= (var - m) >> 2;
+
+			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
 		}
 
 		if (tp->snd_ssthresh >= 0xFFFF) {
@@ -899,7 +902,7 @@ static void tcp_init_metrics(struct sock *sk)
 	if (dst_metric(dst, RTAX_RTT) == 0)
 		goto reset;
 
-	if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
+	if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
 		goto reset;
 
 	/* Initial rtt is determined from SYN,SYN-ACK.
@@ -916,12 +919,12 @@ static void tcp_init_metrics(struct sock *sk)
 	 * to low value, and then abruptly stops to do it and starts to delay
 	 * ACKs, wait for troubles.
 	 */
-	if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
-		tp->srtt = dst_metric(dst, RTAX_RTT);
+	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
+		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
 		tp->rtt_seq = tp->snd_nxt;
 	}
-	if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
-		tp->mdev = dst_metric(dst, RTAX_RTTVAR);
+	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
+		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
 		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 	}
 	tcp_set_rto(sk);
@@ -949,17 +952,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	if (metric > tp->reordering) {
+		int mib_idx;
+
 		tp->reordering = min(TCP_MAX_REORDERING, metric);
 
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
-			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+			mib_idx = LINUX_MIB_TCPTSREORDER;
 		else if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+			mib_idx = LINUX_MIB_TCPRENOREORDER;
 		else if (tcp_is_fack(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+			mib_idx = LINUX_MIB_TCPFACKREORDER;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+			mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1155,7 +1162,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 				tp->lost_out += tcp_skb_pcount(skb);
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			}
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 		} else {
 			if (before(ack_seq, new_low_seq))
 				new_low_seq = ack_seq;
@@ -1167,10 +1174,11 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 	tp->lost_retrans_low = new_low_seq;
 }
 
-static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 			   struct tcp_sack_block_wire *sp, int num_sacks,
 			   u32 prior_snd_una)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
 	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
 	int dup_sack = 0;
@@ -1178,7 +1186,7 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = 1;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1187,7 +1195,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = 1;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
 
@@ -1414,10 +1423,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 	unsigned char *ptr = (skb_transport_header(ack_skb) +
 			      TCP_SKB_CB(ack_skb)->sacked);
 	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
-	struct tcp_sack_block sp[4];
+	struct tcp_sack_block sp[TCP_NUM_SACKS];
 	struct tcp_sack_block *cache;
 	struct sk_buff *skb;
-	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE) >> 3;
+	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
 	int used_sacks;
 	int reord = tp->packets_out;
 	int flag = 0;
@@ -1432,7 +1441,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 		tcp_highest_sack_reset(sk);
 	}
 
-	found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
+	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
 					 num_sacks, prior_snd_una);
 	if (found_dup_sack)
 		flag |= FLAG_DSACKING_ACK;
@@ -1458,18 +1467,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 		if (!tcp_is_sackblock_valid(tp, dup_sack,
 					    sp[used_sacks].start_seq,
 					    sp[used_sacks].end_seq)) {
+			int mib_idx;
+
 			if (dup_sack) {
 				if (!tp->undo_marker)
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
 			} else {
 				/* Don't count olds caused by ACK reordering */
 				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
 				    !after(sp[used_sacks].end_seq, tp->snd_una))
 					continue;
-				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
+
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1962,7 +1975,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 {
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
 		icsk->icsk_retransmits++;
@@ -2382,15 +2395,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_may_undo(tp)) {
+		int mib_idx;
+
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
 		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
 		tcp_undo_cwr(sk, 1);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+			mib_idx = LINUX_MIB_TCPLOSSUNDO;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+			mib_idx = LINUX_MIB_TCPFULLUNDO;
+
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 		tp->undo_marker = 0;
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2413,7 +2430,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
 }
 
@@ -2436,7 +2453,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
 	DBGUNDO(sk, "Hoe");
 	tcp_undo_cwr(sk, 0);
-	NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 	/* So... Do not make Hoe's retransmit yet.
 	 * If the first packet was delayed, the rest
@@ -2465,7 +2482,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tcp_undo_cwr(sk, 1);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
@@ -2562,7 +2579,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
-	int fast_rexmit = 0;
+	int fast_rexmit = 0, mib_idx;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
 		tp->sacked_out = 0;
@@ -2584,7 +2601,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
 		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
 	}
 
 	/* D. Check consistency of the current state. */
@@ -2685,9 +2702,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 		/* Otherwise enter Recovery state */
 
 		if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+			mib_idx = LINUX_MIB_TCPRENORECOVERY;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+			mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 		tp->high_seq = tp->snd_nxt;
 		tp->prior_ssthresh = 0;
@@ -3198,7 +3217,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		}
 		tp->frto_counter = 0;
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
 	return 0;
 }
@@ -3251,12 +3270,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 
 		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -3450,6 +3469,43 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 	return 1;
 }
 
+#ifdef CONFIG_TCP_MD5SIG
+/*
+ * Parse MD5 Signature option
+ */
+u8 *tcp_parse_md5sig_option(struct tcphdr *th)
+{
+	int length = (th->doff << 2) - sizeof (*th);
+	u8 *ptr = (u8*)(th + 1);
+
+	/* If the TCP option is too short, we can short cut */
+	if (length < TCPOLEN_MD5SIG)
+		return NULL;
+
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize;
+
+		switch(opcode) {
+		case TCPOPT_EOL:
+			return NULL;
+		case TCPOPT_NOP:
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2 || opsize > length)
+				return NULL;
+			if (opcode == TCPOPT_MD5SIG)
+				return ptr;
+		}
+		ptr += opsize - 2;
+		length -= opsize;
+	}
+	return NULL;
+}
+#endif
+
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
 	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
@@ -3662,26 +3718,33 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 	return 0;
 }
 
-static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+		int mib_idx;
+
 		if (before(seq, tp->rcv_nxt))
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+			mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
+
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
-					   4 - tp->rx_opt.tstamp_ok);
+		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
 	}
 }
 
-static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (!tp->rx_opt.dsack)
-		tcp_dsack_set(tp, seq, end_seq);
+		tcp_dsack_set(sk, seq, end_seq);
 	else
 		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
 }
@@ -3692,7 +3755,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -3700,7 +3763,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
 				end_seq = tp->rcv_nxt;
-			tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
+			tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
 		}
 	}
 
@@ -3727,9 +3790,8 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 			 * Decrease num_sacks.
 			 */
 			tp->rx_opt.num_sacks--;
-			tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
-						   tp->rx_opt.dsack,
-						   4 - tp->rx_opt.tstamp_ok);
+			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
+					       tp->rx_opt.dsack;
 			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
 				sp[i] = sp[i + 1];
 			continue;
@@ -3779,7 +3841,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 	 *
 	 * If the sack array is full, forget about the last one.
 	 */
-	if (this_sack >= 4) {
+	if (this_sack >= TCP_NUM_SACKS) {
 		this_sack--;
 		tp->rx_opt.num_sacks--;
 		sp--;
@@ -3792,8 +3854,7 @@ new_sack:
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->rx_opt.num_sacks++;
-	tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
-				   4 - tp->rx_opt.tstamp_ok);
+	tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -3830,9 +3891,8 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	}
 	if (num_sacks != tp->rx_opt.num_sacks) {
 		tp->rx_opt.num_sacks = num_sacks;
-		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
-					   tp->rx_opt.dsack,
-					   4 - tp->rx_opt.tstamp_ok);
+		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
+				       tp->rx_opt.dsack;
 	}
 }
 
@@ -3853,7 +3913,7 @@ static void tcp_ofo_queue(struct sock *sk)
 			__u32 dsack = dsack_high;
 			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
 				dsack_high = TCP_SKB_CB(skb)->end_seq;
-			tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
+			tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
 		}
 
 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
@@ -3911,8 +3971,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
 	if (tp->rx_opt.dsack) {
 		tp->rx_opt.dsack = 0;
-		tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
-					     4 - tp->rx_opt.tstamp_ok);
+		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
 	}
 
 	/* Queue data for delivery to the user.
@@ -3981,8 +4040,8 @@ queue_and_out:
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case. Force an immediate ack. */
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
-		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
 		tcp_enter_quickack_mode(sk);
@@ -4004,7 +4063,7 @@ drop:
 			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
 			   TCP_SKB_CB(skb)->end_seq);
 
-		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
+		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
 
 		/* If window is closed, drop tail of packet. But after
 		 * remembering D-SACK for its head made in previous line.
@@ -4069,12 +4128,12 @@ drop:
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
 			__kfree_skb(skb);
-			tcp_dsack_set(tp, seq, end_seq);
+			tcp_dsack_set(sk, seq, end_seq);
 			goto add_sack;
 		}
 		if (after(seq, TCP_SKB_CB(skb1)->seq)) {
 			/* Partial overlap. */
-			tcp_dsack_set(tp, seq,
+			tcp_dsack_set(sk, seq,
 				      TCP_SKB_CB(skb1)->end_seq);
 		} else {
 			skb1 = skb1->prev;
@@ -4087,12 +4146,12 @@ drop:
 	       (struct sk_buff *)&tp->out_of_order_queue &&
 	       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
 		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-			tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
+			tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 					 end_seq);
 			break;
 		}
 		__skb_unlink(skb1, &tp->out_of_order_queue);
-		tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
+		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
 		__kfree_skb(skb1);
 	}
@@ -4123,7 +4182,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			continue;
 		}
@@ -4191,7 +4250,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			if (skb == tail ||
 			    tcp_hdr(skb)->syn ||
@@ -4254,7 +4313,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
 	int res = 0;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state. A conforming SACK implementation will
@@ -4283,7 +4342,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4312,7 +4371,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor. It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -4742,7 +4801,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			tcp_data_snd_check(sk);
 			return 0;
 		} else { /* Header too small */
-			TCP_INC_STATS_BH(TCP_MIB_INERRS);
+			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 			goto discard;
 		}
 	} else {
@@ -4779,7 +4838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				__skb_pull(skb, tcp_header_len);
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-				NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
 			}
 			if (copied_early)
 				tcp_cleanup_rbuf(sk, skb->len);
@@ -4802,7 +4861,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if ((int)skb->truesize > sk->sk_forward_alloc)
 				goto step5;
 
-			NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
 			__skb_pull(skb, tcp_header_len);
@@ -4846,7 +4905,7 @@ slow_path:
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -4881,8 +4940,8 @@ slow_path:
 	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
 
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		TCP_INC_STATS_BH(TCP_MIB_INERRS);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -4904,7 +4963,7 @@ step5:
 	return 0;
 
 csum_error:
-	TCP_INC_STATS_BH(TCP_MIB_INERRS);
+	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
 	__kfree_skb(skb);
@@ -4938,7 +4997,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
 
@@ -5222,7 +5281,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -5251,7 +5310,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	 * Check for a SYN in window.
 	 */
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -5333,7 +5392,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
 
@@ -5393,7 +5452,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
@@ -5422,6 +5481,9 @@ EXPORT_SYMBOL(sysctl_tcp_ecn);
 EXPORT_SYMBOL(sysctl_tcp_reordering);
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 EXPORT_SYMBOL(tcp_parse_options);
+#ifdef CONFIG_TCP_MD5SIG
+EXPORT_SYMBOL(tcp_parse_md5sig_option);
+#endif
 EXPORT_SYMBOL(tcp_rcv_established);
 EXPORT_SYMBOL(tcp_rcv_state_process);
 EXPORT_SYMBOL(tcp_initialize_rcv_mss);
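
Note on the option walk added in tcp_parse_md5sig_option() above: it follows the standard TCP option encoding, where kind 0 (EOL) terminates the list, kind 1 (NOP) is a single padding byte, and every other option carries a length byte that counts the kind and length octets themselves, so a length below 2 or beyond the remaining option space is malformed. The user-space sketch below mirrors that same pattern for readers who want to experiment with it outside the kernel; the find_tcp_option() helper and its generic 'wanted' parameter are illustrative only, not kernel API.

#include <stddef.h>
#include <stdint.h>

#define TCPOPT_EOL	0	/* end of option list */
#define TCPOPT_NOP	1	/* one-byte padding */

/* Return a pointer to the payload of option 'wanted', or NULL if absent
 * or the option area is malformed. 'opts' points just past the fixed
 * 20-byte TCP header; 'length' is (doff << 2) minus that header.
 */
static const uint8_t *find_tcp_option(const uint8_t *opts, int length, int wanted)
{
	const uint8_t *ptr = opts;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return NULL;
		case TCPOPT_NOP:
			length--;	/* NOP has no length byte */
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				return NULL;	/* malformed: refuse to walk off the end */
			if (opcode == wanted)
				return ptr;	/* payload starts after kind+len */
		}
		ptr += opsize - 2;	/* skip payload to the next option */
		length -= opsize;
	}
	return NULL;
}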