Diffstat (limited to 'net/ipv4/tcp_input.c')
 net/ipv4/tcp_input.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3dbbb44b3e7d..69d8c38ccd39 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
-#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained DSACK info */
+#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */

 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
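The FLAG_* values form a bit set that the ACK-processing path accumulates into a single int while handling one ACK. A minimal userspace sketch of how the bits compose (FLAG_DATA_ACKED and FLAG_SYN_ACKED values are taken from the same file; the sketch itself is illustrative, not kernel code):

#include <stdio.h>

#define FLAG_DATA_ACKED		0x04	/* this ACK acknowledged new data */
#define FLAG_SYN_ACKED		0x10	/* this ACK acknowledged the SYN */
#define FLAG_DSACKING_ACK	0x800	/* SACK blocks contained D-SACK info */
#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)

int main(void)
{
	int flag = 0;

	flag |= FLAG_DATA_ACKED;	/* cumulative ACK advanced snd_una */
	flag |= FLAG_DSACKING_ACK;	/* the SACK option carried a D-SACK block */

	if (flag & FLAG_ACKED)
		printf("new data was acknowledged\n");
	if (flag & FLAG_DSACKING_ACK)
		printf("peer reported a duplicate segment via D-SACK\n");
	return 0;
}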
@@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp)
 	tp->rx_opt.sack_ok &= ~2;
 }

-/* Take a notice that peer is sending DSACKs */
+/* Take a notice that peer is sending D-SACKs */
 static void tcp_dsack_seen(struct tcp_sock *tp)
 {
 	tp->rx_opt.sack_ok |= 4;
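Together with tcp_disable_fack() just above (sack_ok &= ~2), this implies a small bit layout inside rx_opt.sack_ok. A hedged sketch of that layout as the two helpers suggest it (the accessor names are descriptive, not the kernel's):

/* bit 0: SACK negotiated, bit 1: FACK usable, bit 2: peer has sent D-SACKs */
struct rx_opt_sketch { int sack_ok; };

static int sack_enabled(const struct rx_opt_sketch *o) { return o->sack_ok & 1; }
static int fack_enabled(const struct rx_opt_sketch *o) { return o->sack_ok & 2; }
static int dsack_seen(const struct rx_opt_sketch *o)   { return o->sack_ok & 4; }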
@@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
  *
  * With D-SACK the lower bound is extended to cover sequence space below
  * SND.UNA down to undo_marker, which is the last point of interest. Yet
- * again, DSACK block must not to go across snd_una (for the same reason as
+ * again, D-SACK block must not to go across snd_una (for the same reason as
  * for the normal SACK blocks, explained above). But there all simplicity
  * ends, TCP might receive valid D-SACKs below that. As long as they reside
  * fully below undo_marker they do not affect behavior in anyway and can
@@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 	if (!before(start_seq, tp->snd_nxt))
 		return 0;

-	/* In outstanding window? ...This is valid exit for DSACKs too.
+	/* In outstanding window? ...This is valid exit for D-SACKs too.
 	 * start_seq == snd_una is non-sensical (see comments above)
 	 */
 	if (after(start_seq, tp->snd_una))
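The checks described by the long comment above reduce to mod-2^32 sequence comparisons. A self-contained sketch of the D-SACK-specific part, with before()/after() written out explicitly; the helper name and the simplification to a single in-window test are mine, not the kernel's:

#include <stdint.h>

typedef uint32_t u32;

static inline int before(u32 seq1, u32 seq2)
{
	return (int32_t)(seq1 - seq2) < 0;	/* seq1 < seq2 modulo 2^32 */
}
#define after(seq2, seq1)	before(seq1, seq2)

/* A D-SACK block starting at or below snd_una can still be useful:
 * it must not cross snd_una, and only the part at or above undo_marker
 * matters for undo bookkeeping. */
static int dsack_in_undo_window(u32 start_seq, u32 end_seq,
				u32 snd_una, u32 undo_marker)
{
	return undo_marker &&
	       !before(start_seq, undo_marker) &&	/* not below undo_marker */
	       !after(end_seq, snd_una);		/* does not cross snd_una */
}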
@@ -1204,8 +1204,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
  * which may fail and creates some hassle (caller must handle error case
  * returns).
  */
-int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
-			  u32 start_seq, u32 end_seq)
+static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+				 u32 start_seq, u32 end_seq)
 {
 	int in_sack, err;
 	unsigned int pkt_len;
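Making the function static is the only change here, but its job deserves a sketch: a queued segment is "matched" when a SACK block covers it entirely, and partial overlap is what forces the tcp_fragment() call, hence the error return mentioned in the comment. A hedged illustration reusing the before()/after() helpers from the previous sketch (the function name is mine):

/* Full coverage: the block starts at/before the skb and ends at/after it. */
static int skb_fully_inside_sack(u32 skb_seq, u32 skb_end_seq,
				 u32 sack_start, u32 sack_end)
{
	return !after(sack_start, skb_seq) && !before(sack_end, skb_end_seq);
}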
@@ -1248,6 +1248,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	int cached_fack_count;
 	int i;
 	int first_sack_index;
+	int force_one_sack;

 	if (!tp->sacked_out) {
 		if (WARN_ON(tp->fackets_out))
@@ -1272,18 +1273,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
 	 * and use retrans queue hinting otherwise slowpath */
-	flag = 1;
+	force_one_sack = 1;
 	for (i = 0; i < num_sacks; i++) {
 		__be32 start_seq = sp[i].start_seq;
 		__be32 end_seq = sp[i].end_seq;

 		if (i == 0) {
 			if (tp->recv_sack_cache[i].start_seq != start_seq)
-				flag = 0;
+				force_one_sack = 0;
 		} else {
 			if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
 			    (tp->recv_sack_cache[i].end_seq != end_seq))
-				flag = 0;
+				force_one_sack = 0;
 		}
 		tp->recv_sack_cache[i].start_seq = start_seq;
 		tp->recv_sack_cache[i].end_seq = end_seq;
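What this loop computes is the SACK fastpath test: the freshly received blocks must equal the cached ones exactly, except that the first block's end_seq is allowed to differ (typically it has grown). A standalone sketch of that comparison (struct and function names are mine; raw equality works regardless of byte order, which is why the __be32 values need no conversion):

#include <stdint.h>

typedef uint32_t be32;	/* network byte order; compared only for equality */

struct sack_block { be32 start_seq, end_seq; };

static int only_first_end_seq_changed(const struct sack_block *cache,
				      const struct sack_block *sp,
				      int num_sacks)
{
	int i;

	for (i = 0; i < num_sacks; i++) {
		if (cache[i].start_seq != sp[i].start_seq)
			return 0;
		if (i > 0 && cache[i].end_seq != sp[i].end_seq)
			return 0;
	}
	return 1;	/* fastpath: at most the first block's end_seq moved */
}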
@@ -1295,7 +1296,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	}

 	first_sack_index = 0;
-	if (flag)
+	if (force_one_sack)
 		num_sacks = 1;
 	else {
 		int j;
@@ -1321,9 +1322,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		}
 	}

-	/* clear flag as used for different purpose in following code */
-	flag = 0;
-
 	/* Use SACK fastpath hint if valid */
 	cached_skb = tp->fastpath_skb_hint;
 	cached_fack_count = tp->fastpath_cnt_hint;
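The deleted reset is the point of the whole rename: 'flag' had been doing double duty as the fastpath boolean and as the FLAG_* accumulator, and the mid-function 'flag = 0' separating the two uses wiped out any status bits already collected. A minimal illustration of the pitfall (not kernel code):

int process(void)
{
	int flag = 0;

	flag |= 0x800;	/* phase 1 records a real status bit */

	/* ... the variable is then reused as a plain boolean ... */

	flag = 0;	/* reset for the "different purpose": the status
			 * bit recorded above is silently lost */
	return flag;
}

Giving the boolean its own variable (force_one_sack) makes the reset unnecessary, so the accumulated bits survive to the end of the function.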
@@ -1615,7 +1613,7 @@ void tcp_enter_frto(struct sock *sk)
 	    !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		/* Our state is too optimistic in ssthresh() call because cwnd
-		 * is not reduced until tcp_enter_frto_loss() when previous FRTO
+		 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
 		 * recovery has not yet completed. Pattern would be this: RTO,
 		 * Cumulative ACK, RTO (2xRTO for the same segment does not end
 		 * up here twice).
@@ -1801,7 +1799,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
-	/* Abort FRTO algorithm if one is in progress */
+	/* Abort F-RTO algorithm if one is in progress */
 	tp->frto_counter = 0;
 }

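frto_counter doubles as the F-RTO state variable, and zeroing it is how tcp_enter_loss() aborts the algorithm. A hedged sketch of the lifecycle these hunks hint at (the enum is mine; the kernel uses the raw integer):

enum frto_state {
	FRTO_OFF	= 0,	/* inactive; also how tcp_enter_loss() aborts */
	FRTO_ENTERED	= 1,	/* set on RTO, before any new data is sent */
	FRTO_NEW_SENT	= 2,	/* one new (non-retransmitted) segment sent */
};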
@@ -1946,7 +1944,7 @@ static int tcp_time_to_recover(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;

-	/* Do not perform any recovery during FRTO algorithm */
+	/* Do not perform any recovery during F-RTO algorithm */
 	if (tp->frto_counter)
 		return 0;

@@ -2962,7 +2960,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 	}

 	if (tp->frto_counter == 1) {
-		/* Sending of the next skb must be allowed or no FRTO */
+		/* Sending of the next skb must be allowed or no F-RTO */
 		if (!tcp_send_head(sk) ||
 		    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
 			  tp->snd_una + tp->snd_wnd)) {
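The condition above enforces F-RTO's precondition: its first step transmits one previously unsent segment, so the head of the send queue must exist and must end inside the advertised window. A sketch of the window test, with after() as defined earlier (the helper name is mine):

static int new_segment_fits_window(u32 head_end_seq, u32 snd_una, u32 snd_wnd)
{
	/* The usable window extends from snd_una to snd_una + snd_wnd. */
	return !after(head_end_seq, snd_una + snd_wnd);
}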
