-rw-r--r--	include/linux/tcp.h	2
-rw-r--r--	net/ipv4/tcp_input.c	85
2 files changed, 52 insertions, 35 deletions
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 34acee662230..794497c7d755 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -330,7 +330,7 @@ struct tcp_sock {
 	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
 	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
 
-	struct tcp_sack_block_wire recv_sack_cache[4];
+	struct tcp_sack_block recv_sack_cache[4];
 
 	struct sk_buff *highest_sack;	/* highest skb with SACK received
 					 * (validity guaranteed only if
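For reference, the two block types differ only in byte order: the wire variant holds raw big-endian option data, while the plain variant holds host-order values, which is what lets recv_sack_cache store already-converted sequence numbers. A sketch of the definitions as found nearby in this header (comments mine):

struct tcp_sack_block_wire {
	__be32	start_seq;	/* network (big-endian) byte order */
	__be32	end_seq;
};

struct tcp_sack_block {
	u32	start_seq;	/* host byte order */
	u32	end_seq;
};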
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a62e0f90f566..a287747e9dd6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1340,9 +1340,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned char *ptr = (skb_transport_header(ack_skb) +
 			      TCP_SKB_CB(ack_skb)->sacked);
-	struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
+	struct tcp_sack_block sp[4];
 	struct sk_buff *cached_skb;
 	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
+	int used_sacks;
 	int reord = tp->packets_out;
 	int flag = 0;
 	int found_dup_sack = 0;
@@ -1357,7 +1359,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		tp->highest_sack = tcp_write_queue_head(sk);
 	}
 
-	found_dup_sack = tcp_check_dsack(tp, ack_skb, sp,
+	found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
 					 num_sacks, prior_snd_una);
 	if (found_dup_sack)
 		flag |= FLAG_DSACKING_ACK;
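Note that tcp_check_dsack() still receives the raw wire-format pointer, since D-SACK detection runs before the host-order conversion loop added below. A minimal standalone sketch of the RFC 2883 rule it applies (names like is_dsack and seq_before are mine, and the real function also updates undo state):

#include <stdint.h>
#include <arpa/inet.h>

struct sack_block_wire { uint32_t start_seq, end_seq; }; /* network order */

/* wraparound-safe "seq1 < seq2" */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/*
 * RFC 2883: the first block is a D-SACK if it reports data already
 * covered by the cumulative ACK, or if it lies entirely within the
 * second block.
 */
static int is_dsack(const struct sack_block_wire *sp, int num_sacks,
		    uint32_t prior_snd_una)
{
	uint32_t start0 = ntohl(sp[0].start_seq);
	uint32_t end0 = ntohl(sp[0].end_seq);

	if (seq_before(start0, prior_snd_una))
		return 1;
	if (num_sacks > 1 &&
	    !seq_before(start0, ntohl(sp[1].start_seq)) &&
	    !seq_before(ntohl(sp[1].end_seq), end0))
		return 1;
	return 0;
}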
@@ -1372,14 +1374,49 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	if (!tp->packets_out)
 		goto out;
 
+	used_sacks = 0;
+	first_sack_index = 0;
+	for (i = 0; i < num_sacks; i++) {
+		int dup_sack = !i && found_dup_sack;
+
+		sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
+		sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
+
+		if (!tcp_is_sackblock_valid(tp, dup_sack,
+					    sp[used_sacks].start_seq,
+					    sp[used_sacks].end_seq)) {
+			if (dup_sack) {
+				if (!tp->undo_marker)
+					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+				else
+					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+			} else {
+				/* Don't count olds caused by ACK reordering */
+				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
+				    !after(sp[used_sacks].end_seq, tp->snd_una))
+					continue;
+				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+			}
+			if (i == 0)
+				first_sack_index = -1;
+			continue;
+		}
+
+		/* Ignore very old stuff early */
+		if (!after(sp[used_sacks].end_seq, prior_snd_una))
+			continue;
+
+		used_sacks++;
+	}
+
 	/* SACK fastpath:
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
 	 * and use retrans queue hinting otherwise slowpath */
 	force_one_sack = 1;
-	for (i = 0; i < num_sacks; i++) {
-		__be32 start_seq = sp[i].start_seq;
-		__be32 end_seq = sp[i].end_seq;
+	for (i = 0; i < used_sacks; i++) {
+		u32 start_seq = sp[i].start_seq;
+		u32 end_seq = sp[i].end_seq;
 
 		if (i == 0) {
 			if (tp->recv_sack_cache[i].start_seq != start_seq)
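The conversion above pairs get_unaligned() with ntohl() because TCP options are only byte-aligned: on strict-alignment architectures a direct 32-bit load through sp_wire could fault. A hypothetical userspace analogue of the same read:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Read one big-endian sequence number from an unaligned option buffer. */
static uint32_t read_seq(const unsigned char *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* safe unaligned load */
	return ntohl(v);		/* wire order -> host order */
}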
@@ -1398,19 +1435,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		tp->recv_sack_cache[i].end_seq = 0;
 	}
 
-	first_sack_index = 0;
 	if (force_one_sack)
-		num_sacks = 1;
+		used_sacks = 1;
 	else {
 		int j;
 		tp->fastpath_skb_hint = NULL;
 
 		/* order SACK blocks to allow in order walk of the retrans queue */
-		for (i = num_sacks-1; i > 0; i--) {
+		for (i = used_sacks - 1; i > 0; i--) {
 			for (j = 0; j < i; j++){
-				if (after(ntohl(sp[j].start_seq),
-					  ntohl(sp[j+1].start_seq))){
-					struct tcp_sack_block_wire tmp;
+				if (after(sp[j].start_seq, sp[j+1].start_seq)) {
+					struct tcp_sack_block tmp;
 
 					tmp = sp[j];
 					sp[j] = sp[j+1];
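With at most four blocks, the in-place bubble sort is cheap, and comparing host-order values directly drops the repeated ntohl() calls the old loop needed. A self-contained sketch under the same assumptions (seq_after mirrors the kernel's after() macro; the other names are mine):

#include <stdint.h>

struct sack_block { uint32_t start_seq, end_seq; }; /* host order */

/* wraparound-safe "seq2 comes after seq1" */
static int seq_after(uint32_t seq2, uint32_t seq1)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* Order blocks by start_seq so the retransmit queue is walked once. */
static void sort_sack_blocks(struct sack_block *sp, int n)
{
	int i, j;

	for (i = n - 1; i > 0; i--) {
		for (j = 0; j < i; j++) {
			if (seq_after(sp[j].start_seq, sp[j + 1].start_seq)) {
				struct sack_block tmp = sp[j];

				sp[j] = sp[j + 1];
				sp[j + 1] = tmp;
			}
		}
	}
}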
@@ -1433,32 +1468,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		cached_fack_count = 0;
 	}
 
-	for (i = 0; i < num_sacks; i++) {
+	for (i = 0; i < used_sacks; i++) {
 		struct sk_buff *skb;
-		__u32 start_seq = ntohl(sp->start_seq);
-		__u32 end_seq = ntohl(sp->end_seq);
+		u32 start_seq = sp[i].start_seq;
+		u32 end_seq = sp[i].end_seq;
 		int fack_count;
 		int dup_sack = (found_dup_sack && (i == first_sack_index));
 		int next_dup = (found_dup_sack && (i+1 == first_sack_index));
 
-		sp++;
-
-		if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
-			if (dup_sack) {
-				if (!tp->undo_marker)
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
-				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
-			} else {
-				/* Don't count olds caused by ACK reordering */
-				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
-				    !after(end_seq, tp->snd_una))
-					continue;
-				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
-			}
-			continue;
-		}
-
 		skb = cached_skb;
 		fack_count = cached_fack_count;
 
@@ -1489,8 +1506,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
 		/* Due to sorting DSACK may reside within this SACK block! */
 		if (next_dup) {
-			u32 dup_start = ntohl(sp->start_seq);
-			u32 dup_end = ntohl(sp->end_seq);
+			u32 dup_start = sp[i+1].start_seq;
+			u32 dup_end = sp[i+1].end_seq;
 
 			if (before(TCP_SKB_CB(skb)->seq, dup_end)) {
 				in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end);
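The before()/after() checks here, like everywhere in this function, compare 32-bit sequence numbers with wraparound-safe serial arithmetic rather than plain '<'. A minimal, hypothetical demonstration of the idea:

#include <assert.h>
#include <stdint.h>

/* kernel-style before(): true when seq1 precedes seq2 modulo 2^32 */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	assert(seq_before(1000, 2000));		/* plain ordering */
	assert(seq_before(0xfffffff0u, 0x10u));	/* across the 2^32 wrap */
	assert(!seq_before(0x10u, 0xfffffff0u));
	return 0;
}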