author    Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>  2008-12-06 01:42:22 -0500
committer David S. Miller <davem@davemloft.net>      2008-12-06 01:42:22 -0500
commit    a1197f5a6faa23e5d0c1f8ed97b011deb2a75457 (patch)
tree      37c828e0fee0c8adea50b69fb2d8659668ac17c3 /net/ipv4/tcp_input.c
parent    775ffabf77a648d78fe1d20cb3a620e771abb921 (diff)
tcp: introduce struct tcp_sacktag_state to reduce arg pressure
There are just too many args to some sacktag functions. This idea was
first proposed by David S. Miller around a year ago, and the current
situation is much worse than what it was back then.

tcp_sacktag_one can be made a bit simpler by returning the new sacked
value (a single variable suffices; the previous code cached sacked into
a local variable, so the intermediate state is not exactly equal, but
the results are the same).

codiff on x86_64:
  tcp_sacktag_one         |  -15
  tcp_shifted_skb         |  -50
  tcp_match_skb_to_sack   |   -1
  tcp_sacktag_walk        |  -64
  tcp_sacktag_write_queue |  -59
  tcp_urg                 |   +1
  tcp_event_data_recv     |   -1
 7 functions changed, 1 bytes added, 190 bytes removed, diff: -189

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
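As an aside (not part of the patch), the shape of the refactoring is easy to see in a minimal,
self-contained C sketch: the scalar out-parameters (reord, fack_count, flag) move into one state
struct passed by pointer, and the tag helper returns the updated sacked value instead of writing
it through a pointer. All names below (walk_state, tag_one, the EX_* bits) are hypothetical
illustrations of the pattern, not the kernel's identifiers.

#include <stdio.h>

/* Hypothetical stand-in for the bookkeeping the sacktag walk carries around. */
struct walk_state {
        int reord;              /* lowest fack_count at which reordering was noticed */
        int fack_count;         /* running forward-ACK counter */
        int flag;               /* accumulated FLAG_*-style bits */
};

#define EX_FLAG_DATA_SACKED     0x01    /* illustrative flag bit */
#define EX_SACKED_ACKED         0x02    /* illustrative tag bit */

/* Before: a helper of this kind took int *reord, int fack_count, u8 *sacked,
 * int *flag, ...; after: one state pointer goes in and the updated tag value
 * comes back as the return value. */
static unsigned char tag_one(struct walk_state *state, unsigned char sacked,
                             int pcount, int reordering_seen)
{
        if (reordering_seen && state->fack_count < state->reord)
                state->reord = state->fack_count;
        state->flag |= EX_FLAG_DATA_SACKED;
        state->fack_count += pcount;
        return sacked | EX_SACKED_ACKED;
}

int main(void)
{
        struct walk_state state = { .reord = 100, .fack_count = 0, .flag = 0 };
        unsigned char sacked = 0;

        /* The caller stores the returned tag back itself, much like
         * tcp_sacktag_walk() now assigns TCP_SKB_CB(skb)->sacked. */
        sacked = tag_one(&state, sacked, 3, 1);

        printf("sacked=0x%x flag=0x%x reord=%d fack_count=%d\n",
               (unsigned int)sacked, (unsigned int)state.flag,
               state.reord, state.fack_count);
        return 0;
}

Each call site then passes a single pointer instead of three or four scalars, which is the
"arg pressure" reduction the codiff numbers above reflect.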
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 145
1 file changed, 74 insertions(+), 71 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 21c670190780..e25827719e70 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1237,6 +1237,12 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
         return dup_sack;
 }

+struct tcp_sacktag_state {
+        int reord;
+        int fack_count;
+        int flag;
+};
+
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
  * the incoming SACK may not exactly match but we can find smaller MSS
  * aligned portion of it that matches. Therefore we might need to fragment
@@ -1290,25 +1296,25 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
         return in_sack;
 }

-static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
-                           int *reord, int dup_sack, int fack_count,
-                           u8 *sackedto, int pcount)
+static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+                          struct tcp_sacktag_state *state,
+                          int dup_sack, int pcount)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         u8 sacked = TCP_SKB_CB(skb)->sacked;
-        int flag = 0;
+        int fack_count = state->fack_count;

         /* Account D-SACK for retransmitted packet. */
         if (dup_sack && (sacked & TCPCB_RETRANS)) {
                 if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
                         tp->undo_retrans--;
                 if (sacked & TCPCB_SACKED_ACKED)
-                        *reord = min(fack_count, *reord);
+                        state->reord = min(fack_count, state->reord);
         }

         /* Nothing to do; acked frame is about to be dropped (was ACKed). */
         if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-                return flag;
+                return sacked;

         if (!(sacked & TCPCB_SACKED_ACKED)) {
                 if (sacked & TCPCB_SACKED_RETRANS) {
@@ -1317,7 +1323,7 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
                          * that retransmission is still in flight.
                          */
                         if (sacked & TCPCB_LOST) {
-                                *sackedto &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+                                sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
                                 tp->lost_out -= pcount;
                                 tp->retrans_out -= pcount;
                         }
@@ -1328,21 +1334,22 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
                                  */
                                 if (before(TCP_SKB_CB(skb)->seq,
                                            tcp_highest_sack_seq(tp)))
-                                        *reord = min(fack_count, *reord);
+                                        state->reord = min(fack_count,
+                                                           state->reord);

                                 /* SACK enhanced F-RTO (RFC4138; Appendix B) */
                                 if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
-                                        flag |= FLAG_ONLY_ORIG_SACKED;
+                                        state->flag |= FLAG_ONLY_ORIG_SACKED;
                         }

                         if (sacked & TCPCB_LOST) {
-                                *sackedto &= ~TCPCB_LOST;
+                                sacked &= ~TCPCB_LOST;
                                 tp->lost_out -= pcount;
                         }
                 }

-                *sackedto |= TCPCB_SACKED_ACKED;
-                flag |= FLAG_DATA_SACKED;
+                sacked |= TCPCB_SACKED_ACKED;
+                state->flag |= FLAG_DATA_SACKED;
                 tp->sacked_out += pcount;

                 fack_count += pcount;
@@ -1361,21 +1368,20 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
          * frames and clear it. undo_retrans is decreased above, L|R frames
          * are accounted above as well.
          */
-        if (dup_sack && (*sackedto & TCPCB_SACKED_RETRANS)) {
-                *sackedto &= ~TCPCB_SACKED_RETRANS;
+        if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
+                sacked &= ~TCPCB_SACKED_RETRANS;
                 tp->retrans_out -= pcount;
         }

-        return flag;
+        return sacked;
 }

 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
-                           struct sk_buff *skb, unsigned int pcount,
-                           int shifted, int fack_count, int *reord,
-                           int *flag, int mss)
+                           struct sk_buff *skb,
+                           struct tcp_sacktag_state *state,
+                           unsigned int pcount, int shifted, int mss)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        u8 dummy_sacked = TCP_SKB_CB(skb)->sacked; /* We discard results */

         BUG_ON(!pcount);

@@ -1407,8 +1413,8 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
                 skb_shinfo(skb)->gso_type = 0;
         }

-        *flag |= tcp_sacktag_one(skb, sk, reord, 0, fack_count, &dummy_sacked,
-                                 pcount);
+        /* We discard results */
+        tcp_sacktag_one(skb, sk, state, 0, pcount);

         /* Difference in this won't matter, both ACKed by the same cumul. ACK */
         TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1460,9 +1466,9 @@ static int skb_can_shift(struct sk_buff *skb)
  * skb.
  */
 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+                                          struct tcp_sacktag_state *state,
                                           u32 start_seq, u32 end_seq,
-                                          int dup_sack, int *fack_count,
-                                          int *reord, int *flag)
+                                          int dup_sack)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *prev;
@@ -1559,8 +1565,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,

         if (!skb_shift(prev, skb, len))
                 goto fallback;
-        if (!tcp_shifted_skb(sk, prev, skb, pcount, len, *fack_count, reord,
-                             flag, mss))
+        if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss))
                 goto out;

         /* Hole filled allows collapsing with the next as well, this is very
@@ -1579,12 +1584,12 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
         len = skb->len;
         if (skb_shift(prev, skb, len)) {
                 pcount += tcp_skb_pcount(skb);
-                tcp_shifted_skb(sk, prev, skb, tcp_skb_pcount(skb), len,
-                                *fack_count, reord, flag, mss);
+                tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), len,
+                                mss);
         }

 out:
-        *fack_count += pcount;
+        state->fack_count += pcount;
         return prev;

 noop:
@@ -1597,9 +1602,9 @@ fallback:

 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                         struct tcp_sack_block *next_dup,
+                                        struct tcp_sacktag_state *state,
                                         u32 start_seq, u32 end_seq,
-                                        int dup_sack_in, int *fack_count,
-                                        int *reord, int *flag)
+                                        int dup_sack_in)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *tmp;
@@ -1629,9 +1634,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                  * so not even _safe variant of the loop is enough.
                  */
                 if (in_sack <= 0) {
-                        tmp = tcp_shift_skb_data(sk, skb, start_seq,
-                                                 end_seq, dup_sack,
-                                                 fack_count, reord, flag);
+                        tmp = tcp_shift_skb_data(sk, skb, state,
+                                                 start_seq, end_seq, dup_sack);
                         if (tmp != NULL) {
                                 if (tmp != skb) {
                                         skb = tmp;
@@ -1650,17 +1654,17 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                         break;

                 if (in_sack) {
-                        *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack,
-                                                 *fack_count,
-                                                 &(TCP_SKB_CB(skb)->sacked),
-                                                 tcp_skb_pcount(skb));
+                        TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
+                                                                  state,
+                                                                  dup_sack,
+                                                                  tcp_skb_pcount(skb));

                         if (!before(TCP_SKB_CB(skb)->seq,
                                     tcp_highest_sack_seq(tp)))
                                 tcp_advance_highest_sack(sk, skb);
                 }

-                *fack_count += tcp_skb_pcount(skb);
+                state->fack_count += tcp_skb_pcount(skb);
         }
         return skb;
 }
@@ -1669,7 +1673,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
  * a normal way
  */
 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
-                                        u32 skip_to_seq, int *fack_count)
+                                        struct tcp_sacktag_state *state,
+                                        u32 skip_to_seq)
 {
         tcp_for_write_queue_from(skb, sk) {
                 if (skb == tcp_send_head(sk))
@@ -1678,7 +1683,7 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
                 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
                         break;

-                *fack_count += tcp_skb_pcount(skb);
+                state->fack_count += tcp_skb_pcount(skb);
         }
         return skb;
 }
@@ -1686,18 +1691,17 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
                                                 struct sock *sk,
                                                 struct tcp_sack_block *next_dup,
-                                                u32 skip_to_seq,
-                                                int *fack_count, int *reord,
-                                                int *flag)
+                                                struct tcp_sacktag_state *state,
+                                                u32 skip_to_seq)
 {
         if (next_dup == NULL)
                 return skb;

         if (before(next_dup->start_seq, skip_to_seq)) {
-                skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
-                skb = tcp_sacktag_walk(skb, sk, NULL,
+                skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
+                skb = tcp_sacktag_walk(skb, sk, NULL, state,
                                        next_dup->start_seq, next_dup->end_seq,
-                                       1, fack_count, reord, flag);
+                                       1);
         }

         return skb;
@@ -1719,16 +1723,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
         struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
         struct tcp_sack_block sp[TCP_NUM_SACKS];
         struct tcp_sack_block *cache;
+        struct tcp_sacktag_state state;
         struct sk_buff *skb;
         int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
         int used_sacks;
-        int reord = tp->packets_out;
-        int flag = 0;
         int found_dup_sack = 0;
-        int fack_count;
         int i, j;
         int first_sack_index;

+        state.flag = 0;
+        state.reord = tp->packets_out;
+
         if (!tp->sacked_out) {
                 if (WARN_ON(tp->fackets_out))
                         tp->fackets_out = 0;
@@ -1738,7 +1743,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
         found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
                                          num_sacks, prior_snd_una);
         if (found_dup_sack)
-                flag |= FLAG_DSACKING_ACK;
+                state.flag |= FLAG_DSACKING_ACK;

         /* Eliminate too old ACKs, but take into
          * account more or less fresh ones, they can
@@ -1807,7 +1812,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
         }

         skb = tcp_write_queue_head(sk);
-        fack_count = 0;
+        state.fack_count = 0;
         i = 0;

         if (!tp->sacked_out) {
@@ -1832,7 +1837,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,

                 /* Event "B" in the comment above. */
                 if (after(end_seq, tp->high_seq))
-                        flag |= FLAG_DATA_LOST;
+                        state.flag |= FLAG_DATA_LOST;

                 /* Skip too early cached blocks */
                 while (tcp_sack_cache_ok(tp, cache) &&
@@ -1845,13 +1850,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,

                         /* Head todo? */
                         if (before(start_seq, cache->start_seq)) {
-                                skb = tcp_sacktag_skip(skb, sk, start_seq,
-                                                       &fack_count);
+                                skb = tcp_sacktag_skip(skb, sk, &state,
+                                                       start_seq);
                                 skb = tcp_sacktag_walk(skb, sk, next_dup,
+                                                       &state,
                                                        start_seq,
                                                        cache->start_seq,
-                                                       dup_sack, &fack_count,
-                                                       &reord, &flag);
+                                                       dup_sack);
                         }

                         /* Rest of the block already fully processed? */
@@ -1859,9 +1864,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                 goto advance_sp;

                         skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
-                                                       cache->end_seq,
-                                                       &fack_count, &reord,
-                                                       &flag);
+                                                       &state,
+                                                       cache->end_seq);

                         /* ...tail remains todo... */
                         if (tcp_highest_sack_seq(tp) == cache->end_seq) {
@@ -1869,13 +1873,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                 skb = tcp_highest_sack(sk);
                                 if (skb == NULL)
                                         break;
-                                fack_count = tp->fackets_out;
+                                state.fack_count = tp->fackets_out;
                                 cache++;
                                 goto walk;
                         }

-                        skb = tcp_sacktag_skip(skb, sk, cache->end_seq,
-                                               &fack_count);
+                        skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
                         /* Check overlap against next cached too (past this one already) */
                         cache++;
                         continue;
@@ -1885,20 +1888,20 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                         skb = tcp_highest_sack(sk);
                         if (skb == NULL)
                                 break;
-                        fack_count = tp->fackets_out;
+                        state.fack_count = tp->fackets_out;
                 }
-                skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
+                skb = tcp_sacktag_skip(skb, sk, &state, start_seq);

 walk:
-                skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
-                                       dup_sack, &fack_count, &reord, &flag);
+                skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+                                       start_seq, end_seq, dup_sack);

 advance_sp:
                 /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
                  * due to in-order walk
                  */
                 if (after(end_seq, tp->frto_highmark))
-                        flag &= ~FLAG_ONLY_ORIG_SACKED;
+                        state.flag &= ~FLAG_ONLY_ORIG_SACKED;

                 i++;
         }
@@ -1915,10 +1918,10 @@ advance_sp:

         tcp_verify_left_out(tp);

-        if ((reord < tp->fackets_out) &&
+        if ((state.reord < tp->fackets_out) &&
             ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
             (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
-                tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+                tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);

 out:

@@ -1928,7 +1931,7 @@ out:
         WARN_ON((int)tp->retrans_out < 0);
         WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-        return flag;
+        return state.flag;
 }

 /* Limits sacked_out so that sum with lost_out isn't ever larger than