author     David S. Miller <davem@sunset.davemloft.net>  2007-03-07 15:12:44 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2007-04-26 01:24:02 -0400
commit     fe067e8ab5e0dc5ca3c54634924c628da92090b4 (patch)
tree       98f5a6ebbb770f16682cfc52caea2da1e7eeb73b /net/ipv4/tcp_input.c
parent     02ea4923b4997d7e1310c027081f46d584b9d714 (diff)
[TCP]: Abstract out all write queue operations.

This allows the write queue implementation to be changed,
for example, to one which allows fast interval searching.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
 -rw-r--r--  net/ipv4/tcp_input.c | 64 +++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 41 insertions(+), 23 deletions(-)
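Before reading the hunks, it helps to know the shape of the accessors the converted code calls. Their names (tcp_write_queue_head, tcp_write_queue_next, tcp_send_head, tcp_for_write_queue, tcp_for_write_queue_from, tcp_unlink_write_queue) all appear in the diff below, but their definitions live outside this file (presumably include/net/tcp.h) and are not shown by this tcp_input.c-limited diffstat. What follows is a minimal sketch of plausible definitions, assuming the write queue is still the plain sk_buff list it wraps; treat the bodies as illustrative, not verbatim:

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	/* Same semantics as the skb_peek() calls it replaces:
	 * NULL when the queue is empty (illustrative body). */
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk,
						   struct sk_buff *skb)
{
	return skb->next;		/* next skb on the queue (illustrative) */
}

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;	/* first not-yet-sent skb, or NULL */
}

/* Walk the whole queue; unlike the old sk_stream_for_retrans_queue(),
 * these do NOT stop at the send head -- callers break out explicitly. */
#define tcp_for_write_queue(skb, sk)					\
	for (skb = (sk)->sk_write_queue.next;				\
	     skb != (struct sk_buff *)&(sk)->sk_write_queue;		\
	     skb = skb->next)

#define tcp_for_write_queue_from(skb, sk)				\
	for (; skb != (struct sk_buff *)&(sk)->sk_write_queue;		\
	     skb = skb->next)

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);	/* what the open-coded call did */
}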
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d0a3630f41a7..22d0bb03c5da 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1044,7 +1044,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	cached_skb = tp->fastpath_skb_hint;
 	cached_fack_count = tp->fastpath_cnt_hint;
 	if (!cached_skb) {
-		cached_skb = sk->sk_write_queue.next;
+		cached_skb = tcp_write_queue_head(sk);
 		cached_fack_count = 0;
 	}
 
@@ -1061,10 +1061,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		if (after(end_seq, tp->high_seq))
 			flag |= FLAG_DATA_LOST;
 
-		sk_stream_for_retrans_queue_from(skb, sk) {
+		tcp_for_write_queue_from(skb, sk) {
 			int in_sack, pcount;
 			u8 sacked;
 
+			if (skb == tcp_send_head(sk))
+				break;
+
 			cached_skb = skb;
 			cached_fack_count = fack_count;
 			if (i == first_sack_index) {
@@ -1213,7 +1216,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
 		struct sk_buff *skb;
 
-		sk_stream_for_retrans_queue(skb, sk) {
+		tcp_for_write_queue(skb, sk) {
+			if (skb == tcp_send_head(sk))
+				break;
 			if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
 				break;
 			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
@@ -1266,8 +1271,8 @@ int tcp_use_frto(struct sock *sk)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
-	if (!sysctl_tcp_frto || !sk->sk_send_head ||
-	    after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+	if (!sysctl_tcp_frto || !tcp_send_head(sk) ||
+	    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
 	      tp->snd_una + tp->snd_wnd))
 		return 0;
 
1273 1278
@@ -1278,8 +1283,11 @@ int tcp_use_frto(struct sock *sk)
 	if (tp->retrans_out > 1)
 		return 0;
 
-	skb = skb_peek(&sk->sk_write_queue)->next;	/* Skips head */
-	sk_stream_for_retrans_queue_from(skb, sk) {
+	skb = tcp_write_queue_head(sk);
+	skb = tcp_write_queue_next(sk, skb);	/* Skips head */
+	tcp_for_write_queue_from(skb, sk) {
+		if (skb == tcp_send_head(sk))
+			break;
 		if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
 			return 0;
 		/* Short-circuit when first non-SACKed skb has been checked */
@@ -1343,7 +1351,7 @@ void tcp_enter_frto(struct sock *sk)
 	tp->undo_marker = tp->snd_una;
 	tp->undo_retrans = 0;
 
-	skb = skb_peek(&sk->sk_write_queue);
+	skb = tcp_write_queue_head(sk);
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		tp->retrans_out -= tcp_skb_pcount(skb);
@@ -1380,7 +1388,9 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->fackets_out = 0;
 	tp->retrans_out = 0;
 
-	sk_stream_for_retrans_queue(skb, sk) {
+	tcp_for_write_queue(skb, sk) {
+		if (skb == tcp_send_head(sk))
+			break;
 		cnt += tcp_skb_pcount(skb);
 		/*
 		 * Count the retransmission made on RTO correctly (only when
@@ -1468,7 +1478,9 @@ void tcp_enter_loss(struct sock *sk, int how)
 	if (!how)
 		tp->undo_marker = tp->snd_una;
 
-	sk_stream_for_retrans_queue(skb, sk) {
+	tcp_for_write_queue(skb, sk) {
+		if (skb == tcp_send_head(sk))
+			break;
 		cnt += tcp_skb_pcount(skb);
 		if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
 			tp->undo_marker = 0;
@@ -1503,14 +1515,14 @@ static int tcp_check_sack_reneging(struct sock *sk)
 	 * receiver _host_ is heavily congested (or buggy).
 	 * Do processing similar to RTO timeout.
 	 */
-	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
+	if ((skb = tcp_write_queue_head(sk)) != NULL &&
 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
 		icsk->icsk_retransmits++;
-		tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
+		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 					  icsk->icsk_rto, TCP_RTO_MAX);
 		return 1;
@@ -1531,7 +1543,7 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
 {
 	return tp->packets_out &&
-	       tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
+	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
 
 /* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1726,11 +1738,13 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 		skb = tp->lost_skb_hint;
 		cnt = tp->lost_cnt_hint;
 	} else {
-		skb = sk->sk_write_queue.next;
+		skb = tcp_write_queue_head(sk);
 		cnt = 0;
 	}
 
-	sk_stream_for_retrans_queue_from(skb, sk) {
+	tcp_for_write_queue_from(skb, sk) {
+		if (skb == tcp_send_head(sk))
+			break;
 		/* TODO: do this better */
 		/* this is not the most efficient way to do this... */
 		tp->lost_skb_hint = skb;
@@ -1777,9 +1791,11 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 		struct sk_buff *skb;
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-			: sk->sk_write_queue.next;
+			: tcp_write_queue_head(sk);
 
-		sk_stream_for_retrans_queue_from(skb, sk) {
+		tcp_for_write_queue_from(skb, sk) {
+			if (skb == tcp_send_head(sk))
+				break;
 			if (!tcp_skb_timedout(sk, skb))
 				break;
 
@@ -1970,7 +1986,9 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 {
 	if (tcp_may_undo(tp)) {
 		struct sk_buff *skb;
-		sk_stream_for_retrans_queue(skb, sk) {
+		tcp_for_write_queue(skb, sk) {
+			if (skb == tcp_send_head(sk))
+				break;
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
 		}
 
@@ -2382,8 +2400,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		= icsk->icsk_ca_ops->rtt_sample;
 	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
 
-	while ((skb = skb_peek(&sk->sk_write_queue)) &&
-	       skb != sk->sk_send_head) {
+	while ((skb = tcp_write_queue_head(sk)) &&
+	       skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
 		__u8 sacked = scb->sacked;
 
@@ -2446,7 +2464,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		}
 		tcp_dec_pcount_approx(&tp->fackets_out, skb);
 		tcp_packets_out_dec(tp, skb);
-		__skb_unlink(skb, &sk->sk_write_queue);
+		tcp_unlink_write_queue(skb, sk);
 		sk_stream_free_skb(sk, skb);
 		clear_all_retrans_hints(tp);
 	}
@@ -2495,7 +2513,7 @@ static void tcp_ack_probe(struct sock *sk)
 
 	/* Was it a usable window open? */
 
-	if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+	if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
 		   tp->snd_una + tp->snd_wnd)) {
 		icsk->icsk_backoff = 0;
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
@@ -2795,7 +2813,7 @@ no_queue:
 	 * being used to time the probes, and is probably far higher than
 	 * it needs to be for normal retransmission.
 	 */
-	if (sk->sk_send_head)
+	if (tcp_send_head(sk))
 		tcp_ack_probe(sk);
 	return 1;
 
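A design note on the recurring hunk shape: as the conversions imply, the old sk_stream_for_retrans_queue() iterators stopped at sk->sk_send_head on the caller's behalf, whereas the new tcp_for_write_queue() variants traverse the entire write queue. That is why nearly every converted loop in this patch gains an explicit guard as its first statement; schematically (names taken from the diff above):

	struct sk_buff *skb;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;		/* reached data not yet sent */
		/* ... operate on a sent-but-unacked skb ... */
	}

Moving the termination test into the callers presumably keeps the iterator decoupled from send-head bookkeeping, so a future queue implementation (e.g. one supporting the fast interval searching mentioned in the changelog) can redefine traversal without revisiting every loop.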