author     David S. Miller <davem@sunset.davemloft.net>    2007-03-07 15:12:44 -0500
committer  David S. Miller <davem@sunset.davemloft.net>    2007-04-26 01:24:02 -0400
commit     fe067e8ab5e0dc5ca3c54634924c628da92090b4 (patch)
tree       98f5a6ebbb770f16682cfc52caea2da1e7eeb73b /net/ipv4/tcp_output.c
parent     02ea4923b4997d7e1310c027081f46d584b9d714 (diff)
[TCP]: Abstract out all write queue operations.
This allows the write queue implementation to be changed,
for example, to one which allows fast interval searching.
Signed-off-by: David S. Miller <davem@davemloft.net>
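
The helpers this patch switches tcp_output.c over to (tcp_send_head(), tcp_advance_send_head(), tcp_add_write_queue_tail(), and friends) are defined in include/net/tcp.h, which falls outside this diffstat-limited view. As a rough sketch of the abstraction, reconstructed from the open-coded sequences the patch deletes below — treat the exact bodies as assumptions, not the header's verbatim contents:

    /* Sketch of the write-queue accessors, reconstructed from the
     * open-coded logic removed from tcp_output.c in this patch; the
     * real definitions live in include/net/tcp.h. */
    static inline struct sk_buff *tcp_send_head(struct sock *sk)
    {
            return sk->sk_send_head;
    }

    static inline struct sk_buff *tcp_write_queue_next(struct sock *sk,
                                                       struct sk_buff *skb)
    {
            return skb->next;
    }

    static inline int tcp_skb_is_last(const struct sock *sk,
                                      const struct sk_buff *skb)
    {
            /* Same test as the helper removed from tcp_output.c below. */
            return skb->next == (struct sk_buff *)&sk->sk_write_queue;
    }

    static inline void tcp_advance_send_head(struct sock *sk,
                                             struct sk_buff *skb)
    {
            /* Matches the logic deleted from update_send_head(). */
            sk->sk_send_head = skb->next;
            if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
                    sk->sk_send_head = NULL;
    }

    static inline void tcp_add_write_queue_tail(struct sock *sk,
                                                struct sk_buff *skb)
    {
            __skb_queue_tail(&sk->sk_write_queue, skb);

            /* Matches the "remember where we must start sending" logic
             * deleted from tcp_queue_skb(). */
            if (sk->sk_send_head == NULL)
                    sk->sk_send_head = skb;
    }

With every caller routed through accessors like these instead of touching sk->sk_write_queue and sk->sk_send_head directly, the underlying queue can later be swapped for a structure with fast interval searching without touching tcp_output.c again.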
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c  95
1 file changed, 44 insertions(+), 51 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d19b2f3b70fd..2a62b55b15f1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,9 +65,7 @@ int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 static void update_send_head(struct sock *sk, struct tcp_sock *tp,
                              struct sk_buff *skb)
 {
-        sk->sk_send_head = skb->next;
-        if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
-                sk->sk_send_head = NULL;
+        tcp_advance_send_head(sk, skb);
         tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
         tcp_packets_out_inc(sk, tp, skb);
 }
@@ -567,12 +565,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
         /* Advance write_seq and place onto the write_queue. */
         tp->write_seq = TCP_SKB_CB(skb)->end_seq;
         skb_header_release(skb);
-        __skb_queue_tail(&sk->sk_write_queue, skb);
+        tcp_add_write_queue_tail(sk, skb);
         sk_charge_skb(sk, skb);
-
-        /* Queue it, remembering where we must start sending. */
-        if (sk->sk_send_head == NULL)
-                sk->sk_send_head = skb;
 }
 
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
@@ -705,7 +699,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 
         /* Link BUFF into the send queue. */
         skb_header_release(buff);
-        __skb_append(skb, buff, &sk->sk_write_queue);
+        tcp_insert_write_queue_after(skb, buff, sk);
 
         return 0;
 }
@@ -1056,7 +1050,7 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, uns
         return !after(end_seq, tp->snd_una + tp->snd_wnd);
 }
 
-/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
+/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
  * should be put on the wire right now. If so, it returns the number of
  * packets allowed by the congestion window.
  */
@@ -1079,15 +1073,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
         return cwnd_quota;
 }
 
-static inline int tcp_skb_is_last(const struct sock *sk,
-                                  const struct sk_buff *skb)
-{
-        return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
 int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
 {
-        struct sk_buff *skb = sk->sk_send_head;
+        struct sk_buff *skb = tcp_send_head(sk);
 
         return (skb &&
                 tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
@@ -1143,7 +1131,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
         /* Link BUFF into the send queue. */
         skb_header_release(buff);
-        __skb_append(skb, buff, &sk->sk_write_queue);
+        tcp_insert_write_queue_after(skb, buff, sk);
 
         return 0;
 }
@@ -1249,10 +1237,10 @@ static int tcp_mtu_probe(struct sock *sk)
 
         /* Have enough data in the send queue to probe? */
         len = 0;
-        if ((skb = sk->sk_send_head) == NULL)
+        if ((skb = tcp_send_head(sk)) == NULL)
                 return -1;
         while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
-                skb = skb->next;
+                skb = tcp_write_queue_next(sk, skb);
         if (len < probe_size)
                 return -1;
 
@@ -1279,9 +1267,9 @@ static int tcp_mtu_probe(struct sock *sk)
                 return -1;
         sk_charge_skb(sk, nskb);
 
-        skb = sk->sk_send_head;
-        __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
-        sk->sk_send_head = nskb;
+        skb = tcp_send_head(sk);
+        tcp_insert_write_queue_before(nskb, skb, sk);
+        tcp_advance_send_head(sk, skb);
 
         TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
         TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
@@ -1292,7 +1280,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
         len = 0;
         while (len < probe_size) {
-                next = skb->next;
+                next = tcp_write_queue_next(sk, skb);
 
                 copy = min_t(int, skb->len, probe_size - len);
                 if (nskb->ip_summed)
@@ -1305,7 +1293,7 @@ static int tcp_mtu_probe(struct sock *sk)
                         /* We've eaten all the data from this skb.
                          * Throw it away. */
                         TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
-                        __skb_unlink(skb, &sk->sk_write_queue);
+                        tcp_unlink_write_queue(skb, sk);
                         sk_stream_free_skb(sk, skb);
                 } else {
                         TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
@@ -1377,7 +1365,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                 sent_pkts = 1;
         }
 
-        while ((skb = sk->sk_send_head)) {
+        while ((skb = tcp_send_head(sk))) {
                 unsigned int limit;
 
                 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
@@ -1435,7 +1423,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                 tcp_cwnd_validate(sk, tp);
                 return 0;
         }
-        return !tp->packets_out && sk->sk_send_head;
+        return !tp->packets_out && tcp_send_head(sk);
 }
 
 /* Push out any pending frames which were held back due to
@@ -1445,7 +1433,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
                                unsigned int cur_mss, int nonagle)
 {
-        struct sk_buff *skb = sk->sk_send_head;
+        struct sk_buff *skb = tcp_send_head(sk);
 
         if (skb) {
                 if (tcp_write_xmit(sk, cur_mss, nonagle))
@@ -1459,7 +1447,7 @@ void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
 void tcp_push_one(struct sock *sk, unsigned int mss_now)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        struct sk_buff *skb = sk->sk_send_head;
+        struct sk_buff *skb = tcp_send_head(sk);
         unsigned int tso_segs, cwnd_quota;
 
         BUG_ON(!skb || skb->len < mss_now);
@@ -1620,7 +1608,7 @@ u32 __tcp_select_window(struct sock *sk)
 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        struct sk_buff *next_skb = skb->next;
+        struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
 
         /* The first test we must make is that neither of these two
          * SKB's are still referenced by someone else.
@@ -1652,7 +1640,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
         clear_all_retrans_hints(tp);
 
         /* Ok. We will be able to collapse the packet. */
-        __skb_unlink(next_skb, &sk->sk_write_queue);
+        tcp_unlink_write_queue(next_skb, sk);
 
         memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
 
@@ -1706,7 +1694,9 @@ void tcp_simple_retransmit(struct sock *sk)
         unsigned int mss = tcp_current_mss(sk, 0);
         int lost = 0;
 
-        sk_stream_for_retrans_queue(skb, sk) {
+        tcp_for_write_queue(skb, sk) {
+                if (skb == tcp_send_head(sk))
+                        break;
                 if (skb->len > mss &&
                     !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
                         if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
@@ -1790,10 +1780,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         /* Collapse two adjacent packets if worthwhile and we can. */
         if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
            (skb->len < (cur_mss >> 1)) &&
-           (skb->next != sk->sk_send_head) &&
-           (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
-           (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
-           (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
+           (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
+           (!tcp_skb_is_last(sk, skb)) &&
+           (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
+           (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
            (sysctl_tcp_retrans_collapse != 0))
                 tcp_retrans_try_collapse(sk, skb, cur_mss);
 
@@ -1872,15 +1862,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                 skb = tp->retransmit_skb_hint;
                 packet_cnt = tp->retransmit_cnt_hint;
         }else{
-                skb = sk->sk_write_queue.next;
+                skb = tcp_write_queue_head(sk);
                 packet_cnt = 0;
         }
 
         /* First pass: retransmit lost packets. */
         if (tp->lost_out) {
-                sk_stream_for_retrans_queue_from(skb, sk) {
+                tcp_for_write_queue_from(skb, sk) {
                         __u8 sacked = TCP_SKB_CB(skb)->sacked;
 
+                        if (skb == tcp_send_head(sk))
+                                break;
                         /* we could do better than to assign each time */
                         tp->retransmit_skb_hint = skb;
                         tp->retransmit_cnt_hint = packet_cnt;
@@ -1906,8 +1898,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                         else
                                 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
 
-                        if (skb ==
-                            skb_peek(&sk->sk_write_queue))
+                        if (skb == tcp_write_queue_head(sk))
                                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                           inet_csk(sk)->icsk_rto,
                                                           TCP_RTO_MAX);
@@ -1944,11 +1935,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                 skb = tp->forward_skb_hint;
                 packet_cnt = tp->forward_cnt_hint;
         } else{
-                skb = sk->sk_write_queue.next;
+                skb = tcp_write_queue_head(sk);
                 packet_cnt = 0;
         }
 
-        sk_stream_for_retrans_queue_from(skb, sk) {
+        tcp_for_write_queue_from(skb, sk) {
+                if (skb == tcp_send_head(sk))
+                        break;
                 tp->forward_cnt_hint = packet_cnt;
                 tp->forward_skb_hint = skb;
 
@@ -1973,7 +1966,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                         break;
                 }
 
-                if (skb == skb_peek(&sk->sk_write_queue))
+                if (skb == tcp_write_queue_head(sk))
                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                   inet_csk(sk)->icsk_rto,
                                                   TCP_RTO_MAX);
@@ -1989,7 +1982,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 void tcp_send_fin(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
+        struct sk_buff *skb = tcp_write_queue_tail(sk);
         int mss_now;
 
         /* Optimization, tack on the FIN if we have a queue of
@@ -1998,7 +1991,7 @@ void tcp_send_fin(struct sock *sk)
          */
         mss_now = tcp_current_mss(sk, 1);
 
-        if (sk->sk_send_head != NULL) {
+        if (tcp_send_head(sk) != NULL) {
                 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
                 TCP_SKB_CB(skb)->end_seq++;
                 tp->write_seq++;
@@ -2071,7 +2064,7 @@ int tcp_send_synack(struct sock *sk)
 {
         struct sk_buff* skb;
 
-        skb = skb_peek(&sk->sk_write_queue);
+        skb = tcp_write_queue_head(sk);
         if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
                 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
                 return -EFAULT;
@@ -2081,9 +2074,9 @@ int tcp_send_synack(struct sock *sk)
                 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
                 if (nskb == NULL)
                         return -ENOMEM;
-                __skb_unlink(skb, &sk->sk_write_queue);
+                tcp_unlink_write_queue(skb, sk);
                 skb_header_release(nskb);
-                __skb_queue_head(&sk->sk_write_queue, nskb);
+                __tcp_add_write_queue_head(sk, nskb);
                 sk_stream_free_skb(sk, skb);
                 sk_charge_skb(sk, nskb);
                 skb = nskb;
@@ -2285,7 +2278,7 @@ int tcp_connect(struct sock *sk)
         TCP_SKB_CB(buff)->when = tcp_time_stamp;
         tp->retrans_stamp = TCP_SKB_CB(buff)->when;
         skb_header_release(buff);
-        __skb_queue_tail(&sk->sk_write_queue, buff);
+        __tcp_add_write_queue_tail(sk, buff);
         sk_charge_skb(sk, buff);
         tp->packets_out += tcp_skb_pcount(buff);
         tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
@@ -2441,7 +2434,7 @@ int tcp_write_wakeup(struct sock *sk)
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
 
-        if ((skb = sk->sk_send_head) != NULL &&
+        if ((skb = tcp_send_head(sk)) != NULL &&
             before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
                 int err;
                 unsigned int mss = tcp_current_mss(sk, 0);
@@ -2491,7 +2484,7 @@ void tcp_send_probe0(struct sock *sk)
 
         err = tcp_write_wakeup(sk);
 
-        if (tp->packets_out || !sk->sk_send_head) {
+        if (tp->packets_out || !tcp_send_head(sk)) {
                 /* Cancel probe timer, if it is not required. */
                 icsk->icsk_probes_out = 0;
                 icsk->icsk_backoff = 0;