Diffstat (limited to 'net/ipv4/tcp_output.c')
 -rw-r--r--  net/ipv4/tcp_output.c | 143
 1 file changed, 80 insertions(+), 63 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dd30dd137b74..75b68116682a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -105,18 +105,19 @@ static __u16 tcp_advertise_mss(struct sock *sk)
 
 /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
  * This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	s32 delta = tcp_time_stamp - tp->lsndtime;
 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
 	u32 cwnd = tp->snd_cwnd;
 
-	tcp_ca_event(tp, CA_EVENT_CWND_RESTART);
+	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 
-	tp->snd_ssthresh = tcp_current_ssthresh(tp);
+	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 	restart_cwnd = min(restart_cwnd, cwnd);
 
-	while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd)
+	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 		cwnd >>= 1;
 	tp->snd_cwnd = max(cwnd, restart_cwnd);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
 static inline void tcp_event_data_sent(struct tcp_sock *tp,
 				       struct sk_buff *skb, struct sock *sk)
 {
-	u32 now = tcp_time_stamp;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	const u32 now = tcp_time_stamp;
 
-	if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto)
-		tcp_cwnd_restart(tp, __sk_dst_get(sk));
+	if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
+		tcp_cwnd_restart(sk, __sk_dst_get(sk));
 
 	tp->lsndtime = now;
 
 	/* If it is a reply for ato after last received
 	 * packet, enter pingpong mode.
 	 */
-	if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato)
-		tp->ack.pingpong = 1;
+	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+		icsk->icsk_ack.pingpong = 1;
 }
 
 static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	tcp_dec_quickack_mode(tp, pkts);
-	tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
+	tcp_dec_quickack_mode(sk, pkts);
+	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
 /* Determine a window scaling and initial window to offer.
@@ -265,6 +265,7 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 {
 	if (skb != NULL) {
+		const struct inet_connection_sock *icsk = inet_csk(sk);
 		struct inet_sock *inet = inet_sk(sk);
 		struct tcp_sock *tp = tcp_sk(sk);
 		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@ -280,8 +281,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 #define SYSCTL_FLAG_SACK	0x4
 
 		/* If congestion control is doing timestamping */
-		if (tp->ca_ops->rtt_sample)
-			do_gettimeofday(&skb->stamp);
+		if (icsk->icsk_ca_ops->rtt_sample)
+			__net_timestamp(skb);
 
 		sysctl_flags = 0;
 		if (tcb->flags & TCPCB_FLAG_SYN) {
@@ -308,7 +309,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		}
 
 		if (tcp_packets_in_flight(tp) == 0)
-			tcp_ca_event(tp, CA_EVENT_TX_START);
+			tcp_ca_event(sk, CA_EVENT_TX_START);
 
 		th = (struct tcphdr *) skb_push(skb, tcp_header_size);
 		skb->h.th = th;
@@ -366,7 +367,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		if (err <= 0)
 			return err;
 
-		tcp_enter_cwr(tp);
+		tcp_enter_cwr(sk);
 
 		/* NET_XMIT_CN is special. It does not guarantee,
 		 * that this packet is lost. It tells that device
@@ -482,7 +483,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
 	 * skbs, which it never sent before. --ANK
 	 */
 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
-	buff->stamp = skb->stamp;
+	buff->tstamp = skb->tstamp;
 
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
 		tp->lost_out -= tcp_skb_pcount(skb);
@@ -505,7 +506,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
 
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
-	__skb_append(skb, buff);
+	__skb_append(skb, buff, &sk->sk_write_queue);
 
 	return 0;
 }
@@ -696,7 +697,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
 		if (tp->packets_out > tp->snd_cwnd_used)
 			tp->snd_cwnd_used = tp->packets_out;
 
-		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
+		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
 			tcp_cwnd_application_limited(sk);
 	}
 }
@@ -893,7 +894,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
-	__skb_append(skb, buff);
+	__skb_append(skb, buff, &sk->sk_write_queue);
 
 	return 0;
 }
@@ -905,12 +906,13 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  */
 static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
 
 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
 		return 0;
 
-	if (tp->ca_state != TCP_CA_Open)
+	if (icsk->icsk_ca_state != TCP_CA_Open)
 		return 0;
 
 	in_flight = tcp_packets_in_flight(tp);
@@ -1147,6 +1149,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
  */
u32 __tcp_select_window(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	/* MSS for the peer's data. Previous verions used mss_clamp
 	 * here. I don't know if the value based on our guesses
@@ -1154,7 +1157,7 @@ u32 __tcp_select_window(struct sock *sk)
 	 * but may be worse for the performance because of rcv_mss
 	 * fluctuations. --SAW 1998/11/1
 	 */
-	int mss = tp->ack.rcv_mss;
+	int mss = icsk->icsk_ack.rcv_mss;
 	int free_space = tcp_space(sk);
 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
 	int window;
@@ -1163,7 +1166,7 @@ u32 __tcp_select_window(struct sock *sk)
 		mss = full_space;
 
 	if (free_space < full_space/2) {
-		tp->ack.quick = 0;
+		icsk->icsk_ack.quick = 0;
 
 		if (tcp_memory_pressure)
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
@@ -1238,7 +1241,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 		       tcp_skb_pcount(next_skb) != 1);
 
 	/* Ok. We will be able to collapse the packet. */
-	__skb_unlink(next_skb, next_skb->list);
+	__skb_unlink(next_skb, &sk->sk_write_queue);
 
 	memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
 
@@ -1286,6 +1289,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
  */
 void tcp_simple_retransmit(struct sock *sk)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	unsigned int mss = tcp_current_mss(sk, 0);
@@ -1316,12 +1320,12 @@ void tcp_simple_retransmit(struct sock *sk)
 	 * in network, but units changed and effective
 	 * cwnd/ssthresh really reduced now.
 	 */
-	if (tp->ca_state != TCP_CA_Loss) {
+	if (icsk->icsk_ca_state != TCP_CA_Loss) {
 		tp->high_seq = tp->snd_nxt;
-		tp->snd_ssthresh = tcp_current_ssthresh(tp);
+		tp->snd_ssthresh = tcp_current_ssthresh(sk);
 		tp->prior_ssthresh = 0;
 		tp->undo_marker = 0;
-		tcp_set_ca_state(tp, TCP_CA_Loss);
+		tcp_set_ca_state(sk, TCP_CA_Loss);
 	}
 	tcp_xmit_retransmit_queue(sk);
 }
@@ -1461,6 +1465,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
  */
 void tcp_xmit_retransmit_queue(struct sock *sk)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int packet_cnt = tp->lost_out;
@@ -1484,14 +1489,16 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 			if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
 				if (tcp_retransmit_skb(sk, skb))
 					return;
-				if (tp->ca_state != TCP_CA_Loss)
+				if (icsk->icsk_ca_state != TCP_CA_Loss)
 					NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
 				else
 					NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
 
 				if (skb ==
 				    skb_peek(&sk->sk_write_queue))
-					tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+					inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+								  inet_csk(sk)->icsk_rto,
+								  TCP_RTO_MAX);
 			}
 
 			packet_cnt -= tcp_skb_pcount(skb);
@@ -1504,7 +1511,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	/* OK, demanded retransmission is finished. */
 
 	/* Forward retransmissions are possible only during Recovery. */
-	if (tp->ca_state != TCP_CA_Recovery)
+	if (icsk->icsk_ca_state != TCP_CA_Recovery)
 		return;
 
 	/* No forward retransmissions in Reno are possible. */
@@ -1544,7 +1551,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 			break;
 
 		if (skb == skb_peek(&sk->sk_write_queue))
-			tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+						  inet_csk(sk)->icsk_rto,
+						  TCP_RTO_MAX);
 
 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
 	}
@@ -1573,7 +1582,7 @@ void tcp_send_fin(struct sock *sk)
 	} else {
 		/* Socket is locked, keep trying until memory is available. */
 		for (;;) {
-			skb = alloc_skb(MAX_TCP_HEADER, GFP_KERNEL);
+			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
 			if (skb)
 				break;
 			yield();
@@ -1780,8 +1789,8 @@ static inline void tcp_connect_init(struct sock *sk)
 	tp->rcv_wup = 0;
 	tp->copied_seq = 0;
 
-	tp->rto = TCP_TIMEOUT_INIT;
-	tp->retransmits = 0;
+	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+	inet_csk(sk)->icsk_retransmits = 0;
 	tcp_clear_retrans(tp);
 }
 
@@ -1795,7 +1804,7 @@ int tcp_connect(struct sock *sk)
 
 	tcp_connect_init(sk);
 
-	buff = alloc_skb(MAX_TCP_HEADER + 15, sk->sk_allocation);
+	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
 	if (unlikely(buff == NULL))
 		return -ENOBUFS;
 
@@ -1824,7 +1833,8 @@ int tcp_connect(struct sock *sk)
 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
 	/* Timer for repeating the SYN until an answer. */
-	tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 	return 0;
 }
 
@@ -1834,20 +1844,21 @@ int tcp_connect(struct sock *sk)
  */
 void tcp_send_delayed_ack(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	int ato = tp->ack.ato;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	int ato = icsk->icsk_ack.ato;
 	unsigned long timeout;
 
 	if (ato > TCP_DELACK_MIN) {
+		const struct tcp_sock *tp = tcp_sk(sk);
 		int max_ato = HZ/2;
 
-		if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED))
+		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
 			max_ato = TCP_DELACK_MAX;
 
 		/* Slow path, intersegment interval is "high". */
 
 		/* If some rtt estimate is known, use it to bound delayed ack.
-		 * Do not use tp->rto here, use results of rtt measurements
+		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
 		 * directly.
 		 */
 		if (tp->srtt) {
@@ -1864,21 +1875,22 @@ void tcp_send_delayed_ack(struct sock *sk)
 	timeout = jiffies + ato;
 
 	/* Use new timeout only if there wasn't a older one earlier. */
-	if (tp->ack.pending&TCP_ACK_TIMER) {
+	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
 		/* If delack timer was blocked or is about to expire,
 		 * send ACK now.
 		 */
-		if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) {
+		if (icsk->icsk_ack.blocked ||
+		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
 			tcp_send_ack(sk);
 			return;
 		}
 
-		if (!time_before(timeout, tp->ack.timeout))
-			timeout = tp->ack.timeout;
+		if (!time_before(timeout, icsk->icsk_ack.timeout))
+			timeout = icsk->icsk_ack.timeout;
 	}
-	tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER;
-	tp->ack.timeout = timeout;
-	sk_reset_timer(sk, &tp->delack_timer, timeout);
+	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+	icsk->icsk_ack.timeout = timeout;
+	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
 }
 
 /* This routine sends an ack and also updates the window. */
@@ -1895,9 +1907,10 @@ void tcp_send_ack(struct sock *sk)
 	 */
 	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
 	if (buff == NULL) {
-		tcp_schedule_ack(tp);
-		tp->ack.ato = TCP_ATO_MIN;
-		tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
+		inet_csk_schedule_ack(sk);
+		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+					  TCP_DELACK_MAX, TCP_RTO_MAX);
 		return;
 	}
 
@@ -2011,6 +2024,7 @@ int tcp_write_wakeup(struct sock *sk)
  */
 void tcp_send_probe0(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int err;
 
@@ -2018,28 +2032,31 @@ void tcp_send_probe0(struct sock *sk)
 
 	if (tp->packets_out || !sk->sk_send_head) {
 		/* Cancel probe timer, if it is not required. */
-		tp->probes_out = 0;
-		tp->backoff = 0;
+		icsk->icsk_probes_out = 0;
+		icsk->icsk_backoff = 0;
 		return;
 	}
 
 	if (err <= 0) {
-		if (tp->backoff < sysctl_tcp_retries2)
-			tp->backoff++;
-		tp->probes_out++;
-		tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
-				      min(tp->rto << tp->backoff, TCP_RTO_MAX));
+		if (icsk->icsk_backoff < sysctl_tcp_retries2)
+			icsk->icsk_backoff++;
+		icsk->icsk_probes_out++;
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
+					  TCP_RTO_MAX);
 	} else {
 		/* If packet was not sent due to local congestion,
-		 * do not backoff and do not remember probes_out.
+		 * do not backoff and do not remember icsk_probes_out.
 		 * Let local senders to fight for local resources.
 		 *
 		 * Use accumulated backoff yet.
 		 */
-		if (!tp->probes_out)
-			tp->probes_out=1;
-		tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
-				      min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL));
+		if (!icsk->icsk_probes_out)
+			icsk->icsk_probes_out = 1;
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+					  min(icsk->icsk_rto << icsk->icsk_backoff,
+					      TCP_RESOURCE_PROBE_INTERVAL),
+					  TCP_RTO_MAX);
 	}
 }
 
