Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	72
1 file changed, 37 insertions(+), 35 deletions(-)
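
The pattern in this patch is uniform: pointer parameters and locals that are only read gain a const qualifier, tcp_event_data_sent() loses an skb argument it does not use (the call site at line 896 is updated to match), and the opening brace of tcp_syn_options() and tcp_established_options() moves onto its own line. As a rough illustration of what the const-qualification buys, here is a minimal C sketch with simplified stand-in structures and a made-up helper name, not the kernel's real definitions:

	/* Simplified stand-ins, for illustration only; the real struct sock
	 * and struct sk_buff are far larger. */
	struct sk_buff { unsigned int len; };
	struct sock    { int state; };

	/* With const on read-only parameters, an accidental write through
	 * them is a compile-time error rather than a silent modification. */
	static unsigned int payload_bytes(const struct sock *sk,
					  const struct sk_buff *skb,
					  unsigned int header_size)
	{
		(void)sk;		/* only inspected, never written */
		/* skb->len = 0; */	/* would now fail to compile */
		return skb->len > header_size ? skb->len - header_size : 0;
	}

The same reasoning drives the const struct tcp_sock * and const struct dst_entry * locals below: the results of tcp_sk() and __sk_dst_get() are only read in these paths, so the qualifier propagates without further changes.
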
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ed96c543f1cf..980b98f6288c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
 
 
 /* Account for new data that has been sent to the network. */
-static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
+static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int prior_packets = tp->packets_out;
@@ -89,9 +89,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  * invalid. OK, let's make this for now:
  */
-static inline __u32 tcp_acceptable_seq(struct sock *sk)
+static inline __u32 tcp_acceptable_seq(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
 		return tp->snd_nxt;
@@ -116,7 +116,7 @@ static inline __u32 tcp_acceptable_seq(struct sock *sk)
 static __u16 tcp_advertise_mss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
 	int mss = tp->advmss;
 
 	if (dst) {
@@ -133,7 +133,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
 
 /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
  * This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 delta = tcp_time_stamp - tp->lsndtime;
@@ -154,7 +154,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 
 /* Congestion state accounting after a packet has been sent. */
 static void tcp_event_data_sent(struct tcp_sock *tp,
-				struct sk_buff *skb, struct sock *sk)
+				struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	const u32 now = tcp_time_stamp;
@@ -295,7 +295,7 @@ static u16 tcp_select_window(struct sock *sk)
 }
 
 /* Packet ECN state for a SYN-ACK */
-static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
 {
 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 	if (!(tp->ecn_flags & TCP_ECN_OK))
@@ -315,7 +315,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 }
 
 static __inline__ void
-TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
+TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
 {
 	if (inet_rsk(req)->ecn_ok)
 		th->ece = 1;
@@ -565,7 +565,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
  */
 static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 				struct tcp_out_options *opts,
-				struct tcp_md5sig_key **md5) {
+				struct tcp_md5sig_key **md5)
+{
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
 	unsigned remaining = MAX_TCP_OPTION_SPACE;
@@ -743,7 +744,8 @@ static unsigned tcp_synack_options(struct sock *sk,
  */
 static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 					struct tcp_out_options *opts,
-					struct tcp_md5sig_key **md5) {
+					struct tcp_md5sig_key **md5)
+{
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned size = 0;
@@ -893,7 +895,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 
 	if (skb->len != tcp_header_size)
-		tcp_event_data_sent(tp, skb, sk);
+		tcp_event_data_sent(tp, sk);
 
 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
@@ -926,7 +928,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
+static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
@@ -947,7 +949,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 /* When a modification to fackets out becomes necessary, we need to check
  * skb is counted to fackets_out or not.
  */
-static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
+static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
 				   int decr)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -962,7 +964,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
 /* Pcount in the middle of the write queue got changed, we need to do various
  * tweaks to fix counters
  */
-static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
+static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1146,10 +1148,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 }
 
 /* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	int mss_now;
 
 	/* Calculate base mss without TCP options:
@@ -1175,10 +1177,10 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
 }
 
 /* Inverse of above */
-int tcp_mss_to_mtu(struct sock *sk, int mss)
+int tcp_mss_to_mtu(const struct sock *sk, int mss)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	int mtu;
 
 	mtu = mss +
@@ -1252,8 +1254,8 @@ EXPORT_SYMBOL(tcp_sync_mss);
  */
 unsigned int tcp_current_mss(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
 	u32 mss_now;
 	unsigned header_len;
 	struct tcp_out_options opts;
@@ -1313,10 +1315,10 @@ static void tcp_cwnd_validate(struct sock *sk)
  * modulo only when the receiver window alone is the limiting factor or
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
-static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
 					unsigned int mss_now, unsigned int cwnd)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	u32 needed, window, cwnd_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -1336,8 +1338,8 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
 /* Can at least one segment of SKB be sent right now, according to the
  * congestion window rules? If so, return how many segments are allowed.
  */
-static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
-					 struct sk_buff *skb)
+static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
+					 const struct sk_buff *skb)
 {
 	u32 in_flight, cwnd;
 
@@ -1358,7 +1360,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
+static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
 			     unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);
@@ -1396,7 +1398,7 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
 /* Return non-zero if the Nagle test allows this packet to be
  * sent now.
  */
-static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
 				 unsigned int cur_mss, int nonagle)
 {
 	/* Nagle rule does not apply to frames, which sit in the middle of the
@@ -1422,7 +1424,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
 }
 
 /* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
 				   unsigned int cur_mss)
 {
 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1437,10 +1439,10 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
  * should be put on the wire right now. If so, it returns the number of
  * packets allowed by the congestion window.
  */
-static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int cur_mss, int nonagle)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cwnd_quota;
 
 	tcp_init_tso_segs(sk, skb, cur_mss);
@@ -1458,7 +1460,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
 /* Test if sending is allowed right now. */
 int tcp_may_send_now(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	return skb &&
@@ -2008,7 +2010,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
+static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
 {
 	if (tcp_skb_pcount(skb) > 1)
 		return 0;
@@ -2184,7 +2186,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 static int tcp_can_forward_retransmit(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Forward retransmissions are possible only during Recovery. */
 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
@@ -2550,7 +2552,7 @@ EXPORT_SYMBOL(tcp_make_synack);
 /* Do all connect socket setups that can be done AF independent. */
 static void tcp_connect_init(struct sock *sk)
 {
-	struct dst_entry *dst = __sk_dst_get(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u8 rcv_wscale;
 
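
A short note on the one change above that is not a pure const-ification: tcp_event_data_sent() drops its skb parameter, apparently because the function only consults socket and tcp_sock state, and the caller in tcp_transmit_skb() (line 896 in the old file, 898 in the new) is adjusted to pass two arguments. A minimal sketch of that kind of cleanup, using hypothetical names and elided bodies rather than the kernel code:

	/* Incomplete types suffice here because the pointers are never
	 * dereferenced in this sketch. */
	struct tcp_sock;
	struct sk_buff;
	struct sock;

	/* Before: the skb parameter is accepted but ignored. */
	static void event_data_sent_old(struct tcp_sock *tp,
					struct sk_buff *skb, struct sock *sk)
	{
		(void)tp; (void)skb; (void)sk;	/* bookkeeping elided */
	}

	/* After: the dead parameter is removed; callers pass (tp, sk). */
	static void event_data_sent_new(struct tcp_sock *tp, struct sock *sk)
	{
		(void)tp; (void)sk;		/* bookkeeping elided */
	}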