diff options
author | Florian Westphal <fw@strlen.de> | 2014-09-29 07:08:30 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-09-29 14:41:22 -0400 |
commit | 735d383117e113403442d971b23e7cfa2f876c7c (patch) | |
tree | 9040f467826429d05c7d5f4070d18141d652e259 | |
parent | d82bd1229885d550d03926cfa937703f6caa3cc0 (diff) |
tcp: change TCP_ECN prefixes to lower case
Suggested by Stephen. Also drop the inline keyword and let the compiler decide.
gcc 4.7.3 decides to no longer inline tcp_ecn_check_ce, so split it up.
The actual evaluation is not inlined anymore while the ECN_OK test is.
Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/ipv4/tcp_input.c | 41 | ||||
-rw-r--r-- | net/ipv4/tcp_minisocks.c | 6 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 18 |
3 files changed, 34 insertions, 31 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 174181e28ef3..aa38f98b7884 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -201,28 +201,25 @@ static inline bool tcp_in_quickack_mode(const struct sock *sk) | |||
201 | return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; | 201 | return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; |
202 | } | 202 | } |
203 | 203 | ||
204 | static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp) | 204 | static void tcp_ecn_queue_cwr(struct tcp_sock *tp) |
205 | { | 205 | { |
206 | if (tp->ecn_flags & TCP_ECN_OK) | 206 | if (tp->ecn_flags & TCP_ECN_OK) |
207 | tp->ecn_flags |= TCP_ECN_QUEUE_CWR; | 207 | tp->ecn_flags |= TCP_ECN_QUEUE_CWR; |
208 | } | 208 | } |
209 | 209 | ||
210 | static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) | 210 | static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) |
211 | { | 211 | { |
212 | if (tcp_hdr(skb)->cwr) | 212 | if (tcp_hdr(skb)->cwr) |
213 | tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; | 213 | tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; |
214 | } | 214 | } |
215 | 215 | ||
216 | static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp) | 216 | static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) |
217 | { | 217 | { |
218 | tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; | 218 | tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; |
219 | } | 219 | } |
220 | 220 | ||
221 | static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) | 221 | static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) |
222 | { | 222 | { |
223 | if (!(tp->ecn_flags & TCP_ECN_OK)) | ||
224 | return; | ||
225 | |||
226 | switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { | 223 | switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { |
227 | case INET_ECN_NOT_ECT: | 224 | case INET_ECN_NOT_ECT: |
228 | /* Funny extension: if ECT is not set on a segment, | 225 | /* Funny extension: if ECT is not set on a segment, |
@@ -251,19 +248,25 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s | |||
251 | } | 248 | } |
252 | } | 249 | } |
253 | 250 | ||
254 | static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) | 251 | static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) |
252 | { | ||
253 | if (tp->ecn_flags & TCP_ECN_OK) | ||
254 | __tcp_ecn_check_ce(tp, skb); | ||
255 | } | ||
256 | |||
257 | static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) | ||
255 | { | 258 | { |
256 | if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) | 259 | if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) |
257 | tp->ecn_flags &= ~TCP_ECN_OK; | 260 | tp->ecn_flags &= ~TCP_ECN_OK; |
258 | } | 261 | } |
259 | 262 | ||
260 | static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) | 263 | static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) |
261 | { | 264 | { |
262 | if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) | 265 | if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) |
263 | tp->ecn_flags &= ~TCP_ECN_OK; | 266 | tp->ecn_flags &= ~TCP_ECN_OK; |
264 | } | 267 | } |
265 | 268 | ||
266 | static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) | 269 | static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) |
267 | { | 270 | { |
268 | if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) | 271 | if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) |
269 | return true; | 272 | return true; |
@@ -660,7 +663,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) | |||
660 | } | 663 | } |
661 | icsk->icsk_ack.lrcvtime = now; | 664 | icsk->icsk_ack.lrcvtime = now; |
662 | 665 | ||
663 | TCP_ECN_check_ce(tp, skb); | 666 | tcp_ecn_check_ce(tp, skb); |
664 | 667 | ||
665 | if (skb->len >= 128) | 668 | if (skb->len >= 128) |
666 | tcp_grow_window(sk, skb); | 669 | tcp_grow_window(sk, skb); |
@@ -1976,7 +1979,7 @@ void tcp_enter_loss(struct sock *sk) | |||
1976 | sysctl_tcp_reordering); | 1979 | sysctl_tcp_reordering); |
1977 | tcp_set_ca_state(sk, TCP_CA_Loss); | 1980 | tcp_set_ca_state(sk, TCP_CA_Loss); |
1978 | tp->high_seq = tp->snd_nxt; | 1981 | tp->high_seq = tp->snd_nxt; |
1979 | TCP_ECN_queue_cwr(tp); | 1982 | tcp_ecn_queue_cwr(tp); |
1980 | 1983 | ||
1981 | /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous | 1984 | /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous |
1982 | * loss recovery is underway except recurring timeout(s) on | 1985 | * loss recovery is underway except recurring timeout(s) on |
@@ -2368,7 +2371,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) | |||
2368 | 2371 | ||
2369 | if (tp->prior_ssthresh > tp->snd_ssthresh) { | 2372 | if (tp->prior_ssthresh > tp->snd_ssthresh) { |
2370 | tp->snd_ssthresh = tp->prior_ssthresh; | 2373 | tp->snd_ssthresh = tp->prior_ssthresh; |
2371 | TCP_ECN_withdraw_cwr(tp); | 2374 | tcp_ecn_withdraw_cwr(tp); |
2372 | } | 2375 | } |
2373 | } else { | 2376 | } else { |
2374 | tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); | 2377 | tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); |
@@ -2498,7 +2501,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk) | |||
2498 | tp->prr_delivered = 0; | 2501 | tp->prr_delivered = 0; |
2499 | tp->prr_out = 0; | 2502 | tp->prr_out = 0; |
2500 | tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); | 2503 | tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); |
2501 | TCP_ECN_queue_cwr(tp); | 2504 | tcp_ecn_queue_cwr(tp); |
2502 | } | 2505 | } |
2503 | 2506 | ||
2504 | static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, | 2507 | static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, |
@@ -3453,7 +3456,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3453 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, | 3456 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, |
3454 | &sack_rtt_us); | 3457 | &sack_rtt_us); |
3455 | 3458 | ||
3456 | if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) { | 3459 | if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { |
3457 | flag |= FLAG_ECE; | 3460 | flag |= FLAG_ECE; |
3458 | ack_ev_flags |= CA_ACK_ECE; | 3461 | ack_ev_flags |= CA_ACK_ECE; |
3459 | } | 3462 | } |
@@ -4193,7 +4196,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) | |||
4193 | struct sk_buff *skb1; | 4196 | struct sk_buff *skb1; |
4194 | u32 seq, end_seq; | 4197 | u32 seq, end_seq; |
4195 | 4198 | ||
4196 | TCP_ECN_check_ce(tp, skb); | 4199 | tcp_ecn_check_ce(tp, skb); |
4197 | 4200 | ||
4198 | if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { | 4201 | if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { |
4199 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); | 4202 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); |
@@ -4376,7 +4379,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | |||
4376 | skb_dst_drop(skb); | 4379 | skb_dst_drop(skb); |
4377 | __skb_pull(skb, tcp_hdr(skb)->doff * 4); | 4380 | __skb_pull(skb, tcp_hdr(skb)->doff * 4); |
4378 | 4381 | ||
4379 | TCP_ECN_accept_cwr(tp, skb); | 4382 | tcp_ecn_accept_cwr(tp, skb); |
4380 | 4383 | ||
4381 | tp->rx_opt.dsack = 0; | 4384 | tp->rx_opt.dsack = 0; |
4382 | 4385 | ||
@@ -5457,7 +5460,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
5457 | * state to ESTABLISHED..." | 5460 | * state to ESTABLISHED..." |
5458 | */ | 5461 | */ |
5459 | 5462 | ||
5460 | TCP_ECN_rcv_synack(tp, th); | 5463 | tcp_ecn_rcv_synack(tp, th); |
5461 | 5464 | ||
5462 | tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); | 5465 | tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); |
5463 | tcp_ack(sk, skb, FLAG_SLOWPATH); | 5466 | tcp_ack(sk, skb, FLAG_SLOWPATH); |
@@ -5576,7 +5579,7 @@ discard: | |||
5576 | tp->snd_wl1 = TCP_SKB_CB(skb)->seq; | 5579 | tp->snd_wl1 = TCP_SKB_CB(skb)->seq; |
5577 | tp->max_window = tp->snd_wnd; | 5580 | tp->max_window = tp->snd_wnd; |
5578 | 5581 | ||
5579 | TCP_ECN_rcv_syn(tp, th); | 5582 | tcp_ecn_rcv_syn(tp, th); |
5580 | 5583 | ||
5581 | tcp_mtup_init(sk); | 5584 | tcp_mtup_init(sk); |
5582 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); | 5585 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 47b73506b77e..63d2680b65db 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -393,8 +393,8 @@ void tcp_openreq_init_rwin(struct request_sock *req, | |||
393 | } | 393 | } |
394 | EXPORT_SYMBOL(tcp_openreq_init_rwin); | 394 | EXPORT_SYMBOL(tcp_openreq_init_rwin); |
395 | 395 | ||
396 | static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, | 396 | static void tcp_ecn_openreq_child(struct tcp_sock *tp, |
397 | struct request_sock *req) | 397 | const struct request_sock *req) |
398 | { | 398 | { |
399 | tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; | 399 | tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; |
400 | } | 400 | } |
@@ -507,7 +507,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
507 | if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) | 507 | if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) |
508 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; | 508 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; |
509 | newtp->rx_opt.mss_clamp = req->mss; | 509 | newtp->rx_opt.mss_clamp = req->mss; |
510 | TCP_ECN_openreq_child(newtp, req); | 510 | tcp_ecn_openreq_child(newtp, req); |
511 | newtp->fastopen_rsk = NULL; | 511 | newtp->fastopen_rsk = NULL; |
512 | newtp->syn_data_acked = 0; | 512 | newtp->syn_data_acked = 0; |
513 | 513 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 86a0216fcaa1..ee567e9e98c3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -318,7 +318,7 @@ static u16 tcp_select_window(struct sock *sk) | |||
318 | } | 318 | } |
319 | 319 | ||
320 | /* Packet ECN state for a SYN-ACK */ | 320 | /* Packet ECN state for a SYN-ACK */ |
321 | static inline void TCP_ECN_send_synack(struct sock *sk, struct sk_buff *skb) | 321 | static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) |
322 | { | 322 | { |
323 | const struct tcp_sock *tp = tcp_sk(sk); | 323 | const struct tcp_sock *tp = tcp_sk(sk); |
324 | 324 | ||
@@ -330,7 +330,7 @@ static inline void TCP_ECN_send_synack(struct sock *sk, struct sk_buff *skb) | |||
330 | } | 330 | } |
331 | 331 | ||
332 | /* Packet ECN state for a SYN. */ | 332 | /* Packet ECN state for a SYN. */ |
333 | static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) | 333 | static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) |
334 | { | 334 | { |
335 | struct tcp_sock *tp = tcp_sk(sk); | 335 | struct tcp_sock *tp = tcp_sk(sk); |
336 | 336 | ||
@@ -344,8 +344,8 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) | |||
344 | } | 344 | } |
345 | } | 345 | } |
346 | 346 | ||
347 | static __inline__ void | 347 | static void |
348 | TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th, | 348 | tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th, |
349 | struct sock *sk) | 349 | struct sock *sk) |
350 | { | 350 | { |
351 | if (inet_rsk(req)->ecn_ok) { | 351 | if (inet_rsk(req)->ecn_ok) { |
@@ -358,7 +358,7 @@ TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th, | |||
358 | /* Set up ECN state for a packet on a ESTABLISHED socket that is about to | 358 | /* Set up ECN state for a packet on a ESTABLISHED socket that is about to |
359 | * be sent. | 359 | * be sent. |
360 | */ | 360 | */ |
361 | static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, | 361 | static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, |
362 | int tcp_header_len) | 362 | int tcp_header_len) |
363 | { | 363 | { |
364 | struct tcp_sock *tp = tcp_sk(sk); | 364 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -960,7 +960,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
960 | 960 | ||
961 | tcp_options_write((__be32 *)(th + 1), tp, &opts); | 961 | tcp_options_write((__be32 *)(th + 1), tp, &opts); |
962 | if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0)) | 962 | if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0)) |
963 | TCP_ECN_send(sk, skb, tcp_header_size); | 963 | tcp_ecn_send(sk, skb, tcp_header_size); |
964 | 964 | ||
965 | #ifdef CONFIG_TCP_MD5SIG | 965 | #ifdef CONFIG_TCP_MD5SIG |
966 | /* Calculate the MD5 hash, as we have all we need now */ | 966 | /* Calculate the MD5 hash, as we have all we need now */ |
@@ -2800,7 +2800,7 @@ int tcp_send_synack(struct sock *sk) | |||
2800 | } | 2800 | } |
2801 | 2801 | ||
2802 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; | 2802 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; |
2803 | TCP_ECN_send_synack(sk, skb); | 2803 | tcp_ecn_send_synack(sk, skb); |
2804 | } | 2804 | } |
2805 | return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); | 2805 | return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
2806 | } | 2806 | } |
@@ -2859,7 +2859,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2859 | memset(th, 0, sizeof(struct tcphdr)); | 2859 | memset(th, 0, sizeof(struct tcphdr)); |
2860 | th->syn = 1; | 2860 | th->syn = 1; |
2861 | th->ack = 1; | 2861 | th->ack = 1; |
2862 | TCP_ECN_make_synack(req, th, sk); | 2862 | tcp_ecn_make_synack(req, th, sk); |
2863 | th->source = htons(ireq->ir_num); | 2863 | th->source = htons(ireq->ir_num); |
2864 | th->dest = ireq->ir_rmt_port; | 2864 | th->dest = ireq->ir_rmt_port; |
2865 | /* Setting of flags are superfluous here for callers (and ECE is | 2865 | /* Setting of flags are superfluous here for callers (and ECE is |
@@ -3098,7 +3098,7 @@ int tcp_connect(struct sock *sk) | |||
3098 | tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); | 3098 | tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); |
3099 | tp->retrans_stamp = tcp_time_stamp; | 3099 | tp->retrans_stamp = tcp_time_stamp; |
3100 | tcp_connect_queue_skb(sk, buff); | 3100 | tcp_connect_queue_skb(sk, buff); |
3101 | TCP_ECN_send_syn(sk, buff); | 3101 | tcp_ecn_send_syn(sk, buff); |
3102 | 3102 | ||
3103 | /* Send off SYN; include data in Fast Open. */ | 3103 | /* Send off SYN; include data in Fast Open. */ |
3104 | err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : | 3104 | err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : |