author    Florian Westphal <fw@strlen.de>    2014-09-29 07:08:30 -0400
committer David S. Miller <davem@davemloft.net>    2014-09-29 14:41:22 -0400
commit    735d383117e113403442d971b23e7cfa2f876c7c (patch)
tree      9040f467826429d05c7d5f4070d18141d652e259 /net/ipv4/tcp_input.c
parent    d82bd1229885d550d03926cfa937703f6caa3cc0 (diff)
tcp: change TCP_ECN prefixes to lower case
Suggested by Stephen. Also drop the inline keyword and let the compiler decide;
gcc 4.7.3 chooses not to inline tcp_ecn_check_ce anymore, so split it up: the
actual evaluation is no longer inlined, while the TCP_ECN_OK test is.

Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
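The pattern the patch applies is worth spelling out: keep only the cheap flag test in a
small function the compiler will happily inline at every call site, and push the heavier
switch on the ECN codepoint into an out-of-line helper. Below is a minimal, self-contained
sketch of that split using hypothetical names (struct conn, check_ce, __check_ce, ECN_OK),
not the kernel's structures or API:

#include <stdio.h>

#define ECN_OK 0x1	/* stand-in for the kernel's TCP_ECN_OK flag */

struct conn {
	unsigned int ecn_flags;
};

/* Heavyweight part, kept out of line (plays the role of __tcp_ecn_check_ce):
 * the compiler is free not to inline this without penalizing callers.
 */
static void __check_ce(struct conn *c, unsigned int dsfield)
{
	switch (dsfield & 0x3) {	/* low two bits stand in for INET_ECN_MASK */
	case 0x3:
		printf("CE mark seen\n");
		break;
	case 0x0:
		printf("not ECT\n");
		break;
	default:
		printf("ECT\n");
		break;
	}
	(void)c;
}

/* Cheap guard (plays the role of tcp_ecn_check_ce): small enough that the
 * flag test gets inlined at each call site.
 */
static void check_ce(struct conn *c, unsigned int dsfield)
{
	if (c->ecn_flags & ECN_OK)
		__check_ce(c, dsfield);
}

int main(void)
{
	struct conn c = { .ecn_flags = ECN_OK };

	check_ce(&c, 0x3);	/* prints "CE mark seen" */
	return 0;
}

The diff below does exactly this with tcp_ecn_check_ce()/__tcp_ecn_check_ce().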
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	41
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 174181e28ef3..aa38f98b7884 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -201,28 +201,25 @@ static inline bool tcp_in_quickack_mode(const struct sock *sk)
 	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 }
 
-static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
+static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
 {
 	if (tp->ecn_flags & TCP_ECN_OK)
 		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 }
 
-static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
+static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 {
 	if (tcp_hdr(skb)->cwr)
 		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
+static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 {
 	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 {
-	if (!(tp->ecn_flags & TCP_ECN_OK))
-		return;
-
 	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
 	case INET_ECN_NOT_ECT:
 		/* Funny extension: if ECT is not set on a segment,
@@ -251,19 +248,25 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
 	}
 }
 
-static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+{
+	if (tp->ecn_flags & TCP_ECN_OK)
+		__tcp_ecn_check_ce(tp, skb);
+}
+
+static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
+static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
 		return true;
@@ -660,7 +663,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 	}
 	icsk->icsk_ack.lrcvtime = now;
 
-	TCP_ECN_check_ce(tp, skb);
+	tcp_ecn_check_ce(tp, skb);
 
 	if (skb->len >= 128)
 		tcp_grow_window(sk, skb);
@@ -1976,7 +1979,7 @@ void tcp_enter_loss(struct sock *sk)
 			       sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
-	TCP_ECN_queue_cwr(tp);
+	tcp_ecn_queue_cwr(tp);
 
 	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
 	 * loss recovery is underway except recurring timeout(s) on
@@ -2368,7 +2371,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 
 		if (tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
-			TCP_ECN_withdraw_cwr(tp);
+			tcp_ecn_withdraw_cwr(tp);
 		}
 	} else {
 		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
@@ -2498,7 +2501,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 	tp->prr_delivered = 0;
 	tp->prr_out = 0;
 	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
-	TCP_ECN_queue_cwr(tp);
+	tcp_ecn_queue_cwr(tp);
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
@@ -3453,7 +3456,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
 						&sack_rtt_us);
 
-		if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) {
+		if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
 			flag |= FLAG_ECE;
 			ack_ev_flags |= CA_ACK_ECE;
 		}
@@ -4193,7 +4196,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff *skb1;
 	u32 seq, end_seq;
 
-	TCP_ECN_check_ce(tp, skb);
+	tcp_ecn_check_ce(tp, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
@@ -4376,7 +4379,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		skb_dst_drop(skb);
 		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
-		TCP_ECN_accept_cwr(tp, skb);
+		tcp_ecn_accept_cwr(tp, skb);
 
 		tp->rx_opt.dsack = 0;
 
@@ -5457,7 +5460,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 *    state to ESTABLISHED..."
 		 */
 
-		TCP_ECN_rcv_synack(tp, th);
+		tcp_ecn_rcv_synack(tp, th);
 
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 		tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -5576,7 +5579,7 @@ discard:
 		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
 		tp->max_window = tp->snd_wnd;
 
-		TCP_ECN_rcv_syn(tp, th);
+		tcp_ecn_rcv_syn(tp, th);
 
 		tcp_mtup_init(sk);
 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);