diff options
author | Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> | 2008-10-07 17:43:06 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-10-07 17:43:06 -0400 |
commit | 33f5f57eeb0c6386fdd85f9c690dc8d700ba7928 (patch) | |
tree | 4bd3421bfa3088018f8e355e6f47e43599748802 /net | |
parent | 654bed16cf86a9ef94495d9e6131b7ff7840a3dd (diff) |
tcp: kill pointless urg_mode
It all started from me noticing that this urgent check in
tcp_clean_rtx_queue is unnecessarily inside the loop. Then
I took a longer look at it and found out that the users of
urg_mode can trivially do without it — well, almost: there
was one gotcha.
Bonus: those funny people who use urg with >= 2^31 write_seq -
snd_una could now rejoice too (that's the only purpose for the
between being there, otherwise a simple compare would have done
the thing). Not that I assume that the rest of the tcp code
happily lives with such mind-boggling numbers :-). Alas, it
turned out to be impossible to set wmem to such numbers anyway,
yes I really tried a big sendfile after setting some wmem but
nothing happened :-). ...Tcp_wmem is int and so is sk_sndbuf...
So, as a hack, I changed the variable to long and found out
that it seems to work... :-)
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/ipv4/tcp.c | 4 | ||||
-rw-r--r-- | net/ipv4/tcp_input.c | 11 | ||||
-rw-r--r-- | net/ipv4/tcp_minisocks.c | 1 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 18 |
4 files changed, 20 insertions, 14 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7d3fe571d15f..eccb7165a80c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -497,10 +497,8 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb) | |||
497 | static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, | 497 | static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, |
498 | struct sk_buff *skb) | 498 | struct sk_buff *skb) |
499 | { | 499 | { |
500 | if (flags & MSG_OOB) { | 500 | if (flags & MSG_OOB) |
501 | tp->urg_mode = 1; | ||
502 | tp->snd_up = tp->write_seq; | 501 | tp->snd_up = tp->write_seq; |
503 | } | ||
504 | } | 502 | } |
505 | 503 | ||
506 | static inline void tcp_push(struct sock *sk, int flags, int mss_now, | 504 | static inline void tcp_push(struct sock *sk, int flags, int mss_now, |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3b76bce769dd..c19f429dc443 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2836,7 +2836,8 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) | |||
2836 | * is before the ack sequence we can discard it as it's confirmed to have | 2836 | * is before the ack sequence we can discard it as it's confirmed to have |
2837 | * arrived at the other end. | 2837 | * arrived at the other end. |
2838 | */ | 2838 | */ |
2839 | static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) | 2839 | static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, |
2840 | u32 prior_snd_una) | ||
2840 | { | 2841 | { |
2841 | struct tcp_sock *tp = tcp_sk(sk); | 2842 | struct tcp_sock *tp = tcp_sk(sk); |
2842 | const struct inet_connection_sock *icsk = inet_csk(sk); | 2843 | const struct inet_connection_sock *icsk = inet_csk(sk); |
@@ -2903,9 +2904,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) | |||
2903 | if (sacked & TCPCB_LOST) | 2904 | if (sacked & TCPCB_LOST) |
2904 | tp->lost_out -= acked_pcount; | 2905 | tp->lost_out -= acked_pcount; |
2905 | 2906 | ||
2906 | if (unlikely(tp->urg_mode && !before(end_seq, tp->snd_up))) | ||
2907 | tp->urg_mode = 0; | ||
2908 | |||
2909 | tp->packets_out -= acked_pcount; | 2907 | tp->packets_out -= acked_pcount; |
2910 | pkts_acked += acked_pcount; | 2908 | pkts_acked += acked_pcount; |
2911 | 2909 | ||
@@ -2935,6 +2933,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) | |||
2935 | tp->lost_skb_hint = NULL; | 2933 | tp->lost_skb_hint = NULL; |
2936 | } | 2934 | } |
2937 | 2935 | ||
2936 | if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) | ||
2937 | tp->snd_up = tp->snd_una; | ||
2938 | |||
2938 | if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) | 2939 | if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) |
2939 | flag |= FLAG_SACK_RENEGING; | 2940 | flag |= FLAG_SACK_RENEGING; |
2940 | 2941 | ||
@@ -3311,7 +3312,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) | |||
3311 | goto no_queue; | 3312 | goto no_queue; |
3312 | 3313 | ||
3313 | /* See if we can take anything off of the retransmit queue. */ | 3314 | /* See if we can take anything off of the retransmit queue. */ |
3314 | flag |= tcp_clean_rtx_queue(sk, prior_fackets); | 3315 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); |
3315 | 3316 | ||
3316 | if (tp->frto_counter) | 3317 | if (tp->frto_counter) |
3317 | frto_cwnd = tcp_process_frto(sk, flag); | 3318 | frto_cwnd = tcp_process_frto(sk, flag); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index f976fc57892c..779f2e9d0689 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -395,6 +395,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
395 | newtp->pred_flags = 0; | 395 | newtp->pred_flags = 0; |
396 | newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1; | 396 | newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1; |
397 | newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1; | 397 | newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1; |
398 | newtp->snd_up = treq->snt_isn + 1; | ||
398 | 399 | ||
399 | tcp_prequeue_init(newtp); | 400 | tcp_prequeue_init(newtp); |
400 | 401 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 493553c71d32..990a58493235 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -345,6 +345,11 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) | |||
345 | TCP_SKB_CB(skb)->end_seq = seq; | 345 | TCP_SKB_CB(skb)->end_seq = seq; |
346 | } | 346 | } |
347 | 347 | ||
348 | static inline int tcp_urg_mode(const struct tcp_sock *tp) | ||
349 | { | ||
350 | return tp->snd_una != tp->snd_up; | ||
351 | } | ||
352 | |||
348 | #define OPTION_SACK_ADVERTISE (1 << 0) | 353 | #define OPTION_SACK_ADVERTISE (1 << 0) |
349 | #define OPTION_TS (1 << 1) | 354 | #define OPTION_TS (1 << 1) |
350 | #define OPTION_MD5 (1 << 2) | 355 | #define OPTION_MD5 (1 << 2) |
@@ -646,7 +651,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
646 | th->check = 0; | 651 | th->check = 0; |
647 | th->urg_ptr = 0; | 652 | th->urg_ptr = 0; |
648 | 653 | ||
649 | if (unlikely(tp->urg_mode && | 654 | /* The urg_mode check is necessary during a below snd_una win probe */ |
655 | if (unlikely(tcp_urg_mode(tp) && | ||
650 | between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) { | 656 | between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) { |
651 | th->urg_ptr = htons(tp->snd_up - tcb->seq); | 657 | th->urg_ptr = htons(tp->snd_up - tcb->seq); |
652 | th->urg = 1; | 658 | th->urg = 1; |
@@ -1012,7 +1018,7 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) | |||
1012 | /* Compute the current effective MSS, taking SACKs and IP options, | 1018 | /* Compute the current effective MSS, taking SACKs and IP options, |
1013 | * and even PMTU discovery events into account. | 1019 | * and even PMTU discovery events into account. |
1014 | * | 1020 | * |
1015 | * LARGESEND note: !urg_mode is overkill, only frames up to snd_up | 1021 | * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up |
1016 | * cannot be large. However, taking into account rare use of URG, this | 1022 | * cannot be large. However, taking into account rare use of URG, this |
1017 | * is not a big flaw. | 1023 | * is not a big flaw. |
1018 | */ | 1024 | */ |
@@ -1029,7 +1035,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
1029 | 1035 | ||
1030 | mss_now = tp->mss_cache; | 1036 | mss_now = tp->mss_cache; |
1031 | 1037 | ||
1032 | if (large_allowed && sk_can_gso(sk) && !tp->urg_mode) | 1038 | if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp)) |
1033 | doing_tso = 1; | 1039 | doing_tso = 1; |
1034 | 1040 | ||
1035 | if (dst) { | 1041 | if (dst) { |
@@ -1193,7 +1199,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, | |||
1193 | /* Don't use the nagle rule for urgent data (or for the final FIN). | 1199 | /* Don't use the nagle rule for urgent data (or for the final FIN). |
1194 | * Nagle can be ignored during F-RTO too (see RFC4138). | 1200 | * Nagle can be ignored during F-RTO too (see RFC4138). |
1195 | */ | 1201 | */ |
1196 | if (tp->urg_mode || (tp->frto_counter == 2) || | 1202 | if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || |
1197 | (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) | 1203 | (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) |
1198 | return 1; | 1204 | return 1; |
1199 | 1205 | ||
@@ -2358,6 +2364,7 @@ static void tcp_connect_init(struct sock *sk) | |||
2358 | tcp_init_wl(tp, tp->write_seq, 0); | 2364 | tcp_init_wl(tp, tp->write_seq, 0); |
2359 | tp->snd_una = tp->write_seq; | 2365 | tp->snd_una = tp->write_seq; |
2360 | tp->snd_sml = tp->write_seq; | 2366 | tp->snd_sml = tp->write_seq; |
2367 | tp->snd_up = tp->write_seq; | ||
2361 | tp->rcv_nxt = 0; | 2368 | tp->rcv_nxt = 0; |
2362 | tp->rcv_wup = 0; | 2369 | tp->rcv_wup = 0; |
2363 | tp->copied_seq = 0; | 2370 | tp->copied_seq = 0; |
@@ -2567,8 +2574,7 @@ int tcp_write_wakeup(struct sock *sk) | |||
2567 | tcp_event_new_data_sent(sk, skb); | 2574 | tcp_event_new_data_sent(sk, skb); |
2568 | return err; | 2575 | return err; |
2569 | } else { | 2576 | } else { |
2570 | if (tp->urg_mode && | 2577 | if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) |
2571 | between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) | ||
2572 | tcp_xmit_probe_skb(sk, 1); | 2578 | tcp_xmit_probe_skb(sk, 1); |
2573 | return tcp_xmit_probe_skb(sk, 0); | 2579 | return tcp_xmit_probe_skb(sk, 0); |
2574 | } | 2580 | } |