path: root/include/net/tcp.h
author	Eric Dumazet <edumazet@google.com>	2014-05-03 00:18:05 -0400
committer	David S. Miller <davem@davemloft.net>	2014-05-03 19:23:07 -0400
commit	249015515fe3fc9818d86cb5c83bbc92505ad7dc (patch)
tree	98db6ccb0d19abccf3c0745551ee1bc5a8076fb3 /include/net/tcp.h
parent	e114a710aa5058c0ba4aa1dfb105132aefeb5e04 (diff)
tcp: remove in_flight parameter from cong_avoid() methods
Commit e114a710aa505 ("tcp: fix cwnd limited checking to improve congestion control") obsoleted the in_flight parameter from tcp_is_cwnd_limited() and its callers. This patch does the removal as promised.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
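In concrete terms, every congestion control module's cong_avoid hook loses its in_flight argument, and the cwnd-limited check is made by passing only the socket to tcp_is_cwnd_limited(). Below is a minimal, illustrative sketch of a Reno-style module adapted to the new prototype; the names example_cong_avoid, example_cong_ops and "example" are hypothetical, and the function body is a sketch, not code copied from the kernel tree:

#include <linux/module.h>
#include <net/tcp.h>

/* Hypothetical cong_avoid hook using the new three-argument signature.
 * The in_flight parameter is gone; tcp_is_cwnd_limited() now takes
 * only the socket.
 */
static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Grow cwnd only while the flow is actually cwnd-limited. */
	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);		/* slow start: exponential growth */
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);	/* congestion avoidance: additive increase */
}

static struct tcp_congestion_ops example_cong_ops __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= example_cong_avoid,
	.name		= "example",
	.owner		= THIS_MODULE,
};

Registration through tcp_register_congestion_control(&example_cong_ops) is unchanged by this patch; only the .cong_avoid prototype differs.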
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a9fe7bc4f4bb..3c9418456640 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -796,7 +796,7 @@ struct tcp_congestion_ops {
 	/* return slow start threshold (required) */
 	u32 (*ssthresh)(struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -828,7 +828,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 u32 tcp_reno_ssthresh(struct sock *sk);
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -986,10 +986,8 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
  * risks 100% overshoot. The advantage is that we discourage application to
  * either send more filler packets or data to artificially blow up the cwnd
  * usage, and allow application-limited process to probe bw more aggressively.
- *
- * TODO: remove in_flight once we can fix all callers, and their callers...
  */
-static inline bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 