path: root/net/ipv4/tcp_westwood.c
author    Arnaldo Carvalho de Melo <acme@mandriva.com>   2005-08-10 03:03:31 -0400
committer David S. Miller <davem@sunset.davemloft.net>   2005-08-29 18:56:18 -0400
commit    6687e988d9aeaccad6774e6a8304f681f3ec0a03
tree      ecd3d28f9989847aa1dcde4782de0210aeadc290   /net/ipv4/tcp_westwood.c
parent    64ce207306debd7157f47282be94770407bec01c
[ICSK]: Move TCP congestion avoidance members to icsk
This changeset basically moves tcp_sk()->{ca_ops,ca_state,etc} to
inet_csk(); minimal renaming/moving is done here to ease review. Most
of it is just changing struct tcp_sock * parameters to struct sock *.

With this we move closer to two interesting goals:

1. Generalising net/ipv4/tcp_diag.c into inet_diag.c, usable by any
   INET transport protocol that has a struct inet_hashinfo and is
   derived from struct inet_connection_sock. The userspace API is
   kept: older tools will simply not display DCCP sockets, while
   newer versions of the tools can support DCCP.

2. A generic, pluggable INET congestion-avoidance infrastructure,
   reusing the current TCP CA infrastructure with DCCP.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
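In short, every congestion-avoidance hook in the diff below now takes
struct sock * instead of struct tcp_sock *, fetches its private state
with inet_csk_ca(sk) rather than tcp_ca(tp), and reaches TCP-specific
fields through tcp_sk(sk) only where actually needed. A before/after
sketch, abbreviated from the tcp_westwood_init() hunks below:

    /* Before: CA private state lived in struct tcp_sock. */
    static void tcp_westwood_init(struct tcp_sock *tp)
    {
            struct westwood *w = tcp_ca(tp);

            w->snd_una = tp->snd_una;
    }

    /* After: state lives in struct inet_connection_sock, the hook
     * takes a generic struct sock, and TCP fields go through
     * tcp_sk(sk); this is what lets a non-TCP protocol such as DCCP
     * reuse the same hooks.
     */
    static void tcp_westwood_init(struct sock *sk)
    {
            struct westwood *w = inet_csk_ca(sk);

            w->snd_una = tcp_sk(sk)->snd_una;
    }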
Diffstat (limited to 'net/ipv4/tcp_westwood.c')
-rw-r--r--  net/ipv4/tcp_westwood.c | 58 +++++++++++++++++++++++++++++++---------------------------
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index ef827242c940..d8a5a2b92e37 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -40,9 +40,9 @@ struct westwood {
 * way as soon as possible. It will reasonably happen within the first
 * RTT period of the connection lifetime.
 */
-static void tcp_westwood_init(struct tcp_sock *tp)
+static void tcp_westwood_init(struct sock *sk)
 {
-        struct westwood *w = tcp_ca(tp);
+        struct westwood *w = inet_csk_ca(sk);
 
         w->bk = 0;
         w->bw_ns_est = 0;
@@ -51,7 +51,7 @@ static void tcp_westwood_init(struct tcp_sock *tp)
         w->cumul_ack = 0;
         w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
         w->rtt_win_sx = tcp_time_stamp;
-        w->snd_una = tp->snd_una;
+        w->snd_una = tcp_sk(sk)->snd_una;
 }
 
 /*
@@ -74,11 +74,11 @@ static inline void westwood_filter(struct westwood *w, u32 delta)
 * Called after processing group of packets.
 * but all westwood needs is the last sample of srtt.
 */
-static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt)
+static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
 {
-        struct westwood *w = tcp_ca(tp);
+        struct westwood *w = inet_csk_ca(sk);
         if (cnt > 0)
-                w->rtt = tp->srtt >> 3;
+                w->rtt = tcp_sk(sk)->srtt >> 3;
 }
 
84/* 84/*
@@ -86,9 +86,9 @@ static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt)
 * It updates RTT evaluation window if it is the right moment to do
 * it. If so it calls filter for evaluating bandwidth.
 */
-static void westwood_update_window(struct tcp_sock *tp)
+static void westwood_update_window(struct sock *sk)
 {
-        struct westwood *w = tcp_ca(tp);
+        struct westwood *w = inet_csk_ca(sk);
         s32 delta = tcp_time_stamp - w->rtt_win_sx;
 
         /*
@@ -114,11 +114,12 @@ static void westwood_update_window(struct tcp_sock *tp)
 * header prediction is successful. In such case in fact update is
 * straight forward and doesn't need any particular care.
 */
-static inline void westwood_fast_bw(struct tcp_sock *tp)
+static inline void westwood_fast_bw(struct sock *sk)
 {
-        struct westwood *w = tcp_ca(tp);
+        const struct tcp_sock *tp = tcp_sk(sk);
+        struct westwood *w = inet_csk_ca(sk);
 
-        westwood_update_window(tp);
+        westwood_update_window(sk);
 
         w->bk += tp->snd_una - w->snd_una;
         w->snd_una = tp->snd_una;
@@ -130,9 +131,10 @@ static inline void westwood_fast_bw(struct tcp_sock *tp)
 * This function evaluates cumul_ack for evaluating bk in case of
 * delayed or partial acks.
 */
-static inline u32 westwood_acked_count(struct tcp_sock *tp)
+static inline u32 westwood_acked_count(struct sock *sk)
 {
-        struct westwood *w = tcp_ca(tp);
+        const struct tcp_sock *tp = tcp_sk(sk);
+        struct westwood *w = inet_csk_ca(sk);
 
         w->cumul_ack = tp->snd_una - w->snd_una;
 
@@ -160,9 +162,10 @@ static inline u32 westwood_acked_count(struct tcp_sock *tp)
         return w->cumul_ack;
 }
 
-static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp)
+static inline u32 westwood_bw_rttmin(const struct sock *sk)
 {
-        struct westwood *w = tcp_ca(tp);
+        const struct tcp_sock *tp = tcp_sk(sk);
+        const struct westwood *w = inet_csk_ca(sk);
         return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }
 
@@ -172,31 +175,32 @@ static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp)
 * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
 * so avoids ever returning 0.
 */
-static u32 tcp_westwood_cwnd_min(struct tcp_sock *tp)
+static u32 tcp_westwood_cwnd_min(struct sock *sk)
 {
-        return westwood_bw_rttmin(tp);
+        return westwood_bw_rttmin(sk);
 }
 
-static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event)
+static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 {
-        struct westwood *w = tcp_ca(tp);
+        struct tcp_sock *tp = tcp_sk(sk);
+        struct westwood *w = inet_csk_ca(sk);
 
         switch(event) {
         case CA_EVENT_FAST_ACK:
-                westwood_fast_bw(tp);
+                westwood_fast_bw(sk);
                 break;
 
         case CA_EVENT_COMPLETE_CWR:
-                tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(tp);
+                tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
                 break;
 
         case CA_EVENT_FRTO:
-                tp->snd_ssthresh = westwood_bw_rttmin(tp);
+                tp->snd_ssthresh = westwood_bw_rttmin(sk);
                 break;
 
         case CA_EVENT_SLOW_ACK:
-                westwood_update_window(tp);
-                w->bk += westwood_acked_count(tp);
+                westwood_update_window(sk);
+                w->bk += westwood_acked_count(sk);
                 w->rtt_min = min(w->rtt, w->rtt_min);
                 break;
 
@@ -208,10 +212,10 @@ static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event)
 
 
 /* Extract info for Tcp socket info provided via netlink. */
-static void tcp_westwood_info(struct tcp_sock *tp, u32 ext,
+static void tcp_westwood_info(struct sock *sk, u32 ext,
                               struct sk_buff *skb)
 {
-        const struct westwood *ca = tcp_ca(tp);
+        const struct westwood *ca = inet_csk_ca(sk);
         if (ext & (1<<(TCPDIAG_VEGASINFO-1))) {
                 struct rtattr *rta;
                 struct tcpvegas_info *info;
@@ -242,7 +246,7 @@ static struct tcp_congestion_ops tcp_westwood = {
 
 static int __init tcp_westwood_register(void)
 {
-        BUG_ON(sizeof(struct westwood) > TCP_CA_PRIV_SIZE);
+        BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
         return tcp_register_congestion_control(&tcp_westwood);
 }
 
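For reference, these callbacks are bound through the tcp_congestion_ops
table near the bottom of the file; only its BUG_ON() neighbourhood
appears in the last hunk. A sketch of its shape under this API, with
the slots this file does not implement assumed to fall back to the
Reno helpers:

    static struct tcp_congestion_ops tcp_westwood = {
            .init           = tcp_westwood_init,
            .ssthresh       = tcp_reno_ssthresh,      /* assumed Reno default */
            .cong_avoid     = tcp_reno_cong_avoid,    /* assumed Reno default */
            .min_cwnd       = tcp_westwood_cwnd_min,
            .cwnd_event     = tcp_westwood_event,
            .get_info       = tcp_westwood_info,
            .pkts_acked     = tcp_westwood_pkts_acked,
            .owner          = THIS_MODULE,
            .name           = "westwood"
    };

After this changeset every one of these hooks receives struct sock * as
its first argument, so the same ops table layout can serve any protocol
derived from struct inet_connection_sock.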