author	Florian Westphal <fw@strlen.de>	2014-09-26 16:37:34 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-29 00:13:10 -0400
commit	7354c8c389d18719dd71cc810da70b0921d66694 (patch)
tree	776fa6ce824557d0c8bbf4271ad3b08a4832a66f /net
parent	30e502a34b8b21fae2c789da102bd9f6e99fef83 (diff)
net: tcp: split ack slow/fast events from cwnd_event
The congestion control ops "cwnd_event" currently supports CA_EVENT_FAST_ACK and
CA_EVENT_SLOW_ACK events (among others). Both FAST and SLOW_ACK are only used by
the Westwood congestion control algorithm.

This patch removes both flags from cwnd_event and adds a new in_ack_event
callback for this purpose. The goal is to be able to provide more detailed
information about ACKs, such as whether the ECE flag was set, or whether the
ACK resulted in a window update. This is required for the DataCenter TCP
(DCTCP) congestion control algorithm, which makes a different choice depending
on whether ECE is set or not.

Joint work with Daniel Borkmann and Glenn Judd.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Glenn Judd <glenn.judd@morganstanley.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
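To sketch what the new hook enables: tcp_ack() now reports a flags word for
every incoming ACK, passing 0 on the fast path and CA_ACK_SLOWPATH on the slow
path, and congestion control modules can observe this via the new in_ack_event
callback. The fragment below is a hypothetical illustration of the DCTCP use
case mentioned above, not part of this patch; in particular the CA_ACK_ECE flag
(and its value) and the dctcp_style_in_ack_event function are made-up names,
since this patch defines only CA_ACK_SLOWPATH.

/* Hypothetical flag -- not defined by this patch. */
#define CA_ACK_ECE	(1 << 1)

static void dctcp_style_in_ack_event(struct sock *sk, u32 flags)
{
	if (flags & CA_ACK_ECE) {
		/* The peer echoed ECN Congestion Experienced; a
		 * DCTCP-like algorithm would feed this ACK into its
		 * estimate of the fraction of marked traffic. */
	}

	if (flags & CA_ACK_SLOWPATH) {
		/* The ACK carried more than a plain snd_una advance
		 * (SACK blocks, window update, ECE, ...). */
	}
}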
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp_input.c	12
-rw-r--r--	net/ipv4/tcp_westwood.c	28
2 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fb0fe97e1c54..8a38774cc66e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3362,6 +3362,14 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 	}
 }
 
+static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
+{
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (icsk->icsk_ca_ops->in_ack_event)
+		icsk->icsk_ca_ops->in_ack_event(sk, flags);
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3421,7 +3429,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		tp->snd_una = ack;
 		flag |= FLAG_WIN_UPDATE;
 
-		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
+		tcp_in_ack_event(sk, 0);
 
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
@@ -3439,7 +3447,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
 			flag |= FLAG_ECE;
 
-		tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
+		tcp_in_ack_event(sk, CA_ACK_SLOWPATH);
 	}
 
 	/* We passed data and got it acked, remove any soft error
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 81911a92356c..bb63fba47d47 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -220,32 +220,35 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
 	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }
 
+static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
+{
+	if (ack_flags & CA_ACK_SLOWPATH) {
+		struct westwood *w = inet_csk_ca(sk);
+
+		westwood_update_window(sk);
+		w->bk += westwood_acked_count(sk);
+
+		update_rtt_min(w);
+		return;
+	}
+
+	westwood_fast_bw(sk);
+}
+
 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct westwood *w = inet_csk_ca(sk);
 
 	switch (event) {
-	case CA_EVENT_FAST_ACK:
-		westwood_fast_bw(sk);
-		break;
-
 	case CA_EVENT_COMPLETE_CWR:
 		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
-
 	case CA_EVENT_LOSS:
 		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		/* Update RTT_min when next ack arrives */
 		w->reset_rtt_min = 1;
 		break;
-
-	case CA_EVENT_SLOW_ACK:
-		westwood_update_window(sk);
-		w->bk += westwood_acked_count(sk);
-		update_rtt_min(w);
-		break;
-
 	default:
 		/* don't care */
 		break;
@@ -274,6 +277,7 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
 	.cwnd_event	= tcp_westwood_event,
+	.in_ack_event	= tcp_westwood_ack,
 	.get_info	= tcp_westwood_info,
 	.pkts_acked	= tcp_westwood_pkts_acked,
 
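For completeness, a minimal sketch of a standalone congestion control module
wired up against the post-patch API, mirroring the tcp_westwood hunks above.
All example_* names are invented for illustration; the mandatory ssthresh and
cong_avoid hooks fall back to the exported Reno helpers.

#include <linux/module.h>
#include <net/tcp.h>

/* React to per-ACK events via the new in_ack_event hook. */
static void example_in_ack_event(struct sock *sk, u32 flags)
{
	if (flags & CA_ACK_SLOWPATH) {
		/* The ACK went through the slow path in tcp_ack(). */
	} else {
		/* Fast-path ACK: a pure forward window advance. */
	}
}

static struct tcp_congestion_ops example_ca __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.in_ack_event	= example_in_ack_event,
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init example_init(void)
{
	return tcp_register_congestion_control(&example_ca);
}

static void __exit example_exit(void)
{
	tcp_unregister_congestion_control(&example_ca);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");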