aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_scalable.c
diff options
context:
space:
mode:
authorFlorian Westphal <fw@strlen.de>2016-11-21 08:18:37 -0500
committerDavid S. Miller <davem@davemloft.net>2016-11-21 13:20:17 -0500
commit85f7e7508a1d288b513493196ef406c6c06134e1 (patch)
tree46cb7e0ff05b6e059d2060fbf3ad28bbc11ddc1e /net/ipv4/tcp_scalable.c
parent2fcb58ab30deb63e49f238bf95d587740fab59c4 (diff)
tcp: add cwnd_undo functions to various tcp cc algorithms
congestion control algorithms that do not halve cwnd in their .ssthresh should provide a .cwnd_undo rather than rely on current fallback which assumes reno halving (and thus doubles the cwnd). All of these do 'something else' in their .ssthresh implementation, thus store the cwnd on loss and provide .undo_cwnd to restore it again. A followup patch will remove the fallback and all algorithms will need to provide a .cwnd_undo function. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_scalable.c')
-rw-r--r--  net/ipv4/tcp_scalable.c | 15 +++++++++++++++
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index bf5ea9e9bbc1..f2123075ce6e 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,6 +15,10 @@
 #define TCP_SCALABLE_AI_CNT	50U
 #define TCP_SCALABLE_MD_SCALE	3
 
+struct scalable {
+	u32 loss_cwnd;
+};
+
 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -32,12 +36,23 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static u32 tcp_scalable_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
+	struct scalable *ca = inet_csk_ca(sk);
+
+	ca->loss_cwnd = tp->snd_cwnd;
 
 	return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
 }
 
+static u32 tcp_scalable_cwnd_undo(struct sock *sk)
+{
+	const struct scalable *ca = inet_csk_ca(sk);
+
+	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
 static struct tcp_congestion_ops tcp_scalable __read_mostly = {
 	.ssthresh	= tcp_scalable_ssthresh,
+	.undo_cwnd	= tcp_scalable_cwnd_undo,
 	.cong_avoid	= tcp_scalable_cong_avoid,
 
 	.owner		= THIS_MODULE,