path: root/net/ipv4/tcp_veno.c
author		Bin Zhou <zhou0022@ntu.edu.sg>	2006-06-05 20:28:30 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-06-18 00:29:23 -0400
commit		76f1017757aa0c308a0b83ca611c9a89ee9a79a4 (patch)
tree		2599325e5774525b5ea0088f1c64cbd2e1f40410 /net/ipv4/tcp_veno.c
parent		7c106d7e782bd4805f39da30e81018f861b4b8c5 (diff)
[TCP]: TCP Veno congestion control
TCP Veno is a new congestion control module to improve TCP performance over wireless networks. Its key innovation is to enhance the TCP Reno/SACK congestion control algorithm with an estimate of the connection's state derived from TCP Vegas. This scheme significantly reduces "blind" reduction of the TCP window regardless of the cause of packet loss.

This work is based on the research paper "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks", C. P. Fu and S. C. Liew, IEEE Journal on Selected Areas in Communications, Feb. 2003.

The original paper and more recent research work on Veno are collected at http://www.ntu.edu.sg/home/ascpfu/veno/veno.html

Signed-off-by: Bin Zhou <zhou0022@ntu.edu.sg>
               Cheng Peng Fu <ascpfu@ntu.edu.sg>
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
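In essence, Veno compares the smallest RTT seen in the last round with the smallest RTT ever observed on the connection to estimate how many packets the connection has backlogged in network queues. When a loss occurs while that backlog is small, the loss is treated as probably random (e.g. a wireless bit error) and the window is cut by only one fifth; a large backlog is treated as genuine congestion and the window is halved as in Reno. The fragment below is only an illustrative sketch of that decision (the helper name and the plain integer arithmetic are ours, not part of the patch); the authoritative code is the diff that follows.

#include <linux/types.h>

/* Sketch only: how Veno picks the multiplicative decrease at loss time.
 * cwnd is in packets, basertt/rtt in microseconds, as in the module below.
 */
static u32 veno_ssthresh_sketch(u32 cwnd, u32 basertt, u32 rtt)
{
	/* Vegas-style backlog estimate: packets sitting in queues. */
	u32 backlog = cwnd - (cwnd * basertt) / rtt;
	u32 newcwnd;

	if (backlog < 3)		/* small backlog: loss was probably random */
		newcwnd = cwnd * 4 / 5;	/* mild cut, reduce cwnd by 1/5 */
	else				/* large backlog: genuine congestion */
		newcwnd = cwnd / 2;	/* classic Reno halving */

	return newcwnd < 2 ? 2 : newcwnd;	/* never go below 2 packets */
}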
Diffstat (limited to 'net/ipv4/tcp_veno.c')
-rw-r--r--	net/ipv4/tcp_veno.c	238
1 file changed, 238 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
new file mode 100644
index 000000000000..1091671751c4
--- /dev/null
+++ b/net/ipv4/tcp_veno.c
@@ -0,0 +1,238 @@
/*
 * TCP Veno congestion control
 *
 * This is based on the congestion detection/avoidance scheme described in
 *    C. P. Fu, S. C. Liew.
 *    "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
 *    IEEE Journal on Selected Areas in Communication,
 *    Feb. 2003.
 *	See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

/* Default values of the Veno variables, in fixed-point representation
 * with V_PARAM_SHIFT bits to the right of the binary point.
 */
#define V_PARAM_SHIFT 1
static const int beta = 3 << V_PARAM_SHIFT;
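/* With V_PARAM_SHIFT == 1, beta is 6, i.e. 3 packets expressed in the
 * same fixed-point scale as "diff" below: a backlog of fewer than 3
 * packets suggests that a loss was probably not caused by congestion.
 */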

/* Veno variables */
struct veno {
	u8 doing_veno_now;	/* if true, do veno for this rtt */
	u16 cntrtt;		/* # of rtts measured within last rtt */
	u32 minrtt;		/* min of rtts measured within last rtt (in usec) */
	u32 basertt;		/* the min of all Veno rtt measurements seen (in usec) */
	u32 inc;		/* decide whether to increase cwnd */
	u32 diff;		/* calculate the diff rate */
};

/* There are several situations when we must "re-start" Veno:
 *
 *  o when a connection is established
 *  o after an RTO
 *  o after fast recovery
 *  o when we send a packet and there is no outstanding
 *    unacknowledged data (restarting an idle connection)
 *
 */
static inline void veno_enable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn on Veno */
	veno->doing_veno_now = 1;

	veno->minrtt = 0x7fffffff;
}

static inline void veno_disable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn off Veno */
	veno->doing_veno_now = 0;
}

static void tcp_veno_init(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	veno->basertt = 0x7fffffff;
	veno->inc = 1;
	veno_enable(sk);
}

/* Do rtt sampling needed for Veno. */
static void tcp_veno_rtt_calc(struct sock *sk, u32 usrtt)
{
	struct veno *veno = inet_csk_ca(sk);
	u32 vrtt = usrtt + 1;	/* Never allow zero rtt or basertt */

	/* Filter to find propagation delay: */
	if (vrtt < veno->basertt)
		veno->basertt = vrtt;

	/* Find the min rtt during the last rtt to find
	 * the current prop. delay + queuing delay:
	 */
	veno->minrtt = min(veno->minrtt, vrtt);
	veno->cntrtt++;
}

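/* Veno adjusts the window only while the connection is in the
 * TCP_CA_Open state; in any other state (disorder, loss recovery, RTO)
 * tcp_veno_cong_avoid() falls back to plain Reno behaviour.
 */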
static void tcp_veno_state(struct sock *sk, u8 ca_state)
{
	if (ca_state == TCP_CA_Open)
		veno_enable(sk);
	else
		veno_disable(sk);
}

/*
 * If the connection is idle and we are restarting,
 * then we don't want to do any Veno calculations
 * until we get fresh rtt samples. So when we
 * restart, we reset our Veno state to a clean
 * state. After we get acks for this flight of
 * packets, _then_ we can make Veno calculations
 * again.
 */
static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
		tcp_veno_init(sk);
}

static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
				u32 seq_rtt, u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (!veno->doing_veno_now)
		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);

	/* limited by applications */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* We do the Veno calculations only if we got enough rtt samples */
	if (veno->cntrtt <= 2) {
		/* We don't have enough rtt samples to do the Veno
		 * calculation, so we'll behave like Reno.
		 */
		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
	} else {
		u32 rtt, target_cwnd;

		/* We have enough rtt samples, so, using the Veno
		 * algorithm, we determine the state of the network.
		 */

		rtt = veno->minrtt;

		target_cwnd = ((tp->snd_cwnd * veno->basertt)
			       << V_PARAM_SHIFT) / rtt;

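		/* Vegas-style backlog estimate: snd_cwnd minus the window
		 * that would give the same throughput with no queueing
		 * (target_cwnd), in the V_PARAM_SHIFT fixed-point scale,
		 * like beta.
		 */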
		veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;

		if (tp->snd_cwnd <= tp->snd_ssthresh) {
			/* Slow start. */
			tcp_slow_start(tp);
		} else {
			/* Congestion avoidance. */
			if (veno->diff < beta) {
				/* In the "non-congestive state", increase cwnd
				 * every rtt.
				 */
				if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
					if (tp->snd_cwnd < tp->snd_cwnd_clamp)
						tp->snd_cwnd++;
					tp->snd_cwnd_cnt = 0;
				} else
					tp->snd_cwnd_cnt++;
			} else {
				/* In the "congestive state", increase cwnd
				 * every other rtt.
				 */
				if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
					if (veno->inc
					    && tp->snd_cwnd <
					    tp->snd_cwnd_clamp) {
						tp->snd_cwnd++;
						veno->inc = 0;
					} else
						veno->inc = 1;
					tp->snd_cwnd_cnt = 0;
				} else
					tp->snd_cwnd_cnt++;
			}

		}
		if (tp->snd_cwnd < 2)
			tp->snd_cwnd = 2;
		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
			tp->snd_cwnd = tp->snd_cwnd_clamp;
	}
	/* Wipe the slate clean for the next rtt. */
	/* veno->cntrtt = 0; */
	veno->minrtt = 0x7fffffff;
}

/* Veno MD phase */
static u32 tcp_veno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (veno->diff < beta)
		/* in "non-congestive state", cut cwnd by 1/5 */
		return max(tp->snd_cwnd * 4 / 5, 2U);
	else
		/* in "congestive state", cut cwnd by 1/2 */
		return max(tp->snd_cwnd >> 1U, 2U);
}

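/* Lower bound for snd_cwnd while the window is being brought down after
 * a loss event: never pull cwnd below the ssthresh chosen above, so the
 * milder Veno reduction for random loss is actually preserved.
 */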
static u32 tcp_veno_min_cwnd(struct sock * sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh;
}

static struct tcp_congestion_ops tcp_veno = {
	.init		= tcp_veno_init,
	.ssthresh	= tcp_veno_ssthresh,
	.cong_avoid	= tcp_veno_cong_avoid,
	.min_cwnd	= tcp_veno_min_cwnd,
	.rtt_sample	= tcp_veno_rtt_calc,
	.set_state	= tcp_veno_state,
	.cwnd_event	= tcp_veno_cwnd_event,

	.owner		= THIS_MODULE,
	.name		= "veno",
};

static int __init tcp_veno_register(void)
{
	BUG_ON(sizeof(struct veno) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_veno);
	return 0;
}

static void __exit tcp_veno_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_veno);
}

module_init(tcp_veno_register);
module_exit(tcp_veno_unregister);

MODULE_AUTHOR("Bin Zhou, Cheng Peng Fu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Veno");