| author | Stephen Hemminger <shemminger@osdl.org> | 2005-05-03 19:24:32 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2005-05-03 19:24:32 -0400 |
| commit | 771018e76aaa6474be20a53c20458bcae8b00485 (patch) | |
| tree | acbd1186524815205337dd57322a4926cadc3554 /net/sched/sch_netem.c | |
| parent | 8cbe1d46d69f9e2c49f284fe0e9aee3387bd2c71 (diff) | |
[PKT_SCHED]: netem: make qdisc friendly to outer disciplines
Netem currently dumps packets into the queue when the timer expires. This
patch makes it work by self-clocking (more like TBF). It also fixes a bug
when a delay of 0 is requested (only doing loss or duplication).
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
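
For context, the zero-delay configuration the commit message refers to is a netem qdisc doing only loss or duplication, with no delay parameter at all. A minimal reproduction, assuming a hypothetical eth0 device (the percentages are arbitrary):

```
# netem with no delay parameter: only random loss and duplication.
# Before this patch the timer-driven queue mishandled this case; with
# self-clocking, such packets pass straight through on enqueue/dequeue.
tc qdisc add dev eth0 root netem loss 1% duplicate 1%
```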
Diffstat (limited to 'net/sched/sch_netem.c')
| -rw-r--r-- | net/sched/sch_netem.c | 113 |
1 file changed, 67 insertions, 46 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 31c29deb139d..864b8d353ffa 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -138,38 +138,78 @@ static long tabledist(unsigned long mu, long sigma,
 }
 
 /* Put skb in the private delayed queue. */
-static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
+static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
 	psched_tdiff_t td;
 	psched_time_t now;
 
 	PSCHED_GET_TIME(now);
 	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-	PSCHED_TADD2(now, td, cb->time_to_send);
 
 	/* Always queue at tail to keep packets in order */
 	if (likely(q->delayed.qlen < q->limit)) {
+		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+
+		PSCHED_TADD2(now, td, cb->time_to_send);
+
+		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
+			 now, cb->time_to_send);
+
 		__skb_queue_tail(&q->delayed, skb);
-		if (!timer_pending(&q->timer)) {
-			q->timer.expires = jiffies + PSCHED_US2JIFFIE(td);
-			add_timer(&q->timer);
-		}
 		return NET_XMIT_SUCCESS;
 	}
 
+	pr_debug("netem_delay: queue over limit %d\n", q->limit);
+	sch->qstats.overlimits++;
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
 
+/*
+ * Move a packet that is ready to send from the delay holding
+ * list to the underlying qdisc.
+ */
+static int netem_run(struct Qdisc *sch)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	psched_time_t now;
+
+	PSCHED_GET_TIME(now);
+
+	skb = skb_peek(&q->delayed);
+	if (skb) {
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		long delay
+			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+
+		/* if more time remaining? */
+		if (delay > 0) {
+			mod_timer(&q->timer, jiffies + delay);
+			return 1;
+		}
+
+		__skb_unlink(skb, &q->delayed);
+
+		if (q->qdisc->enqueue(skb, q->qdisc)) {
+			sch->q.qlen--;
+			sch->qstats.drops++;
+		}
+	}
+
+	return 0;
+}
+
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb2;
 	int ret;
 
-	pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies);
+	pr_debug("netem_enqueue skb=%p\n", skb);
 
 	/* Random packet drop 0 => none, ~0 => all */
 	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
@@ -184,7 +224,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	    && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
 		pr_debug("netem_enqueue: dup %p\n", skb2);
 
-		if (delay_skb(sch, skb2)) {
+		if (netem_delay(sch, skb2)) {
 			sch->q.qlen++;
 			sch->bstats.bytes += skb2->len;
 			sch->bstats.packets++;
@@ -202,7 +242,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
 		q->counter = 0;
-		ret = delay_skb(sch, skb);
+		ret = netem_delay(sch, skb);
+		netem_run(sch);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -241,56 +282,35 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	return len;
 }
 
-/* Dequeue packet.
- * Move all packets that are ready to send from the delay holding
- * list to the underlying qdisc, then just call dequeue
- */
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
+	int pending;
+
+	pending = netem_run(sch);
 
 	skb = q->qdisc->dequeue(q->qdisc);
-	if (skb)
+	if (skb) {
+		pr_debug("netem_dequeue: return skb=%p\n", skb);
 		sch->q.qlen--;
+		sch->flags &= ~TCQ_F_THROTTLED;
+	}
+	else if (pending) {
+		pr_debug("netem_dequeue: throttling\n");
+		sch->flags |= TCQ_F_THROTTLED;
+	}
+
 	return skb;
 }
 
 static void netem_watchdog(unsigned long arg)
 {
 	struct Qdisc *sch = (struct Qdisc *)arg;
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct net_device *dev = sch->dev;
-	struct sk_buff *skb;
-	psched_time_t now;
 
-	pr_debug("netem_watchdog: fired @%lu\n", jiffies);
-
-	spin_lock_bh(&dev->queue_lock);
-	PSCHED_GET_TIME(now);
-
-	while ((skb = skb_peek(&q->delayed)) != NULL) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_watchdog: skb %p@%lu %ld\n",
-			 skb, jiffies, delay);
-
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			break;
-		}
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-	qdisc_run(dev);
-	spin_unlock_bh(&dev->queue_lock);
+	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
+	sch->flags &= ~TCQ_F_THROTTLED;
+	netif_schedule(sch->dev);
 }
 
 static void netem_reset(struct Qdisc *sch)
@@ -301,6 +321,7 @@ static void netem_reset(struct Qdisc *sch)
 	skb_queue_purge(&q->delayed);
 
 	sch->q.qlen = 0;
+	sch->flags &= ~TCQ_F_THROTTLED;
 	del_timer_sync(&q->timer);
 }
 
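
As a side note, the self-clocking pattern the patch adopts can be sketched in plain user-space C. This is an illustrative sketch only, not kernel code: every name in it (struct pkt, struct delay_queue, self_clocked_dequeue, and its fields) is invented for the example.

```c
#include <stdbool.h>
#include <stddef.h>

/* A packet stamped with an absolute departure time, playing the role
 * of netem's cb->time_to_send above. */
struct pkt {
	long time_to_send;
	struct pkt *next;
};

struct delay_queue {
	struct pkt *head;
	long timer_expires;	/* when the watchdog timer should fire */
	bool throttled;		/* analogue of TCQ_F_THROTTLED */
};

/* Dequeue in the self-clocked style of netem_run()/netem_dequeue():
 * peek at the head; if it is not yet due, arm the timer and mark the
 * queue throttled instead of having a watchdog busy-poll it.
 * Returns the ready packet, or NULL. */
struct pkt *self_clocked_dequeue(struct delay_queue *q, long now)
{
	struct pkt *p = q->head;

	if (p == NULL)
		return NULL;			/* nothing waiting */

	if (p->time_to_send > now) {
		q->timer_expires = p->time_to_send;
		q->throttled = true;		/* sleep until the timer fires */
		return NULL;
	}

	q->head = p->next;			/* due: hand it downstream */
	q->throttled = false;
	return p;
}
```

Under this scheme the watchdog's only remaining job, as in the new netem_watchdog() above, is to clear the throttled flag and kick the device so dequeue runs again.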