author     Michal Kazior <michal.kazior@tieto.com>   2016-04-22 08:15:58 -0400
committer  David S. Miller <davem@davemloft.net>     2016-04-25 16:44:27 -0400
commit     79bdc4c862af7cf11a135a6fdf8093622043c862
tree       0ef5bf77549832f368878079900f597a3ee76fa4
parent     e425974feaa545575135f04e646f0495439b4c54
codel: generalize the implementation
This strips out qdisc-specific bits from the code
and makes it slightly more reusable. Codel will be
used by wireless/mac80211 in the future.
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   include/net/codel.h       | 64
-rw-r--r--   net/sched/sch_codel.c     | 20
-rw-r--r--   net/sched/sch_fq_codel.c  | 19
3 files changed, 71 insertions(+), 32 deletions(-)
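
The commit message above describes the goal; the diff below makes codel_dequeue() take a void *ctx plus a set of callbacks instead of a struct Qdisc *. As a purely illustrative sketch (not part of this patch), a hypothetical non-qdisc consumer could wire the generalized helper up as follows. Every my_* name is invented for illustration; only the codel_* identifiers come from the patched include/net/codel.h.

/*
 * Illustrative only -- not part of this patch.  A hypothetical non-qdisc
 * consumer ("my_txq") driving the generalized codel_dequeue().  The
 * codel_* calls match the new signatures in include/net/codel.h; the
 * my_* names and the surrounding queue are made up.
 */
#include <linux/skbuff.h>
#include <net/codel.h>

struct my_txq {
	struct sk_buff_head queue;
	u32 backlog;			/* bytes queued; caller-maintained */
	struct codel_params cparams;
	struct codel_vars cvars;
	struct codel_stats cstats;
};

static u32 my_skb_len(const struct sk_buff *skb)
{
	return skb->len;
}

static codel_time_t my_skb_time(const struct sk_buff *skb)
{
	/* skbs must have been stamped with codel_set_enqueue_time() */
	return codel_get_enqueue_time(skb);
}

static void my_drop(struct sk_buff *skb, void *ctx)
{
	/* backlog was already reduced when the skb was dequeued */
	kfree_skb(skb);
}

static struct sk_buff *my_dequeue(struct codel_vars *vars, void *ctx)
{
	struct my_txq *txq = ctx;
	struct sk_buff *skb = __skb_dequeue(&txq->queue);

	if (skb)
		txq->backlog -= skb->len;
	return skb;
}

static void my_txq_init(struct my_txq *txq, u32 mtu)
{
	__skb_queue_head_init(&txq->queue);
	txq->backlog = 0;
	codel_params_init(&txq->cparams);
	codel_vars_init(&txq->cvars);
	codel_stats_init(&txq->cstats);
	txq->cparams.mtu = mtu;		/* mtu is now the caller's job */
}

static struct sk_buff *my_txq_dequeue(struct my_txq *txq)
{
	return codel_dequeue(txq, &txq->backlog,
			     &txq->cparams, &txq->cvars, &txq->cstats,
			     my_skb_len, my_skb_time,
			     my_drop, my_dequeue);
}

In this model the caller owns the byte backlog counter and sets cparams.mtu itself, since codel_params_init() no longer takes a Qdisc; this is exactly the change the two schedulers in the diff below adapt to.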
diff --git a/include/net/codel.h b/include/net/codel.h
index d168aca115cc..06ac687b4909 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -176,12 +176,10 @@ struct codel_stats {
 
 #define CODEL_DISABLED_THRESHOLD INT_MAX
 
-static void codel_params_init(struct codel_params *params,
-			      const struct Qdisc *sch)
+static void codel_params_init(struct codel_params *params)
 {
 	params->interval = MS2TIME(100);
 	params->target = MS2TIME(5);
-	params->mtu = psched_mtu(qdisc_dev(sch));
 	params->ce_threshold = CODEL_DISABLED_THRESHOLD;
 	params->ecn = false;
 }
@@ -226,28 +224,38 @@ static codel_time_t codel_control_law(codel_time_t t,
 	return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
 }
 
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
+typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+						void *ctx);
+
 static bool codel_should_drop(const struct sk_buff *skb,
-			      struct Qdisc *sch,
+			      void *ctx,
 			      struct codel_vars *vars,
 			      struct codel_params *params,
 			      struct codel_stats *stats,
+			      codel_skb_len_t skb_len_func,
+			      codel_skb_time_t skb_time_func,
+			      u32 *backlog,
 			      codel_time_t now)
 {
 	bool ok_to_drop;
+	u32 skb_len;
 
 	if (!skb) {
 		vars->first_above_time = 0;
 		return false;
 	}
 
-	vars->ldelay = now - codel_get_enqueue_time(skb);
-	sch->qstats.backlog -= qdisc_pkt_len(skb);
+	skb_len = skb_len_func(skb);
+	vars->ldelay = now - skb_time_func(skb);
 
-	if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
-		stats->maxpacket = qdisc_pkt_len(skb);
+	if (unlikely(skb_len > stats->maxpacket))
+		stats->maxpacket = skb_len;
 
 	if (codel_time_before(vars->ldelay, params->target) ||
-	    sch->qstats.backlog <= params->mtu) {
+	    *backlog <= params->mtu) {
 		/* went below - stay below for at least interval */
 		vars->first_above_time = 0;
 		return false;
@@ -264,16 +272,17 @@ static bool codel_should_drop(const struct sk_buff *skb,
 	return ok_to_drop;
 }
 
-typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
-						struct Qdisc *sch);
-
-static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+static struct sk_buff *codel_dequeue(void *ctx,
+				     u32 *backlog,
 				     struct codel_params *params,
 				     struct codel_vars *vars,
 				     struct codel_stats *stats,
+				     codel_skb_len_t skb_len_func,
+				     codel_skb_time_t skb_time_func,
+				     codel_skb_drop_t drop_func,
 				     codel_skb_dequeue_t dequeue_func)
 {
-	struct sk_buff *skb = dequeue_func(vars, sch);
+	struct sk_buff *skb = dequeue_func(vars, ctx);
 	codel_time_t now;
 	bool drop;
 
@@ -282,7 +291,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
 		return skb;
 	}
 	now = codel_get_time();
-	drop = codel_should_drop(skb, sch, vars, params, stats, now);
+	drop = codel_should_drop(skb, ctx, vars, params, stats,
+				 skb_len_func, skb_time_func, backlog, now);
 	if (vars->dropping) {
 		if (!drop) {
 			/* sojourn time below target - leave dropping state */
@@ -310,12 +320,15 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
 					    vars->rec_inv_sqrt);
 			goto end;
 		}
-		stats->drop_len += qdisc_pkt_len(skb);
-		qdisc_drop(skb, sch);
+		stats->drop_len += skb_len_func(skb);
+		drop_func(skb, ctx);
 		stats->drop_count++;
-		skb = dequeue_func(vars, sch);
-		if (!codel_should_drop(skb, sch,
-				       vars, params, stats, now)) {
+		skb = dequeue_func(vars, ctx);
+		if (!codel_should_drop(skb, ctx,
+				       vars, params, stats,
+				       skb_len_func,
+				       skb_time_func,
+				       backlog, now)) {
 			/* leave dropping state */
 			vars->dropping = false;
 		} else {
@@ -333,13 +346,14 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
 		if (params->ecn && INET_ECN_set_ce(skb)) {
 			stats->ecn_mark++;
 		} else {
-			stats->drop_len += qdisc_pkt_len(skb);
-			qdisc_drop(skb, sch);
+			stats->drop_len += skb_len_func(skb);
+			drop_func(skb, ctx);
 			stats->drop_count++;
 
-			skb = dequeue_func(vars, sch);
-			drop = codel_should_drop(skb, sch, vars, params,
-						 stats, now);
+			skb = dequeue_func(vars, ctx);
+			drop = codel_should_drop(skb, ctx, vars, params,
+						 stats, skb_len_func,
+						 skb_time_func, backlog, now);
 		}
 		vars->dropping = true;
 		/* if min went above target close to when we last went below it
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 9b7e2980ee5c..512a94abe351 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -64,20 +64,33 @@ struct codel_sched_data {
  * to dequeue a packet from queue. Note: backlog is handled in
  * codel, we dont need to reduce it here.
  */
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 {
+	struct Qdisc *sch = ctx;
 	struct sk_buff *skb = __skb_dequeue(&sch->q);
 
+	if (skb)
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
+
 	prefetch(&skb->end); /* we'll need skb_shinfo() */
 	return skb;
 }
 
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+	struct Qdisc *sch = ctx;
+
+	qdisc_drop(skb, sch);
+}
+
 static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
+			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
+			    drop_func, dequeue_func);
 
 	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
 	 * or HTB crashes. Defer it for next round.
@@ -173,9 +186,10 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
 
 	sch->limit = DEFAULT_CODEL_LIMIT;
 
-	codel_params_init(&q->params, sch);
+	codel_params_init(&q->params);
 	codel_vars_init(&q->vars);
 	codel_stats_init(&q->stats);
+	q->params.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
 		int err = codel_change(sch, opt);
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d3fc8f9dd3d4..dcf7266e6901 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -220,8 +220,9 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  * to dequeue a packet from queue. Note: backlog is handled in
  * codel, we dont need to reduce it here.
  */
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 {
+	struct Qdisc *sch = ctx;
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct fq_codel_flow *flow;
 	struct sk_buff *skb = NULL;
@@ -231,10 +232,18 @@ static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
 		skb = dequeue_head(flow);
 		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
 		sch->q.qlen--;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 	}
 	return skb;
 }
 
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+	struct Qdisc *sch = ctx;
+
+	qdisc_drop(skb, sch);
+}
+
 static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -263,8 +272,9 @@ begin:
 	prev_ecn_mark = q->cstats.ecn_mark;
 	prev_backlog = sch->qstats.backlog;
 
-	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
-			    dequeue);
+	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
+			    &flow->cvars, &q->cstats, qdisc_pkt_len,
+			    codel_get_enqueue_time, drop_func, dequeue_func);
 
 	flow->dropped += q->cstats.drop_count - prev_drop_count;
 	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
@@ -423,9 +433,10 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 	q->perturbation = prandom_u32();
 	INIT_LIST_HEAD(&q->new_flows);
 	INIT_LIST_HEAD(&q->old_flows);
-	codel_params_init(&q->cparams, sch);
+	codel_params_init(&q->cparams);
 	codel_stats_init(&q->cstats);
 	q->cparams.ecn = true;
+	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
 		int err = fq_codel_change(sch, opt);