author		Eric Dumazet <eric.dumazet@gmail.com>	2012-01-04 21:25:16 -0500
committer	David S. Miller <davem@davemloft.net>	2012-01-05 14:01:21 -0500
commit		eeca6688d6599c28bc449a45facb67d7f203be74 (patch)
tree		5cabbf24a3c1ee2d7757c873ba6449296a8ef7b7 /net
parent		18cb809850fb499ad9bf288696a95f4071f73931 (diff)
net_sched: red: split red_parms into parms and vars
This patch splits the red_parms structure into two components: one holding
the RED 'constant' parameters, and one containing the variables.
This permits a size reduction of the GRED qdisc, and is a preliminary step
toward adding an optional RED unit to SFQ.
SFQRED will have a single red_parms structure shared by all flows, and a
private red_vars per flow.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Dave Taht <dave.taht@gmail.com>
CC: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
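
For orientation, a minimal sketch of the resulting split, reconstructed from the fields the hunks below actually touch (the authoritative definitions live in include/net/red.h; exact types and the elided fields are assumptions):

/* Read-mostly configuration, written once by red_set_parms(). */
struct red_parms {
	u32	qth_min;	/* min threshold, scaled by Wlog */
	u32	qth_max;	/* max threshold, scaled by Wlog */
	/* ... Wlog, Plog, Scell_log, Stab[], max_P, ... */
};

/* Mutable per-queue state, (re)initialized by red_set_vars(). */
struct red_vars {
	int		qcount;		/* packets since the last random draw */
	u32		qR;		/* cached random number */
	unsigned long	qavg;		/* EWMA of queue length, Wlog scaled */
	ktime_t		qidlestart;	/* start of the current idle period */
};

Helpers that only read the configuration keep taking red_parms (now const where possible), while the stateful helpers (red_calc_qavg(), red_action(), red_is_idling() and friends) gain or switch to a red_vars argument.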
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_choke.c	40
-rw-r--r--	net/sched/sch_gred.c	45
-rw-r--r--	net/sched/sch_red.c	29
3 files changed, 62 insertions(+), 52 deletions(-)
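
The SFQRED layout the changelog anticipates would then look roughly like the sketch below; the names sfqred_flow and sfqred_sched_data are hypothetical, not code from this series:

/* One shared, read-only parameter block; one small state block per flow. */
struct sfqred_flow {
	struct red_vars vars;		/* private EWMA state, a few words */
	/* ... per-flow queue bookkeeping ... */
};

struct sfqred_sched_data {
	struct red_parms parms;		/* single copy shared by all flows */
	struct sfqred_flow *flows;	/* array of active flows */
	/* ... */
};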
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index bef00acb8bd2..e465064d39a3 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -57,6 +57,7 @@ struct choke_sched_data {
 	struct red_parms parms;
 
 	/* Variables */
+	struct red_vars vars;
 	struct tcf_proto *filter_list;
 	struct {
 		u32	prob_drop;	/* Early probability drops */
@@ -265,7 +266,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
-	struct red_parms *p = &q->parms;
+	const struct red_parms *p = &q->parms;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
 	if (q->filter_list) {
@@ -276,13 +277,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	choke_skb_cb(skb)->keys_valid = 0;
 	/* Compute average queue usage (see RED) */
-	p->qavg = red_calc_qavg(p, sch->q.qlen);
-	if (red_is_idling(p))
-		red_end_of_idle_period(p);
+	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	/* Is queue small? */
-	if (p->qavg <= p->qth_min)
-		p->qcount = -1;
+	if (q->vars.qavg <= p->qth_min)
+		q->vars.qcount = -1;
 	else {
 		unsigned int idx;
 
@@ -294,8 +295,8 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	/* Queue is large, always mark/drop */
-	if (p->qavg > p->qth_max) {
-		p->qcount = -1;
+	if (q->vars.qavg > p->qth_max) {
+		q->vars.qcount = -1;
 
 		sch->qstats.overlimits++;
 		if (use_harddrop(q) || !use_ecn(q) ||
@@ -305,10 +306,10 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 
 		q->stats.forced_mark++;
-	} else if (++p->qcount) {
-		if (red_mark_probability(p, p->qavg)) {
-			p->qcount = 0;
-			p->qR = red_random(p);
+	} else if (++q->vars.qcount) {
+		if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
+			q->vars.qcount = 0;
+			q->vars.qR = red_random(p);
 
 			sch->qstats.overlimits++;
 			if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
@@ -319,7 +320,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			q->stats.prob_mark++;
 		}
 	} else
-		p->qR = red_random(p);
+		q->vars.qR = red_random(p);
 	}
 
 	/* Admit new packet */
@@ -353,8 +354,8 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 
 	if (q->head == q->tail) {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 		return NULL;
 	}
 
@@ -377,8 +378,8 @@ static unsigned int choke_drop(struct Qdisc *sch)
 	if (len > 0)
 		q->stats.other++;
 	else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 
 	return len;
@@ -388,7 +389,7 @@ static void choke_reset(struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
@@ -482,9 +483,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 		      ctl->Plog, ctl->Scell_log,
 		      nla_data(tb[TCA_CHOKE_STAB]),
 		      max_P);
+	red_set_vars(&q->vars);
 
 	if (q->head == q->tail)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	choke_free(old);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 53204de71c39..0b15236be7b6 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -41,6 +41,7 @@ struct gred_sched_data {
 	u8		prio;		/* the prio of this vq */
 
 	struct red_parms parms;
+	struct red_vars vars;
 	struct red_stats stats;
 };
 
@@ -55,7 +56,7 @@ struct gred_sched {
 	u32		red_flags;
 	u32		DPs;
 	u32		def;
-	struct red_parms wred_set;
+	struct red_vars wred_set;
 };
 
 static inline int gred_wred_mode(struct gred_sched *table)
@@ -125,17 +126,17 @@ static inline u16 tc_index_to_dp(struct sk_buff *skb)
 	return skb->tc_index & GRED_VQ_MASK;
 }
 
-static inline void gred_load_wred_set(struct gred_sched *table,
+static inline void gred_load_wred_set(const struct gred_sched *table,
 				      struct gred_sched_data *q)
 {
-	q->parms.qavg = table->wred_set.qavg;
-	q->parms.qidlestart = table->wred_set.qidlestart;
+	q->vars.qavg = table->wred_set.qavg;
+	q->vars.qidlestart = table->wred_set.qidlestart;
 }
 
 static inline void gred_store_wred_set(struct gred_sched *table,
 				       struct gred_sched_data *q)
 {
-	table->wred_set.qavg = q->parms.qavg;
+	table->wred_set.qavg = q->vars.qavg;
 }
 
 static inline int gred_use_ecn(struct gred_sched *t)
@@ -170,7 +171,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			goto drop;
 		}
 
-		/* fix tc_index? --could be controvesial but needed for
+		/* fix tc_index? --could be controversial but needed for
 		   requeueing */
 		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
@@ -181,8 +182,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		for (i = 0; i < t->DPs; i++) {
 			if (t->tab[i] && t->tab[i]->prio < q->prio &&
-			    !red_is_idling(&t->tab[i]->parms))
-				qavg += t->tab[i]->parms.qavg;
+			    !red_is_idling(&t->tab[i]->vars))
+				qavg += t->tab[i]->vars.qavg;
 		}
 
 	}
@@ -193,15 +194,17 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
 
-	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
+	q->vars.qavg = red_calc_qavg(&q->parms,
+				     &q->vars,
+				     gred_backlog(t, q, sch));
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	if (gred_wred_mode(t))
 		gred_store_wred_set(t, q);
 
-	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
 	case RED_DONT_MARK:
 		break;
 
@@ -260,7 +263,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 			q->backlog -= qdisc_pkt_len(skb);
 
 			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->parms);
+				red_start_of_idle_period(&q->vars);
 		}
 
 		return skb;
@@ -293,7 +296,7 @@ static unsigned int gred_drop(struct Qdisc *sch)
 			q->stats.other++;
 
 			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->parms);
+				red_start_of_idle_period(&q->vars);
 		}
 
 		qdisc_drop(skb, sch);
@@ -320,7 +323,7 @@ static void gred_reset(struct Qdisc *sch)
 		if (!q)
 			continue;
 
-		red_restart(&q->parms);
+		red_restart(&q->vars);
 		q->backlog = 0;
 	}
 }
@@ -398,12 +401,12 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 	q->limit = ctl->limit;
 
 	if (q->backlog == 0)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	red_set_parms(&q->parms,
 		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
 		      ctl->Scell_log, stab, max_P);
-
+	red_set_vars(&q->vars);
 	return 0;
 }
 
@@ -563,12 +566,12 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.bytesin = q->bytesin;
 
 		if (gred_wred_mode(table)) {
-			q->parms.qidlestart =
-				table->tab[table->def]->parms.qidlestart;
-			q->parms.qavg = table->tab[table->def]->parms.qavg;
+			q->vars.qidlestart =
+				table->tab[table->def]->vars.qidlestart;
+			q->vars.qavg = table->tab[table->def]->vars.qavg;
 		}
 
-		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
 
 append_opt:
 		if (nla_append(skb, sizeof(opt), &opt) < 0)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index ce2256a17d7e..a5cc3012cf42 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -41,6 +41,7 @@ struct red_sched_data {
 	unsigned char		flags;
 	struct timer_list	adapt_timer;
 	struct red_parms	parms;
+	struct red_vars		vars;
 	struct red_stats	stats;
 	struct Qdisc		*qdisc;
 };
@@ -61,12 +62,14 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	struct Qdisc *child = q->qdisc;
 	int ret;
 
-	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
+	q->vars.qavg = red_calc_qavg(&q->parms,
+				     &q->vars,
+				     child->qstats.backlog);
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
-	switch (red_action(&q->parms, q->parms.qavg)) {
+	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
 	case RED_DONT_MARK:
 		break;
 
@@ -117,8 +120,8 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
 		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
 	} else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 	return skb;
 }
@@ -144,8 +147,8 @@ static unsigned int red_drop(struct Qdisc *sch)
 		return len;
 	}
 
-	if (!red_is_idling(&q->parms))
-		red_start_of_idle_period(&q->parms);
+	if (!red_is_idling(&q->vars))
+		red_start_of_idle_period(&q->vars);
 
 	return 0;
 }
@@ -156,7 +159,7 @@ static void red_reset(struct Qdisc *sch)
 
 	qdisc_reset(q->qdisc);
 	sch->q.qlen = 0;
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static void red_destroy(struct Qdisc *sch)
@@ -212,17 +215,19 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
 		q->qdisc = child;
 	}
 
-	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+	red_set_parms(&q->parms,
+		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
 		      ctl->Plog, ctl->Scell_log,
 		      nla_data(tb[TCA_RED_STAB]),
 		      max_P);
+	red_set_vars(&q->vars);
 
 	del_timer(&q->adapt_timer);
 	if (ctl->flags & TC_RED_ADAPTATIVE)
 		mod_timer(&q->adapt_timer, jiffies + HZ/2);
 
 	if (!q->qdisc->q.qlen)
-		red_start_of_idle_period(&q->parms);
+		red_start_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	return 0;
@@ -235,7 +240,7 @@ static inline void red_adaptative_timer(unsigned long arg)
 	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 
 	spin_lock(root_lock);
-	red_adaptative_algo(&q->parms);
+	red_adaptative_algo(&q->parms, &q->vars);
 	mod_timer(&q->adapt_timer, jiffies + HZ/2);
 	spin_unlock(root_lock);
 }
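
Taken together, the calling-convention change repeated across all three qdiscs looks like this (backlog stands in for each qdisc's own queue-length argument):

/* Before: one structure carried both configuration and state. */
q->parms.qavg = red_calc_qavg(&q->parms, backlog);
switch (red_action(&q->parms, q->parms.qavg)) { /* ... */ }

/* After: const-able parms and mutable vars are passed separately. */
q->vars.qavg = red_calc_qavg(&q->parms, &q->vars, backlog);
switch (red_action(&q->parms, &q->vars, q->vars.qavg)) { /* ... */ }

/* Configuration paths now reset the state explicitly: */
red_set_parms(&q->parms, /* thresholds, Wlog, Plog, Scell_log, stab, max_P */);
red_set_vars(&q->vars);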
