author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-19 03:06:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-19 03:06:57 -0400
commit		ad2a3f13b7258a5daaaeb8cff9f835aac468b71d (patch)
tree		ac9ea87be655aba0a372b6b4a25215547da0e291 /kernel/sched_rt.c
parent		7ea56616ba6b3d67a4892728182e38ae162ea3e7 (diff)
sched: rt-group: hierarchy-aware throttle
The bandwidth throttle code dequeues a group when it runs out of quota, and
re-queues it once the period rolls over and the quota gets refreshed.
Sadly it failed to take the hierarchy into consideration. Share more of the
enqueue/dequeue code with regular task operations.
Also, some operations like sched_setscheduler() can dequeue/enqueue tasks that
are in throttled runqueues; we should not inadvertently re-enqueue empty
runqueues, so check for that.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Daniel K. <dk@uw.no>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	59
1 file changed, 33 insertions(+), 26 deletions(-)
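The crux of the fix is the guard in __enqueue_rt_entity() in the diff below: a group entity must not be put back on its parent's queue while it is throttled, or while it has nothing runnable. Here is a minimal stand-alone sketch of that decision; the toy_rt_rq type and may_enqueue() helper are made-up names for illustration, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for struct rt_rq. */
	struct toy_rt_rq {
		bool throttled;    /* quota exhausted for the current period */
		int nr_running;    /* runnable entities on this queue */
	};

	/*
	 * Mirrors the new guard in __enqueue_rt_entity(): skip a group that
	 * is throttled, or that is empty because a throttled child left it
	 * with no other active members. A plain task has no group runqueue
	 * (NULL), so it always passes.
	 */
	static bool may_enqueue(const struct toy_rt_rq *group_rq)
	{
		return !(group_rq && (group_rq->throttled || !group_rq->nr_running));
	}

	int main(void)
	{
		struct toy_rt_rq throttled = { true, 1 };
		struct toy_rt_rq empty = { false, 0 };
		struct toy_rt_rq active = { false, 2 };

		printf("task:      %d\n", may_enqueue(NULL));        /* 1 */
		printf("throttled: %d\n", may_enqueue(&throttled));  /* 0 */
		printf("empty:     %d\n", may_enqueue(&empty));      /* 0 */
		printf("active:    %d\n", may_enqueue(&active));     /* 1 */
		return 0;
	}

The NULL short-circuit corresponds to the group_rq check in the patch: task entities have no group runqueue, so only group entities are subject to the throttle/empty test.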
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d573205d..837241568d76 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -449,13 +449,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if it's throttled, or when it's empty.
+	 * The latter is a consequence of the former when a child group
+	 * gets throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +470,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +486,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -492,7 +497,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
@@ -506,32 +530,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
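Both new wrappers lean on dequeue_rt_stack(), which must tear the hierarchy down from the top because an upper entry's priority depends on the entries below it. A stand-alone model of that two-pass walk follows; the toy_rt_se names are illustrative, and the explicit parent pointer stands in for the kernel's for_each_sched_rt_entity() iteration:

	#include <stdio.h>

	/* Toy scheduling entity: parent points towards the root of the
	 * group hierarchy; back is filled in on the way up. */
	struct toy_rt_se {
		const char *name;
		struct toy_rt_se *parent;
		struct toy_rt_se *back;
	};

	/*
	 * Mirrors dequeue_rt_stack(): first walk bottom-up recording back
	 * pointers, then dequeue from the topmost entity downwards.
	 */
	static void toy_dequeue_rt_stack(struct toy_rt_se *rt_se)
	{
		struct toy_rt_se *back = NULL;

		for (; rt_se; rt_se = rt_se->parent) {
			rt_se->back = back;
			back = rt_se;
		}

		for (rt_se = back; rt_se; rt_se = rt_se->back)
			printf("dequeue %s\n", rt_se->name);
	}

	int main(void)
	{
		struct toy_rt_se root = { "root", NULL, NULL };
		struct toy_rt_se group = { "group", &root, NULL };
		struct toy_rt_se task = { "task", &group, NULL };

		/* Prints root, then group, then task: top - down. */
		toy_dequeue_rt_stack(&task);
		return 0;
	}

enqueue_rt_entity() then rebuilds the stack bottom-up, while dequeue_rt_entity() re-enqueues only the non-empty group runqueues on the way back up; that asymmetry is what keeps throttled and empty groups off their parents' queues.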