diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2013-11-19 14:12:08 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2014-02-19 15:33:40 -0500 |
commit | e92deb39f12594254a3dce0b9cac347eab9dd2ad (patch) | |
tree | 6bccf0d5d86baa319528c97f78a6afd2f3803a0e /litmus/sched_cedf.c | |
parent | c99faf7a7afc4ff013628fa4321e590d8bb1aecb (diff) |
PGM: Boost priority of producers, not consumers.
This patch boosts the priority of PGM producers while
they are sending tokens instead of boosting the priority
of consumers while they are waiting for tokens. This improves
schedulability analysis.
Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r-- | litmus/sched_cedf.c | 81 |
1 file changed, 54 insertions, 27 deletions
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 95b2dc3bea55..c39b8ee407c7 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -464,39 +464,62 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
464 | entry->linked->comm, entry->linked->pid); | 464 | entry->linked->comm, entry->linked->pid); |
465 | 465 | ||
466 | #ifdef CONFIG_SCHED_PGM | 466 | #ifdef CONFIG_SCHED_PGM |
467 | if (exists && is_pgm_waiting(entry->scheduled)) { | 467 | if (exists) { |
468 | if (!is_priority_boosted(entry->scheduled)) { | 468 | if (is_pgm_sending(entry->scheduled)) { |
469 | TRACE_TASK(entry->scheduled, "is waiting for PGM tokens.\n"); | 469 | if (!is_pgm_satisfied(entry->scheduled)) { |
470 | BUG_ON(is_pgm_satisfied(entry->scheduled)); | 470 | if (!is_priority_boosted(entry->scheduled)) { |
471 | 471 | TRACE_TASK(entry->scheduled, "is sending PGM tokens and needs boosting.\n"); | |
472 | /* Boost priority so we'll be scheduled immediately | 472 | BUG_ON(is_pgm_satisfied(entry->scheduled)); |
473 | when needed tokens arrive. */ | 473 | |
474 | tsk_rt(entry->scheduled)->priority_boosted = 1; | 474 | /* We are either sending tokens or waiting for tokens. |
475 | tsk_rt(entry->scheduled)->boost_start_time = litmus_clock(); | 475 | If waiting: Boost priority so we'll be scheduled |
476 | 476 | immediately when needed tokens arrive. | |
477 | if (unlikely(!blocks)) { | 477 | If sending: Boost priority so no one (specifically, our |
478 | /* Task has probably blocked on an inbound token socket, but | 478 | consumers) will preempt us while signalling the token |
479 | if not, re-evaluate scheduling decisions */ | 479 | transmission. |
480 | unlink(entry->scheduled); | 480 | */ |
481 | cedf_job_arrival(entry->scheduled); | 481 | tsk_rt(entry->scheduled)->priority_boosted = 1; |
482 | tsk_rt(entry->scheduled)->boost_start_time = litmus_clock(); | ||
483 | |||
484 | if (likely(!blocks)) { | ||
485 | unlink(entry->scheduled); | ||
486 | cedf_job_arrival(entry->scheduled); | ||
487 | } | ||
488 | } | ||
489 | } | ||
490 | else { /* sending is satisfied */ | ||
491 | tsk_rt(entry->scheduled)->ctrl_page->pgm_sending = 0; | ||
492 | tsk_rt(entry->scheduled)->ctrl_page->pgm_satisfied = 0; | ||
493 | |||
494 | if (is_priority_boosted(entry->scheduled)) { | ||
495 | TRACE_TASK(entry->scheduled, | ||
496 | "is done sending PGM tokens must relinquish boosting.\n"); | ||
497 | /* clear boosting */ | ||
498 | tsk_rt(entry->scheduled)->priority_boosted = 0; | ||
499 | if(likely(!blocks)) { | ||
500 | /* recheck priority */ | ||
501 | unlink(entry->scheduled); | ||
502 | cedf_job_arrival(entry->scheduled); | ||
503 | } | ||
504 | } | ||
482 | } | 505 | } |
483 | } | 506 | } |
484 | else if (is_pgm_satisfied(entry->scheduled)) { | 507 | #if 0 |
485 | TRACE_TASK(entry->scheduled, "is done waiting for PGM tokens.\n"); | 508 | else if(is_pgm_waiting(entry->scheduled)) { |
486 | BUG_ON(!is_priority_boosted(entry->scheduled)); | 509 | int shifted_release; |
487 | 510 | ||
488 | /* clear any boosting */ | 511 | TRACE_TASK(entry->scheduled, "is waiting for PGM tokens.\n"); |
489 | tsk_rt(entry->scheduled)->priority_boosted = 0; | 512 | /* release the next job if we have the tokens we need */ |
490 | setup_pgm_release(entry->scheduled); | 513 | shifted_release = setup_pgm_release(entry->scheduled); |
491 | 514 | ||
492 | if (likely(!blocks)) { | 515 | /* setup_pgm_release() can screw with our priority, |
493 | /* Task has probably called sched_yield(), so blocking is | 516 | so recheck it */ |
494 | unlikely. Re-evaluate scheduling decisions because we | 517 | if (shifted_release && likely(!blocks)) { |
495 | still want to run. */ | ||
496 | unlink(entry->scheduled); | 518 | unlink(entry->scheduled); |
497 | cedf_job_arrival(entry->scheduled); | 519 | cedf_job_arrival(entry->scheduled); |
498 | } | 520 | } |
499 | } | 521 | } |
522 | #endif | ||
500 | } | 523 | } |
501 | #endif | 524 | #endif |
502 | 525 | ||
@@ -641,6 +664,10 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
641 | release_at(task, now); | 664 | release_at(task, now); |
642 | sched_trace_task_release(task); | 665 | sched_trace_task_release(task); |
643 | } | 666 | } |
667 | if (is_pgm_waiting(task)) { | ||
668 | /* shift out release/deadline, if needed */ | ||
669 | setup_pgm_release(task); | ||
670 | } | ||
644 | cedf_job_arrival(task); | 671 | cedf_job_arrival(task); |
645 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 672 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
646 | } | 673 | } |