diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2013-11-19 14:12:08 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2013-11-19 14:12:08 -0500 |
commit | b2b3e869e8d5fee88aabf001c09094b400450bac (patch) | |
tree | 90671f5c73a187873c3dba721df626653bde209e | |
parent | f5bf97f1f5345f7e8aef2b99ec0d57ddd081c1d2 (diff) |
PGM: Boost priority of producers, not consumers.
This patch boosts the priority of PGM producers while
they are sending tokens instead of boosting the priority
of consumers while they are waiting for tokens. This improves
schedulability analysis.
-rw-r--r-- | include/litmus/pgm.h | 3 | ||||
-rw-r--r-- | include/litmus/rt_param.h | 5 | ||||
-rw-r--r-- | litmus/pgm.c | 20 | ||||
-rw-r--r-- | litmus/sched_cedf.c | 81 |
4 files changed, 71 insertions, 38 deletions
diff --git a/include/litmus/pgm.h b/include/litmus/pgm.h index 1e87e170e8c3..5682a76b3acb 100644 --- a/include/litmus/pgm.h +++ b/include/litmus/pgm.h | |||
@@ -4,8 +4,9 @@ | |||
4 | #include <litmus/litmus.h> | 4 | #include <litmus/litmus.h> |
5 | 5 | ||
6 | #define is_pgm_waiting(t) (tsk_rt(t)->ctrl_page && tsk_rt(t)->ctrl_page->pgm_waiting) | 6 | #define is_pgm_waiting(t) (tsk_rt(t)->ctrl_page && tsk_rt(t)->ctrl_page->pgm_waiting) |
7 | #define is_pgm_sending(t) (tsk_rt(t)->ctrl_page && tsk_rt(t)->ctrl_page->pgm_sending) | ||
7 | #define is_pgm_satisfied(t) (tsk_rt(t)->ctrl_page && tsk_rt(t)->ctrl_page->pgm_satisfied) | 8 | #define is_pgm_satisfied(t) (tsk_rt(t)->ctrl_page && tsk_rt(t)->ctrl_page->pgm_satisfied) |
8 | 9 | ||
9 | void setup_pgm_release(struct task_struct* t); | 10 | int setup_pgm_release(struct task_struct* t); |
10 | 11 | ||
11 | #endif | 12 | #endif |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index bc074c63c7ad..fe4b31320ac8 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -122,8 +122,9 @@ struct control_page { | |||
122 | * started. */ | 122 | * started. */ |
123 | 123 | ||
124 | /* Flags from userspace signifying PGM wait states. */ | 124 | /* Flags from userspace signifying PGM wait states. */ |
125 | volatile uint32_t pgm_waiting; /* waiting for tokens */ | 125 | volatile uint32_t pgm_waiting; /* waiting for tokens */ |
126 | volatile uint32_t pgm_satisfied; /* needed tokens acquired */ | 126 | volatile uint32_t pgm_sending; /* sending tokens */ |
127 | volatile uint32_t pgm_satisfied; /* done waiting/sending */ | ||
127 | 128 | ||
128 | /* to be extended */ | 129 | /* to be extended */ |
129 | }; | 130 | }; |
diff --git a/litmus/pgm.c b/litmus/pgm.c index 0bc190851718..db3378ff803d 100644 --- a/litmus/pgm.c +++ b/litmus/pgm.c | |||
@@ -12,18 +12,17 @@ | |||
12 | at 'now'. Adjustment threshold currently set to 200us. */ | 12 | at 'now'. Adjustment threshold currently set to 200us. */ |
13 | #define ADJUSTMENT_THRESH_NS (200*1000LL) | 13 | #define ADJUSTMENT_THRESH_NS (200*1000LL) |
14 | 14 | ||
15 | void setup_pgm_release(struct task_struct* t) | 15 | int setup_pgm_release(struct task_struct* t) |
16 | { | 16 | { |
17 | int shifted_release = 0; | ||
18 | |||
17 | /* approximate time last predecessor gave us tokens */ | 19 | /* approximate time last predecessor gave us tokens */ |
18 | lt_t now = litmus_clock(); | 20 | lt_t now = litmus_clock(); |
19 | 21 | ||
20 | TRACE_TASK(t, "is starting a new PGM job: waiting:%d satisfied:%d\n", | 22 | TRACE_TASK(t, "is starting a new PGM job: waiting:%d\n", |
21 | tsk_rt(t)->ctrl_page->pgm_waiting, tsk_rt(t)->ctrl_page->pgm_satisfied); | 23 | tsk_rt(t)->ctrl_page->pgm_waiting); |
22 | |||
23 | BUG_ON(!tsk_rt(t)->ctrl_page->pgm_waiting || !tsk_rt(t)->ctrl_page->pgm_satisfied); | ||
24 | 24 | ||
25 | tsk_rt(t)->ctrl_page->pgm_waiting = 0; | 25 | BUG_ON(!tsk_rt(t)->ctrl_page->pgm_waiting); |
26 | tsk_rt(t)->ctrl_page->pgm_satisfied = 0; | ||
27 | 26 | ||
28 | /* Adjust release time if we got the last tokens after release of this job. | 27 | /* Adjust release time if we got the last tokens after release of this job. |
29 | This is possible since PGM jobs are early-released. Don't shift our | 28 | This is possible since PGM jobs are early-released. Don't shift our |
@@ -40,7 +39,7 @@ void setup_pgm_release(struct task_struct* t) | |||
40 | 39 | ||
41 | tsk_rt(t)->job_params.release = now; | 40 | tsk_rt(t)->job_params.release = now; |
42 | tsk_rt(t)->job_params.deadline = adj_deadline; | 41 | tsk_rt(t)->job_params.deadline = adj_deadline; |
43 | tsk_rt(t)->job_params.exec_time = 0; /* reset budget */ | 42 | shifted_release = 1; |
44 | } | 43 | } |
45 | else { | 44 | else { |
46 | TRACE_TASK(t, "adjustment falls below threshold. %lld < %lld\n", | 45 | TRACE_TASK(t, "adjustment falls below threshold. %lld < %lld\n", |
@@ -53,5 +52,10 @@ void setup_pgm_release(struct task_struct* t) | |||
53 | now, tsk_rt(t)->job_params.release); | 52 | now, tsk_rt(t)->job_params.release); |
54 | } | 53 | } |
55 | 54 | ||
55 | /* possible that there can be multiple instances of pgm_release logged. | ||
56 | analysis tools should filter out all but the last pgm_release for | ||
57 | a given job release */ | ||
56 | sched_trace_pgm_release(t); | 58 | sched_trace_pgm_release(t); |
59 | |||
60 | return shifted_release; | ||
57 | } | 61 | } |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index b6bed80133f7..8699f6d9d5b6 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -464,39 +464,62 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
464 | entry->linked->comm, entry->linked->pid); | 464 | entry->linked->comm, entry->linked->pid); |
465 | 465 | ||
466 | #ifdef CONFIG_SCHED_PGM | 466 | #ifdef CONFIG_SCHED_PGM |
467 | if (exists && is_pgm_waiting(entry->scheduled)) { | 467 | if (exists) { |
468 | if (!is_priority_boosted(entry->scheduled)) { | 468 | if (is_pgm_sending(entry->scheduled)) { |
469 | TRACE_TASK(entry->scheduled, "is waiting for PGM tokens.\n"); | 469 | if (!is_pgm_satisfied(entry->scheduled)) { |
470 | BUG_ON(is_pgm_satisfied(entry->scheduled)); | 470 | if (!is_priority_boosted(entry->scheduled)) { |
471 | 471 | TRACE_TASK(entry->scheduled, "is sending PGM tokens and needs boosting.\n"); | |
472 | /* Boost priority so we'll be scheduled immediately | 472 | BUG_ON(is_pgm_satisfied(entry->scheduled)); |
473 | when needed tokens arrive. */ | 473 | |
474 | 475 | tsk_rt(entry->scheduled)->priority_boosted = 1; | 474 | /* We are either sending tokens or waiting for tokens.
475 | tsk_rt(entry->scheduled)->boost_start_time = litmus_clock(); | 475 | If waiting: Boost priority so we'll be scheduled |
476 | 476 | immediately when needed tokens arrive. | |
477 | if (unlikely(!blocks)) { | 477 | If sending: Boost priority so no one (specifically, our |
478 | /* Task has probably blocked on an inbound token socket, but | 478 | consumers) will preempt us while signalling the token |
479 | if not, re-evaluate scheduling decisions */ | 479 | transmission. |
480 | unlink(entry->scheduled); | 480 | */ |
481 | cedf_job_arrival(entry->scheduled); | 481 | tsk_rt(entry->scheduled)->priority_boosted = 1; |
482 | tsk_rt(entry->scheduled)->boost_start_time = litmus_clock(); | ||
483 | |||
484 | if (likely(!blocks)) { | ||
485 | unlink(entry->scheduled); | ||
486 | cedf_job_arrival(entry->scheduled); | ||
487 | } | ||
488 | } | ||
489 | } | ||
490 | else { /* sending is satisfied */ | ||
491 | tsk_rt(entry->scheduled)->ctrl_page->pgm_sending = 0; | ||
492 | tsk_rt(entry->scheduled)->ctrl_page->pgm_satisfied = 0; | ||
493 | |||
494 | if (is_priority_boosted(entry->scheduled)) { | ||
495 | TRACE_TASK(entry->scheduled, | ||
496 | "is done sending PGM tokens must relinquish boosting.\n"); | ||
497 | /* clear boosting */ | ||
498 | tsk_rt(entry->scheduled)->priority_boosted = 0; | ||
499 | if(likely(!blocks)) { | ||
500 | /* recheck priority */ | ||
501 | unlink(entry->scheduled); | ||
502 | cedf_job_arrival(entry->scheduled); | ||
503 | } | ||
504 | } | ||
482 | } | 505 | } |
483 | } | 506 | } |
484 | else if (is_pgm_satisfied(entry->scheduled)) { | 507 | #if 0 |
485 | TRACE_TASK(entry->scheduled, "is done waiting for PGM tokens.\n"); | 508 | else if(is_pgm_waiting(entry->scheduled)) { |
486 | BUG_ON(!is_priority_boosted(entry->scheduled)); | 509 | int shifted_release; |
487 | 510 | ||
488 | /* clear any boosting */ | 511 | TRACE_TASK(entry->scheduled, "is waiting for PGM tokens.\n"); |
489 | tsk_rt(entry->scheduled)->priority_boosted = 0; | 512 | /* release the next job if we have the tokens we need */ |
490 | setup_pgm_release(entry->scheduled); | 513 | shifted_release = setup_pgm_release(entry->scheduled); |
491 | 514 | ||
492 | if (likely(!blocks)) { | 515 | /* setup_pgm_release() can screw with our priority, |
493 | /* Task has probably called sched_yield(), so blocking is | 516 | so recheck it */ |
494 | unlikely. Re-evaluate scheduling decisions because we | 517 | if (shifted_release && likely(!blocks)) { |
495 | still want to run. */ | ||
496 | unlink(entry->scheduled); | 518 | unlink(entry->scheduled); |
497 | cedf_job_arrival(entry->scheduled); | 519 | cedf_job_arrival(entry->scheduled); |
498 | } | 520 | } |
499 | } | 521 | } |
522 | #endif | ||
500 | } | 523 | } |
501 | #endif | 524 | #endif |
502 | 525 | ||
@@ -641,6 +664,10 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
641 | release_at(task, now); | 664 | release_at(task, now); |
642 | sched_trace_task_release(task); | 665 | sched_trace_task_release(task); |
643 | } | 666 | } |
667 | if (is_pgm_waiting(task)) { | ||
668 | /* shift out release/deadline, if needed */ | ||
669 | setup_pgm_release(task); | ||
670 | } | ||
644 | cedf_job_arrival(task); | 671 | cedf_job_arrival(task); |
645 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 672 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
646 | } | 673 | } |