Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Makefile             1
-rw-r--r--  litmus/litmus.c            22
-rw-r--r--  litmus/pgm.c               61
-rw-r--r--  litmus/sched_mc2.c         76
-rw-r--r--  litmus/sched_task_trace.c  23
5 files changed, 179 insertions, 4 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 3d0de72284f8..c19b6015b02e 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -40,3 +40,4 @@ obj-y += reservation.o polling_reservations.o
 
 obj-y += sched_pres.o
 obj-y += mc2_common.o sched_mc2.o
+obj-y += pgm.o
diff --git a/litmus/litmus.c b/litmus/litmus.c
index d72039067689..d2815f52876e 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -140,10 +140,16 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
     if (tp.relative_deadline == 0)
         tp.relative_deadline = tp.period;
 
-    if (tp.exec_cost <= 0)
+    if (tp.exec_cost <= 0) {
+        printk(KERN_INFO "litmus: real-time task %d rejected "
+            "because declared job execution time <= 0.", pid);
         goto out_unlock;
-    if (tp.period <= 0)
+    }
+    if (tp.period <= 0) {
+        printk(KERN_INFO "litmus: real-time task %d rejected "
+            "because declared job period <= 0.", pid);
         goto out_unlock;
+    }
     if (min(tp.relative_deadline, tp.period) < tp.exec_cost) /*density check*/
     {
         printk(KERN_INFO "litmus: real-time task %d rejected "
@@ -169,6 +175,13 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
         goto out_unlock;
     }
 
+    if (tp.pgm_type < PGM_NOT_A_NODE || tp.pgm_type > PGM_INTERNAL) {
+        printk(KERN_INFO "litmus: real-time task %d rejected "
+            "because of unknown PGM node type specified (%d)\n",
+            pid, tp.pgm_type);
+        goto out_unlock;
+    }
+
     target->rt_param.task_params = tp;
 
     retval = 0;
@@ -549,6 +562,10 @@ long litmus_admit_task(struct task_struct* tsk)
     if (get_rt_relative_deadline(tsk) == 0 ||
         get_exec_cost(tsk) >
             min(get_rt_relative_deadline(tsk), get_rt_period(tsk)) ) {
+        printk(KERN_INFO "litmus: invalid task parameters "
+            "(e = %lu, p = %lu, d = %lu)\n",
+            get_exec_cost(tsk), get_rt_period(tsk),
+            get_rt_relative_deadline(tsk));
         TRACE_TASK(tsk,
             "litmus admit: invalid task parameters "
             "(e = %lu, p = %lu, d = %lu)\n",
@@ -580,6 +597,7 @@ long litmus_admit_task(struct task_struct* tsk)
     if (!retval) {
         sched_trace_task_name(tsk);
         sched_trace_task_param(tsk);
+        sched_trace_pgm_param(tsk);
         atomic_inc(&rt_task_count);
     }
 
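The litmus.c hunks above add printk() diagnostics to the existing parameter checks and one new admission rule: the PGM node type supplied in rt_task must fall within the known range. The following is a minimal userspace sketch of the same validation logic, illustrative only; the placeholder enum values are assumptions, since the real PGM_NOT_A_NODE/PGM_INTERNAL constants are defined in PGM headers not included in this diff.

/* admission_sketch.c - illustrative sketch of the checks performed in
 * sys_set_rt_task_param(); not the kernel code itself. */
#include <stdio.h>

/* placeholder values for illustration; actual values live in the PGM headers */
enum { PGM_NOT_A_NODE = 0, PGM_INTERNAL = 4 };

struct tp_sketch {
    long long exec_cost;          /* e */
    long long period;             /* p */
    long long relative_deadline;  /* d; 0 means "use the period" */
    int pgm_type;
};

/* returns 0 if the parameters would pass the checks, -1 otherwise */
static int check_params(struct tp_sketch tp)
{
    if (tp.relative_deadline == 0)
        tp.relative_deadline = tp.period;
    if (tp.exec_cost <= 0 || tp.period <= 0)
        return -1;
    /* density check: e must fit within min(d, p) */
    if ((tp.relative_deadline < tp.period ? tp.relative_deadline : tp.period) < tp.exec_cost)
        return -1;
    /* PGM node type must be a known value */
    if (tp.pgm_type < PGM_NOT_A_NODE || tp.pgm_type > PGM_INTERNAL)
        return -1;
    return 0;
}

int main(void)
{
    struct tp_sketch ok  = { .exec_cost = 2, .period = 10, .relative_deadline = 8, .pgm_type = 1 };
    struct tp_sketch bad = { .exec_cost = 9, .period = 10, .relative_deadline = 8, .pgm_type = 1 };
    printf("ok: %d, bad: %d\n", check_params(ok), check_params(bad));
    return 0;
}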
diff --git a/litmus/pgm.c b/litmus/pgm.c
new file mode 100644
index 000000000000..db3378ff803d
--- /dev/null
+++ b/litmus/pgm.c
@@ -0,0 +1,61 @@
+/* litmus/pgm.c - common pgm control code
+ */
+
+#include <linux/sched.h>
+#include <litmus/litmus.h>
+#include <litmus/pgm.h>
+#include <litmus/sched_trace.h>
+
+/* Only readjust release/deadline if difference is over a given threshold.
+   It's a weak method for accounting overheads. Ideally, we'd know the last
+   time t was woken up by its last predecessor, rather than having to look
+   at 'now'. Adjustment threshold currently set to 200us. */
+#define ADJUSTMENT_THRESH_NS (200*1000LL)
+
+int setup_pgm_release(struct task_struct* t)
+{
+    int shifted_release = 0;
+
+    /* approximate time last predecessor gave us tokens */
+    lt_t now = litmus_clock();
+
+    TRACE_TASK(t, "is starting a new PGM job: waiting:%d\n",
+        tsk_rt(t)->ctrl_page->pgm_waiting);
+
+    BUG_ON(!tsk_rt(t)->ctrl_page->pgm_waiting);
+
+    /* Adjust release time if we got the last tokens after release of this job.
+       This is possible since PGM jobs are early-released. Don't shift our
+       deadline if we got the tokens earlier than expected. */
+    if (now > tsk_rt(t)->job_params.release) {
+        long long diff_ns = now - tsk_rt(t)->job_params.release;
+        if (diff_ns > ADJUSTMENT_THRESH_NS) {
+            lt_t adj_deadline = now + get_rt_relative_deadline(t);
+
+            TRACE_TASK(t, "adjusting PGM release time from (r = %llu, d = %llu) "
+                "to (r = %llu, d = %llu)\n",
+                tsk_rt(t)->job_params.release, tsk_rt(t)->job_params.deadline,
+                now, adj_deadline);
+
+            tsk_rt(t)->job_params.release = now;
+            tsk_rt(t)->job_params.deadline = adj_deadline;
+            shifted_release = 1;
+        }
+        else {
+            TRACE_TASK(t, "adjustment falls below threshold. %lld < %lld\n",
+                diff_ns, ADJUSTMENT_THRESH_NS);
+        }
+    }
+    else {
+        TRACE_TASK(t, "got tokens early--no need to adjust release. "
+            "cur time = %llu, release time = %llu\n",
+            now, tsk_rt(t)->job_params.release);
+    }
+
+    /* possible that there can be multiple instances of pgm_release logged.
+       analysis tools should filter out all but the last pgm_release for
+       a given job release */
+    sched_trace_pgm_release(t);
+
+    return shifted_release;
+}
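The release-adjustment policy in setup_pgm_release() above is simple to check in isolation: the release and deadline only move forward when the tokens arrive more than ADJUSTMENT_THRESH_NS after the nominal release. Below is a small userspace sketch of just that arithmetic, with plain integers standing in for lt_t and the tsk_rt() accessors; the function and variable names are illustrative, not kernel API.

#include <stdio.h>

#define ADJUSTMENT_THRESH_NS (200*1000LL)  /* 200 us, as in pgm.c */

/* Returns 1 and shifts *release/*deadline forward if the tokens arrived more
 * than ADJUSTMENT_THRESH_NS after the nominal release; returns 0 otherwise. */
static int adjust_release_sketch(long long now, long long rel_deadline,
                                 long long *release, long long *deadline)
{
    if (now > *release && (now - *release) > ADJUSTMENT_THRESH_NS) {
        *release = now;
        *deadline = now + rel_deadline;
        return 1;
    }
    return 0;
}

int main(void)
{
    long long release = 1000000, deadline = 11000000, rel_deadline = 10000000;
    /* tokens arrive 500 us after the nominal release -> release is shifted */
    int shifted = adjust_release_sketch(1500000, rel_deadline, &release, &deadline);
    printf("shifted=%d release=%lld deadline=%lld\n", shifted, release, deadline);
    return 0;
}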
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index a310723dce24..9ed92d355d7a 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -25,6 +25,7 @@
 #include <litmus/mc2_common.h>
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
+#include <litmus/pgm.h>
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
@@ -614,14 +615,82 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     /* next == NULL means "schedule background work". */
     lt_t now;
     struct mc2_cpu_state *state = local_cpu_state();
+    int sleep, preempt, np, exists, blocks;
 
     pre_schedule(prev, state->cpu);
 
     raw_spin_lock(&_global_env.lock);
     raw_spin_lock(&state->lock);
 
-    //BUG_ON(state->scheduled && state->scheduled != prev);
-    //BUG_ON(state->scheduled && !is_realtime(prev));
+    /* sanity check */
+    BUG_ON(state->scheduled && state->scheduled != prev);
+    BUG_ON(state->scheduled && !is_realtime(prev));
+    BUG_ON(is_realtime(prev) && !state->scheduled);
+
+    /* (0) Determine state */
+    exists = state->scheduled != NULL;
+    blocks = exists && !is_running(state->scheduled);
+    sleep = exists && is_completed(state->scheduled);
+    np = exists && is_np(state->scheduled);
+    preempt = _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule;
+
+    if (exists) {
+        if (is_pgm_sending(state->scheduled)) {
+            if (!is_pgm_satisfied(state->scheduled)) {
+                if (!is_priority_boosted(state->scheduled)) {
+                    TRACE_TASK(state->scheduled, "is sending PGM tokens and needs boosting.\n");
+                    BUG_ON(is_pgm_satisfied(state->scheduled));
+
+                    /* We are either sending tokens or waiting for tokens.
+                       If waiting: Boost priority so we'll be scheduled
+                       immediately when needed tokens arrive.
+                       If sending: Boost priority so no one (specifically, our
+                       consumers) will preempt us while signalling the token
+                       transmission.
+                     */
+                    tsk_rt(state->scheduled)->priority_boosted = 1;
+                    tsk_rt(state->scheduled)->boost_start_time = litmus_clock();
+
+                    if (likely(!blocks)) {
+                        task_departs(state->scheduled, is_completed(prev));
+                        task_arrives(state, state->scheduled);
+                        /* we may regain the processor */
+                        if (preempt) {
+                            //preempt = resched_cpu[state->cpu];
+                            litmus_reschedule_local();
+                            if (!preempt) {
+                                TRACE_TASK(state->scheduled, "blocked preemption by lazy boosting.\n");
+                            }
+                        }
+                    }
+                }
+            }
+            else { /* sending is satisfied */
+                tsk_rt(state->scheduled)->ctrl_page->pgm_sending = 0;
+                tsk_rt(state->scheduled)->ctrl_page->pgm_satisfied = 0;
+
+                if (is_priority_boosted(state->scheduled)) {
+                    TRACE_TASK(state->scheduled,
+                        "is done sending PGM tokens and must relinquish boosting.\n");
+                    /* clear boosting */
+                    tsk_rt(state->scheduled)->priority_boosted = 0;
+                    if (likely(!blocks)) {
+                        /* recheck priority */
+                        task_departs(state->scheduled, is_completed(prev));
+                        task_arrives(state, state->scheduled);
+                        /* we may lose the processor */
+                        if (!preempt) {
+                            //preempt = resched_cpu[state->cpu];
+                            litmus_reschedule_local();
+                            if (preempt) {
+                                TRACE_TASK(state->scheduled, "preempted by lazy unboosting.\n");
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
 
     /* update time */
     state->sup_env.will_schedule = true;
@@ -701,6 +770,9 @@ static void resume_legacy_task_model_updates(struct task_struct *tsk)
             sched_trace_task_release(tsk);
         }
     }
+    //if (is_pgm_waiting(tsk)) {
+    //    setup_pgm_release(tsk);
+    //}
 }
 
 /* mc2_task_resume - Called when the state of tsk changes back to
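The mc2_schedule() change above reduces to a small decision on the PGM control-page flags: keep the job priority-boosted while it is still sending tokens, and drop the boost (rechecking priority) once the send is flagged satisfied. The following stand-alone sketch illustrates just that decision; the struct and field names are illustrative stand-ins for the ctrl_page flags used in the diff, not the actual kernel data structures.

#include <stdio.h>

struct pgm_flags_sketch {
    int pgm_sending;    /* job is currently handing tokens to consumers */
    int pgm_satisfied;  /* token transmission has completed */
    int boosted;        /* current boosting state */
};

/* Returns the new boosting state: 1 = boosted, 0 = not boosted. */
static int pgm_boost_decision(struct pgm_flags_sketch *f)
{
    if (f->pgm_sending && !f->pgm_satisfied) {
        f->boosted = 1;               /* lazy boosting while sending */
    } else if (f->pgm_sending && f->pgm_satisfied) {
        f->pgm_sending = 0;
        f->pgm_satisfied = 0;
        f->boosted = 0;               /* lazy unboosting once satisfied */
    }
    return f->boosted;
}

int main(void)
{
    struct pgm_flags_sketch f = { .pgm_sending = 1, .pgm_satisfied = 0, .boosted = 0 };
    printf("while sending: boosted=%d\n", pgm_boost_decision(&f));
    f.pgm_sending = 1;
    f.pgm_satisfied = 1;
    printf("after satisfied: boosted=%d\n", pgm_boost_decision(&f));
    return 0;
}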
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 3a6756deb32f..784b18ecfd8b 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -238,3 +238,26 @@ feather_callback void do_sched_trace_action(unsigned long id,
         put_record(rec);
     }
 }
+
+feather_callback void do_sched_trace_pgm_param(unsigned long id, unsigned long _task)
+{
+    struct task_struct *t = (struct task_struct*) _task;
+    struct st_event_record* rec = get_record(ST_PGM_PARAM, t);
+    if (rec) {
+        rec->data.pgm_param.node_type = tsk_rt(t)->task_params.pgm_type;
+        rec->data.pgm_param.graph_pid = t->tgid;
+        rec->data.pgm_param.expected_graph_etoe = tsk_rt(t)->task_params.pgm_expected_etoe;
+        put_record(rec);
+    }
+}
+
+feather_callback void do_sched_trace_pgm_release(unsigned long id, unsigned long _task)
+{
+    struct task_struct *t = (struct task_struct*) _task;
+    struct st_event_record* rec = get_record(ST_PGM_RELEASE, t);
+    if (rec) {
+        rec->data.pgm_release.release = get_release(t);
+        rec->data.pgm_release.deadline = get_deadline(t);
+        put_record(rec);
+    }
+}
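As the comment in pgm.c above notes, a single job may log several pgm_release records when its release is adjusted more than once, and analysis tools are expected to keep only the last one per job release. A hypothetical post-processing sketch follows; the on-disk st_event_record layout is not shown in this diff, so the record struct and field names below are illustrative only.

#include <stdio.h>

struct pgm_release_sketch {
    unsigned int pid;
    unsigned int job;
    unsigned long long release;
    unsigned long long deadline;
};

#define MAX_JOBS 64

/* For each (pid, job) keep only the last record seen; assumes records arrive
 * in logging order, so a later record simply overwrites the earlier one.
 * Returns the number of surviving records written to out. */
static int keep_last_release(const struct pgm_release_sketch *in, int n,
                             struct pgm_release_sketch *out)
{
    int m = 0;
    for (int i = 0; i < n; i++) {
        int j;
        for (j = 0; j < m; j++) {
            if (out[j].pid == in[i].pid && out[j].job == in[i].job) {
                out[j] = in[i];   /* later record wins */
                break;
            }
        }
        if (j == m && m < MAX_JOBS)
            out[m++] = in[i];
    }
    return m;
}

int main(void)
{
    struct pgm_release_sketch recs[] = {
        { 100, 1, 1000, 11000 },
        { 100, 1, 1200, 11200 },   /* adjusted release, supersedes the first */
        { 100, 2, 2000, 12000 },
    };
    struct pgm_release_sketch out[MAX_JOBS];
    int m = keep_last_release(recs, 3, out);
    for (int i = 0; i < m; i++)
        printf("pid=%u job=%u r=%llu d=%llu\n", out[i].pid, out[i].job,
               out[i].release, out[i].deadline);
    return 0;
}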