author	Christopher Kenna <cjk@cs.unc.edu>	2011-08-27 17:48:58 -0400
committer	Christopher Kenna <cjk@cs.unc.edu>	2011-08-27 17:48:58 -0400
commit	1c5cda5df118735a0e84fd3277d933f58ea814c8 (patch)
tree	07a21bb5d91803651cd88667afe968db80bf2634
parent	0e059210db4aef3ed1cff173652c23f257ccfa20 (diff)
Refactor the mixed-criticality (MC) plugin.
Add the kernel configuration option CONFIG_PLUGIN_MC. Restore rt_param.h
as closely as possible to its original state: remove the MC-specific
fields from rt_task and rt_job and move them into a new mc_data struct,
and add an mc_data field to rt_param that is compiled in only when the
MC plugin is enabled.

Add a new MC plugin-specific header that defines the mc_data struct, a
container for the mc_task and mc_job structs. Update sched_mc.c to use
the new data structures, and add macros that simplify the code, e.g.,
fetching a task's criticality level quickly.

Add a system call to set the MC plugin-specific parameters, with a
corresponding check in liblitmus. Finally, add a few lines to
exit_litmus to reclaim the MC plugin's mc_data struct in the
task_struct on task exit.
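For context, a userspace client is expected to invoke the new syscall
before the task transitions to real-time mode (the kernel returns -EBUSY
afterwards). Below is a minimal sketch of such a caller, not part of this
patch: the raw syscall(2) invocation is illustrative and the eventual
liblitmus wrapper name may differ, but struct mc_task, enum crit_level,
and __NR_set_rt_task_mc_param are taken from the headers added here.

/* Hypothetical userspace sketch: set the criticality level of the
 * current task before it becomes real-time. Assumes the LITMUS^RT
 * headers added/changed in this patch are on the include path. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#include <litmus/sched_mc.h>   /* struct mc_task, enum crit_level */
#include <litmus/unistd_64.h>  /* __NR_set_rt_task_mc_param */

int main(void)
{
	struct mc_task mc = { .crit = CRIT_LEVEL_B };

	/* Must precede the transition to real-time mode; the kernel
	 * rejects parameter changes with -EBUSY afterwards. */
	if (syscall(__NR_set_rt_task_mc_param, getpid(), &mc) != 0) {
		perror("set_rt_task_mc_param");
		return 1;
	}
	/* ... then set rt_task params via liblitmus and become real-time ... */
	return 0;
}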
-rw-r--r--	arch/x86/kernel/syscall_table_32.S	1
-rw-r--r--	include/litmus/rt_param.h	20
-rw-r--r--	include/litmus/sched_mc.h	36
-rw-r--r--	include/litmus/unistd_32.h	3
-rw-r--r--	include/litmus/unistd_64.h	4
-rw-r--r--	litmus/Kconfig	9
-rw-r--r--	litmus/Makefile	4
-rw-r--r--	litmus/jobs.c	3
-rw-r--r--	litmus/litmus.c	83
-rw-r--r--	litmus/sched_mc.c	129
10 files changed, 217 insertions(+), 75 deletions(-)
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 37702905f658..57d5b3e1c1a6 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -352,3 +352,4 @@ ENTRY(sys_call_table)
 	.long sys_wait_for_ts_release
 	.long sys_release_ts
 	.long sys_null_call
+	.long sys_set_rt_task_mc_param
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 3a456e7135d8..4ded23d658d0 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -27,14 +27,6 @@ typedef enum {
 	RT_CLASS_BEST_EFFORT
 } task_class_t;
 
-/* criticality levels */
-typedef enum {
-	CRIT_LEVEL_A,
-	CRIT_LEVEL_B,
-	CRIT_LEVEL_C,
-	CRIT_LEVEL_D,
-} crit_level_t;
-
 typedef enum {
 	NO_ENFORCEMENT,      /* job may overrun unhindered */
 	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
@@ -48,7 +40,6 @@ struct rt_task {
 	unsigned int	cpu;
 	task_class_t	cls;
 	budget_policy_t	budget_policy; /* ignored by pfair */
-	crit_level_t	crit;
 };
 
 /* The definition of the data that is shared between the kernel and real-time
@@ -99,12 +90,12 @@ struct rt_job {
 	 * Increase this sequence number when a job is released.
 	 */
 	unsigned int    job_no;
-
-	lt_t ghost_budget;
-	int is_ghost;
 };
 
 struct pfair_param;
+#ifdef CONFIG_PLUGIN_MC
+struct mc_data;
+#endif
 
 /* RT task parameters for scheduling extensions
  * These parameters are inherited during clone and therefore must
@@ -127,6 +118,11 @@ struct rt_param {
 	lt_t boost_start_time;
 #endif
 
+#ifdef CONFIG_PLUGIN_MC
+	/* mixed criticality specific data */
+	struct mc_data *mc_data;
+#endif
+
 	/* user controlled parameters */
 	struct rt_task task_params;
 
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
new file mode 100644
index 000000000000..941a9f4470cc
--- /dev/null
+++ b/include/litmus/sched_mc.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_SCHED_MC_H_
+#define _LINUX_SCHED_MC_H_
+
+#include <litmus/rt_param.h>
+
+/* criticality levels */
+enum crit_level {
+	/* probably don't need to assign these (paranoid) */
+	CRIT_LEVEL_A = 0,
+	CRIT_LEVEL_B = 1,
+	CRIT_LEVEL_C = 2,
+	CRIT_LEVEL_D = 3,
+	NUM_CRIT_LEVELS = 4,
+};
+
+
+struct mc_task {
+	enum crit_level crit;
+};
+
+struct mc_job {
+	int is_ghost:1;
+	lt_t ghost_budget;
+};
+
+#ifdef __KERNEL__
+/* only used in the kernel (no user space) */
+
+struct mc_data {
+	struct mc_task mc_task;
+	struct mc_job mc_job;
+};
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 94264c27d9ac..71be3cd8d469 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -17,5 +17,6 @@
 #define __NR_wait_for_ts_release __LSC(9)
 #define __NR_release_ts		__LSC(10)
 #define __NR_null_call		__LSC(11)
+#define __NR_set_rt_task_mc_param __LSC(12)
 
-#define NR_litmus_syscalls 12
+#define NR_litmus_syscalls 13
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index d5ced0d2642c..95cb74495104 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -29,5 +29,7 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
 __SYSCALL(__NR_release_ts, sys_release_ts)
 #define __NR_null_call __LSC(11)
 __SYSCALL(__NR_null_call, sys_null_call)
+#define __NR_set_rt_task_mc_param __LSC(12)
+__SYSCALL(__NR_set_rt_task_mc_param, sys_set_rt_task_mc_param)
 
-#define NR_litmus_syscalls 12
+#define NR_litmus_syscalls 13
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..9a1cc2436580 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -23,6 +23,15 @@ config PLUGIN_PFAIR
 
 	  If unsure, say Yes.
 
+config PLUGIN_MC
+	bool "Mixed Criticality Scheduler"
+	depends on X86 && SYSFS
+	default y
+	help
+	  Include the mixed criticality scheduler.
+
+	  If unsure, say Yes.
+
 config RELEASE_MASTER
 	bool "Release-master Support"
 	depends on ARCH_HAS_SEND_PULL_TIMERS
diff --git a/litmus/Makefile b/litmus/Makefile
index d2bcad53c882..782022be6f28 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -17,11 +17,11 @@ obj-y = sched_plugin.o litmus.o \
 	bheap.o \
 	ctrldev.o \
 	sched_gsn_edf.o \
-	sched_psn_edf.o \
-	sched_mc.o
+	sched_psn_edf.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_PLUGIN_MC) += sched_mc.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 99b0bd9858f2..36e314625d86 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -13,9 +13,6 @@ void prepare_for_next_period(struct task_struct *t)
 	t->rt_param.job_params.release = t->rt_param.job_params.deadline;
 	t->rt_param.job_params.deadline += get_rt_period(t);
 	t->rt_param.job_params.exec_time = 0;
-	/* mixed criticality stuff*/
-	t->rt_param.job_params.is_ghost = 0;
-	t->rt_param.job_params.ghost_budget = 0;
 	/* update job sequence number */
 	t->rt_param.job_params.job_no++;
 
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 11ccaafd50de..16b3aeda5615 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -17,6 +17,12 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
+#ifdef CONFIG_PLUGIN_MC
+#include <litmus/sched_mc.h>
+#else
+struct mc_task;
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -274,6 +280,74 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 	return ret;
 }
 
+#ifdef CONFIG_PLUGIN_MC
+asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
+{
+	struct mc_task mc;
+	struct mc_data *mc_data;
+	struct task_struct *target;
+	int retval = -EINVAL;
+
+	printk("Setting up mixed-criticality task parameters for process %d.\n",
+	       pid);
+
+	if (pid < 0 || param == 0) {
+		goto out;
+	}
+	if (copy_from_user(&mc, param, sizeof(mc))) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	/* Task search and manipulation must be protected */
+	read_lock_irq(&tasklist_lock);
+	if (!(target = find_task_by_vpid(pid))) {
+		retval = -ESRCH;
+		goto out_unlock;
+	}
+
+	if (is_realtime(target)) {
+		/* The task is already a real-time task.
+		 * We cannot allow parameter changes at this point.
+		 */
+		retval = -EBUSY;
+		goto out_unlock;
+	}
+
+	if (mc.crit < CRIT_LEVEL_A || mc.crit > CRIT_LEVEL_D)
+	{
+		printk(KERN_WARNING "litmus: real-time task %d rejected because "
+		       "of invalid criticality level\n", pid);
+		goto out_unlock;
+	}
+
+	mc_data = tsk_rt(target)->mc_data;
+	if (!mc_data)
+	{
+		mc_data = kmalloc(sizeof(*mc_data), GFP_ATOMIC);
+		if (!mc_data)
+		{
+			retval = -ENOMEM;
+			goto out_unlock;
+		}
+		tsk_rt(target)->mc_data = mc_data;
+	}
+	mc_data->mc_task.crit = mc.crit;
+
+	retval = 0;
+out_unlock:
+	read_unlock_irq(&tasklist_lock);
+out:
+	return retval;
+}
+#else
+asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
+{
+	/* don't allow this syscall if the plugin is not enabled */
+	return -EINVAL;
+}
+#endif
+
 /* p is a real-time task. Re-init its state as a best-effort task. */
 static void reinit_litmus_state(struct task_struct* p, int restore)
 {
@@ -479,6 +553,15 @@ void exit_litmus(struct task_struct *dead_tsk)
 		free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
 	}
 
+#ifdef CONFIG_PLUGIN_MC
+	/* The MC-setup syscall might succeed and allocate mc_data, but the
+	 * task may not exit in real-time mode, and that memory will leak.
+	 * Check and free it here.
+	 */
+	if (tsk_rt(dead_tsk)->mc_data)
+		kfree(tsk_rt(dead_tsk)->mc_data);
+#endif
+
 	/* main cleanup only for RT tasks */
 	if (is_realtime(dead_tsk))
 		litmus_exit_task(dead_tsk);
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 8e8ba0dfb870..7800016d0407 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -14,6 +14,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/hrtimer.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -25,6 +26,8 @@
 
 #include <linux/module.h>
 
+#include <litmus/sched_mc.h>
+
 /* Overview of MC operations.
  *
  * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
@@ -97,7 +100,7 @@ typedef struct {
 	atomic_t will_schedule;	/* prevent unneeded IPIs */
 	struct bheap_node* hn_c;
 	struct bheap_node* hn_d;
-	struct task_struct* ghost_tasks[CRIT_LEVEL_D+1];
+	struct task_struct* ghost_tasks[NUM_CRIT_LEVELS];
 } cpu_entry_t;
 
 /*This code is heavily based on Bjoern's budget enforcement code. */
@@ -107,7 +110,7 @@ struct watchdog_timer {
 	struct task_struct* task;
 };
 
-DEFINE_PER_CPU(struct watchdog_timer[CRIT_LEVEL_D+1], ghost_timers);
+DEFINE_PER_CPU(struct watchdog_timer[NUM_CRIT_LEVELS], ghost_timers);
 #define ghost_timer(cpu, crit) (&(per_cpu(ghost_timers, cpu)[crit]))
 
 DEFINE_PER_CPU(cpu_entry_t, mc_cpu_entries);
@@ -122,9 +125,15 @@ cpu_entry_t* mc_cpus[NR_CPUS];
 	(atomic_read(&per_cpu(mc_cpu_entries, cpu).will_schedule))
 #define remote_cpu_entry(cpu) (&per_cpu(mc_cpu_entries, cpu))
 
-#define is_ghost(t) (tsk_rt(t)->job_params.is_ghost)
-
-
+#define tsk_mc_data(t) (tsk_rt(t)->mc_data)
+#define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit)
+
+/* need to do a short-circuit null check on mc_data before checking is_ghost */
+static inline int is_ghost(struct task_struct *t)
+{
+	struct mc_data *mc_data = tsk_mc_data(t);
+	return mc_data && mc_data->mc_job.is_ghost;
+}
+
 /* the cpus queue themselves according to priority in here */
 static struct bheap_node mc_heap_node_c[NR_CPUS], mc_heap_node_d[NR_CPUS];
@@ -156,8 +165,8 @@ static int mc_edf_higher_prio(struct task_struct* first, struct task_struct*
 	/*Only differs from normal EDF when two tasks of differing criticality
 	  are compared.*/
 	if (first && second){
-		int first_crit = first->rt_param.task_params.crit;
-		int second_crit = second->rt_param.task_params.crit;
+		enum crit_level first_crit = tsk_mc_crit(first);
+		enum crit_level second_crit = tsk_mc_crit(second);
 		/*Lower criticality numbers are higher priority*/
 		if (first_crit < second_crit){
 			return 1;
@@ -170,7 +179,7 @@ static int mc_edf_higher_prio(struct task_struct* first, struct task_struct*
 }
 
 static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
-				    int crit)
+				    enum crit_level crit)
 {
 	struct task_struct *first_active, *second_active;
 	first_active = first->linked;
@@ -188,7 +197,7 @@ static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
  * call only with irqs disabled and with ready_lock acquired
  * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
  */
-static int mc_edf_preemption_needed(rt_domain_t* rt, int crit,
+static int mc_edf_preemption_needed(rt_domain_t* rt, enum crit_level crit,
 				    cpu_entry_t* entry)
 {
 	struct task_struct *active_task;
@@ -235,7 +244,7 @@ static void mc_edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
 /* Return the domain of a task */
 static rt_domain_t* domain_of(struct task_struct* task)
 {
-	switch (task->rt_param.task_params.crit)
+	switch (tsk_mc_crit(task))
 	{
 	case CRIT_LEVEL_A:
 		return remote_a_queue(get_partition(task));
@@ -249,6 +258,7 @@ static rt_domain_t* domain_of(struct task_struct* task)
 	case CRIT_LEVEL_D:
 		return &crit_d;
 		break;
+	case NUM_CRIT_LEVELS:
 	default:
 		/*Should never get here*/
 		BUG();
@@ -347,18 +357,18 @@ static void update_ghost_time(struct task_struct *p)
 		delta = 0;
 		TRACE_TASK(p, "WARNING: negative time delta.\n");
 	}
-	if (p->rt_param.job_params.ghost_budget <= delta) {
+	if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) {
 		/*Currently will just set ghost budget to zero since
 		 * task has already been queued. Could probably do
 		 * more efficiently with significant reworking.
 		 */
 		TRACE_TASK(p, "Ghost job could have ended\n");
-		p->rt_param.job_params.ghost_budget = 0;
+		tsk_mc_data(p)->mc_job.ghost_budget = 0;
 		p->se.exec_start = clock;
 	}
 	else{
 		TRACE_TASK(p, "Ghost job updated, but didn't finish\n");
-		p->rt_param.job_params.ghost_budget -= delta;
+		tsk_mc_data(p)->mc_job.ghost_budget -= delta;
 		p->se.exec_start = clock;
 	}
 }
@@ -410,16 +420,15 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 
 	BUG_ON(linked && !is_realtime(linked));
 	BUG_ON(linked && is_realtime(linked) &&
-	       (linked->rt_param.task_params.crit < CRIT_LEVEL_C) &&
-	       (linked->rt_param.task_params.cpu != entry->cpu));
+	       (tsk_mc_crit(linked) < CRIT_LEVEL_C) &&
+	       (tsk_rt(linked)->task_params.cpu != entry->cpu));
 
 	if (linked && is_ghost(linked)) {
 		TRACE_TASK(linked, "Linking ghost job to CPU %d.\n",
 			   entry->cpu);
 		BUG_ON(entry->linked &&
-		       entry->linked->rt_param.task_params.crit <
-		       linked->rt_param.task_params.crit);
-		tmp = entry->ghost_tasks[linked->rt_param.task_params.crit];
+		       tsk_mc_crit(entry->linked) < tsk_mc_crit(linked));
+		tmp = entry->ghost_tasks[tsk_mc_crit(linked)];
 		if (tmp) {
 			unlink(tmp);
 		}
@@ -430,15 +439,14 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		BUG_ON(linked->rt_param.linked_on != NO_CPU);
 		linked->rt_param.linked_on = entry->cpu;
 		linked->se.exec_start = litmus_clock();
-		entry->ghost_tasks[linked->rt_param.task_params.crit] = linked;
+		entry->ghost_tasks[tsk_mc_crit(linked)] = linked;
 		/* Set up the watchdog timer. */
-		timer = ghost_timer(entry->cpu,
-				    linked->rt_param.task_params.crit);
+		timer = ghost_timer(entry->cpu, tsk_mc_crit(linked));
 		if (timer->task){
 			cancel_watchdog_timer(timer);
 		}
 		when_to_fire = litmus_clock() +
-			linked->rt_param.job_params.ghost_budget;
+			tsk_mc_data(linked)->mc_job.ghost_budget;
 		timer->task = linked;
 		__hrtimer_start_range_ns(&timer->timer,
 				ns_to_ktime(when_to_fire),
@@ -474,19 +482,17 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 			 * task is partitioned.
 			 */
 			tmp = sched->linked;
-			if (entry != sched &&
-			    linked->rt_param.task_params.crit >
+			if (entry != sched && tsk_mc_crit(linked) >
 			    CRIT_LEVEL_B &&
-			    (!tmp || tmp->rt_param.task_params.crit
+			    (!tmp || tsk_mc_crit(tmp)
 			     > CRIT_LEVEL_B)) {
 				TRACE_TASK(linked,
 					"already scheduled on %d, updating link.\n",
 					sched->cpu);
 				linked->rt_param.linked_on = sched->cpu;
 				sched->linked = linked;
-				for (i = linked->
-				     rt_param.task_params.crit;
-				     i < CRIT_LEVEL_D + 1; i++) {
+				for (i = tsk_mc_crit(linked);
+				     i < NUM_CRIT_LEVELS; i++) {
 					if (sched->ghost_tasks[i]){
 						unlink(sched->
 							ghost_tasks[i]);
@@ -498,8 +504,8 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	}
 	if (linked) { /* might be NULL due to swap */
 		linked->rt_param.linked_on = entry->cpu;
-		for (i = linked->rt_param.task_params.crit;
-		     i < CRIT_LEVEL_D + 1; i++){
+		for (i = tsk_mc_crit(linked);
+		     i < NUM_CRIT_LEVELS; i++){
 			if (entry->ghost_tasks[i]){
 				unlink(entry->ghost_tasks[i]);
 				/* WARNING: it is up to the
@@ -547,23 +553,20 @@ static noinline void unlink(struct task_struct* t)
 			 * It may be unset if we are called as a result of
 			 * the watchdog timer triggering.
 			 */
-			timer = ghost_timer(cpu,
-					    t->rt_param.task_params.crit);
+			timer = ghost_timer(cpu, tsk_mc_crit(t));
 			if (timer->task) {
 				/* Should already be watching task.*/
 				BUG_ON(timer->task != t);
 				cancel_watchdog_timer(timer);
 			}
-			if (t->rt_param.job_params.ghost_budget > 0){
+			if (tsk_mc_data(t)->mc_job.ghost_budget > 0) {
 				/* Job isn't finished, so do accounting. */
 				update_ghost_time(t);
 				/* Just remove from CPU, even in the rare case
 				 * of zero time left - it will be scheduled
 				 * with an immediate timer fire.
 				 */
-				entry->ghost_tasks[
-					t->rt_param.task_params.crit]
-					= NULL;
+				entry->ghost_tasks[tsk_mc_crit(t)] = NULL;
 				/*TODO: maybe make more efficient by
 				 * only updating on C/D completion?
 				 */
@@ -571,8 +574,7 @@ static noinline void unlink(struct task_struct* t)
 			}
 			else{
 				/* Job finished, so just remove */
-				entry->ghost_tasks[
-					t->rt_param.task_params.crit] = NULL;
+				entry->ghost_tasks[tsk_mc_crit(t)] = NULL;
 				update_cpu_position(entry);
 			}
 		}
@@ -621,7 +623,8 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
-static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
+static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu,
+			       enum crit_level crit) {
 	struct task_struct* task;
 	int i;
 	task = __take_ready(dom);
@@ -629,8 +632,7 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		   task->pid, cpu->cpu);
 	if (is_ghost(task)){
 		/* Changing ghost task only affects linked task at our level */
-		if (cpu->linked && cpu->linked->rt_param.task_params.crit ==
-		    crit)
+		if (cpu->linked && tsk_mc_crit(cpu->linked) == crit)
 			requeue(cpu->linked);
 		/* Can change ghost task at our level as well. */
 		if (cpu->ghost_tasks[crit])
@@ -642,7 +644,7 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		 */
 		if (cpu->linked)
 			requeue(cpu->linked);
-		for (i = crit; i <= CRIT_LEVEL_D; i++) {
+		for (i = crit; i < NUM_CRIT_LEVELS; i++) {
 			if (cpu->ghost_tasks[i])
 				requeue(cpu->ghost_tasks[i]);
 		}
@@ -691,22 +693,24 @@ static void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
  */
 static noinline void mc_job_arrival(struct task_struct* task)
 {
+	enum crit_level task_crit_level;
 	BUG_ON(!task);
 
 	TRACE("mc_job_arrival triggered\n");
+	task_crit_level = tsk_mc_crit(task);
 	requeue(task);
-	if (task->rt_param.task_params.crit == CRIT_LEVEL_A){
+	if (task_crit_level == CRIT_LEVEL_A){
 		check_for_a_preemption(remote_a_queue(get_partition(task)),
 				remote_cpu_entry(get_partition(task)));
 	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_B){
+	else if (task_crit_level == CRIT_LEVEL_B){
 		check_for_a_preemption(remote_b_queue(get_partition(task)),
 				remote_cpu_entry(get_partition(task)));
 	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_C){
+	else if (task_crit_level == CRIT_LEVEL_C){
 		check_for_c_preemptions(&crit_c);
 	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_D){
+	else if (task_crit_level == CRIT_LEVEL_D){
 		check_for_d_preemptions(&crit_d);
 	}
 }
@@ -767,8 +771,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	cpu = remote_cpu_entry(t->rt_param.scheduled_on);
 	/*Unlink first while it's not a ghost job.*/
 	unlink(t);
-	t->rt_param.job_params.ghost_budget = budget_remaining(t);
-	t->rt_param.job_params.is_ghost = 1;
+	tsk_mc_data(t)->mc_job.ghost_budget = budget_remaining(t);
+	tsk_mc_data(t)->mc_job.is_ghost = 1;
+
 	/* If we did just convert the job to ghost, we can safely
 	 * reschedule it and then let schedule() determine a new
 	 * job to run in the slack.
@@ -779,7 +784,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	 * If it doesn't need to, it will fall through and be handled
 	 * properly as well.
 	 */
-	if (t->rt_param.job_params.ghost_budget > 0){
+	if (tsk_mc_data(t)->mc_job.ghost_budget > 0) {
 		link_task_to_cpu(t, cpu);
 		preempt(cpu);
 		return;
@@ -788,11 +793,13 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	/* prepare for next period - we either just became ghost but with no
 	 * budget left, or we were already ghost and the ghost job expired*/
 	if (is_ghost(t)) {
-		t->rt_param.job_params.ghost_budget = 0;
+		tsk_mc_data(t)->mc_job.ghost_budget = 0;
 		/*Need to unlink here so prepare_for_next_period doesn't try
 		 * to unlink us
 		 */
 		unlink(t);
+		tsk_mc_data(t)->mc_job.is_ghost = 0;
+		tsk_mc_data(t)->mc_job.ghost_budget = 0;
 		prepare_for_next_period(t);
 	}
 	if (is_released(t, litmus_clock()))
@@ -821,7 +828,7 @@ static enum hrtimer_restart watchdog_timeout(struct hrtimer *timer)
 	 * we have an active timer.
 	 */
 	wt->task = NULL;
-	task->rt_param.job_params.ghost_budget = 0;
+	tsk_mc_data(task)->mc_job.ghost_budget = 0;
 	job_completion(task, 0);
 	TRACE_TASK(task, "Watchdog timeout\n");
 	raw_spin_unlock_irqrestore(&global_lock, flags);
@@ -900,7 +907,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 	struct task_struct* ready_task = NULL;
-	int ready_crit, i;
+	enum crit_level ready_crit;
+	int i;
 
 #ifdef CONFIG_RELEASE_MASTER
 	/* Bail out early if we are the release master.
@@ -1001,9 +1009,10 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		}
 	}
 	if (!ready_task) {
-		ready_crit = CRIT_LEVEL_D + 1;
+		/* set to something invalid? */
+		ready_crit = NUM_CRIT_LEVELS;
 	}
-	for (i = ready_crit; i <= CRIT_LEVEL_D; i++) {
+	for (i = ready_crit; i < NUM_CRIT_LEVELS; i++) {
 		if (entry->ghost_tasks[i])
 			requeue(entry->ghost_tasks[i]);
 	}
@@ -1083,8 +1092,8 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 
 	/* setup job params */
 	release_at(t, litmus_clock());
-	t->rt_param.job_params.ghost_budget = 0;
-	t->rt_param.job_params.is_ghost = 0;
+	tsk_mc_data(t)->mc_job.ghost_budget = 0;
+	tsk_mc_data(t)->mc_job.is_ghost = 0;
 
 	if (running) {
 		entry = &per_cpu(mc_cpu_entries, task_cpu(t));
@@ -1192,6 +1201,14 @@ static void mc_task_exit(struct task_struct * t)
 
 static long mc_admit_task(struct task_struct* tsk)
 {
+	if (!tsk_mc_data(tsk))
+	{
+		printk(KERN_WARNING "tried to admit task with no criticality "
+		       "level\n");
+		return -EINVAL;
+	}
+	printk(KERN_INFO "admitted task with criticality level %d\n",
+	       tsk_mc_crit(tsk));
 	return 0;
 }
 
@@ -1263,7 +1280,7 @@ static int __init init_mc(void)
 		entry->hn_d = &mc_heap_node_d[cpu];
 		bheap_node_init(&entry->hn_c, entry);
 		bheap_node_init(&entry->hn_d, entry);
-		for (i = CRIT_LEVEL_A; i <= CRIT_LEVEL_D; i++){
+		for (i = CRIT_LEVEL_A; i < NUM_CRIT_LEVELS; i++){
 			timer = ghost_timer(cpu, i);
 			hrtimer_init(&timer->timer, CLOCK_MONOTONIC,
 					HRTIMER_MODE_ABS);