author		Jonathan Herman <hermanjl@cs.unc.edu>	2011-08-27 21:36:27 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2011-08-27 21:36:27 -0400
commit		ae6471dbdd62031588a2ab08d887fcf008c853ab (patch)
tree		1780bcf5485009e6fe46b3bec1d866b192eb916e
parent		701471774055cda630817fb53ca7901d143a5dfa (diff)
parent		1c5cda5df118735a0e84fd3277d933f58ea814c8 (diff)
merge new struct refactor
-rw-r--r--	arch/x86/kernel/syscall_table_32.S	1
-rw-r--r--	include/litmus/rt_param.h	20
-rw-r--r--	include/litmus/sched_mc.h	36
-rw-r--r--	include/litmus/unistd_32.h	3
-rw-r--r--	include/litmus/unistd_64.h	4
-rw-r--r--	litmus/Kconfig	9
-rw-r--r--	litmus/Makefile	4
-rw-r--r--	litmus/jobs.c	3
-rw-r--r--	litmus/litmus.c	83
-rw-r--r--	litmus/sched_mc.c	133
10 files changed, 218 insertions, 78 deletions
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 37702905f658..57d5b3e1c1a6 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -352,3 +352,4 @@ ENTRY(sys_call_table)
 	.long sys_wait_for_ts_release
 	.long sys_release_ts
 	.long sys_null_call
+	.long sys_set_rt_task_mc_param
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 3a456e7135d8..4ded23d658d0 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -27,14 +27,6 @@ typedef enum {
 	RT_CLASS_BEST_EFFORT
 } task_class_t;
 
-/* criticality levels */
-typedef enum {
-	CRIT_LEVEL_A,
-	CRIT_LEVEL_B,
-	CRIT_LEVEL_C,
-	CRIT_LEVEL_D,
-} crit_level_t;
-
 typedef enum {
 	NO_ENFORCEMENT,      /* job may overrun unhindered */
 	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
@@ -48,7 +40,6 @@ struct rt_task {
 	unsigned int cpu;
 	task_class_t cls;
 	budget_policy_t budget_policy; /* ignored by pfair */
-	crit_level_t crit;
 };
 
 /* The definition of the data that is shared between the kernel and real-time
@@ -99,12 +90,12 @@ struct rt_job {
 	 * Increase this sequence number when a job is released.
 	 */
 	unsigned int job_no;
-
-	lt_t ghost_budget;
-	int is_ghost;
 };
 
 struct pfair_param;
+#ifdef CONFIG_PLUGIN_MC
+struct mc_data;
+#endif
 
 /* RT task parameters for scheduling extensions
  * These parameters are inherited during clone and therefore must
@@ -127,6 +118,11 @@ struct rt_param {
 	lt_t boost_start_time;
 #endif
 
+#ifdef CONFIG_PLUGIN_MC
+	/* mixed criticality specific data */
+	struct mc_data *mc_data;
+#endif
+
 	/* user controlled parameters */
 	struct rt_task task_params;
 
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
new file mode 100644
index 000000000000..941a9f4470cc
--- /dev/null
+++ b/include/litmus/sched_mc.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_SCHED_MC_H_
+#define _LINUX_SCHED_MC_H_
+
+#include <litmus/rt_param.h>
+
+/* criticality levels */
+enum crit_level {
+	/* probably don't need to assign these (paranoid) */
+	CRIT_LEVEL_A = 0,
+	CRIT_LEVEL_B = 1,
+	CRIT_LEVEL_C = 2,
+	CRIT_LEVEL_D = 3,
+	NUM_CRIT_LEVELS = 4,
+};
+
+
+struct mc_task {
+	enum crit_level crit;
+};
+
+struct mc_job {
+	int is_ghost:1;
+	lt_t ghost_budget;
+};
+
+#ifdef __KERNEL__
+/* only used in the kernel (no user space) */
+
+struct mc_data {
+	struct mc_task mc_task;
+	struct mc_job mc_job;
+};
+
+#endif /* __KERNEL__ */
+
+#endif
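
Note: NUM_CRIT_LEVELS acts as a past-the-end sentinel, replacing the CRIT_LEVEL_D+1 idiom that sched_mc.c used before this refactor (see the hunks below). A minimal sketch of the pattern, assuming a ghost_tasks array like the one in cpu_entry_t and the requeue() helper from sched_mc.c:

	/* Sketch only: the array is sized by the sentinel and the loop stops
	 * before it, so adding a criticality level touches only the enum. */
	struct task_struct *ghost_tasks[NUM_CRIT_LEVELS];
	enum crit_level lvl;

	for (lvl = CRIT_LEVEL_A; lvl < NUM_CRIT_LEVELS; lvl++) {
		if (ghost_tasks[lvl])
			requeue(ghost_tasks[lvl]);
	}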
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 94264c27d9ac..71be3cd8d469 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -17,5 +17,6 @@
 #define __NR_wait_for_ts_release __LSC(9)
 #define __NR_release_ts __LSC(10)
 #define __NR_null_call __LSC(11)
+#define __NR_set_rt_task_mc_param __LSC(12)
 
-#define NR_litmus_syscalls 12
+#define NR_litmus_syscalls 13
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index d5ced0d2642c..95cb74495104 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -29,5 +29,7 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
 __SYSCALL(__NR_release_ts, sys_release_ts)
 #define __NR_null_call __LSC(11)
 __SYSCALL(__NR_null_call, sys_null_call)
+#define __NR_set_rt_task_mc_param __LSC(12)
+__SYSCALL(__NR_set_rt_task_mc_param, sys_set_rt_task_mc_param)
 
-#define NR_litmus_syscalls 12
+#define NR_litmus_syscalls 13
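
For orientation, a userspace task would invoke the new syscall roughly as follows. This is a hedged sketch, not liblitmus code: the raw syscall(2) invocation and the hard-coded number 355 come from the 32-bit table entry added above (the 64-bit number differs), and struct mc_task is re-declared locally only for the sake of a self-contained example.

/* Hypothetical userspace sketch: assign a criticality level to the
 * calling task before it enters real-time mode. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_set_rt_task_mc_param 355	/* x86-32 entry added above */

enum crit_level { CRIT_LEVEL_A, CRIT_LEVEL_B, CRIT_LEVEL_C, CRIT_LEVEL_D };

struct mc_task {			/* layout from include/litmus/sched_mc.h */
	enum crit_level crit;
};

int main(void)
{
	struct mc_task mc = { .crit = CRIT_LEVEL_B };

	/* Must happen while the task is still best-effort; the kernel
	 * returns -EBUSY once the task is real-time (see litmus.c below). */
	if (syscall(__NR_set_rt_task_mc_param, getpid(), &mc) < 0) {
		perror("set_rt_task_mc_param");
		return 1;
	}
	return 0;
}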
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..9a1cc2436580 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -23,6 +23,15 @@ config PLUGIN_PFAIR
 
 	  If unsure, say Yes.
 
+config PLUGIN_MC
+	bool "Mixed Criticality Scheduler"
+	depends on X86 && SYSFS
+	default y
+	help
+	  Include the mixed criticality scheduler.
+
+	  If unsure, say Yes.
+
 config RELEASE_MASTER
 	bool "Release-master Support"
 	depends on ARCH_HAS_SEND_PULL_TIMERS
diff --git a/litmus/Makefile b/litmus/Makefile
index d2bcad53c882..782022be6f28 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -17,11 +17,11 @@ obj-y = sched_plugin.o litmus.o \
 	bheap.o \
 	ctrldev.o \
 	sched_gsn_edf.o \
-	sched_psn_edf.o \
-	sched_mc.o
+	sched_psn_edf.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_PLUGIN_MC) += sched_mc.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 99b0bd9858f2..36e314625d86 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -13,9 +13,6 @@ void prepare_for_next_period(struct task_struct *t)
 	t->rt_param.job_params.release = t->rt_param.job_params.deadline;
 	t->rt_param.job_params.deadline += get_rt_period(t);
 	t->rt_param.job_params.exec_time = 0;
-	/* mixed criticality stuff*/
-	t->rt_param.job_params.is_ghost = 0;
-	t->rt_param.job_params.ghost_budget = 0;
 	/* update job sequence number */
 	t->rt_param.job_params.job_no++;
 
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 11ccaafd50de..16b3aeda5615 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -17,6 +17,12 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
+#ifdef CONFIG_PLUGIN_MC
+#include <litmus/sched_mc.h>
+#else
+struct mc_task;
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -274,6 +280,74 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 	return ret;
 }
 
+#ifdef CONFIG_PLUGIN_MC
+asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
+{
+	struct mc_task mc;
+	struct mc_data *mc_data;
+	struct task_struct *target;
+	int retval = -EINVAL;
+
+	printk("Setting up mixed-criticality task parameters for process %d.\n",
+	       pid);
+
+	if (pid < 0 || param == 0) {
+		goto out;
+	}
+	if (copy_from_user(&mc, param, sizeof(mc))) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	/* Task search and manipulation must be protected */
+	read_lock_irq(&tasklist_lock);
+	if (!(target = find_task_by_vpid(pid))) {
+		retval = -ESRCH;
+		goto out_unlock;
+	}
+
+	if (is_realtime(target)) {
+		/* The task is already a real-time task.
+		 * We cannot allow parameter changes at this point.
+		 */
+		retval = -EBUSY;
+		goto out_unlock;
+	}
+
+	if (mc.crit < CRIT_LEVEL_A || mc.crit > CRIT_LEVEL_D)
+	{
+		printk(KERN_WARNING "litmus: real-time task %d rejected because "
+		       "of invalid criticality level\n", pid);
+		goto out_unlock;
+	}
+
+	mc_data = tsk_rt(target)->mc_data;
+	if (!mc_data)
+	{
+		mc_data = kmalloc(sizeof(*mc_data), GFP_ATOMIC);
+		if (!mc_data)
+		{
+			retval = -ENOMEM;
+			goto out_unlock;
+		}
+		tsk_rt(target)->mc_data = mc_data;
+	}
+	mc_data->mc_task.crit = mc.crit;
+
+	retval = 0;
+out_unlock:
+	read_unlock_irq(&tasklist_lock);
+out:
+	return retval;
+}
+#else
+asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
+{
+	/* don't allow this syscall if the plugin is not enabled */
+	return -EINVAL;
+}
+#endif
+
 /* p is a real-time task. Re-init its state as a best-effort task. */
 static void reinit_litmus_state(struct task_struct* p, int restore)
 {
@@ -479,6 +553,15 @@ void exit_litmus(struct task_struct *dead_tsk)
 		free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
 	}
 
+#ifdef CONFIG_PLUGIN_MC
+	/* The MC-setup syscall might succeed and allocate mc_data, but the
+	 * task may never enter real-time mode, in which case that memory
+	 * would leak. Check and free it here.
+	 */
+	if (tsk_rt(dead_tsk)->mc_data)
+		kfree(tsk_rt(dead_tsk)->mc_data);
+#endif
+
 	/* main cleanup only for RT tasks */
 	if (is_realtime(dead_tsk))
 		litmus_exit_task(dead_tsk);
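
Taken together with mc_admit_task() below (which rejects tasks that have no mc_data), the ordering constraint on userspace is: criticality first, then the usual LITMUS^RT setup. A hypothetical call sequence, assuming a set_rt_task_mc_param wrapper for the new syscall alongside the existing liblitmus calls set_rt_task_param and task_mode:

	struct mc_task mc = { .crit = CRIT_LEVEL_C };
	struct rt_task params = {
		.exec_cost     = 10000000ULL,	/* 10 ms, in ns */
		.period        = 100000000ULL,	/* 100 ms, in ns */
		.cpu           = 0,
		.cls           = RT_CLASS_HARD,
		.budget_policy = NO_ENFORCEMENT,
	};

	set_rt_task_mc_param(getpid(), &mc);	/* 1: while still best-effort */
	set_rt_task_param(getpid(), &params);	/* 2: ordinary RT parameters */
	task_mode(LITMUS_RT_TASK);		/* 3: admission runs mc_admit_task() */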
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 85a4e6342667..3b4e1caa9edb 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -14,6 +14,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/hrtimer.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -25,6 +26,8 @@
 
 #include <linux/module.h>
 
+#include <litmus/sched_mc.h>
+
 /* Overview of MC operations.
  *
  * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
@@ -97,7 +100,7 @@ typedef struct {
 	atomic_t will_schedule;	/* prevent unneeded IPIs */
 	struct bheap_node* hn_c;
 	struct bheap_node* hn_d;
-	struct task_struct* ghost_tasks[CRIT_LEVEL_D+1];
+	struct task_struct* ghost_tasks[NUM_CRIT_LEVELS];
 } cpu_entry_t;
 
 /*This code is heavily based on Bjoern's budget enforcement code. */
@@ -107,7 +110,7 @@ struct watchdog_timer {
 	struct task_struct* task;
 };
 
-DEFINE_PER_CPU(struct watchdog_timer[CRIT_LEVEL_D+1], ghost_timers);
+DEFINE_PER_CPU(struct watchdog_timer[NUM_CRIT_LEVELS], ghost_timers);
 #define ghost_timer(cpu, crit) (&(per_cpu(ghost_timers, cpu)[crit]))
 
 DEFINE_PER_CPU(cpu_entry_t, mc_cpu_entries);
@@ -122,8 +125,15 @@ cpu_entry_t* mc_cpus[NR_CPUS];
 	(atomic_read(&per_cpu(mc_cpu_entries, cpu).will_schedule))
 #define remote_cpu_entry(cpu) (&per_cpu(mc_cpu_entries, cpu))
 
-#define is_ghost(t) (tsk_rt(t)->job_params.is_ghost)
+#define tsk_mc_data(t) (tsk_rt(t)->mc_data)
+#define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit)
 
+/* need to do a short-circuit null check on mc_data before checking is_ghost */
+static inline int is_ghost(struct task_struct *t)
+{
+	struct mc_data *mc_data = tsk_mc_data(t);
+	return mc_data && mc_data->mc_job.is_ghost;
+}
 
 /* the cpus queue themselves according to priority in here */
 static struct bheap_node mc_heap_node_c[NR_CPUS], mc_heap_node_d[NR_CPUS];
@@ -155,8 +165,8 @@ static int mc_edf_higher_prio(struct task_struct* first, struct task_struct*
 	/*Only differs from normal EDF when two tasks of differing criticality
 	  are compared.*/
 	if (first && second){
-		int first_crit = first->rt_param.task_params.crit;
-		int second_crit = second->rt_param.task_params.crit;
+		enum crit_level first_crit = tsk_mc_crit(first);
+		enum crit_level second_crit = tsk_mc_crit(second);
 		/*Lower criticality numbers are higher priority*/
 		if (first_crit < second_crit){
 			return 1;
@@ -169,7 +179,7 @@ static int mc_edf_higher_prio(struct task_struct* first, struct task_struct*
 }
 
 static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
-		int crit)
+		enum crit_level crit)
 {
 	struct task_struct *first_active, *second_active;
 	first_active = first->linked;
@@ -187,7 +197,7 @@ static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
  * call only with irqs disabled and with ready_lock acquired
  * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
  */
-static int mc_edf_preemption_needed(rt_domain_t* rt, int crit,
+static int mc_edf_preemption_needed(rt_domain_t* rt, enum crit_level crit,
 				    cpu_entry_t* entry)
 {
 	struct task_struct *active_task;
@@ -234,7 +244,7 @@ static void mc_edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
 /* Return the domain of a task */
 static rt_domain_t* domain_of(struct task_struct* task)
 {
-	switch (task->rt_param.task_params.crit)
+	switch (tsk_mc_crit(task))
 	{
 	case CRIT_LEVEL_A:
 		return remote_a_queue(get_partition(task));
@@ -248,6 +258,7 @@ static rt_domain_t* domain_of(struct task_struct* task)
 	case CRIT_LEVEL_D:
 		return &crit_d;
 		break;
+	case NUM_CRIT_LEVELS:
 	default:
 		/*Should never get here*/
 		BUG();
@@ -343,18 +354,18 @@ static void update_ghost_time(struct task_struct *p)
 		delta = 0;
 		TRACE_TASK(p, "WARNING: negative time delta.\n");
 	}
-	if (p->rt_param.job_params.ghost_budget <= delta) {
+	if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) {
 		/*Currently will just set ghost budget to zero since
 		 * task has already been queued. Could probably do
 		 * more efficiently with significant reworking.
 		 */
 		TRACE_TASK(p, "Ghost job could have ended\n");
-		p->rt_param.job_params.ghost_budget = 0;
+		tsk_mc_data(p)->mc_job.ghost_budget = 0;
 		p->se.exec_start = clock;
 	}
 	else{
 		TRACE_TASK(p, "Ghost job updated, but didn't finish\n");
-		p->rt_param.job_params.ghost_budget -= delta;
+		tsk_mc_data(p)->mc_job.ghost_budget -= delta;
 		p->se.exec_start = clock;
 	}
 }
@@ -405,16 +416,15 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 
 	BUG_ON(linked && !is_realtime(linked));
 	BUG_ON(linked && is_realtime(linked) &&
-	       (linked->rt_param.task_params.crit < CRIT_LEVEL_C) &&
-	       (linked->rt_param.task_params.cpu != entry->cpu));
+	       (tsk_mc_crit(linked) < CRIT_LEVEL_C) &&
+	       (tsk_rt(linked)->task_params.cpu != entry->cpu));
 
 	if (linked && is_ghost(linked)) {
 		TRACE_TASK(linked, "Linking ghost job to CPU %d.\n",
 			   entry->cpu);
 		BUG_ON(entry->linked &&
-		       entry->linked->rt_param.task_params.crit <
-		       linked->rt_param.task_params.crit);
-		tmp = entry->ghost_tasks[linked->rt_param.task_params.crit];
+		       tsk_mc_crit(entry->linked) < tsk_mc_crit(linked));
+		tmp = entry->ghost_tasks[tsk_mc_crit(linked)];
 		if (tmp) {
 			unlink(tmp);
 		}
@@ -425,15 +435,14 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		BUG_ON(linked->rt_param.linked_on != NO_CPU);
 		linked->rt_param.linked_on = entry->cpu;
 		linked->se.exec_start = litmus_clock();
-		entry->ghost_tasks[linked->rt_param.task_params.crit] = linked;
+		entry->ghost_tasks[tsk_mc_crit(linked)] = linked;
 		/* Set up the watchdog timer. */
-		timer = ghost_timer(entry->cpu,
-				    linked->rt_param.task_params.crit);
+		timer = ghost_timer(entry->cpu, tsk_mc_crit(linked));
 		if (timer->task){
 			cancel_watchdog_timer(timer);
 		}
 		when_to_fire = litmus_clock() +
-			linked->rt_param.job_params.ghost_budget;
+			tsk_mc_data(linked)->mc_job.ghost_budget;
 		timer->task = linked;
 		__hrtimer_start_range_ns(&timer->timer,
 					 ns_to_ktime(when_to_fire),
@@ -469,19 +478,17 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 			 * task is partitioned.
 			 */
 			tmp = sched->linked;
-			if (entry != sched &&
-			    linked->rt_param.task_params.crit >
+			if (entry != sched && tsk_mc_crit(linked) >
 			    CRIT_LEVEL_B &&
-			    (!tmp || tmp->rt_param.task_params.crit
+			    (!tmp || tsk_mc_crit(tmp)
 			     > CRIT_LEVEL_B)) {
 				TRACE_TASK(linked,
 					   "already scheduled on %d, updating link.\n",
 					   sched->cpu);
 				linked->rt_param.linked_on = sched->cpu;
 				sched->linked = linked;
-				for (i = linked->
-				     rt_param.task_params.crit;
-				     i < CRIT_LEVEL_D + 1; i++) {
+				for (i = tsk_mc_crit(linked);
+				     i < NUM_CRIT_LEVELS; i++) {
 					if (sched->ghost_tasks[i]){
 						unlink(sched->
 						       ghost_tasks[i]);
@@ -493,8 +500,8 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	}
 	if (linked) { /* might be NULL due to swap */
 		linked->rt_param.linked_on = entry->cpu;
-		for (i = linked->rt_param.task_params.crit;
-		     i < CRIT_LEVEL_D + 1; i++){
+		for (i = tsk_mc_crit(linked);
+		     i < NUM_CRIT_LEVELS; i++){
 			if (entry->ghost_tasks[i]){
 				unlink(entry->ghost_tasks[i]);
 				/* WARNING: it is up to the
@@ -542,23 +549,20 @@ static noinline void unlink(struct task_struct* t)
 		 * It may be unset if we are called as a result of
 		 * the watchdog timer triggering.
 		 */
-		timer = ghost_timer(cpu,
-				    t->rt_param.task_params.crit);
+		timer = ghost_timer(cpu, tsk_mc_crit(t));
 		if (timer->task) {
 			/* Should already be watching task.*/
 			BUG_ON(timer->task != t);
 			cancel_watchdog_timer(timer);
 		}
-		if (t->rt_param.job_params.ghost_budget > 0){
+		if (tsk_mc_data(t)->mc_job.ghost_budget > 0) {
 			/* Job isn't finished, so do accounting. */
 			update_ghost_time(t);
 			/* Just remove from CPU, even in the rare case
 			 * of zero time left - it will be scheduled
 			 * with an immediate timer fire.
 			 */
-			entry->ghost_tasks[
-				t->rt_param.task_params.crit]
-				= NULL;
+			entry->ghost_tasks[tsk_mc_crit(t)] = NULL;
 			/*TODO: maybe make more efficient by
 			 * only updating on C/D completion?
 			 */
@@ -566,8 +570,7 @@ static noinline void unlink(struct task_struct* t)
 		}
 		else{
 			/* Job finished, so just remove */
-			entry->ghost_tasks[
-				t->rt_param.task_params.crit] = NULL;
+			entry->ghost_tasks[tsk_mc_crit(t)] = NULL;
 			update_cpu_position(entry);
 		}
 	}
@@ -616,7 +619,8 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
-static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
+static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu,
+			       enum crit_level crit) {
 	struct task_struct* task;
 	int i;
 	task = __take_ready(dom);
@@ -624,8 +628,7 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		   task->pid, cpu->cpu);
 	if (is_ghost(task)){
 		/* Changing ghost task only affects linked task at our level */
-		if (cpu->linked && cpu->linked->rt_param.task_params.crit ==
-		    crit)
+		if (cpu->linked && tsk_mc_crit(cpu->linked) == crit)
 			requeue(cpu->linked);
 		/* Can change ghost task at our level as well. */
 		if (cpu->ghost_tasks[crit])
@@ -637,7 +640,7 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		 */
 		if (cpu->linked)
 			requeue(cpu->linked);
-		for (i = crit; i <= CRIT_LEVEL_D; i++) {
+		for (i = crit; i < NUM_CRIT_LEVELS; i++) {
 			if (cpu->ghost_tasks[i])
 				requeue(cpu->ghost_tasks[i]);
 		}
@@ -686,26 +689,25 @@ static void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
  */
 static noinline void mc_job_arrival(struct task_struct* task)
 {
+	enum crit_level task_crit_level;
 	BUG_ON(!task);
 
 	TRACE("mc_job_arrival triggered\n");
+	task_crit_level = tsk_mc_crit(task);
 	requeue(task);
-	if (task->rt_param.task_params.crit == CRIT_LEVEL_A){
+	if (task_crit_level == CRIT_LEVEL_A){
 		check_for_a_preemption(remote_a_queue(get_partition(task)),
 				       remote_cpu_entry(get_partition(task)));
-	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_B){
+	} else if (task_crit_level == CRIT_LEVEL_B){
 		check_for_b_preemption(remote_b_queue(get_partition(task)),
 				       remote_cpu_entry(get_partition(task)));
-	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_C){
+	} else if (task_crit_level == CRIT_LEVEL_C){
 		check_for_c_preemptions(&crit_c);
-	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_D){
+	} else if (task_crit_level == CRIT_LEVEL_D){
 		check_for_d_preemptions(&crit_d);
 	}
 }
 
 /* Called by the domain
  * Obtains global lock, merges ready tasks, checks for/triggers preemptions,
  * and releases global lock
@@ -762,8 +764,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	cpu = remote_cpu_entry(t->rt_param.scheduled_on);
 	/*Unlink first while it's not a ghost job.*/
 	unlink(t);
-	t->rt_param.job_params.ghost_budget = budget_remaining(t);
-	t->rt_param.job_params.is_ghost = 1;
+	tsk_mc_data(t)->mc_job.ghost_budget = budget_remaining(t);
+	tsk_mc_data(t)->mc_job.is_ghost = 1;
+
 	/* If we did just convert the job to ghost, we can safely
 	 * reschedule it and then let schedule() determine a new
 	 * job to run in the slack.
@@ -774,7 +777,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	 * If it doesn't need to, it will fall through and be handled
 	 * properly as well.
 	 */
-	if (t->rt_param.job_params.ghost_budget > 0){
+	if (tsk_mc_data(t)->mc_job.ghost_budget > 0) {
 		link_task_to_cpu(t, cpu);
 		preempt(cpu);
 		return;
@@ -783,11 +786,13 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	/* prepare for next period - we either just became ghost but with no
 	 * budget left, or we were already ghost and the ghost job expired*/
 	if (is_ghost(t)) {
-		t->rt_param.job_params.ghost_budget = 0;
+		tsk_mc_data(t)->mc_job.ghost_budget = 0;
 		/*Need to unlink here so prepare_for_next_period doesn't try
 		 * to unlink us
 		 */
 		unlink(t);
+		tsk_mc_data(t)->mc_job.is_ghost = 0;
+		tsk_mc_data(t)->mc_job.ghost_budget = 0;
 		prepare_for_next_period(t);
 	}
 	if (is_released(t, litmus_clock()))
@@ -816,7 +821,7 @@ static enum hrtimer_restart watchdog_timeout(struct hrtimer *timer)
 	 * we have an active timer.
 	 */
 	wt->task = NULL;
-	task->rt_param.job_params.ghost_budget = 0;
+	tsk_mc_data(task)->mc_job.ghost_budget = 0;
 	job_completion(task, 0);
 	TRACE_TASK(task, "Watchdog timeout\n");
 	raw_spin_unlock_irqrestore(&global_lock, flags);
@@ -895,7 +900,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 	struct task_struct* ready_task = NULL;
-	int ready_crit, i;
+	enum crit_level ready_crit;
+	int i;
 
 #ifdef CONFIG_RELEASE_MASTER
 	/* Bail out early if we are the release master.
@@ -998,9 +1004,10 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		}
 	}
 	if (!ready_task) {
-		ready_crit = CRIT_LEVEL_D + 1;
+		/* set to something invalid? */
+		ready_crit = NUM_CRIT_LEVELS;
 	}
-	for (i = ready_crit; i <= CRIT_LEVEL_D; i++) {
+	for (i = ready_crit; i < NUM_CRIT_LEVELS; i++) {
 		if (entry->ghost_tasks[i])
 			requeue(entry->ghost_tasks[i]);
 	}
@@ -1082,8 +1089,8 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 
 	/* setup job params */
 	release_at(t, litmus_clock());
-	t->rt_param.job_params.ghost_budget = 0;
-	t->rt_param.job_params.is_ghost = 0;
+	tsk_mc_data(t)->mc_job.ghost_budget = 0;
+	tsk_mc_data(t)->mc_job.is_ghost = 0;
 
 	if (running) {
 		entry = &per_cpu(mc_cpu_entries, task_cpu(t));
@@ -1191,6 +1198,14 @@ static void mc_task_exit(struct task_struct * t)
 
 static long mc_admit_task(struct task_struct* tsk)
 {
+	if (!tsk_mc_data(tsk))
+	{
+		printk(KERN_WARNING "tried to admit task with no criticality "
+		       "level\n");
+		return -EINVAL;
+	}
+	printk(KERN_INFO "admitted task with criticality level %d\n",
+	       tsk_mc_crit(tsk));
 	return 0;
 }
 
@@ -1262,7 +1277,7 @@ static int __init init_mc(void)
 		entry->hn_d = &mc_heap_node_d[cpu];
 		bheap_node_init(&entry->hn_c, entry);
 		bheap_node_init(&entry->hn_d, entry);
-		for (i = CRIT_LEVEL_A; i <= CRIT_LEVEL_D; i++){
+		for (i = CRIT_LEVEL_A; i < NUM_CRIT_LEVELS; i++){
 			timer = ghost_timer(cpu, i);
 			hrtimer_init(&timer->timer, CLOCK_MONOTONIC,
 				     HRTIMER_MODE_ABS);