From 1c5cda5df118735a0e84fd3277d933f58ea814c8 Mon Sep 17 00:00:00 2001
From: Christopher Kenna
Date: Sat, 27 Aug 2011 17:48:58 -0400
Subject: Refactor the mixed-criticality (MC) plugin.

Add the Linux kernel configuration option CONFIG_PLUGIN_MC.

Restore rt_param.h as close to its original state as possible: remove
the MC-specific fields from rt_task and rt_job and move them into a new
mc_data struct. Add an mc_data field to rt_param that is compiled in
only when the MC plugin is enabled.

Add a new MC-plugin-specific header that defines the mc_data struct,
a container for an mc_task struct and an mc_job struct.

Update sched_mc.c to use the new data structures, and add some macros
that simplify the code, e.g., for quickly reading a task's criticality.

Add a system call to set the MC-plugin-specific task parameters; a
corresponding change is required in liblitmus.

Add a few lines to exit_litmus to reclaim the MC plugin's mc_data
struct from the task_struct on task exit.
---
 arch/x86/kernel/syscall_table_32.S |    1 +
 include/litmus/rt_param.h          |   20 +++---
 include/litmus/sched_mc.h          |   36 +++++++++++
 include/litmus/unistd_32.h         |    3 +-
 include/litmus/unistd_64.h         |    4 +-
 litmus/Kconfig                     |    9 +++
 litmus/Makefile                    |    4 +-
 litmus/jobs.c                      |    3 -
 litmus/litmus.c                    |   83 ++++++++++++++++++++++++
 litmus/sched_mc.c                  |  129 +++++++++++++++++++++----------------
 10 files changed, 217 insertions(+), 75 deletions(-)
 create mode 100644 include/litmus/sched_mc.h

diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 37702905f658..57d5b3e1c1a6 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -352,3 +352,4 @@ ENTRY(sys_call_table)
 	.long sys_wait_for_ts_release
 	.long sys_release_ts
 	.long sys_null_call
+	.long sys_set_rt_task_mc_param
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 3a456e7135d8..4ded23d658d0 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -27,14 +27,6 @@ typedef enum {
 	RT_CLASS_BEST_EFFORT
 } task_class_t;
 
-/* criticality levels */
-typedef enum {
-	CRIT_LEVEL_A,
-	CRIT_LEVEL_B,
-	CRIT_LEVEL_C,
-	CRIT_LEVEL_D,
-} crit_level_t;
-
 typedef enum {
 	NO_ENFORCEMENT,      /* job may overrun unhindered */
 	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
@@ -48,7 +40,6 @@ struct rt_task {
 	unsigned int	cpu;
 	task_class_t	cls;
 	budget_policy_t	budget_policy; /* ignored by pfair */
-	crit_level_t	crit;
 };
 
 /* The definition of the data that is shared between the kernel and real-time
@@ -99,12 +90,12 @@ struct rt_job {
 	 * Increase this sequence number when a job is released.
 	 */
 	unsigned int	job_no;
-
-	lt_t ghost_budget;
-	int is_ghost;
 };
 
 struct pfair_param;
+#ifdef CONFIG_PLUGIN_MC
+struct mc_data;
+#endif
 
 /* RT task parameters for scheduling extensions
  * These parameters are inherited during clone and therefore must
@@ -127,6 +118,11 @@ struct rt_param {
 	lt_t		boost_start_time;
 #endif
 
+#ifdef CONFIG_PLUGIN_MC
+	/* mixed criticality specific data */
+	struct mc_data *mc_data;
+#endif
+
 	/* user controlled parameters */
 	struct rt_task 		task_params;
 
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
new file mode 100644
index 000000000000..941a9f4470cc
--- /dev/null
+++ b/include/litmus/sched_mc.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_SCHED_MC_H_
+#define _LINUX_SCHED_MC_H_
+
+#include <litmus/rt_param.h>
+
+/* criticality levels */
+enum crit_level {
+	/* probably don't need to assign these (paranoid) */
+	CRIT_LEVEL_A = 0,
+	CRIT_LEVEL_B = 1,
+	CRIT_LEVEL_C = 2,
+	CRIT_LEVEL_D = 3,
+	NUM_CRIT_LEVELS = 4,
+};
+
+
+struct mc_task {
+	enum crit_level crit;
+};
+
+struct mc_job {
+	int is_ghost:1;
+	lt_t ghost_budget;
+};
+
+#ifdef __KERNEL__
+/* only used in the kernel (no user space) */
+
+struct mc_data {
+	struct mc_task mc_task;
+	struct mc_job mc_job;
+};
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 94264c27d9ac..71be3cd8d469 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -17,5 +17,6 @@
 #define __NR_wait_for_ts_release __LSC(9)
 #define __NR_release_ts		__LSC(10)
 #define __NR_null_call		__LSC(11)
+#define __NR_set_rt_task_mc_param	__LSC(12)
 
-#define NR_litmus_syscalls 12
+#define NR_litmus_syscalls 13
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index d5ced0d2642c..95cb74495104 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -29,5 +29,7 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
 __SYSCALL(__NR_release_ts, sys_release_ts)
 #define __NR_null_call		__LSC(11)
 __SYSCALL(__NR_null_call, sys_null_call)
+#define __NR_set_rt_task_mc_param	__LSC(12)
+__SYSCALL(__NR_set_rt_task_mc_param, sys_set_rt_task_mc_param)
 
-#define NR_litmus_syscalls 12
+#define NR_litmus_syscalls 13
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..9a1cc2436580 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -23,6 +23,15 @@ config PLUGIN_PFAIR
 
 	  If unsure, say Yes.
 
+config PLUGIN_MC
+	bool "Mixed Criticality Scheduler"
+	depends on X86 && SYSFS
+	default y
+	help
+	  Include the mixed criticality scheduler.
+
+	  If unsure, say Yes.
+
 config RELEASE_MASTER
 	bool "Release-master Support"
 	depends on ARCH_HAS_SEND_PULL_TIMERS
diff --git a/litmus/Makefile b/litmus/Makefile
index d2bcad53c882..782022be6f28 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -17,11 +17,11 @@ obj-y     = sched_plugin.o litmus.o \
 	    bheap.o \
 	    ctrldev.o \
 	    sched_gsn_edf.o \
-	    sched_psn_edf.o \
-	    sched_mc.o
+	    sched_psn_edf.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_PLUGIN_MC) += sched_mc.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 99b0bd9858f2..36e314625d86 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -13,9 +13,6 @@ void prepare_for_next_period(struct task_struct *t)
 	t->rt_param.job_params.release   = t->rt_param.job_params.deadline;
 	t->rt_param.job_params.deadline += get_rt_period(t);
 	t->rt_param.job_params.exec_time = 0;
-	/* mixed criticality stuff*/
-	t->rt_param.job_params.is_ghost = 0;
-	t->rt_param.job_params.ghost_budget = 0;
 	/* update job sequence number */
 	t->rt_param.job_params.job_no++;
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 11ccaafd50de..16b3aeda5615 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -17,6 +17,12 @@
 #include
 #include
 
+#ifdef CONFIG_PLUGIN_MC
+#include <litmus/sched_mc.h>
+#else
+struct mc_task;
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count 		= ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -274,6 +280,74 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 	return ret;
 }
 
+#ifdef CONFIG_PLUGIN_MC
+asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
+{
+	struct mc_task mc;
+	struct mc_data *mc_data;
+	struct task_struct *target;
+	int retval = -EINVAL;
+
+	printk("Setting up mixed-criticality task parameters for process %d.\n",
+	       pid);
+
+	if (pid < 0 || param == 0) {
+		goto out;
+	}
+	if (copy_from_user(&mc, param, sizeof(mc))) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	/* Task search and manipulation must be protected */
+	read_lock_irq(&tasklist_lock);
+	if (!(target = find_task_by_vpid(pid))) {
+		retval = -ESRCH;
+		goto out_unlock;
+	}
+
+	if (is_realtime(target)) {
+		/* The task is already a real-time task.
+		 * We cannot allow parameter changes at this point.
+		 */
+		retval = -EBUSY;
+		goto out_unlock;
+	}
+
+	if (mc.crit < CRIT_LEVEL_A || mc.crit > CRIT_LEVEL_D)
+	{
+		printk(KERN_WARNING "litmus: real-time task %d rejected because "
+			"of invalid criticality level\n", pid);
+		goto out_unlock;
+	}
+
+	mc_data = tsk_rt(target)->mc_data;
+	if (!mc_data)
+	{
+		mc_data = kmalloc(sizeof(*mc_data), GFP_ATOMIC);
+		if (!mc_data)
+		{
+			retval = -ENOMEM;
+			goto out_unlock;
+		}
+		tsk_rt(target)->mc_data = mc_data;
+	}
+	mc_data->mc_task.crit = mc.crit;
+
+	retval = 0;
+out_unlock:
+	read_unlock_irq(&tasklist_lock);
+out:
+	return retval;
+}
+#else
+asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
+{
+	/* don't allow this syscall if the plugin is not enabled */
+	return -EINVAL;
+}
+#endif
+
 /* p is a real-time task. Re-init its state as a best-effort task. */
 static void reinit_litmus_state(struct task_struct* p, int restore)
 {
@@ -479,6 +553,15 @@ void exit_litmus(struct task_struct *dead_tsk)
 		free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
 	}
 
+#ifdef CONFIG_PLUGIN_MC
+	/* The MC-setup syscall might succeed and allocate mc_data, but the
+	   task may not exit in real-time mode, and that memory will leak.
+	   Check and free it here.
+ */ + if (tsk_rt(dead_tsk)->mc_data) + kfree(tsk_rt(dead_tsk)->mc_data); +#endif + /* main cleanup only for RT tasks */ if (is_realtime(dead_tsk)) litmus_exit_task(dead_tsk); diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c index 8e8ba0dfb870..7800016d0407 100644 --- a/litmus/sched_mc.c +++ b/litmus/sched_mc.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -25,6 +26,8 @@ #include +#include + /* Overview of MC operations. * * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage @@ -97,7 +100,7 @@ typedef struct { atomic_t will_schedule; /* prevent unneeded IPIs */ struct bheap_node* hn_c; struct bheap_node* hn_d; - struct task_struct* ghost_tasks[CRIT_LEVEL_D+1]; + struct task_struct* ghost_tasks[NUM_CRIT_LEVELS]; } cpu_entry_t; /*This code is heavily based on Bjoern's budget enforcement code. */ @@ -107,7 +110,7 @@ struct watchdog_timer { struct task_struct* task; }; -DEFINE_PER_CPU(struct watchdog_timer[CRIT_LEVEL_D+1], ghost_timers); +DEFINE_PER_CPU(struct watchdog_timer[NUM_CRIT_LEVELS], ghost_timers); #define ghost_timer(cpu, crit) (&(per_cpu(ghost_timers, cpu)[crit])) DEFINE_PER_CPU(cpu_entry_t, mc_cpu_entries); @@ -122,9 +125,15 @@ cpu_entry_t* mc_cpus[NR_CPUS]; (atomic_read(&per_cpu(mc_cpu_entries, cpu).will_schedule)) #define remote_cpu_entry(cpu) (&per_cpu(mc_cpu_entries, cpu)) -#define is_ghost(t) (tsk_rt(t)->job_params.is_ghost) - +#define tsk_mc_data(t) (tsk_rt(t)->mc_data) +#define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit) +/* need to do a short-circuit null check on mc_data before checking is_ghost */ +static inline int is_ghost(struct task_struct *t) +{ + struct mc_data *mc_data = tsk_mc_data(t); + return mc_data && mc_data->mc_job.is_ghost; +} /* the cpus queue themselves according to priority in here */ static struct bheap_node mc_heap_node_c[NR_CPUS], mc_heap_node_d[NR_CPUS]; @@ -156,8 +165,8 @@ static int mc_edf_higher_prio(struct task_struct* first, struct task_struct* /*Only differs from normal EDF when two tasks of differing criticality are compared.*/ if (first && second){ - int first_crit = first->rt_param.task_params.crit; - int second_crit = second->rt_param.task_params.crit; + enum crit_level first_crit = tsk_mc_crit(first); + enum crit_level second_crit = tsk_mc_crit(second); /*Lower criticality numbers are higher priority*/ if (first_crit < second_crit){ return 1; @@ -170,7 +179,7 @@ static int mc_edf_higher_prio(struct task_struct* first, struct task_struct* } static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second, - int crit) + enum crit_level crit) { struct task_struct *first_active, *second_active; first_active = first->linked; @@ -188,7 +197,7 @@ static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second, * call only with irqs disabled and with ready_lock acquired * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! 
 */
-static int mc_edf_preemption_needed(rt_domain_t* rt, int crit,
+static int mc_edf_preemption_needed(rt_domain_t* rt, enum crit_level crit,
 				    cpu_entry_t* entry)
 {
 	struct task_struct *active_task;
@@ -235,7 +244,7 @@ static void mc_edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
 /* Return the domain of a task */
 static rt_domain_t* domain_of(struct task_struct* task)
 {
-	switch (task->rt_param.task_params.crit)
+	switch (tsk_mc_crit(task))
 	{
 	case CRIT_LEVEL_A:
 		return remote_a_queue(get_partition(task));
 		break;
 	case CRIT_LEVEL_B:
 		return remote_b_queue(get_partition(task));
 		break;
 	case CRIT_LEVEL_C:
 		return &crit_c;
 		break;
@@ -249,6 +258,7 @@ static rt_domain_t* domain_of(struct task_struct* task)
 	case CRIT_LEVEL_D:
 		return &crit_d;
 		break;
+	case NUM_CRIT_LEVELS:
 	default:
 		/*Should never get here*/
 		BUG();
@@ -347,18 +357,18 @@ static void update_ghost_time(struct task_struct *p)
 		delta = 0;
 		TRACE_TASK(p, "WARNING: negative time delta.\n");
 	}
-	if (p->rt_param.job_params.ghost_budget <= delta) {
+	if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) {
 		/*Currently will just set ghost budget to zero since
 		 * task has already been queued. Could probably do
 		 * more efficiently with significant reworking.
 		 */
 		TRACE_TASK(p, "Ghost job could have ended\n");
-		p->rt_param.job_params.ghost_budget = 0;
+		tsk_mc_data(p)->mc_job.ghost_budget = 0;
 		p->se.exec_start = clock;
 	}
 	else{
 		TRACE_TASK(p, "Ghost job updated, but didn't finish\n");
-		p->rt_param.job_params.ghost_budget -= delta;
+		tsk_mc_data(p)->mc_job.ghost_budget -= delta;
 		p->se.exec_start = clock;
 	}
 }
@@ -410,16 +420,15 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 
 	BUG_ON(linked && !is_realtime(linked));
 	BUG_ON(linked && is_realtime(linked) &&
-	       (linked->rt_param.task_params.crit < CRIT_LEVEL_C) &&
-	       (linked->rt_param.task_params.cpu != entry->cpu));
+	       (tsk_mc_crit(linked) < CRIT_LEVEL_C) &&
+	       (tsk_rt(linked)->task_params.cpu != entry->cpu));
 
 	if (linked && is_ghost(linked)) {
 		TRACE_TASK(linked, "Linking ghost job to CPU %d.\n",
 			   entry->cpu);
 		BUG_ON(entry->linked &&
-		       entry->linked->rt_param.task_params.crit <
-		       linked->rt_param.task_params.crit);
-		tmp = entry->ghost_tasks[linked->rt_param.task_params.crit];
+		       tsk_mc_crit(entry->linked) < tsk_mc_crit(linked));
+		tmp = entry->ghost_tasks[tsk_mc_crit(linked)];
 		if (tmp) {
 			unlink(tmp);
 		}
@@ -430,15 +439,14 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		BUG_ON(linked->rt_param.linked_on != NO_CPU);
 		linked->rt_param.linked_on = entry->cpu;
 		linked->se.exec_start = litmus_clock();
-		entry->ghost_tasks[linked->rt_param.task_params.crit] = linked;
+		entry->ghost_tasks[tsk_mc_crit(linked)] = linked;
 		/* Set up the watchdog timer. */
-		timer = ghost_timer(entry->cpu,
-				    linked->rt_param.task_params.crit);
+		timer = ghost_timer(entry->cpu, tsk_mc_crit(linked));
 		if (timer->task){
 			cancel_watchdog_timer(timer);
 		}
 		when_to_fire = litmus_clock() +
-			linked->rt_param.job_params.ghost_budget;
+			tsk_mc_data(linked)->mc_job.ghost_budget;
 		timer->task = linked;
 		__hrtimer_start_range_ns(&timer->timer,
 					 ns_to_ktime(when_to_fire),
@@ -474,19 +482,17 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 			 * task is partitioned.
 			 */
 			tmp = sched->linked;
-			if (entry != sched &&
-			    linked->rt_param.task_params.crit >
+			if (entry != sched && tsk_mc_crit(linked) >
 			    CRIT_LEVEL_B &&
-			    (!tmp || tmp->rt_param.task_params.crit
+			    (!tmp || tsk_mc_crit(tmp)
 			     > CRIT_LEVEL_B)) {
 				TRACE_TASK(linked,
 					   "already scheduled on %d, updating link.\n",
 					   sched->cpu);
 				linked->rt_param.linked_on = sched->cpu;
 				sched->linked = linked;
-				for (i = linked->
-				     rt_param.task_params.crit;
-				     i < CRIT_LEVEL_D + 1; i++) {
+				for (i = tsk_mc_crit(linked);
+				     i < NUM_CRIT_LEVELS; i++) {
					if (sched->ghost_tasks[i]){
						unlink(sched->
						       ghost_tasks[i]);
@@ -498,8 +504,8 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	}
 	if (linked) { /* might be NULL due to swap */
 		linked->rt_param.linked_on = entry->cpu;
-		for (i = linked->rt_param.task_params.crit;
-		     i < CRIT_LEVEL_D + 1; i++){
+		for (i = tsk_mc_crit(linked);
+		     i < NUM_CRIT_LEVELS; i++){
 			if (entry->ghost_tasks[i]){
 				unlink(entry->ghost_tasks[i]);
 				/* WARNING: it is up to the
@@ -547,23 +553,20 @@ static noinline void unlink(struct task_struct* t)
 			 * It may be unset if we are called as a result of
 			 * the watchdog timer triggering.
 			 */
-			timer = ghost_timer(cpu,
-					    t->rt_param.task_params.crit);
+			timer = ghost_timer(cpu, tsk_mc_crit(t));
 			if (timer->task) {
 				/* Should already be watching task.*/
 				BUG_ON(timer->task != t);
 				cancel_watchdog_timer(timer);
 			}
-			if (t->rt_param.job_params.ghost_budget > 0){
+			if (tsk_mc_data(t)->mc_job.ghost_budget > 0) {
 				/* Job isn't finished, so do accounting. */
 				update_ghost_time(t);
 				/* Just remove from CPU, even in the rare case
 				 * of zero time left - it will be scheduled
 				 * with an immediate timer fire.
 				 */
-				entry->ghost_tasks[
-					t->rt_param.task_params.crit]
-					= NULL;
+				entry->ghost_tasks[tsk_mc_crit(t)] = NULL;
 				/*TODO: maybe make more efficient by
 				 * only updating on C/D completion?
 				 */
@@ -571,8 +574,7 @@ static noinline void unlink(struct task_struct* t)
 				update_cpu_position(entry);
 			}
 			else{
 				/* Job finished, so just remove */
-				entry->ghost_tasks[
-					t->rt_param.task_params.crit] = NULL;
+				entry->ghost_tasks[tsk_mc_crit(t)] = NULL;
 				update_cpu_position(entry);
 			}
 		}
@@ -621,7 +623,8 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
-static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
+static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu,
+			       enum crit_level crit) {
 	struct task_struct* task;
 	int i;
 	task = __take_ready(dom);
@@ -629,8 +632,7 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		   task->pid, cpu->cpu);
 	if (is_ghost(task)){
 		/* Changing ghost task only affects linked task at our level */
-		if (cpu->linked && cpu->linked->rt_param.task_params.crit ==
-		    crit)
+		if (cpu->linked && tsk_mc_crit(cpu->linked) == crit)
 			requeue(cpu->linked);
 		/* Can change ghost task at our level as well.
 		 */
 		if (cpu->ghost_tasks[crit])
 			requeue(cpu->ghost_tasks[crit]);
@@ -642,7 +644,7 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int crit) {
 		 */
 		if (cpu->linked)
 			requeue(cpu->linked);
-		for (i = crit; i <= CRIT_LEVEL_D; i++) {
+		for (i = crit; i < NUM_CRIT_LEVELS; i++) {
 			if (cpu->ghost_tasks[i])
 				requeue(cpu->ghost_tasks[i]);
 		}
@@ -691,22 +693,24 @@ static void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
  */
 static noinline void mc_job_arrival(struct task_struct* task)
 {
+	enum crit_level task_crit_level;
 	BUG_ON(!task);
 
 	TRACE("mc_job_arrival triggered\n");
+	task_crit_level = tsk_mc_crit(task);
 	requeue(task);
-	if (task->rt_param.task_params.crit == CRIT_LEVEL_A){
+	if (task_crit_level == CRIT_LEVEL_A){
 		check_for_a_preemption(remote_a_queue(get_partition(task)),
 				       remote_cpu_entry(get_partition(task)));
 	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_B){
+	else if (task_crit_level == CRIT_LEVEL_B){
 		check_for_a_preemption(remote_b_queue(get_partition(task)),
 				       remote_cpu_entry(get_partition(task)));
 	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_C){
+	else if (task_crit_level == CRIT_LEVEL_C){
 		check_for_c_preemptions(&crit_c);
 	}
-	else if (task->rt_param.task_params.crit == CRIT_LEVEL_D){
+	else if (task_crit_level == CRIT_LEVEL_D){
 		check_for_d_preemptions(&crit_d);
 	}
 }
@@ -767,8 +771,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	cpu = remote_cpu_entry(t->rt_param.scheduled_on);
 	/*Unlink first while it's not a ghost job.*/
 	unlink(t);
-	t->rt_param.job_params.ghost_budget = budget_remaining(t);
-	t->rt_param.job_params.is_ghost = 1;
+	tsk_mc_data(t)->mc_job.ghost_budget = budget_remaining(t);
+	tsk_mc_data(t)->mc_job.is_ghost = 1;
+
 	/* If we did just convert the job to ghost, we can safely
 	 * reschedule it and then let schedule() determine a new
 	 * job to run in the slack.
@@ -779,7 +784,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	 * If it doesn't need to, it will fall through and be handled
 	 * properly as well.
 	 */
-	if (t->rt_param.job_params.ghost_budget > 0){
+	if (tsk_mc_data(t)->mc_job.ghost_budget > 0) {
 		link_task_to_cpu(t, cpu);
 		preempt(cpu);
 		return;
@@ -788,11 +793,13 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	/* prepare for next period - we either just became ghost but with no
 	 * budget left, or we were already ghost and the ghost job expired*/
 	if (is_ghost(t)) {
-		t->rt_param.job_params.ghost_budget = 0;
+		tsk_mc_data(t)->mc_job.ghost_budget = 0;
 		/*Need to unlink here so prepare_for_next_period doesn't try
 		 * to unlink us
 		 */
 		unlink(t);
+		tsk_mc_data(t)->mc_job.is_ghost = 0;
+		tsk_mc_data(t)->mc_job.ghost_budget = 0;
 		prepare_for_next_period(t);
 	}
 	if (is_released(t, litmus_clock()))
@@ -821,7 +828,7 @@ static enum hrtimer_restart watchdog_timeout(struct hrtimer *timer)
 	 * we have an active timer.
 	 */
 	wt->task = NULL;
-	task->rt_param.job_params.ghost_budget = 0;
+	tsk_mc_data(task)->mc_job.ghost_budget = 0;
 	job_completion(task, 0);
 	TRACE_TASK(task, "Watchdog timeout\n");
 	raw_spin_unlock_irqrestore(&global_lock, flags);
@@ -900,7 +907,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 	struct task_struct* ready_task = NULL;
-	int ready_crit, i;
+	enum crit_level ready_crit;
+	int i;
 
 #ifdef CONFIG_RELEASE_MASTER
 	/* Bail out early if we are the release master.
@@ -1001,9 +1009,10 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		}
 	}
 	if (!ready_task) {
-		ready_crit = CRIT_LEVEL_D + 1;
+		/* no ready task: NUM_CRIT_LEVELS is an invalid sentinel
+		 * value, so the requeue loop below does nothing */
+		ready_crit = NUM_CRIT_LEVELS;
 	}
-	for (i = ready_crit; i <= CRIT_LEVEL_D; i++) {
+	for (i = ready_crit; i < NUM_CRIT_LEVELS; i++) {
 		if (entry->ghost_tasks[i])
 			requeue(entry->ghost_tasks[i]);
 	}
@@ -1083,8 +1092,8 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 
 	/* setup job params */
 	release_at(t, litmus_clock());
-	t->rt_param.job_params.ghost_budget = 0;
-	t->rt_param.job_params.is_ghost = 0;
+	tsk_mc_data(t)->mc_job.ghost_budget = 0;
+	tsk_mc_data(t)->mc_job.is_ghost = 0;
 
 	if (running) {
 		entry = &per_cpu(mc_cpu_entries, task_cpu(t));
@@ -1192,6 +1201,14 @@ static void mc_task_exit(struct task_struct * t)
 
 static long mc_admit_task(struct task_struct* tsk)
 {
+	if (!tsk_mc_data(tsk))
+	{
+		printk(KERN_WARNING "tried to admit task with no criticality "
+			"level\n");
+		return -EINVAL;
+	}
+	printk(KERN_INFO "admitted task with criticality level %d\n",
+		tsk_mc_crit(tsk));
 	return 0;
 }
@@ -1263,7 +1280,7 @@ static int __init init_mc(void)
 		entry->hn_d = &mc_heap_node_d[cpu];
 		bheap_node_init(&entry->hn_c, entry);
 		bheap_node_init(&entry->hn_d, entry);
-		for (i = CRIT_LEVEL_A; i <= CRIT_LEVEL_D; i++){
+		for (i = CRIT_LEVEL_A; i < NUM_CRIT_LEVELS; i++){
 			timer = ghost_timer(cpu, i);
 			hrtimer_init(&timer->timer, CLOCK_MONOTONIC,
 				     HRTIMER_MODE_ABS);
--
cgit v1.2.2
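
Usage note (editor's sketch, not part of the patch): user space must invoke the
new system call before the target task enters real-time mode, since
sys_set_rt_task_mc_param returns -EBUSY for tasks that are already real-time.
A minimal sketch of the calling convention follows; the user-space mirror of
struct mc_task and the direct syscall(2) invocation are illustrative
assumptions -- real code would use the litmus unistd headers and the
corresponding liblitmus wrapper mentioned in the commit message.

	/* sketch.c -- hedged example. __NR_set_rt_task_mc_param is __LSC(12)
	 * in include/litmus/unistd_{32,64}.h; it is guarded here so the
	 * sketch still compiles without the litmus headers installed. */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* user-space mirror of struct mc_task from include/litmus/sched_mc.h */
	enum crit_level { CRIT_LEVEL_A, CRIT_LEVEL_B, CRIT_LEVEL_C, CRIT_LEVEL_D };
	struct mc_task { enum crit_level crit; };

	int main(void)
	{
		struct mc_task mc = { .crit = CRIT_LEVEL_B };
		long ret = -1;

	#ifdef __NR_set_rt_task_mc_param
		/* set this task's criticality before it becomes real-time */
		ret = syscall(__NR_set_rt_task_mc_param, getpid(), &mc);
	#endif
		if (ret < 0)
			perror("set_rt_task_mc_param");

		/* ...then configure rt_task parameters and switch the task
		 * to real-time mode via liblitmus as usual... */
		return ret < 0;
	}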