From 4b38febbd59fd33542a343991262119eb9860f5e Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:23:36 -0500 Subject: [ported from 2008.3] Core LITMUS^RT infrastructure Port 2008.3 Core LITMUS^RT infrastructure to Linux 2.6.32 litmus_sched_class implements 4 new methods: - prio_changed: void - switched_to: void - get_rr_interval: return infinity (i.e., 0) - select_task_rq: return current cpu --- litmus/Kconfig | 50 ++++ litmus/Makefile | 12 + litmus/ft_event.c | 43 ++++ litmus/heap.c | 314 ++++++++++++++++++++++++ litmus/jobs.c | 43 ++++ litmus/litmus.c | 654 ++++++++++++++++++++++++++++++++++++++++++++++++++ litmus/sched_litmus.c | 275 +++++++++++++++++++++ litmus/sched_plugin.c | 199 +++++++++++++++ 8 files changed, 1590 insertions(+) create mode 100644 litmus/Kconfig create mode 100644 litmus/Makefile create mode 100644 litmus/ft_event.c create mode 100644 litmus/heap.c create mode 100644 litmus/jobs.c create mode 100644 litmus/litmus.c create mode 100644 litmus/sched_litmus.c create mode 100644 litmus/sched_plugin.c (limited to 'litmus') diff --git a/litmus/Kconfig b/litmus/Kconfig new file mode 100644 index 000000000000..f8c642658a2f --- /dev/null +++ b/litmus/Kconfig @@ -0,0 +1,50 @@ +menu "LITMUS^RT" + +menu "Tracing" + +config FEATHER_TRACE + bool "Feather-Trace Infrastructure" + default y + help + Feather-Trace basic tracing infrastructure. Includes device file + driver and instrumentation point support. + + +config SCHED_TASK_TRACE + bool "Trace real-time tasks" + depends on FEATHER_TRACE + default y + help + Include support for the sched_trace_XXX() tracing functions. This + allows the collection of real-time task events such as job + completions, job releases, early completions, etc. This results in a + small overhead in the scheduling code. Disable if the overhead is not + acceptable (e.g., benchmarking). + + Say Yes for debugging. + Say No for overhead tracing. 
+ +config SCHED_OVERHEAD_TRACE + bool "Record timestamps for overhead measurements" + depends on FEATHER_TRACE + default n + help + Export event stream for overhead tracing. + Say Yes for overhead tracing. + +config SCHED_DEBUG_TRACE + bool "TRACE() debugging" + default y + help + Include support for sched_trace_log_messageg(), which is used to + implement TRACE(). If disabled, no TRACE() messages will be included + in the kernel, and no overheads due to debugging statements will be + incurred by the scheduler. Disable if the overhead is not acceptable + (e.g. benchmarking). + + Say Yes for debugging. + Say No for overhead tracing. + +endmenu + +endmenu diff --git a/litmus/Makefile b/litmus/Makefile new file mode 100644 index 000000000000..f4c2d564cd0b --- /dev/null +++ b/litmus/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for LITMUS^RT +# + +obj-y = sched_plugin.o litmus.o \ + jobs.o \ + heap.o + +obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o +obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o +obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o +obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o diff --git a/litmus/ft_event.c b/litmus/ft_event.c new file mode 100644 index 000000000000..6084b6d6b364 --- /dev/null +++ b/litmus/ft_event.c @@ -0,0 +1,43 @@ +#include + +#include + +#ifndef __ARCH_HAS_FEATHER_TRACE +/* provide dummy implementation */ + +int ft_events[MAX_EVENTS]; + +int ft_enable_event(unsigned long id) +{ + if (id < MAX_EVENTS) { + ft_events[id]++; + return 1; + } else + return 0; +} + +int ft_disable_event(unsigned long id) +{ + if (id < MAX_EVENTS && ft_events[id]) { + ft_events[id]--; + return 1; + } else + return 0; +} + +int ft_disable_all_events(void) +{ + int i; + + for (i = 0; i < MAX_EVENTS; i++) + ft_events[i] = 0; + + return MAX_EVENTS; +} + +int ft_is_event_enabled(unsigned long id) +{ + return id < MAX_EVENTS && ft_events[id]; +} + +#endif diff --git a/litmus/heap.c b/litmus/heap.c new file mode 100644 index 000000000000..112d14da46c3 --- 
/dev/null +++ b/litmus/heap.c @@ -0,0 +1,314 @@ +#include "linux/kernel.h" +#include "litmus/heap.h" + +void heap_init(struct heap* heap) +{ + heap->head = NULL; + heap->min = NULL; +} + +void heap_node_init(struct heap_node** _h, void* value) +{ + struct heap_node* h = *_h; + h->parent = NULL; + h->next = NULL; + h->child = NULL; + h->degree = NOT_IN_HEAP; + h->value = value; + h->ref = _h; +} + + +/* make child a subtree of root */ +static void __heap_link(struct heap_node* root, + struct heap_node* child) +{ + child->parent = root; + child->next = root->child; + root->child = child; + root->degree++; +} + +/* merge root lists */ +static struct heap_node* __heap_merge(struct heap_node* a, + struct heap_node* b) +{ + struct heap_node* head = NULL; + struct heap_node** pos = &head; + + while (a && b) { + if (a->degree < b->degree) { + *pos = a; + a = a->next; + } else { + *pos = b; + b = b->next; + } + pos = &(*pos)->next; + } + if (a) + *pos = a; + else + *pos = b; + return head; +} + +/* reverse a linked list of nodes. 
also clears parent pointer */ +static struct heap_node* __heap_reverse(struct heap_node* h) +{ + struct heap_node* tail = NULL; + struct heap_node* next; + + if (!h) + return h; + + h->parent = NULL; + while (h->next) { + next = h->next; + h->next = tail; + tail = h; + h = next; + h->parent = NULL; + } + h->next = tail; + return h; +} + +static void __heap_min(heap_prio_t higher_prio, struct heap* heap, + struct heap_node** prev, struct heap_node** node) +{ + struct heap_node *_prev, *cur; + *prev = NULL; + + if (!heap->head) { + *node = NULL; + return; + } + + *node = heap->head; + _prev = heap->head; + cur = heap->head->next; + while (cur) { + if (higher_prio(cur, *node)) { + *node = cur; + *prev = _prev; + } + _prev = cur; + cur = cur->next; + } +} + +static void __heap_union(heap_prio_t higher_prio, struct heap* heap, + struct heap_node* h2) +{ + struct heap_node* h1; + struct heap_node *prev, *x, *next; + if (!h2) + return; + h1 = heap->head; + if (!h1) { + heap->head = h2; + return; + } + h1 = __heap_merge(h1, h2); + prev = NULL; + x = h1; + next = x->next; + while (next) { + if (x->degree != next->degree || + (next->next && next->next->degree == x->degree)) { + /* nothing to do, advance */ + prev = x; + x = next; + } else if (higher_prio(x, next)) { + /* x becomes the root of next */ + x->next = next->next; + __heap_link(x, next); + } else { + /* next becomes the root of x */ + if (prev) + prev->next = next; + else + h1 = next; + __heap_link(next, x); + x = next; + } + next = x->next; + } + heap->head = h1; +} + +static struct heap_node* __heap_extract_min(heap_prio_t higher_prio, + struct heap* heap) +{ + struct heap_node *prev, *node; + __heap_min(higher_prio, heap, &prev, &node); + if (!node) + return NULL; + if (prev) + prev->next = node->next; + else + heap->head = node->next; + __heap_union(higher_prio, heap, __heap_reverse(node->child)); + return node; +} + +/* insert (and reinitialize) a node into the heap */ +void heap_insert(heap_prio_t 
higher_prio, struct heap* heap, + struct heap_node* node) +{ + struct heap_node *min; + node->child = NULL; + node->parent = NULL; + node->next = NULL; + node->degree = 0; + if (heap->min && higher_prio(node, heap->min)) { + /* swap min cache */ + min = heap->min; + min->child = NULL; + min->parent = NULL; + min->next = NULL; + min->degree = 0; + __heap_union(higher_prio, heap, min); + heap->min = node; + } else + __heap_union(higher_prio, heap, node); +} + +void heap_uncache_min(heap_prio_t higher_prio, struct heap* heap) +{ + struct heap_node* min; + if (heap->min) { + min = heap->min; + heap->min = NULL; + heap_insert(higher_prio, heap, min); + } +} + +/* merge addition into target */ +void heap_union(heap_prio_t higher_prio, + struct heap* target, struct heap* addition) +{ + /* first insert any cached minima, if necessary */ + heap_uncache_min(higher_prio, target); + heap_uncache_min(higher_prio, addition); + __heap_union(higher_prio, target, addition->head); + /* this is a destructive merge */ + addition->head = NULL; +} + +struct heap_node* heap_peek(heap_prio_t higher_prio, + struct heap* heap) +{ + if (!heap->min) + heap->min = __heap_extract_min(higher_prio, heap); + return heap->min; +} + +struct heap_node* heap_take(heap_prio_t higher_prio, + struct heap* heap) +{ + struct heap_node *node; + if (!heap->min) + heap->min = __heap_extract_min(higher_prio, heap); + node = heap->min; + heap->min = NULL; + if (node) + node->degree = NOT_IN_HEAP; + return node; +} + +int heap_decrease(heap_prio_t higher_prio, struct heap_node* node) +{ + struct heap_node *parent; + struct heap_node** tmp_ref; + void* tmp; + + /* bubble up */ + parent = node->parent; + while (parent && higher_prio(node, parent)) { + /* swap parent and node */ + tmp = parent->value; + parent->value = node->value; + node->value = tmp; + /* swap references */ + *(parent->ref) = node; + *(node->ref) = parent; + tmp_ref = parent->ref; + parent->ref = node->ref; + node->ref = tmp_ref; + /* step up */ 
+ node = parent; + parent = node->parent; + } + + return parent != NULL; +} + +void heap_delete(heap_prio_t higher_prio, struct heap* heap, + struct heap_node* node) +{ + struct heap_node *parent, *prev, *pos; + struct heap_node** tmp_ref; + void* tmp; + + if (heap->min != node) { + /* bubble up */ + parent = node->parent; + while (parent) { + /* swap parent and node */ + tmp = parent->value; + parent->value = node->value; + node->value = tmp; + /* swap references */ + *(parent->ref) = node; + *(node->ref) = parent; + tmp_ref = parent->ref; + parent->ref = node->ref; + node->ref = tmp_ref; + /* step up */ + node = parent; + parent = node->parent; + } + /* now delete: + * first find prev */ + prev = NULL; + pos = heap->head; + while (pos != node) { + prev = pos; + pos = pos->next; + } + /* we have prev, now remove node */ + if (prev) + prev->next = node->next; + else + heap->head = node->next; + __heap_union(higher_prio, heap, __heap_reverse(node->child)); + } else + heap->min = NULL; + node->degree = NOT_IN_HEAP; +} + +/* allocate a heap node for value and insert into the heap */ +int heap_add(heap_prio_t higher_prio, struct heap* heap, + void* value, int gfp_flags) +{ + struct heap_node* hn = heap_node_alloc(gfp_flags); + if (likely(hn)) { + heap_node_init(&hn, value); + heap_insert(higher_prio, heap, hn); + } + return hn != NULL; +} + +void* heap_take_del(heap_prio_t higher_prio, + struct heap* heap) +{ + struct heap_node* hn = heap_take(higher_prio, heap); + void* ret = NULL; + if (hn) { + ret = hn->value; + heap_node_free(hn); + } + return ret; +} diff --git a/litmus/jobs.c b/litmus/jobs.c new file mode 100644 index 000000000000..36e314625d86 --- /dev/null +++ b/litmus/jobs.c @@ -0,0 +1,43 @@ +/* litmus/jobs.c - common job control code + */ + +#include + +#include +#include + +void prepare_for_next_period(struct task_struct *t) +{ + BUG_ON(!t); + /* prepare next release */ + t->rt_param.job_params.release = t->rt_param.job_params.deadline; + 
t->rt_param.job_params.deadline += get_rt_period(t); + t->rt_param.job_params.exec_time = 0; + /* update job sequence number */ + t->rt_param.job_params.job_no++; + + /* don't confuse Linux */ + t->rt.time_slice = 1; +} + +void release_at(struct task_struct *t, lt_t start) +{ + t->rt_param.job_params.deadline = start; + prepare_for_next_period(t); + set_rt_flags(t, RT_F_RUNNING); +} + + +/* + * Deactivate current task until the beginning of the next period. + */ +long complete_job(void) +{ + /* Mark that we do not excute anymore */ + set_rt_flags(current, RT_F_SLEEP); + /* call schedule, this will return when a new job arrives + * it also takes care of preparing for the next release + */ + schedule(); + return 0; +} diff --git a/litmus/litmus.c b/litmus/litmus.c new file mode 100644 index 000000000000..eb0d17e298d7 --- /dev/null +++ b/litmus/litmus.c @@ -0,0 +1,654 @@ +/* litmus.c -- Implementation of the LITMUS syscalls, the LITMUS intialization code, + * and the procfs interface.. + */ +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include + +/* Number of RT tasks that exist in the system */ +atomic_t rt_task_count = ATOMIC_INIT(0); +static DEFINE_SPINLOCK(task_transition_lock); + +/* Give log messages sequential IDs. */ +atomic_t __log_seq_no = ATOMIC_INIT(0); + +/* current master CPU for handling timer IRQs */ +atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU); + +static struct kmem_cache * heap_node_cache; + +struct heap_node* heap_node_alloc(int gfp_flags) +{ + return kmem_cache_alloc(heap_node_cache, gfp_flags); +} + +void heap_node_free(struct heap_node* hn) +{ + kmem_cache_free(heap_node_cache, hn); +} + +/* + * sys_set_task_rt_param + * @pid: Pid of the task which scheduling parameters must be changed + * @param: New real-time extension parameters such as the execution cost and + * period + * Syscall for manipulating with task rt extension params + * Returns EFAULT if param is NULL. 
+ * ESRCH if pid is not corrsponding + * to a valid task. + * EINVAL if either period or execution cost is <=0 + * EPERM if pid is a real-time task + * 0 if success + * + * Only non-real-time tasks may be configured with this system call + * to avoid races with the scheduler. In practice, this means that a + * task's parameters must be set _before_ calling sys_prepare_rt_task() + * + * find_task_by_vpid() assumes that we are in the same namespace of the + * target. + */ +asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) +{ + struct rt_task tp; + struct task_struct *target; + int retval = -EINVAL; + + printk("Setting up rt task parameters for process %d.\n", pid); + + if (pid < 0 || param == 0) { + goto out; + } + if (copy_from_user(&tp, param, sizeof(tp))) { + retval = -EFAULT; + goto out; + } + + /* Task search and manipulation must be protected */ + read_lock_irq(&tasklist_lock); + if (!(target = find_task_by_vpid(pid))) { + retval = -ESRCH; + goto out_unlock; + } + + if (is_realtime(target)) { + /* The task is already a real-time task. + * We cannot not allow parameter changes at this point. + */ + retval = -EBUSY; + goto out_unlock; + } + + if (tp.exec_cost <= 0) + goto out_unlock; + if (tp.period <= 0) + goto out_unlock; + if (!cpu_online(tp.cpu)) + goto out_unlock; + if (tp.period < tp.exec_cost) + { + printk(KERN_INFO "litmus: real-time task %d rejected " + "because wcet > period\n", pid); + goto out_unlock; + } + + target->rt_param.task_params = tp; + + retval = 0; + out_unlock: + read_unlock_irq(&tasklist_lock); + out: + return retval; +} + +/* + * Getter of task's RT params + * returns EINVAL if param or pid is NULL + * returns ESRCH if pid does not correspond to a valid task + * returns EFAULT if copying of parameters has failed. + * + * find_task_by_vpid() assumes that we are in the same namespace of the + * target. 
+ */ +asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param) +{ + int retval = -EINVAL; + struct task_struct *source; + struct rt_task lp; + if (param == 0 || pid < 0) + goto out; + read_lock(&tasklist_lock); + if (!(source = find_task_by_vpid(pid))) { + retval = -ESRCH; + goto out_unlock; + } + lp = source->rt_param.task_params; + read_unlock(&tasklist_lock); + /* Do copying outside the lock */ + retval = + copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0; + return retval; + out_unlock: + read_unlock(&tasklist_lock); + out: + return retval; + +} + +/* + * This is the crucial function for periodic task implementation, + * It checks if a task is periodic, checks if such kind of sleep + * is permitted and calls plugin-specific sleep, which puts the + * task into a wait array. + * returns 0 on successful wakeup + * returns EPERM if current conditions do not permit such sleep + * returns EINVAL if current task is not able to go to sleep + */ +asmlinkage long sys_complete_job(void) +{ + int retval = -EPERM; + if (!is_realtime(current)) { + retval = -EINVAL; + goto out; + } + /* Task with negative or zero period cannot sleep */ + if (get_rt_period(current) <= 0) { + retval = -EINVAL; + goto out; + } + /* The plugin has to put the task into an + * appropriate queue and call schedule + */ + retval = litmus->complete_job(); + out: + return retval; +} + +/* This is an "improved" version of sys_complete_job that + * addresses the problem of unintentionally missing a job after + * an overrun. 
+ * + * returns 0 on successful wakeup + * returns EPERM if current conditions do not permit such sleep + * returns EINVAL if current task is not able to go to sleep + */ +asmlinkage long sys_wait_for_job_release(unsigned int job) +{ + int retval = -EPERM; + if (!is_realtime(current)) { + retval = -EINVAL; + goto out; + } + + /* Task with negative or zero period cannot sleep */ + if (get_rt_period(current) <= 0) { + retval = -EINVAL; + goto out; + } + + retval = 0; + + /* first wait until we have "reached" the desired job + * + * This implementation has at least two problems: + * + * 1) It doesn't gracefully handle the wrap around of + * job_no. Since LITMUS is a prototype, this is not much + * of a problem right now. + * + * 2) It is theoretically racy if a job release occurs + * between checking job_no and calling sleep_next_period(). + * A proper solution would requiring adding another callback + * in the plugin structure and testing the condition with + * interrupts disabled. + * + * FIXME: At least problem 2 should be taken care of eventually. + */ + while (!retval && job > current->rt_param.job_params.job_no) + /* If the last job overran then job <= job_no and we + * don't send the task to sleep. + */ + retval = litmus->complete_job(); + out: + return retval; +} + +/* This is a helper syscall to query the current job sequence number. + * + * returns 0 on successful query + * returns EPERM if task is not a real-time task. + * returns EFAULT if &job is not a valid pointer. + */ +asmlinkage long sys_query_job_no(unsigned int __user *job) +{ + int retval = -EPERM; + if (is_realtime(current)) + retval = put_user(current->rt_param.job_params.job_no, job); + + return retval; +} + +/* sys_null_call() is only used for determining raw system call + * overheads (kernel entry, kernel exit). It has no useful side effects. + * If ts is non-NULL, then the current Feather-Trace time is recorded. 
+ */ +asmlinkage long sys_null_call(cycles_t __user *ts) +{ + long ret = 0; + cycles_t now; + + if (ts) { + now = get_cycles(); + ret = put_user(now, ts); + } + + return ret; +} + +/* p is a real-time task. Re-init its state as a best-effort task. */ +static void reinit_litmus_state(struct task_struct* p, int restore) +{ + struct rt_task user_config = {}; + __user short *np_flag = NULL; + + if (restore) { + /* Safe user-space provided configuration data. */ + user_config = p->rt_param.task_params; + np_flag = p->rt_param.np_flag; + } + + /* We probably should not be inheriting any task's priority + * at this point in time. + */ + WARN_ON(p->rt_param.inh_task); + + /* We need to restore the priority of the task. */ +// __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio); + + /* Cleanup everything else. */ + memset(&p->rt_param, 0, sizeof(user_config)); + + /* Restore preserved fields. */ + if (restore) { + p->rt_param.task_params = user_config; + p->rt_param.np_flag = np_flag; + } +} + +long litmus_admit_task(struct task_struct* tsk) +{ + long retval = 0; + unsigned long flags; + + BUG_ON(is_realtime(tsk)); + + if (get_rt_period(tsk) == 0 || + get_exec_cost(tsk) > get_rt_period(tsk)) { + TRACE_TASK(tsk, "litmus admit: invalid task parameters " + "(%lu, %lu)\n", + get_exec_cost(tsk), get_rt_period(tsk)); + return -EINVAL; + } + + if (!cpu_online(get_partition(tsk))) + { + TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n", + get_partition(tsk)); + return -EINVAL; + } + + INIT_LIST_HEAD(&tsk_rt(tsk)->list); + + /* avoid scheduler plugin changing underneath us */ + spin_lock_irqsave(&task_transition_lock, flags); + + /* allocate heap node for this task */ + tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC); + if (!tsk_rt(tsk)->heap_node || + !tsk_rt(tsk)->rel_heap) { + printk(KERN_WARNING "litmus: no more heap node memory!?\n"); + retval = -ENOMEM; + heap_node_free(tsk_rt(tsk)->heap_node); + } else + heap_node_init(&tsk_rt(tsk)->heap_node, tsk); + + 
if (!retval) + retval = litmus->admit_task(tsk); + + if (!retval) { + sched_trace_task_name(tsk); + sched_trace_task_param(tsk); + atomic_inc(&rt_task_count); + } + + spin_unlock_irqrestore(&task_transition_lock, flags); + + return retval; +} + +void litmus_exit_task(struct task_struct* tsk) +{ + if (is_realtime(tsk)) { + sched_trace_task_completion(tsk, 1); + litmus->task_exit(tsk); + BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node)); + heap_node_free(tsk_rt(tsk)->heap_node); + atomic_dec(&rt_task_count); + reinit_litmus_state(tsk, 1); + } +} + +/* Switching a plugin in use is tricky. + * We must watch out that no real-time tasks exists + * (and that none is created in parallel) and that the plugin is not + * currently in use on any processor (in theory). + * + * For now, we don't enforce the second part since it is unlikely to cause + * any trouble by itself as long as we don't unload modules. + */ +int switch_sched_plugin(struct sched_plugin* plugin) +{ + unsigned long flags; + int ret = 0; + + BUG_ON(!plugin); + + /* stop task transitions */ + spin_lock_irqsave(&task_transition_lock, flags); + + /* don't switch if there are active real-time tasks */ + if (atomic_read(&rt_task_count) == 0) { + ret = litmus->deactivate_plugin(); + if (0 != ret) + goto out; + ret = plugin->activate_plugin(); + if (0 != ret) { + printk(KERN_INFO "Can't activate %s (%d).\n", + plugin->plugin_name, ret); + plugin = &linux_sched_plugin; + } + printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name); + litmus = plugin; + } else + ret = -EBUSY; +out: + spin_unlock_irqrestore(&task_transition_lock, flags); + return ret; +} + +/* Called upon fork. + * p is the newly forked task. + */ +void litmus_fork(struct task_struct* p) +{ + if (is_realtime(p)) + /* clean out any litmus related state, don't preserve anything*/ + reinit_litmus_state(p, 0); +} + +/* Called upon execve(). + * current is doing the exec. + * Don't let address space specific stuff leak. 
+ */ +void litmus_exec(void) +{ + struct task_struct* p = current; + + if (is_realtime(p)) { + WARN_ON(p->rt_param.inh_task); + p->rt_param.np_flag = NULL; + } +} + +void exit_litmus(struct task_struct *dead_tsk) +{ + if (is_realtime(dead_tsk)) + litmus_exit_task(dead_tsk); +} + + +#ifdef CONFIG_MAGIC_SYSRQ +int sys_kill(int pid, int sig); + +static void sysrq_handle_kill_rt_tasks(int key, struct tty_struct *tty) +{ + struct task_struct *t; + read_lock(&tasklist_lock); + for_each_process(t) { + if (is_realtime(t)) { + sys_kill(t->pid, SIGKILL); + } + } + read_unlock(&tasklist_lock); +} + +static struct sysrq_key_op sysrq_kill_rt_tasks_op = { + .handler = sysrq_handle_kill_rt_tasks, + .help_msg = "quit-rt-tasks(X)", + .action_msg = "sent SIGKILL to all LITMUS^RT real-time tasks", +}; + + +#endif + + +static int proc_read_stats(char *page, char **start, + off_t off, int count, + int *eof, void *data) +{ + int len; + + len = snprintf(page, PAGE_SIZE, + "real-time tasks = %d\n" + "ready for release = %d\n", + atomic_read(&rt_task_count), + 0); + return len; +} + +static int proc_read_plugins(char *page, char **start, + off_t off, int count, + int *eof, void *data) +{ + int len; + + len = print_sched_plugins(page, PAGE_SIZE); + return len; +} + +static int proc_read_curr(char *page, char **start, + off_t off, int count, + int *eof, void *data) +{ + int len; + + len = snprintf(page, PAGE_SIZE, "%s\n", litmus->plugin_name); + return len; +} + +static int proc_write_curr(struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int len, ret; + char name[65]; + struct sched_plugin* found; + + if(count > 64) + len = 64; + else + len = count; + + if(copy_from_user(name, buffer, len)) + return -EFAULT; + + name[len] = '\0'; + /* chomp name */ + if (len > 1 && name[len - 1] == '\n') + name[len - 1] = '\0'; + + found = find_sched_plugin(name); + + if (found) { + ret = switch_sched_plugin(found); + if (ret != 0) + printk(KERN_INFO "Could not switch 
plugin: %d\n", ret); + } else + printk(KERN_INFO "Plugin '%s' is unknown.\n", name); + + return len; +} + + +static int proc_read_release_master(char *page, char **start, + off_t off, int count, + int *eof, void *data) +{ + int len, master; + master = atomic_read(&release_master_cpu); + if (master == NO_CPU) + len = snprintf(page, PAGE_SIZE, "NO_CPU\n"); + else + len = snprintf(page, PAGE_SIZE, "%d\n", master); + return len; +} + +static int proc_write_release_master(struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int cpu, err, online = 0; + char msg[64]; + + if (count > 63) + return -EINVAL; + + if (copy_from_user(msg, buffer, count)) + return -EFAULT; + + /* terminate */ + msg[count] = '\0'; + /* chomp */ + if (count > 1 && msg[count - 1] == '\n') + msg[count - 1] = '\0'; + + if (strcmp(msg, "NO_CPU") == 0) { + atomic_set(&release_master_cpu, NO_CPU); + return count; + } else { + err = sscanf(msg, "%d", &cpu); + if (err == 1 && cpu >= 0 && (online = cpu_online(cpu))) { + atomic_set(&release_master_cpu, cpu); + return count; + } else { + TRACE("invalid release master: '%s' " + "(err:%d cpu:%d online:%d)\n", + msg, err, cpu, online); + return -EINVAL; + } + } +} + +static struct proc_dir_entry *litmus_dir = NULL, + *curr_file = NULL, + *stat_file = NULL, + *plugs_file = NULL, + *release_master_file = NULL; + +static int __init init_litmus_proc(void) +{ + litmus_dir = proc_mkdir("litmus", NULL); + if (!litmus_dir) { + printk(KERN_ERR "Could not allocate LITMUS^RT procfs entry.\n"); + return -ENOMEM; + } + + curr_file = create_proc_entry("active_plugin", + 0644, litmus_dir); + if (!curr_file) { + printk(KERN_ERR "Could not allocate active_plugin " + "procfs entry.\n"); + return -ENOMEM; + } + curr_file->read_proc = proc_read_curr; + curr_file->write_proc = proc_write_curr; + + release_master_file = create_proc_entry("release_master", + 0644, litmus_dir); + if (!release_master_file) { + printk(KERN_ERR "Could not allocate 
release_master " + "procfs entry.\n"); + return -ENOMEM; + } + release_master_file->read_proc = proc_read_release_master; + release_master_file->write_proc = proc_write_release_master; + + stat_file = create_proc_read_entry("stats", 0444, litmus_dir, + proc_read_stats, NULL); + + plugs_file = create_proc_read_entry("plugins", 0444, litmus_dir, + proc_read_plugins, NULL); + + return 0; +} + +static void exit_litmus_proc(void) +{ + if (plugs_file) + remove_proc_entry("plugins", litmus_dir); + if (stat_file) + remove_proc_entry("stats", litmus_dir); + if (curr_file) + remove_proc_entry("active_plugin", litmus_dir); + if (litmus_dir) + remove_proc_entry("litmus", NULL); +} + +extern struct sched_plugin linux_sched_plugin; + +static int __init _init_litmus(void) +{ + /* Common initializers, + * mode change lock is used to enforce single mode change + * operation. + */ + printk("Starting LITMUS^RT kernel\n"); + + register_sched_plugin(&linux_sched_plugin); + + heap_node_cache = KMEM_CACHE(heap_node, SLAB_PANIC); + +#ifdef CONFIG_MAGIC_SYSRQ + /* offer some debugging help */ + if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op)) + printk("Registered kill rt tasks magic sysrq.\n"); + else + printk("Could not register kill rt tasks magic sysrq.\n"); +#endif + + init_litmus_proc(); + + return 0; +} + +static void _exit_litmus(void) +{ + exit_litmus_proc(); + kmem_cache_destroy(heap_node_cache); +} + +module_init(_init_litmus); +module_exit(_exit_litmus); diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c new file mode 100644 index 000000000000..ccedd3670ac5 --- /dev/null +++ b/litmus/sched_litmus.c @@ -0,0 +1,275 @@ +/* This file is included from kernel/sched.c */ + +#include +#include + +static void update_time_litmus(struct rq *rq, struct task_struct *p) +{ + u64 delta = rq->clock - p->se.exec_start; + if (unlikely((s64)delta < 0)) + delta = 0; + /* per job counter */ + p->rt_param.job_params.exec_time += delta; + /* task counter */ + p->se.sum_exec_runtime += 
delta; + /* sched_clock() */ + p->se.exec_start = rq->clock; + cpuacct_charge(p, delta); +} + +static void double_rq_lock(struct rq *rq1, struct rq *rq2); +static void double_rq_unlock(struct rq *rq1, struct rq *rq2); + +static void litmus_tick(struct rq *rq, struct task_struct *p) +{ + if (is_realtime(p)) + update_time_litmus(rq, p); + litmus->tick(p); +} + +static void litmus_schedule(struct rq *rq, struct task_struct *prev) +{ + struct rq* other_rq; + long was_running; + lt_t _maybe_deadlock = 0; + /* WARNING: rq is _not_ locked! */ + if (is_realtime(prev)) { + update_time_litmus(rq, prev); + if (!is_running(prev)) + tsk_rt(prev)->present = 0; + } + + /* let the plugin schedule */ + rq->litmus_next = litmus->schedule(prev); + + /* check if a global plugin pulled a task from a different RQ */ + if (rq->litmus_next && task_rq(rq->litmus_next) != rq) { + /* we need to migrate the task */ + other_rq = task_rq(rq->litmus_next); + TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu); + + /* while we drop the lock, the prev task could change its + * state + */ + was_running = is_running(prev); + mb(); + spin_unlock(&rq->lock); + + /* Don't race with a concurrent switch. This could deadlock in + * the case of cross or circular migrations. It's the job of + * the plugin to make sure that doesn't happen. + */ + TRACE_TASK(rq->litmus_next, "stack_in_use=%d\n", + rq->litmus_next->rt_param.stack_in_use); + if (rq->litmus_next->rt_param.stack_in_use != NO_CPU) { + TRACE_TASK(rq->litmus_next, "waiting to deschedule\n"); + _maybe_deadlock = litmus_clock(); + } + while (rq->litmus_next->rt_param.stack_in_use != NO_CPU) { + cpu_relax(); + mb(); + if (rq->litmus_next->rt_param.stack_in_use == NO_CPU) + TRACE_TASK(rq->litmus_next, + "descheduled. Proceeding.\n"); + if (lt_before(_maybe_deadlock + 10000000, + litmus_clock())) { + /* We've been spinning for 10ms. + * Something can't be right! 
+ * Let's abandon the task and bail out; at least + * we will have debug info instead of a hard + * deadlock. + */ + TRACE_TASK(rq->litmus_next, + "stack too long in use. " + "Deadlock?\n"); + rq->litmus_next = NULL; + + /* bail out */ + spin_lock(&rq->lock); + return; + } + } +#ifdef __ARCH_WANT_UNLOCKED_CTXSW + if (rq->litmus_next->oncpu) + TRACE_TASK(rq->litmus_next, "waiting for !oncpu"); + while (rq->litmus_next->oncpu) { + cpu_relax(); + mb(); + } +#endif + double_rq_lock(rq, other_rq); + mb(); + if (is_realtime(prev) && is_running(prev) != was_running) { + TRACE_TASK(prev, + "state changed while we dropped" + " the lock: is_running=%d, was_running=%d\n", + is_running(prev), was_running); + if (is_running(prev) && !was_running) { + /* prev task became unblocked + * we need to simulate normal sequence of events + * to scheduler plugins. + */ + litmus->task_block(prev); + litmus->task_wake_up(prev); + } + } + + set_task_cpu(rq->litmus_next, smp_processor_id()); + + /* DEBUG: now that we have the lock we need to make sure a + * couple of things still hold: + * - it is still a real-time task + * - it is still runnable (could have been stopped) + * If either is violated, then the active plugin is + * doing something wrong. 
+ */ + if (!is_realtime(rq->litmus_next) || + !is_running(rq->litmus_next)) { + /* BAD BAD BAD */ + TRACE_TASK(rq->litmus_next, + "BAD: migration invariant FAILED: " + "rt=%d running=%d\n", + is_realtime(rq->litmus_next), + is_running(rq->litmus_next)); + /* drop the task */ + rq->litmus_next = NULL; + } + /* release the other CPU's runqueue, but keep ours */ + spin_unlock(&other_rq->lock); + } + if (rq->litmus_next) + rq->litmus_next->rt_param.stack_in_use = rq->cpu; +} + +static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, + int wakeup) +{ + if (wakeup) { + sched_trace_task_resume(p); + tsk_rt(p)->present = 1; + litmus->task_wake_up(p); + } else + TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); +} + +static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep) +{ + if (sleep) { + litmus->task_block(p); + tsk_rt(p)->present = 0; + sched_trace_task_block(p); + } else + TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n"); +} + +static void yield_task_litmus(struct rq *rq) +{ + BUG_ON(rq->curr != current); + litmus->complete_job(); +} + +/* Plugins are responsible for this. 
+ */ +static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int flags) +{ +} + +/* has already been taken care of */ +static void put_prev_task_litmus(struct rq *rq, struct task_struct *p) +{ +} + +static struct task_struct *pick_next_task_litmus(struct rq *rq) +{ + struct task_struct* picked = rq->litmus_next; + rq->litmus_next = NULL; + if (picked) + picked->se.exec_start = rq->clock; + return picked; +} + +static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued) +{ +} + +static void switched_to_litmus(struct rq *rq, struct task_struct *p, int running) +{ +} + +static void prio_changed_litmus(struct rq *rq, struct task_struct *p, + int oldprio, int running) +{ +} + +unsigned int get_rr_interval_litmus(struct task_struct *p) +{ + /* return infinity */ + return 0; +} + +/* This is called when a task became a real-time task, either due to a SCHED_* + * class transition or due to PI mutex inheritance. We don't handle Linux PI + * mutex inheritance yet (and probably never will). Use LITMUS provided + * synchronization primitives instead. + */ +static void set_curr_task_litmus(struct rq *rq) +{ + rq->curr->se.exec_start = rq->clock; +} + + +#ifdef CONFIG_SMP +/* execve tries to rebalance task in this scheduling domain */ +static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags) +{ + /* preemption is already disabled. 
+ * We don't want to change cpu here + */ + return smp_processor_id(); +} + +/* we don't repartition at runtime */ + +static unsigned long +load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, + unsigned long max_load_move, + struct sched_domain *sd, enum cpu_idle_type idle, + int *all_pinned, int *this_best_prio) +{ + return 0; +} + +static int +move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle) +{ + return 0; +} +#endif + +const struct sched_class litmus_sched_class = { + .next = &rt_sched_class, + .enqueue_task = enqueue_task_litmus, + .dequeue_task = dequeue_task_litmus, + .yield_task = yield_task_litmus, + + .check_preempt_curr = check_preempt_curr_litmus, + + .pick_next_task = pick_next_task_litmus, + .put_prev_task = put_prev_task_litmus, + +#ifdef CONFIG_SMP + .select_task_rq = select_task_rq_litmus, + + .load_balance = load_balance_litmus, + .move_one_task = move_one_task_litmus, +#endif + + .set_curr_task = set_curr_task_litmus, + .task_tick = task_tick_litmus, + + .get_rr_interval = get_rr_interval_litmus, + + .prio_changed = prio_changed_litmus, + .switched_to = switched_to_litmus, +}; diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c new file mode 100644 index 000000000000..0be091ece569 --- /dev/null +++ b/litmus/sched_plugin.c @@ -0,0 +1,199 @@ +/* sched_plugin.c -- core infrastructure for the scheduler plugin system + * + * This file includes the initialization of the plugin system, the no-op Linux + * scheduler plugin and some dummy functions. 
+ */ + +#include +#include + +#include +#include + +#include + +/************************************************************* + * Dummy plugin functions * + *************************************************************/ + +static void litmus_dummy_finish_switch(struct task_struct * prev) +{ +} + +static struct task_struct* litmus_dummy_schedule(struct task_struct * prev) +{ + return NULL; +} + +static void litmus_dummy_tick(struct task_struct* tsk) +{ +} + +static long litmus_dummy_admit_task(struct task_struct* tsk) +{ + printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n", + tsk->comm, tsk->pid); + return -EINVAL; +} + +static void litmus_dummy_task_new(struct task_struct *t, int on_rq, int running) +{ +} + +static void litmus_dummy_task_wake_up(struct task_struct *task) +{ +} + +static void litmus_dummy_task_block(struct task_struct *task) +{ +} + +static void litmus_dummy_task_exit(struct task_struct *task) +{ +} + +static long litmus_dummy_complete_job(void) +{ + return -ENOSYS; +} + +static long litmus_dummy_activate_plugin(void) +{ + return 0; +} + +static long litmus_dummy_deactivate_plugin(void) +{ + return 0; +} + +#ifdef CONFIG_FMLP + +static long litmus_dummy_inherit_priority(struct pi_semaphore *sem, + struct task_struct *new_owner) +{ + return -ENOSYS; +} + +static long litmus_dummy_return_priority(struct pi_semaphore *sem) +{ + return -ENOSYS; +} + +static long litmus_dummy_pi_block(struct pi_semaphore *sem, + struct task_struct *new_waiter) +{ + return -ENOSYS; +} + +#endif + + +/* The default scheduler plugin. It doesn't do anything and lets Linux do its + * job. 
+ */ +struct sched_plugin linux_sched_plugin = { + .plugin_name = "Linux", + .tick = litmus_dummy_tick, + .task_new = litmus_dummy_task_new, + .task_exit = litmus_dummy_task_exit, + .task_wake_up = litmus_dummy_task_wake_up, + .task_block = litmus_dummy_task_block, + .complete_job = litmus_dummy_complete_job, + .schedule = litmus_dummy_schedule, + .finish_switch = litmus_dummy_finish_switch, + .activate_plugin = litmus_dummy_activate_plugin, + .deactivate_plugin = litmus_dummy_deactivate_plugin, +#ifdef CONFIG_FMLP + .inherit_priority = litmus_dummy_inherit_priority, + .return_priority = litmus_dummy_return_priority, + .pi_block = litmus_dummy_pi_block, +#endif + .admit_task = litmus_dummy_admit_task +}; + +/* + * The reference to current plugin that is used to schedule tasks within + * the system. It stores references to actual function implementations + * Should be initialized by calling "init_***_plugin()" + */ +struct sched_plugin *litmus = &linux_sched_plugin; + +/* the list of registered scheduling plugins */ +static LIST_HEAD(sched_plugins); +static DEFINE_SPINLOCK(sched_plugins_lock); + +#define CHECK(func) {\ + if (!plugin->func) \ + plugin->func = litmus_dummy_ ## func;} + +/* FIXME: get reference to module */ +int register_sched_plugin(struct sched_plugin* plugin) +{ + printk(KERN_INFO "Registering LITMUS^RT plugin %s.\n", + plugin->plugin_name); + + /* make sure we don't trip over null pointers later */ + CHECK(finish_switch); + CHECK(schedule); + CHECK(tick); + CHECK(task_wake_up); + CHECK(task_exit); + CHECK(task_block); + CHECK(task_new); + CHECK(complete_job); + CHECK(activate_plugin); + CHECK(deactivate_plugin); +#ifdef CONFIG_FMLP + CHECK(inherit_priority); + CHECK(return_priority); + CHECK(pi_block); +#endif + CHECK(admit_task); + + if (!plugin->release_at) + plugin->release_at = release_at; + + spin_lock(&sched_plugins_lock); + list_add(&plugin->list, &sched_plugins); + spin_unlock(&sched_plugins_lock); + + return 0; +} + + +/* FIXME: reference 
counting, etc. */
+/* Look up a registered plugin by name; returns NULL if not found. */
+struct sched_plugin* find_sched_plugin(const char* name)
+{
+	struct list_head *pos;
+	struct sched_plugin *plugin;
+
+	spin_lock(&sched_plugins_lock);
+	list_for_each(pos, &sched_plugins) {
+		plugin = list_entry(pos, struct sched_plugin, list);
+		if (!strcmp(plugin->plugin_name, name))
+			goto out_unlock;
+	}
+	plugin = NULL;
+
+out_unlock:
+	spin_unlock(&sched_plugins_lock);
+	return plugin;
+}
+
+/* Write the names of all registered plugins, one per line, into buf
+ * (at most max bytes). Returns the number of bytes used, never more
+ * than max; output is silently truncated if buf is too small.
+ */
+int print_sched_plugins(char* buf, int max)
+{
+	int count = 0;
+	struct list_head *pos;
+	struct sched_plugin *plugin;
+
+	/* avoid handing snprintf a negative size */
+	if (max <= 0)
+		return 0;
+
+	spin_lock(&sched_plugins_lock);
+	list_for_each(pos, &sched_plugins) {
+		plugin = list_entry(pos, struct sched_plugin, list);
+		/* snprintf returns the untruncated length ... */
+		count += snprintf(buf + count, max - count, "%s\n",
+				  plugin->plugin_name);
+		/* ... so clamp before it can exceed the buffer size and
+		 * mislead the caller (or yield a negative size above).
+		 */
+		if (max - count <= 0) {
+			count = max;
+			break;
+		}
+	}
+	spin_unlock(&sched_plugins_lock);
+	return count;
+}
-- 
cgit v1.2.2