/* litmus.c -- Implementation of the LITMUS syscalls, the LITMUS initialization,
 *             and the common tick function.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include

#define MAX_SERVICE_LEVELS 10

/* Variables that govern the scheduling process */
spolicy sched_policy = SCHED_DEFAULT;
int sched_options = 0;

/* This is a flag for switching the system into RT mode when it is booted up.
 * In RT mode non-real-time tasks are scheduled as background tasks.
 */

/* The system is booting in non-real-time mode */
atomic_t rt_mode = ATOMIC_INIT(MODE_NON_RT);
/* Here we specify a mode change to be made */
atomic_t new_mode = ATOMIC_INIT(MODE_NON_RT);
/* Number of RT tasks that exist in the system */
atomic_t n_rt_tasks = ATOMIC_INIT(0);

/* Only one CPU may perform a mode change. */
static queuelock_t mode_change_lock;

/* The time instant when we switched to RT mode */
volatile jiffie_t rt_start_time = 0;

/* To send signals from the scheduler.
 * Must drop locks first.
 */
static LIST_HEAD(sched_sig_list);
static DEFINE_SPINLOCK(sched_sig_list_lock);

/**
 * sys_set_rt_mode
 * @newmode: new mode the scheduler must be switched to
 * External syscall for setting the RT mode flag.
 * Returns EINVAL if the mode is not recognized or the mode transition is
 * not permitted.
 * On success 0 is returned.
 *
 * FIXME: In a "real" OS we cannot just let any user switch the mode...
 */
asmlinkage long sys_set_rt_mode(int newmode)
{
	if ((newmode == MODE_NON_RT) || (newmode == MODE_RT_RUN)) {
		printk(KERN_INFO "real-time mode switch to %s\n",
		       (newmode == MODE_RT_RUN ? "rt" : "non-rt"));
		atomic_set(&new_mode, newmode);
		return 0;
	}
	return -EINVAL;
}

/*
 * sys_set_rt_task_param
 * @pid: Pid of the task whose scheduling parameters must be changed
 * @param: New real-time extension parameters such as the execution cost and
 *         period
 * Syscall for manipulating a task's RT extension parameters.
 * Returns EFAULT if param could not be copied from user space.
 *         ESRCH  if pid does not correspond to a valid task.
 *         EINVAL if param is NULL, or either period or execution cost is <= 0.
 *         EBUSY  if pid is already a real-time task.
 *         0      if success.
 *
 * Only non-real-time tasks may be configured with this system call
 * to avoid races with the scheduler. In practice, this means that a
 * task's parameters must be set _before_ calling sys_prepare_rt_task().
 */
asmlinkage long sys_set_rt_task_param(pid_t pid, rt_param_t __user * param)
{
	rt_param_t tp;
	struct task_struct *target;
	int retval = -EINVAL;

	printk("Setting up rt task parameters for process %d.\n", pid);

	if (pid < 0 || param == 0) {
		goto out;
	}
	if (copy_from_user(&tp, param, sizeof(tp))) {
		retval = -EFAULT;
		goto out;
	}

	/* Task search and manipulation must be protected */
	read_lock_irq(&tasklist_lock);
	if (!(target = find_task_by_pid(pid))) {
		retval = -ESRCH;
		goto out_unlock;
	}

	if (is_realtime(target)) {
		/* The task is already a real-time task.
		 * We cannot allow parameter changes at this point.
		 */
		retval = -EBUSY;
		goto out_unlock;
	}
	if (tp.exec_cost <= 0)
		goto out_unlock;
	if (tp.period <= 0)
		goto out_unlock;
	if (!cpu_online(tp.cpu))
		goto out_unlock;
	if (tp.period < tp.exec_cost) {
		printk(KERN_INFO "litmus: real-time task %d rejected "
		       "because wcet > period\n", pid);
		goto out_unlock;
	}

	/* Assign params */
	target->rt_param.basic_params = tp;

	retval = 0;
out_unlock:
	read_unlock_irq(&tasklist_lock);
out:
	return retval;
}
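/* Example (illustrative sketch, not part of the kernel build): a user-space
 * task would typically fill in an rt_param_t and hand it to the kernel via
 * this syscall before transitioning into real-time mode. The wrapper
 * set_rt_task_param() stands for whatever user-space stub (e.g. a syscall(2)
 * wrapper) invokes sys_set_rt_task_param(); its name and the time unit used
 * for exec_cost/period are assumptions, not defined by this file.
 *
 *	rt_param_t p;
 *	memset(&p, 0, sizeof(p));
 *	p.exec_cost = 10;                    // worst-case execution time
 *	p.period    = 100;                   // period; must be >= exec_cost
 *	p.cpu       = 0;                     // partition; must be online
 *	if (set_rt_task_param(getpid(), &p) < 0)
 *		perror("set_rt_task_param");
 */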
/* Getter of a task's RT params.
 *    returns EINVAL if param is NULL or pid is negative
 *    returns ESRCH  if pid does not correspond to a valid task
 *    returns EFAULT if copying of parameters has failed
 */
asmlinkage long sys_get_rt_task_param(pid_t pid, rt_param_t __user * param)
{
	int retval = -EINVAL;
	struct task_struct *source;
	rt_param_t lp;

	if (param == 0 || pid < 0)
		goto out;

	read_lock(&tasklist_lock);
	if (!(source = find_task_by_pid(pid))) {
		retval = -ESRCH;
		goto out_unlock;
	}
	lp = source->rt_param.basic_params;
	read_unlock(&tasklist_lock);

	/* Do the copying outside the lock */
	retval = copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0;
	return retval;
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return retval;
}

/*
 * sys_set_service_levels
 * @pid: Pid of the task that is to be configured
 * @count: The number of service levels
 * @levels: The new service levels
 *
 * Returns EFAULT if levels is not a valid address.
 *         ESRCH  if pid does not correspond to a valid task.
 *         EINVAL if either period or execution cost is <= 0 for any level,
 *                or if utility is not increasing.
 *         EBUSY  if pid is already a real-time task.
 *         ENOMEM if there is insufficient memory available.
 *         0      if success.
 *
 * May not be used on RT tasks to avoid races.
 */
asmlinkage long sys_set_service_levels(pid_t pid,
				       unsigned int count,
				       service_level_t __user *levels)
{
	struct task_struct *target;
	service_level_t level, *klevels;
	int retval = -EINVAL, i;
	fp_t last_value  = FP(0);
	fp_t last_weight = FP(0);

	TRACE("Setting up service levels for process %d.\n", pid);

	if (pid < 0 || count > MAX_SERVICE_LEVELS) {
		goto out;
	}

	/* Task search and manipulation must be protected */
	read_lock_irq(&tasklist_lock);
	if (!(target = find_task_by_pid(pid))) {
		retval = -ESRCH;
		read_unlock_irq(&tasklist_lock);
		goto out;
	}
	read_unlock_irq(&tasklist_lock);

	if (is_realtime(target)) {
		/* The task is already a real-time task.
		 * We cannot allow parameter changes at this point.
		 */
		retval = -EBUSY;
		goto out;
	}

	/* get rid of old service levels, if any */
	kfree(target->rt_param.service_level);
	target->rt_param.service_level     = NULL;
	target->rt_param.no_service_levels = 0;

	/* count == 0 means tear down service levels */
	if (count == 0) {
		retval = 0;
		goto out;
	}

	klevels = kmalloc(sizeof(service_level_t) * count, GFP_KERNEL);
	if (!klevels) {
		retval = -ENOMEM;
		goto out;
	}

	for (i = 0; i < count; i++) {
		if (copy_from_user(&level, levels + i, sizeof(level))) {
			retval = -EFAULT;
			goto out_free;
		}
		if (level.period <= 0) {
			TRACE("service level %d period <= 0\n", i);
			goto out_free;
		}
		if (_leq(level.weight, last_weight)) {
			TRACE("service level %d weight non-increase\n", i);
			goto out_free;
		}
		if (_leq(level.value, last_value)) {
			TRACE("service level %d value non-increase\n", i);
			goto out_free;
		}
		last_value  = level.value;
		last_weight = level.weight;
		klevels[i]  = level;
	}
	target->rt_param.basic_params.exec_cost =
		_round(_mul(klevels[0].weight, FP(klevels[0].period)));
	target->rt_param.basic_params.period = klevels[0].period;
	target->rt_param.service_level       = klevels;
	target->rt_param.no_service_levels   = count;
	retval = 0;
	goto out;

out_free:
	/* don't leak the partially initialized level table */
	kfree(klevels);
out:
	return retval;
}

asmlinkage long sys_get_cur_service_level(void)
{
	long level;

	if (!is_realtime(current))
		return -EINVAL;

	/* block the scheduler, which might cause a reweighting to happen */
	local_irq_disable();
	level = current->rt_param.cur_service_level;
	local_irq_enable();
	return level;
}
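/* Example (illustrative sketch, not part of the kernel build): the service
 * levels handed to sys_set_service_levels() must have strictly increasing
 * weight and value; the first level also determines the task's initial
 * exec_cost/period. The wrapper set_service_levels() and the fixed-point
 * helper _frac(numerator, denominator) are assumptions used only for
 * illustration.
 *
 *	service_level_t lv[2];
 *	lv[0].period = 100; lv[0].weight = _frac(1, 10); lv[0].value = _frac(1, 2);
 *	lv[1].period = 100; lv[1].weight = _frac(2, 10); lv[1].value = _frac(1, 1);
 *	if (set_service_levels(getpid(), 2, lv) < 0)
 *		perror("set_service_levels");
 */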
/* sys_task_mode_transition
 * @target_mode: The desired execution mode after the system call completes.
 *               Either BACKGROUND_TASK or LITMUS_RT_TASK.
 * Allows a normal task to become a real-time task, and vice versa.
 * Returns EINVAL if an illegal transition is requested,
 *         0      if the task mode was changed successfully,
 *         other  if the plugin failed.
 */
asmlinkage long sys_task_mode_transition(int target_mode)
{
	int retval = -EINVAL;
	struct task_struct *t = current;

	if (( is_realtime(t) && target_mode == BACKGROUND_TASK) ||
	    (!is_realtime(t) && target_mode == LITMUS_RT_TASK)) {
		TRACE_TASK(t, "attempts mode transition to %s\n",
			   is_realtime(t) ? "best-effort" : "real-time");
		preempt_disable();
		t->rt_param.transition_pending = 1;
		t->state = TASK_STOPPED;
		preempt_enable_no_resched();

		schedule();

		retval = t->rt_param.transition_error;
	}
	return retval;
}

/* implemented in kernel/litmus_sem.c */
void srp_ceiling_block(void);

/*
 * This is the crucial function for periodic task implementation.
 * It checks if a task is periodic, checks if such a sleep is
 * permitted, and calls the plugin-specific sleep, which puts the
 * task into a wait array.
 *    returns 0      on successful wakeup
 *    returns EPERM  if current conditions do not permit such a sleep
 *    returns EINVAL if the current task is not able to go to sleep
 */
asmlinkage long sys_sleep_next_period(void)
{
	int retval = -EPERM;

	if (!is_realtime(current)) {
		retval = -EINVAL;
		goto out;
	}
	/* Task with negative or zero period cannot sleep */
	if (get_rt_period(current) <= 0) {
		retval = -EINVAL;
		goto out;
	}
	/* The plugin has to put the task into an
	 * appropriate queue and call schedule.
	 */
	retval = curr_sched_plugin->sleep_next_period();
	if (!retval && is_subject_to_srp(current))
		srp_ceiling_block();
out:
	return retval;
}

/* This is an "improved" version of sys_sleep_next_period() that
 * addresses the problem of unintentionally missing a job after
 * an overrun.
 *
 *    returns 0      on successful wakeup
 *    returns EPERM  if current conditions do not permit such a sleep
 *    returns EINVAL if the current task is not able to go to sleep
 */
asmlinkage long sys_wait_for_job_release(unsigned int job)
{
	int retval = -EPERM;

	if (!is_realtime(current)) {
		retval = -EINVAL;
		goto out;
	}

	/* Task with negative or zero period cannot sleep */
	if (get_rt_period(current) <= 0) {
		retval = -EINVAL;
		goto out;
	}

	retval = 0;

	/* first wait until we have "reached" the desired job
	 *
	 * This implementation has at least two problems:
	 *
	 * 1) It doesn't gracefully handle the wrap around of
	 *    job_no. Since LITMUS is a prototype, this is not much
	 *    of a problem right now.
	 *
	 * 2) It is theoretically racy if a job release occurs
	 *    between checking job_no and calling sleep_next_period().
	 *    A proper solution would require adding another callback
	 *    in the plugin structure and testing the condition with
	 *    interrupts disabled.
	 *
	 * FIXME: At least problem 2 should be taken care of eventually.
	 */
	while (!retval && job > current->rt_param.times.job_no)
		/* If the last job overran then job <= job_no and we
		 * don't send the task to sleep.
		 */
		retval = curr_sched_plugin->sleep_next_period();

	/* We still have to honor the SRP after the actual release. */
	if (!retval && is_subject_to_srp(current))
		srp_ceiling_block();
out:
	return retval;
}

/* This is a helper syscall to query the current job sequence number.
 *
 *    returns 0      on successful query
 *    returns EPERM  if the task is not a real-time task
 *    returns EFAULT if &job is not a valid pointer
 */
asmlinkage long sys_query_job_no(unsigned int __user *job)
{
	int retval = -EPERM;
	if (is_realtime(current))
		retval = put_user(current->rt_param.times.job_no, job);

	return retval;
}
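/* Example (illustrative sketch, not part of the kernel build): the intended
 * user-space job loop. sleep_next_period() can unintentionally skip a job
 * after an overrun, whereas wait_for_job_release() sleeps until a specific
 * job number has been released and therefore returns immediately if that job
 * was already released during the overrun. The wrapper names stand for
 * whatever user-space stubs invoke these syscalls and are assumptions.
 *
 *	unsigned int job;
 *	query_job_no(&job);
 *	for (;;) {
 *		do_one_job();
 *		job++;
 *		wait_for_job_release(job);   // instead of sleep_next_period()
 *	}
 */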
/* The LITMUS tick function. It manages the change to and from real-time mode
 * and then calls the plugin's tick function.
 */
reschedule_check_t __sched rt_scheduler_tick(void)
{
	/* Check for mode change */
	if (get_rt_mode() != atomic_read(&new_mode)) {
		queue_lock(&mode_change_lock);

		/* If the mode has already been changed, proceed. */
		if (get_rt_mode() == atomic_read(&new_mode)) {
			queue_unlock(&mode_change_lock);
			goto proceed;
		}

		/* change the mode */
		if (atomic_read(&new_mode) == MODE_RT_RUN) {
			/* The deferral of entering real-time mode should be
			 * handled by deferring task releases in the plugin.
			 * The plugin interface does not really need to know
			 * about quanta, that is the plugin's job.
			 */

			/* update rt start time */
			rt_start_time = jiffies;
			printk(KERN_INFO "Real-Time mode enabled at %ld "
			       "on %d\n", jiffies, smp_processor_id());
		} else
			printk(KERN_INFO "Real-Time mode disabled at %ld "
			       "on %d\n", jiffies, smp_processor_id());

		if (curr_sched_plugin->mode_change)
			curr_sched_plugin->
				mode_change(atomic_read(&new_mode));
		printk(KERN_INFO "Plugin mode change done at %ld\n", jiffies);
		set_rt_mode(atomic_read(&new_mode));
		queue_unlock(&mode_change_lock);
	}

proceed:
	/* Call the plugin-defined tick handler.
	 *
	 * It is the plugin's tick handler's job to detect quantum
	 * boundaries in PFAIR.
	 */
	return curr_sched_plugin->scheduler_tick();
}

asmlinkage spolicy sys_sched_setpolicy(spolicy newpolicy)
{
	/* Dynamic policy change is disabled at the moment */
	return SCHED_INVALID;
}

asmlinkage spolicy sys_sched_getpolicy(void)
{
	return sched_policy;
}

asmlinkage int sys_scheduler_setup(int cmd, void __user *parameter)
{
	int ret = -EINVAL;

	ret = curr_sched_plugin->scheduler_setup(cmd, parameter);
	return ret;
}

struct sched_sig {
	struct list_head	list;
	struct task_struct	*task;
	unsigned int		signal:31;
	int			force:1;
};

static void __scheduler_signal(struct task_struct *t, unsigned int signo,
			       int force)
{
	struct sched_sig *sig;

	sig = kmalloc(sizeof(struct sched_sig), GFP_ATOMIC);
	if (!sig) {
		TRACE_TASK(t, "dropping signal: %u\n", signo);
		return;
	}

	spin_lock(&sched_sig_list_lock);

	sig->signal = signo;
	sig->force  = force;
	sig->task   = t;
	get_task_struct(t);
	list_add(&sig->list, &sched_sig_list);

	spin_unlock(&sched_sig_list_lock);
}

void scheduler_signal(struct task_struct *t, unsigned int signo)
{
	__scheduler_signal(t, signo, 0);
}

void force_scheduler_signal(struct task_struct *t, unsigned int signo)
{
	__scheduler_signal(t, signo, 1);
}

/* FIXME: get rid of the locking and do this on a per-processor basis */
void send_scheduler_signals(void)
{
	unsigned long flags;
	struct list_head *p, *extra;
	struct siginfo info;
	struct sched_sig *sig;
	struct task_struct *t;
	struct list_head claimed;

	if (spin_trylock_irqsave(&sched_sig_list_lock, flags)) {
		if (list_empty(&sched_sig_list))
			p = NULL;
		else {
			p = sched_sig_list.next;
			list_del(&sched_sig_list);
			INIT_LIST_HEAD(&sched_sig_list);
		}
		spin_unlock_irqrestore(&sched_sig_list_lock, flags);

		/* abort if there are no signals */
		if (!p)
			return;

		/* take the signal list we just obtained */
		list_add(&claimed, p);

		list_for_each_safe(p, extra, &claimed) {
			list_del(p);
			sig = list_entry(p, struct sched_sig, list);
			t = sig->task;
			info.si_signo = sig->signal;
			info.si_errno = 0;
			info.si_code  = SI_KERNEL;
			info.si_pid   = 1;
			info.si_uid   = 0;
			TRACE("sending signal %d to %d\n", info.si_signo,
			      t->pid);
			if (sig->force)
				force_sig_info(sig->signal, &info, t);
			else
				send_sig_info(sig->signal, &info, t);
			put_task_struct(t);
			kfree(sig);
		}
	}
}
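/* Example (illustrative sketch, not part of the kernel build): the intended
 * usage pattern for the deferred-signal helpers above. A caller, hypothetically
 * a scheduler plugin, queues signals while it still holds scheduler locks and
 * flushes them later, once all locks have been dropped, because delivering a
 * signal directly from that context could deadlock.
 *
 *	spin_lock(&some_scheduler_lock);
 *	scheduler_signal(t, SIGUSR1);        // only queues the signal
 *	spin_unlock(&some_scheduler_lock);
 *	...
 *	send_scheduler_signals();            // actually delivers queued signals
 */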
static inline void np_mem_error(struct task_struct *t, const char *reason)
{
	if (t->state != TASK_DEAD && !(t->flags & PF_EXITING)) {
		TRACE("np section: %s => %s/%d killed\n",
		      reason, t->comm, t->pid);
		force_scheduler_signal(t, SIGKILL);
	}
}

/* sys_register_np_flag() allows real-time tasks to register an
 * np section indicator.
 *    returns 0      if the flag was successfully registered
 *    returns EINVAL if the current task is not a real-time task
 *    returns EFAULT if *flag couldn't be written
 */
asmlinkage long sys_register_np_flag(short __user *flag)
{
	int retval = -EINVAL;
	short test_val = RT_PREEMPTIVE;

	/* avoid races with the scheduler */
	preempt_disable();
	TRACE("reg_np_flag(%p) for %s/%d\n", flag, current->comm,
	      current->pid);

	/* Let's first try to write to the address.
	 * That way it is initialized and any bugs
	 * involving dangling pointers will be caught
	 * early.
	 * NULL indicates disabling np section support
	 * and should not be tested.
	 */
	if (flag)
		retval = poke_kernel_address(test_val, flag);
	else
		retval = 0;

	TRACE("reg_np_flag: retval=%d\n", retval);

	if (unlikely(0 != retval))
		np_mem_error(current, "np flag: not writable");
	else
		/* the pointer is ok */
		current->rt_param.np_flag = flag;

	preempt_enable();
	return retval;
}

void request_exit_np(struct task_struct *t)
{
	int ret;
	short flag;

	/* We can only do this if t is actually currently scheduled on this
	 * CPU because otherwise we are in the wrong address space. Thus make
	 * sure to check.
	 */
	BUG_ON(t != current);

	if (unlikely(!is_realtime(t) || !t->rt_param.np_flag)) {
		TRACE_TASK(t, "request_exit_np(): BAD TASK!\n");
		return;
	}

	flag = RT_EXIT_NP_REQUESTED;
	ret  = poke_kernel_address(flag, t->rt_param.np_flag + 1);
	TRACE("request_exit_np(%s/%d)\n", t->comm, t->pid);
	if (unlikely(0 != ret))
		np_mem_error(current, "request_exit_np(): flag not writable");
}

int is_np(struct task_struct *t)
{
	int ret;
	unsigned short flag = 0x5858;	/* = XX, looks nicer in debug */

	BUG_ON(t != current);

	if (unlikely(t->rt_param.kernel_np))
		return 1;
	else if (unlikely(t->rt_param.np_flag == NULL) ||
		 t->flags & PF_EXITING ||
		 t->state == TASK_DEAD)
		return 0;
	else {
		/* This is the tricky part. The process has registered a
		 * non-preemptive section marker. We now need to check whether
		 * it is set to NON_PREEMPTIVE. Along the way we could
		 * discover that the pointer points to an unmapped region (=>
		 * kill the task) or that the location contains some garbage
		 * value (=> also kill the task). Killing the task in any case
		 * forces userspace to play nicely. Any bugs will be discovered
		 * immediately.
		 */
		ret = probe_kernel_address(t->rt_param.np_flag, flag);
		if (0 == ret &&
		    (flag == RT_NON_PREEMPTIVE || flag == RT_PREEMPTIVE))
			return flag != RT_PREEMPTIVE;
		else {
			/* either we could not read from the address or
			 * it contained garbage => kill the process
			 * FIXME: Should we cause a SEGFAULT instead?
			 */
			TRACE("is_np: ret=%d flag=%c%c (%x)\n", ret,
			      flag & 0xff, (flag >> 8) & 0xff, flag);
			np_mem_error(t, "is_np() could not read");
			return 0;
		}
	}
}
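/* Example (illustrative sketch, not part of the kernel build): the user-space
 * side of the non-preemptive-section protocol implied by the code above. The
 * kernel reads the registered short to see whether the task is currently
 * non-preemptive and, via request_exit_np(), writes RT_EXIT_NP_REQUESTED to
 * the short immediately following it (np_flag + 1), so user space should
 * reserve two adjacent shorts. The wrapper names register_np_flag() and
 * exit_np() stand for user-space stubs of the corresponding syscalls, and the
 * exact hand-shake expected by a given plugin is an assumption.
 *
 *	static volatile short np[2];      // np[0]: flag, np[1]: exit request
 *
 *	register_np_flag((short *) &np[0]);
 *	...
 *	np[0] = RT_NON_PREEMPTIVE;        // enter np section
 *	critical_work();
 *	np[0] = RT_PREEMPTIVE;            // leave np section
 *	if (np[1] == RT_EXIT_NP_REQUESTED) {
 *		np[1] = 0;
 *		exit_np();                // tell the kernel we complied
 *	}
 */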
/*
 * sys_exit_np() allows a real-time task to signal that it has left a
 * non-preemptable section. It will be called after the kernel requested a
 * callback in the preemption indicator flag.
 *    returns 0      if the signal was valid and processed
 *    returns EINVAL if the current task is not a real-time task
 */
asmlinkage long sys_exit_np(void)
{
	int retval = -EINVAL;

	TS_EXIT_NP_START;

	if (!is_realtime(current))
		goto out;

	TRACE("sys_exit_np(%s/%d)\n", current->comm, current->pid);
	/* force rescheduling so that we can be preempted */
	set_tsk_need_resched(current);
	retval = 0;
out:
	TS_EXIT_NP_END;
	return retval;
}

long transition_to_rt(struct task_struct *tsk)
{
	long retval;

	BUG_ON(is_realtime(tsk));

	if (get_rt_period(tsk) == 0 ||
	    get_exec_cost(tsk) > get_rt_period(tsk)) {
		TRACE_TASK(tsk, "litmus prepare: invalid task parameters "
			   "(%lu, %lu)\n",
			   get_exec_cost(tsk), get_rt_period(tsk));
		return -EINVAL;
	}

	if (!cpu_online(get_partition(tsk))) {
		TRACE_TASK(tsk, "litmus prepare: cpu %d is not online\n",
			   get_partition(tsk));
		return -EINVAL;
	}

	tsk->rt_param.old_prio   = tsk->rt_priority;
	tsk->rt_param.old_policy = tsk->policy;
	INIT_LIST_HEAD(&tsk->rt_list);

	retval = curr_sched_plugin->prepare_task(tsk);

	if (!retval) {
		atomic_inc(&n_rt_tasks);
		tsk->rt_param.is_realtime       = 1;
		tsk->rt_param.litmus_controlled = 1;
	}

	return retval;
}

/* p is a real-time task. Re-init its state as a best-effort task. */
static void reinit_litmus_state(struct task_struct *p, int restore)
{
	rt_param_t user_config;
	__user short *np_flag;

	if (restore) {
		/* Save user-space provided configuration data.
		 * FIXME: This is missing service levels for adaptive tasks.
		 */
		user_config = p->rt_param.basic_params;
		np_flag     = p->rt_param.np_flag;
	}

	/* We probably should not be inheriting any task's priority
	 * at this point in time.
	 */
	WARN_ON(p->rt_param.inh_task);

	/* We need to restore the priority of the task. */
	__setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio);

	/* Cleanup everything else. */
	memset(&p->rt_param, 0, sizeof(task_rt_param_t));

	/* Restore preserved fields. */
	if (restore) {
		p->rt_param.basic_params = user_config;
		p->rt_param.np_flag      = np_flag;
	}
}

long transition_to_be(struct task_struct *tsk)
{
	BUG_ON(!is_realtime(tsk));

	curr_sched_plugin->tear_down(tsk);
	atomic_dec(&n_rt_tasks);
	reinit_litmus_state(tsk, 1);
	return 0;
}

/* Called upon fork.
 * p is the newly forked task.
 */
void litmus_fork(struct task_struct *p)
{
	if (is_realtime(p))
		/* clean out any litmus related state, don't preserve anything */
		reinit_litmus_state(p, 0);
}

/* Called upon execve().
 * current is doing the exec.
 * Don't let address space specific stuff leak.
 */
void litmus_exec(void)
{
	struct task_struct *p = current;

	if (is_realtime(p)) {
		WARN_ON(p->rt_param.inh_task);
		p->rt_param.np_flag = NULL;
	}
}

void exit_litmus(struct task_struct *dead_tsk)
{
	if (is_realtime(dead_tsk))
		transition_to_be(dead_tsk);
	kfree(dead_tsk->rt_param.service_level);
}

void list_qsort(struct list_head *list, list_cmp_t less_than)
{
	struct list_head lt;
	struct list_head geq;
	struct list_head *pos, *extra, *pivot;
	int n_lt = 0, n_geq = 0;

	BUG_ON(!list);

	if (list->next == list)
		return;

	INIT_LIST_HEAD(&lt);
	INIT_LIST_HEAD(&geq);

	pivot = list->next;
	list_del(pivot);
	list_for_each_safe(pos, extra, list) {
		list_del(pos);
		if (less_than(pos, pivot)) {
			list_add(pos, &lt);
			n_lt++;
		} else {
			list_add(pos, &geq);
			n_geq++;
		}
	}

	if (n_lt < n_geq) {
		list_qsort(&lt, less_than);
		list_qsort(&geq, less_than);
	} else {
		list_qsort(&geq, less_than);
		list_qsort(&lt, less_than);
	}

	list_splice(&geq, list);
	list_add(pivot, list);
	list_splice(&lt, list);
}
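/* Example (illustrative sketch, not part of the kernel build): using
 * list_qsort() with a caller-supplied "less than" comparison. The element
 * type my_elem and its field names are hypothetical; list_cmp_t is assumed
 * to match the int (*)(struct list_head *, struct list_head *) signature
 * implied by the calls above.
 *
 *	struct my_elem {
 *		struct list_head link;
 *		int key;
 *	};
 *
 *	static int my_elem_lt(struct list_head *a, struct list_head *b)
 *	{
 *		return list_entry(a, struct my_elem, link)->key <
 *		       list_entry(b, struct my_elem, link)->key;
 *	}
 *
 *	// sorts a list of struct my_elem in ascending key order
 *	list_qsort(&my_list_head, my_elem_lt);
 */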
#ifdef CONFIG_MAGIC_SYSRQ
/* We offer the possibility to change the real-time mode of the system
 * with a magic sysrq request. This helps with debugging in case the system
 * fails to perform its planned switch back to normal mode. This may happen
 * if we have full system utilization and the task that is supposed to do the
 * switch is always preempted (if it is not a real-time task).
 */
int sys_kill(int pid, int sig);

static void sysrq_handle_toGgle_rt_mode(int key, struct tty_struct *tty)
{
	sys_set_rt_mode(get_rt_mode() == MODE_NON_RT);
}

static struct sysrq_key_op sysrq_toGgle_rt_mode_op = {
	.handler	= sysrq_handle_toGgle_rt_mode,
	.help_msg	= "toGgle-rt-mode",
	.action_msg	= "real-time mode changed",
};

static void sysrq_handle_kill_rt_tasks(int key, struct tty_struct *tty)
{
	struct task_struct *t;
	read_lock(&tasklist_lock);
	for_each_process(t) {
		if (is_realtime(t)) {
			sys_kill(t->pid, SIGKILL);
		}
	}
	read_unlock(&tasklist_lock);
}

static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
	.handler	= sysrq_handle_kill_rt_tasks,
	.help_msg	= "Quit-rt-tasks",
	.action_msg	= "sent SIGKILL to all real-time tasks",
};
#endif

/*
 * Scheduler initialization so that a customized scheduler is
 * enabled at boot time by setting the boot option
 * "rtsched=plugin_name", e.g. "rtsched=pfair".
 */

/* All we need to know about other plugins is their initialization
 * functions. These functions initialize internal data structures of a
 * scheduler and return a pointer to an initialized sched_plugin data
 * structure with pointers to scheduling function implementations.
 * If called repeatedly, these init functions just return an existing
 * plugin pointer.
 */
sched_plugin_t *init_global_edf_plugin(void);
sched_plugin_t *init_global_edf_np_plugin(void);
sched_plugin_t *init_part_edf_plugin(void);
sched_plugin_t *init_edf_hsb_plugin(void);
sched_plugin_t *init_pfair_plugin(void);
sched_plugin_t *init_gsn_edf_plugin(void);
sched_plugin_t *init_psn_edf_plugin(void);
sched_plugin_t *init_adaptive_plugin(void);

/* keep everything needed to set up plugins in one place */

/* we are lazy, so we use a convention for function naming to fill
 * a table
 */
#define PLUGIN(caps, small) \
	{PLUGIN_ ## caps, SCHED_ ## caps, init_ ## small ## _plugin}

#define init_nosetup_plugin 0

static struct {
	const char	*name;
	const spolicy	policy_id;
	sched_plugin_t	*(*init) (void);
} available_plugins[] = {
	PLUGIN(LINUX, nosetup),
	PLUGIN(GLOBAL_EDF_NP, global_edf_np),
	PLUGIN(GLOBAL_EDF, global_edf),
	PLUGIN(PART_EDF, part_edf),
	PLUGIN(EDF_HSB, edf_hsb),
	PLUGIN(PFAIR, pfair),
	PLUGIN(GSN_EDF, gsn_edf),
	PLUGIN(PSN_EDF, psn_edf),
	PLUGIN(ADAPTIVE, adaptive),
	/*********************************************
	 *    Add your custom plugin here
	 *********************************************/
};
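/* Example (illustrative sketch): adding a plugin named "demo" following the
 * naming convention encoded in the PLUGIN() macro above. The identifiers
 * PLUGIN_DEMO and SCHED_DEMO would have to be defined alongside the existing
 * plugin/policy constants; they are hypothetical here.
 *
 *	sched_plugin_t *init_demo_plugin(void);
 *	...
 *	PLUGIN(DEMO, demo),   // expands to {PLUGIN_DEMO, SCHED_DEMO, init_demo_plugin}
 */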
/* Some plugins may leave important functions unused. We define dummies
 * so that we don't have to check for null pointers all over the place.
 */
void litmus_dummy_finish_switch(struct task_struct *prev);
int litmus_dummy_schedule(struct task_struct *prev,
			  struct task_struct **next, runqueue_t *q);
reschedule_check_t litmus_dummy_scheduler_tick(void);
long litmus_dummy_prepare_task(struct task_struct *t);
void litmus_dummy_wake_up_task(struct task_struct *task);
void litmus_dummy_task_blocks(struct task_struct *task);
long litmus_dummy_tear_down(struct task_struct *task);
int litmus_dummy_scheduler_setup(int cmd, void __user *parameter);
long litmus_dummy_sleep_next_period(void);
long litmus_dummy_inherit_priority(struct pi_semaphore *sem,
				   struct task_struct *new_owner);
long litmus_dummy_return_priority(struct pi_semaphore *sem);
long litmus_dummy_pi_block(struct pi_semaphore *sem, struct task_struct *t);

#define CHECK(func) {\
	if (!curr_sched_plugin->func) \
		curr_sched_plugin->func = litmus_dummy_ ## func;}

static int boot_sched_setup(char *plugin_name)
{
	int i = 0;

	/* Common initializers.
	 * The mode change lock is used to enforce single mode change
	 * operation.
	 */
	queue_lock_init(&mode_change_lock);

	printk("Starting LITMUS^RT kernel\n");

	/* Look for a matching plugin. */
	for (i = 0; i < ARRAY_SIZE(available_plugins); i++) {
		if (!strcmp(plugin_name, available_plugins[i].name)) {
			printk("Using %s scheduler plugin\n", plugin_name);
			sched_policy = available_plugins[i].policy_id;
			if (available_plugins[i].init)
				curr_sched_plugin = available_plugins[i].init();
			goto out;
		}
	}

	/* Otherwise we fall back to the default Linux scheduler. */
	printk("Plugin name %s is unknown, using default %s\n",
	       plugin_name, curr_sched_plugin->plugin_name);

out:
	/* make sure we don't trip over null pointers later */
	CHECK(finish_switch);
	CHECK(schedule);
	CHECK(scheduler_tick);
	CHECK(wake_up_task);
	CHECK(tear_down);
	CHECK(task_blocks);
	CHECK(prepare_task);
	CHECK(scheduler_setup);
	CHECK(sleep_next_period);
	CHECK(inherit_priority);
	CHECK(return_priority);
	CHECK(pi_block);

#ifdef CONFIG_MAGIC_SYSRQ
	/* offer some debugging help */
	if (!register_sysrq_key('g', &sysrq_toGgle_rt_mode_op))
		printk("Registered real-time mode toGgle magic sysrq.\n");
	else
		printk("Could not register real-time mode toGgle magic sysrq.\n");

	if (!register_sysrq_key('q', &sysrq_kill_rt_tasks_op))
		printk("Registered kill rt tasks magic sysrq.\n");
	else
		printk("Could not register kill rt tasks magic sysrq.\n");
#endif
	printk("Litmus setup complete.\n");
	return 1;
}

/* Register for boot option */
__setup("rtsched=", boot_sched_setup);