From b8be8fb192541fad88983ef6f9270cec1b51b59a Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Fri, 28 Jan 2011 14:22:27 -0500
Subject: Generalized architecture for GEDF-style schedulers to reduce code redundancy.

This patch hoists nearly all of the non-GEDF-specific code (decision
making (GEDF) vs. carrying out decisions (non-specific)) out of
sched_gsn_edf.c and into a new generic sched_global_plugin
architecture.  A new struct, sched_global_plugin, provides the needed
hooks for other GEDF-style schedulers to reuse the
non-scheduler-specific code.

You may conceptualize this new architecture (in OO terms) as:
* sched_plugin is the parent of sched_global_plugin.
* sched_global_plugin is the parent of gsn_edf_plugin.
* Both sched_plugin and sched_global_plugin are "pure virtual".

This patch drastically reduces the amount of code needed to support
G-EDF, EDZL, (Adaptive) EDZL, etc.
---
 litmus/Makefile              |    1 +
 litmus/litmus.c              |    2 +-
 litmus/sched_cedf.c          |    2 +-
 litmus/sched_global_plugin.c |  675 ++++++++++++++++++++++++++++++++++++++++++
 litmus/sched_gsn_edf.c       |  688 ++++---------------------------------------
 litmus/sched_pfair.c         |    2 +-
 litmus/sched_plugin.c        |    2 +-
 7 files changed, 733 insertions(+), 639 deletions(-)
 create mode 100644 litmus/sched_global_plugin.c

diff --git a/litmus/Makefile b/litmus/Makefile
index b7366b530749..820deb7f2263 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -15,6 +15,7 @@ obj-y = sched_plugin.o litmus.o \
 	   fmlp.o \
 	   bheap.o \
 	   ctrldev.o \
+	   sched_global_plugin.o \
 	   sched_gsn_edf.o \
 	   sched_psn_edf.o
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 8efd3f9ef7ee..744880c90eb5 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -416,7 +416,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	ret = litmus->deactivate_plugin();
 	if (0 != ret)
 		goto out;
-	ret = plugin->activate_plugin();
+	ret = plugin->activate_plugin(plugin);
 	if (0 != ret) {
 		printk(KERN_INFO "Can't activate %s (%d).\n",
 		       plugin->plugin_name, ret);
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 098a449c2490..0b88d4713602 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -641,7 +641,7 @@ static void cleanup_cedf(void)
 	}
 }
 
-static long cedf_activate_plugin(void)
+static long cedf_activate_plugin(void* plugin)
 {
 	int i, j, cpu, ccpu, cpu_count;
 	cpu_entry_t *entry;
diff --git a/litmus/sched_global_plugin.c b/litmus/sched_global_plugin.c
new file mode 100644
index 000000000000..22dffa7d62fc
--- /dev/null
+++ b/litmus/sched_global_plugin.c
@@ -0,0 +1,675 @@
+/*
+ * litmus/sched_global_plugin.c
+ *
+ * Implementation of the basic operations and architecture needed by
+ * G-EDF/G-FIFO/EDZL/AEDZL global schedulers.
+ *
+ * This version uses the simple approach and serializes all scheduling
+ * decisions by the use of a queue lock. This is probably not the
+ * best way to do it, but it should suffice for now.
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+
+/* Overview of Global operations.
+ *
+ * gbl_link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
+ *                                structure (NOT the actually scheduled
+ *                                task). If there is another linked task To
+ *                                already it will set To->linked_on = NO_CPU
+ *                                (thereby removing its association with this
+ *                                CPU). However, it will not requeue the
+ *                                previously linked task (if any). It will set
+ *                                T's state to RT_F_RUNNING and check whether
+ *                                it is already running somewhere else. If T
+ *                                is scheduled somewhere else it will link
+ *                                it to that CPU instead (and pull the linked
+ *                                task to cpu). T may be NULL.
+ *
+ * gbl_unlink(T)                - Unlink removes T from all scheduler data
+ *                                structures. If it is linked to some CPU it
+ *                                will link NULL to that CPU. If it is
+ *                                currently queued in the global queue it will
+ *                                be removed from the rt_domain. It is safe to
+ *                                call gbl_unlink(T) if T is not linked. T may not
+ *                                be NULL.
+ *
+ * gbl_requeue(T)               - Requeue will insert T into the appropriate
+ *                                queue. If the system is in real-time mode and
+ *                                T is released already, it will go into the
+ *                                ready queue. If the system is not in
+ *                                real-time mode, T will go into the
+ *                                release queue. If T's release time is in the
+ *                                future, it will go into the release
+ *                                queue. That means that T's release time/job
+ *                                no/etc. has to be updated before gbl_requeue(T) is
+ *                                called. It is not safe to call gbl_requeue(T)
+ *                                when T is already queued. T may not be NULL.
+ *
+ * job_arrival(T)               - This is the catch-all function when T enters
+ *                                the system after either a suspension or at a
+ *                                job release. It will queue T (which means it
+ *                                is not safe to call job_arrival(T) if
+ *                                T is already queued) and then check whether a
+ *                                preemption is necessary. If a preemption is
+ *                                necessary it will update the linkage
+ *                                accordingly and cause schedule() to be called
+ *                                (either with an IPI or need_resched). It is
+ *                                safe to call job_arrival(T) if T's
+ *                                next job has not been actually released yet
+ *                                (release time in the future). T will be put
+ *                                on the release queue in that case.
+ *
+ * job_completion(T)            - Take care of everything that needs to be done
+ *                                to prepare T for its next release and place
+ *                                it in the right queue with
+ *                                job_arrival().
+ *
+ *
+ * When we know that T is linked to a CPU, then gbl_link_task_to_cpu(NULL, CPU)
+ * is equivalent to gbl_unlink(T). Note that if you unlink a task from a CPU
+ * none of the functions will automatically propagate pending tasks from the
+ * ready queue to a linked task. This is the job of the calling function (by
+ * means of __take_ready).
+ */
+
+/* Uncomment this if you want to see all scheduling decisions in the
+ * TRACE() log.
+ #define WANT_ALL_SCHED_EVENTS
+ */
+
+
+/* Macros to access the current active global plugin. These are
+ * a lot like C++'s 'this' pointer.
+ */
+struct sched_global_plugin* active_gbl_plugin;
+#define active_gbl_domain      (active_gbl_plugin->domain)
+#define active_gbl_domain_lock (active_gbl_domain.ready_lock)
+
+
+/*********************************************************************/
+/* "Member" functions for both sched_plugin and sched_global_plugin. */
+/* NOTE: These will automatically call down into "virtual" functions.*/
+/*********************************************************************/
+
+/* Priority-related functions */
+int gbl_preemption_needed(struct task_struct *t)
+{
+	/* we need the read lock for active_gbl_domain's ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(&active_gbl_domain))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+
+	/* NOTE: We cannot check for non-preemptibility since we
+	 * don't know what address space we're currently in.
+ */ + + /* make sure to get non-rt stuff out of the way */ + return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t); +} + +int gbl_ready_order(struct bheap_node* a, struct bheap_node* b) +{ + return active_gbl_plugin->prio_order(bheap2task(a), bheap2task(b)); +} + + + +int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) +{ + cpu_entry_t *a, *b; + a = _a->value; + b = _b->value; + + /* Note that a and b are inverted: we want the lowest-priority CPU at + * the top of the heap. + */ + return active_gbl_plugin->prio_order(b->linked, a->linked); +} + +/* gbl_update_cpu_position - Move the cpu entry to the correct place to maintain + * order in the cpu queue. Caller must hold gbl_domain_lock. + */ +void gbl_update_cpu_position(cpu_entry_t *entry) +{ + if (likely(bheap_node_in_heap(entry->hn))) + bheap_delete(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap, entry->hn); + bheap_insert(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap, entry->hn); +} + +/* caller must hold gsnedf lock */ +cpu_entry_t* lowest_prio_cpu(void) +{ + struct bheap_node* hn; + hn = bheap_peek(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap); + return hn->value; +} + + +/* link_task_to_cpu - Update the link of a CPU. + * Handles the case where the to-be-linked task is already + * scheduled on a different CPU. + */ +void gbl_link_task_to_cpu(struct task_struct* linked, + cpu_entry_t *entry) +{ + cpu_entry_t *sched; + struct task_struct* tmp; + int on_cpu; + + BUG_ON(linked && !is_realtime(linked)); + + /* Currently linked task is set to be unlinked. */ + if (entry->linked) { + entry->linked->rt_param.linked_on = NO_CPU; + } + + /* Link new task to CPU. */ + if (linked) { + set_rt_flags(linked, RT_F_RUNNING); + /* handle task is already scheduled somewhere! */ + on_cpu = linked->rt_param.scheduled_on; + if (on_cpu != NO_CPU) { + sched = active_gbl_plugin->cpus[on_cpu]; + /* this should only happen if not linked already */ + BUG_ON(sched->linked == linked); + + /* If we are already scheduled on the CPU to which we + * wanted to link, we don't need to do the swap -- + * we just link ourselves to the CPU and depend on + * the caller to get things right. + */ + if (entry != sched) { + TRACE_TASK(linked, + "already scheduled on %d, updating link.\n", + sched->cpu); + tmp = sched->linked; + linked->rt_param.linked_on = sched->cpu; + sched->linked = linked; + gbl_update_cpu_position(sched); + linked = tmp; + } + } + if (linked) /* might be NULL due to swap */ + linked->rt_param.linked_on = entry->cpu; + } + entry->linked = linked; +#ifdef WANT_ALL_SCHED_EVENTS + if (linked) + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); + else + TRACE("NULL linked to %d.\n", entry->cpu); +#endif + gbl_update_cpu_position(entry); +} + +/* unlink - Make sure a task is not linked any longer to an entry + * where it was linked before. Must hold + * active_gbl_domain_lock. + */ +void gbl_unlink(struct task_struct* t) +{ + cpu_entry_t *entry; + + if (t->rt_param.linked_on != NO_CPU) { + /* unlink */ + entry = active_gbl_plugin->cpus[t->rt_param.linked_on]; + t->rt_param.linked_on = NO_CPU; + gbl_link_task_to_cpu(NULL, entry); + } else if (is_queued(t)) { + /* This is an interesting situation: t is scheduled, + * but was just recently unlinked. It cannot be + * linked anywhere else (because then it would have + * been relinked to this CPU), thus it must be in some + * queue. We must remove it from the list in this + * case. 
+ */ + remove(&active_gbl_domain, t); + } +} + + +/* preempt - force a CPU to reschedule + */ +void gbl_preempt(cpu_entry_t *entry) +{ + preempt_if_preemptable(entry->scheduled, entry->cpu); +} + +/* requeue - Put an unlinked task into global domain. + * Caller must hold active_gbl_domain. + */ +void gbl_requeue(struct task_struct* task) +{ + BUG_ON(!task); + /* sanity check before insertion */ + BUG_ON(is_queued(task)); + + if (is_released(task, litmus_clock())) + active_gbl_plugin->add_ready(&active_gbl_domain, task); + else { + /* it has got to wait */ + add_release(&active_gbl_domain, task); + } +} + + +/* check for any necessary preemptions */ +void gbl_check_for_preemptions(void) +{ + struct task_struct *task; + cpu_entry_t* last; + + for(last = lowest_prio_cpu(); + gbl_preemption_needed(last->linked); + last = lowest_prio_cpu()) + { + /* preemption necessary */ + task = active_gbl_plugin->take_ready(&active_gbl_domain); + TRACE("check_for_preemptions: attempting to link task %d to %d\n", + task->pid, last->cpu); + if (last->linked) + gbl_requeue(last->linked); + gbl_link_task_to_cpu(task, last); + gbl_preempt(last); + } +} + + +void gbl_release_jobs(rt_domain_t* rt, struct bheap* tasks) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + + __merge_ready(rt, tasks); + gbl_check_for_preemptions(); + + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); +} + +/* caller holds active_gbl_domain_lock */ +void gbl_job_completion(struct task_struct *t, int forced) +{ + BUG_ON(!t); + + sched_trace_task_completion(t, forced); + + TRACE_TASK(t, "job_completion().\n"); + + /* set flags */ + set_rt_flags(t, RT_F_SLEEP); + /* prepare for next period */ + prepare_for_next_period(t); + if (is_released(t, litmus_clock())) + sched_trace_task_release(t); + /* unlink */ + gbl_unlink(t); + /* requeue + * But don't requeue a blocking task. */ + if (is_running(t)) + active_gbl_plugin->job_arrival(t); +} + + +/*********************************************************************/ +/* These two functions can't use active_* defines since the 'litmus' */ +/* pointer is undefined/invalid when these are called. Think of them */ +/* as static member functions. 
*/ +/*********************************************************************/ + +void gbl_domain_init(struct sched_global_plugin* gbl_plugin, + check_resched_needed_t resched, + release_jobs_t release) +{ + rt_domain_init(&gbl_plugin->domain, gbl_ready_order, resched, release); +} + + +long gbl_activate_plugin(void* plg) +{ + struct sched_plugin* plugin = (struct sched_plugin*)plg; + int cpu; + cpu_entry_t *entry; + + /* set the active global plugin */ + active_gbl_plugin = + container_of(plugin, + struct sched_global_plugin, + plugin); + + bheap_init(&active_gbl_plugin->cpu_heap); +#ifdef CONFIG_RELEASE_MASTER + active_gbl_domain.release_master = atomic_read(&release_master_cpu); +#endif + + for_each_online_cpu(cpu) { + entry = active_gbl_plugin->cpus[cpu]; + bheap_node_init(&entry->hn, entry); + entry->linked = NULL; + entry->scheduled = NULL; +#ifdef CONFIG_RELEASE_MASTER + if (cpu != active_gbl_domain.release_master) { +#endif + TRACE("Global Plugin: Initializing CPU #%d.\n", cpu); + gbl_update_cpu_position(entry); +#ifdef CONFIG_RELEASE_MASTER + } else { + TRACE("Global Plugin: CPU %d is release master.\n", cpu); + } +#endif + } + return 0; +} + + +/********************************************************************/ +/* "Virtual" functions in both sched_plugin and sched_global_plugin */ +/********************************************************************/ + + +/* gbl_job_arrival: task is either resumed or released */ +void gblv_job_arrival(struct task_struct* task) +{ + BUG_ON(!task); + + gbl_requeue(task); + gbl_check_for_preemptions(); +} + +/* gbl_tick - this function is called for every local timer interrupt. + * + * checks whether the current task has expired and checks + * whether we need to preempt it if it has not expired + */ +void gblv_tick(struct task_struct* t) +{ + if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { + if (!is_np(t)) { + /* np tasks will be preempted when they become + * preemptable again + */ + litmus_reschedule_local(); + TRACE("gbl_scheduler_tick: " + "%d is preemptable " + " => FORCE_RESCHED\n", t->pid); + } else if (is_user_np(t)) { + TRACE("gbl_scheduler_tick: " + "%d is non-preemptable, " + "preemption delayed.\n", t->pid); + request_exit_np(t); + } + } +} + +/* Getting schedule() right is a bit tricky. schedule() may not make any + * assumptions on the state of the current task since it may be called for a + * number of reasons. The reasons include a scheduler_tick() determined that it + * was necessary, because sys_exit_np() was called, because some Linux + * subsystem determined so, or even (in the worst case) because there is a bug + * hidden somewhere. Thus, we must take extreme care to determine what the + * current state is. + * + * The CPU could currently be scheduling a task (or not), be linked (or not). + * + * The following assertions for the scheduled task could hold: + * + * - !is_running(scheduled) // the job blocks + * - scheduled->timeslice == 0 // the job completed (forcefully) + * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) + * - linked != scheduled // we need to reschedule (for any reason) + * - is_np(scheduled) // rescheduling must be delayed, + * sys_exit_np must be requested + * + * Any of these can occur together. 
+ */ +struct task_struct* gblv_schedule(struct task_struct * prev) +{ + cpu_entry_t* entry = active_gbl_plugin->cpus[smp_processor_id()]; + int out_of_time, sleep, preempt, np, exists, blocks; + struct task_struct* next = NULL; + +#ifdef CONFIG_RELEASE_MASTER + /* Bail out early if we are the release master. + * The release master never schedules any real-time tasks. + */ + if (active_gbl_domain.release_master == entry->cpu) + return NULL; +#endif + + raw_spin_lock(&active_gbl_domain_lock); + + /* sanity checking */ + BUG_ON(entry->scheduled && entry->scheduled != prev); + BUG_ON(entry->scheduled && !is_realtime(prev)); + BUG_ON(is_realtime(prev) && !entry->scheduled); + + /* (0) Determine state */ + exists = entry->scheduled != NULL; + blocks = exists && !is_running(entry->scheduled); + out_of_time = exists && + budget_enforced(entry->scheduled) && + budget_exhausted(entry->scheduled); + np = exists && is_np(entry->scheduled); + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; + preempt = entry->scheduled != entry->linked; + +#ifdef WANT_ALL_SCHED_EVENTS + TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); +#endif + + if (exists) + TRACE_TASK(prev, + "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " + "state:%d sig:%d\n", + blocks, out_of_time, np, sleep, preempt, + prev->state, signal_pending(prev)); + if (entry->linked && preempt) + TRACE_TASK(prev, "will be preempted by %s/%d\n", + entry->linked->comm, entry->linked->pid); + + + /* If a task blocks we have no choice but to reschedule. + */ + if (blocks) + gbl_unlink(entry->scheduled); + + /* Request a sys_exit_np() call if we would like to preempt but cannot. + * We need to make sure to update the link structure anyway in case + * that we are still linked. Multiple calls to request_exit_np() don't + * hurt. + */ + if (np && (out_of_time || preempt || sleep)) { + gbl_unlink(entry->scheduled); + request_exit_np(entry->scheduled); + } + + /* Any task that is preemptable and either exhausts its execution + * budget or wants to sleep completes. We may have to reschedule after + * this. Don't do a job completion if we block (can't have timers running + * for blocked jobs). Preemption go first for the same reason. + */ + if (!np && (out_of_time || sleep) && !blocks && !preempt) + active_gbl_plugin->job_completion(entry->scheduled, !sleep); + + /* Link pending task if we became unlinked. + */ + if (!entry->linked) + gbl_link_task_to_cpu(active_gbl_plugin->take_ready(&active_gbl_domain), entry); + + /* The final scheduling decision. Do we need to switch for some reason? + * If linked is different from scheduled, then select linked as next. + */ + if ((!np || blocks) && + entry->linked != entry->scheduled) { + /* Schedule a linked job? */ + if (entry->linked) { + entry->linked->rt_param.scheduled_on = entry->cpu; + next = entry->linked; + } + if (entry->scheduled) { + /* not gonna be scheduled soon */ + entry->scheduled->rt_param.scheduled_on = NO_CPU; + TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); + } + } else + /* Only override Linux scheduler if we have a real-time task + * scheduled that needs to continue. 
+ */ + if (exists) + next = prev; + + sched_state_task_picked(); + + raw_spin_unlock(&active_gbl_domain_lock); + +#ifdef WANT_ALL_SCHED_EVENTS + TRACE("active_gbl_domain_lock released, next=0x%p\n", next); + + if (next) + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); + else if (exists && !next) + TRACE("becomes idle at %llu.\n", litmus_clock()); +#endif + + + return next; +} + + +/* _finish_switch - we just finished the switch away from prev + */ +void gblv_finish_switch(struct task_struct *prev) +{ + cpu_entry_t* entry = active_gbl_plugin->cpus[smp_processor_id()]; + + entry->scheduled = is_realtime(current) ? current : NULL; +#ifdef WANT_ALL_SCHED_EVENTS + TRACE_TASK(prev, "switched away from\n"); +#endif +} + + +/* Prepare a task for running in RT mode + */ +void gblv_task_new(struct task_struct * t, int on_rq, int running) +{ + unsigned long flags; + cpu_entry_t* entry; + + TRACE("global plugin: task new %d\n", t->pid); + + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + + /* setup job params */ + release_at(t, litmus_clock()); + + if (running) { + entry = active_gbl_plugin->cpus[task_cpu(t)]; + BUG_ON(entry->scheduled); + +#ifdef CONFIG_RELEASE_MASTER + if (entry->cpu != active_gbl_domain.release_master) { +#endif + entry->scheduled = t; + tsk_rt(t)->scheduled_on = task_cpu(t); +#ifdef CONFIG_RELEASE_MASTER + } else { + /* do not schedule on release master */ + gbl_preempt(entry); /* force resched */ + tsk_rt(t)->scheduled_on = NO_CPU; + } +#endif + } else { + t->rt_param.scheduled_on = NO_CPU; + } + t->rt_param.linked_on = NO_CPU; + + active_gbl_plugin->job_arrival(t); + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); +} + +void gblv_task_wake_up(struct task_struct *task) +{ + unsigned long flags; + lt_t now; + + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); + + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + /* We need to take suspensions because of semaphores into + * account! If a job resumes after being suspended due to acquiring + * a semaphore, it should never be treated as a new job release. 
+ */ + if (get_rt_flags(task) == RT_F_EXIT_SEM) { + set_rt_flags(task, RT_F_RUNNING); + } else { + now = litmus_clock(); + if (is_tardy(task, now)) { + /* new sporadic release */ + release_at(task, now); + sched_trace_task_release(task); + } + else { + if (task->rt.time_slice) { + /* came back in time before deadline + */ + set_rt_flags(task, RT_F_RUNNING); + } + } + } + active_gbl_plugin->job_arrival(task); + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); +} + +void gblv_task_block(struct task_struct *t) +{ + unsigned long flags; + + TRACE_TASK(t, "block at %llu\n", litmus_clock()); + + /* unlink if necessary */ + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + gbl_unlink(t); + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); + + BUG_ON(!is_realtime(t)); +} + + +void gblv_task_exit(struct task_struct * t) +{ + unsigned long flags; + + /* unlink if necessary */ + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + gbl_unlink(t); + if (tsk_rt(t)->scheduled_on != NO_CPU) { + active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; + tsk_rt(t)->scheduled_on = NO_CPU; + } + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); + + BUG_ON(!is_realtime(t)); + TRACE_TASK(t, "RIP\n"); +} + +long gblv_admit_task(struct task_struct* tsk) +{ + return 0; +} diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index e9c5e531b1ae..7876d707d939 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include @@ -24,578 +24,50 @@ #include -/* Overview of GSN-EDF operations. - * - * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This - * description only covers how the individual operations are implemented in - * LITMUS. - * - * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage - * structure (NOT the actually scheduled - * task). If there is another linked task To - * already it will set To->linked_on = NO_CPU - * (thereby removing its association with this - * CPU). However, it will not requeue the - * previously linked task (if any). It will set - * T's state to RT_F_RUNNING and check whether - * it is already running somewhere else. If T - * is scheduled somewhere else it will link - * it to that CPU instead (and pull the linked - * task to cpu). T may be NULL. - * - * unlink(T) - Unlink removes T from all scheduler data - * structures. If it is linked to some CPU it - * will link NULL to that CPU. If it is - * currently queued in the gsnedf queue it will - * be removed from the rt_domain. It is safe to - * call unlink(T) if T is not linked. T may not - * be NULL. - * - * requeue(T) - Requeue will insert T into the appropriate - * queue. If the system is in real-time mode and - * the T is released already, it will go into the - * ready queue. If the system is not in - * real-time mode is T, then T will go into the - * release queue. If T's release time is in the - * future, it will go into the release - * queue. That means that T's release time/job - * no/etc. has to be updated before requeu(T) is - * called. It is not safe to call requeue(T) - * when T is already queued. T may not be NULL. - * - * gsnedf_job_arrival(T) - This is the catch all function when T enters - * the system after either a suspension or at a - * job release. It will queue T (which means it - * is not safe to call gsnedf_job_arrival(T) if - * T is already queued) and then check whether a - * preemption is necessary. 
If a preemption is - * necessary it will update the linkage - * accordingly and cause scheduled to be called - * (either with an IPI or need_resched). It is - * safe to call gsnedf_job_arrival(T) if T's - * next job has not been actually released yet - * (releast time in the future). T will be put - * on the release queue in that case. - * - * job_completion(T) - Take care of everything that needs to be done - * to prepare T for its next release and place - * it in the right queue with - * gsnedf_job_arrival(). - * - * - * When we now that T is linked to CPU then link_task_to_cpu(NULL, CPU) is - * equivalent to unlink(T). Note that if you unlink a task from a CPU none of - * the functions will automatically propagate pending task from the ready queue - * to a linked task. This is the job of the calling function ( by means of - * __take_ready). - */ - -/* cpu_entry_t - maintain the linked and scheduled state - */ -typedef struct { - int cpu; - struct task_struct* linked; /* only RT tasks */ - struct task_struct* scheduled; /* only RT tasks */ - struct bheap_node* hn; -} cpu_entry_t; DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); -cpu_entry_t* gsnedf_cpus[NR_CPUS]; - -/* the cpus queue themselves according to priority in here */ -static struct bheap_node gsnedf_heap_node[NR_CPUS]; -static struct bheap gsnedf_cpu_heap; - -static rt_domain_t gsnedf; -#define gsnedf_lock (gsnedf.ready_lock) - - -/* Uncomment this if you want to see all scheduling decisions in the - * TRACE() log. -#define WANT_ALL_SCHED_EVENTS - */ - -static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) -{ - cpu_entry_t *a, *b; - a = _a->value; - b = _b->value; - /* Note that a and b are inverted: we want the lowest-priority CPU at - * the top of the heap. - */ - return edf_higher_prio(b->linked, a->linked); -} - -/* update_cpu_position - Move the cpu entry to the correct place to maintain - * order in the cpu queue. Caller must hold gsnedf lock. - */ -static void update_cpu_position(cpu_entry_t *entry) -{ - if (likely(bheap_node_in_heap(entry->hn))) - bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); - bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); -} +#define gsnedf_lock (gsn_edf_plugin.domain.ready_lock) -/* caller must hold gsnedf lock */ -static cpu_entry_t* lowest_prio_cpu(void) -{ - struct bheap_node* hn; - hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); - return hn->value; -} - - -/* link_task_to_cpu - Update the link of a CPU. - * Handles the case where the to-be-linked task is already - * scheduled on a different CPU. - */ -static noinline void link_task_to_cpu(struct task_struct* linked, - cpu_entry_t *entry) -{ - cpu_entry_t *sched; - struct task_struct* tmp; - int on_cpu; - - BUG_ON(linked && !is_realtime(linked)); - - /* Currently linked task is set to be unlinked. */ - if (entry->linked) { - entry->linked->rt_param.linked_on = NO_CPU; - } - - /* Link new task to CPU. */ - if (linked) { - set_rt_flags(linked, RT_F_RUNNING); - /* handle task is already scheduled somewhere! */ - on_cpu = linked->rt_param.scheduled_on; - if (on_cpu != NO_CPU) { - sched = &per_cpu(gsnedf_cpu_entries, on_cpu); - /* this should only happen if not linked already */ - BUG_ON(sched->linked == linked); - - /* If we are already scheduled on the CPU to which we - * wanted to link, we don't need to do the swap -- - * we just link ourselves to the CPU and depend on - * the caller to get things right. 
- */ - if (entry != sched) { - TRACE_TASK(linked, - "already scheduled on %d, updating link.\n", - sched->cpu); - tmp = sched->linked; - linked->rt_param.linked_on = sched->cpu; - sched->linked = linked; - update_cpu_position(sched); - linked = tmp; - } - } - if (linked) /* might be NULL due to swap */ - linked->rt_param.linked_on = entry->cpu; - } - entry->linked = linked; -#ifdef WANT_ALL_SCHED_EVENTS - if (linked) - TRACE_TASK(linked, "linked to %d.\n", entry->cpu); - else - TRACE("NULL linked to %d.\n", entry->cpu); -#endif - update_cpu_position(entry); -} - -/* unlink - Make sure a task is not linked any longer to an entry - * where it was linked before. Must hold gsnedf_lock. - */ -static noinline void unlink(struct task_struct* t) -{ - cpu_entry_t *entry; - - if (t->rt_param.linked_on != NO_CPU) { - /* unlink */ - entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); - t->rt_param.linked_on = NO_CPU; - link_task_to_cpu(NULL, entry); - } else if (is_queued(t)) { - /* This is an interesting situation: t is scheduled, - * but was just recently unlinked. It cannot be - * linked anywhere else (because then it would have - * been relinked to this CPU), thus it must be in some - * queue. We must remove it from the list in this - * case. - */ - remove(&gsnedf, t); - } -} - - -/* preempt - force a CPU to reschedule - */ -static void preempt(cpu_entry_t *entry) -{ - preempt_if_preemptable(entry->scheduled, entry->cpu); -} - -/* requeue - Put an unlinked task into gsn-edf domain. - * Caller must hold gsnedf_lock. - */ -static noinline void requeue(struct task_struct* task) -{ - BUG_ON(!task); - /* sanity check before insertion */ - BUG_ON(is_queued(task)); - - if (is_released(task, litmus_clock())) - __add_ready(&gsnedf, task); - else { - /* it has got to wait */ - add_release(&gsnedf, task); - } -} - -/* check for any necessary preemptions */ -static void check_for_preemptions(void) -{ - struct task_struct *task; - cpu_entry_t* last; - - for(last = lowest_prio_cpu(); - edf_preemption_needed(&gsnedf, last->linked); - last = lowest_prio_cpu()) { - /* preemption necessary */ - task = __take_ready(&gsnedf); - TRACE("check_for_preemptions: attempting to link task %d to %d\n", - task->pid, last->cpu); - if (last->linked) - requeue(last->linked); - link_task_to_cpu(task, last); - preempt(last); - } -} - -/* gsnedf_job_arrival: task is either resumed or released */ -static noinline void gsnedf_job_arrival(struct task_struct* task) -{ - BUG_ON(!task); - - requeue(task); - check_for_preemptions(); -} - -static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&gsnedf_lock, flags); - - __merge_ready(rt, tasks); - check_for_preemptions(); - - raw_spin_unlock_irqrestore(&gsnedf_lock, flags); -} - -/* caller holds gsnedf_lock */ -static noinline void job_completion(struct task_struct *t, int forced) -{ - BUG_ON(!t); - - sched_trace_task_completion(t, forced); - - TRACE_TASK(t, "job_completion().\n"); - - /* set flags */ - set_rt_flags(t, RT_F_SLEEP); - /* prepare for next period */ - prepare_for_next_period(t); - if (is_released(t, litmus_clock())) - sched_trace_task_release(t); - /* unlink */ - unlink(t); - /* requeue - * But don't requeue a blocking task. */ - if (is_running(t)) - gsnedf_job_arrival(t); -} - -/* gsnedf_tick - this function is called for every local timer - * interrupt. 
- * - * checks whether the current task has expired and checks - * whether we need to preempt it if it has not expired - */ -static void gsnedf_tick(struct task_struct* t) -{ - if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { - if (!is_np(t)) { - /* np tasks will be preempted when they become - * preemptable again - */ - litmus_reschedule_local(); - TRACE("gsnedf_scheduler_tick: " - "%d is preemptable " - " => FORCE_RESCHED\n", t->pid); - } else if (is_user_np(t)) { - TRACE("gsnedf_scheduler_tick: " - "%d is non-preemptable, " - "preemption delayed.\n", t->pid); - request_exit_np(t); - } - } -} - -/* Getting schedule() right is a bit tricky. schedule() may not make any - * assumptions on the state of the current task since it may be called for a - * number of reasons. The reasons include a scheduler_tick() determined that it - * was necessary, because sys_exit_np() was called, because some Linux - * subsystem determined so, or even (in the worst case) because there is a bug - * hidden somewhere. Thus, we must take extreme care to determine what the - * current state is. - * - * The CPU could currently be scheduling a task (or not), be linked (or not). - * - * The following assertions for the scheduled task could hold: - * - * - !is_running(scheduled) // the job blocks - * - scheduled->timeslice == 0 // the job completed (forcefully) - * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) - * - linked != scheduled // we need to reschedule (for any reason) - * - is_np(scheduled) // rescheduling must be delayed, - * sys_exit_np must be requested - * - * Any of these can occur together. - */ -static struct task_struct* gsnedf_schedule(struct task_struct * prev) -{ - cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); - int out_of_time, sleep, preempt, np, exists, blocks; - struct task_struct* next = NULL; - -#ifdef CONFIG_RELEASE_MASTER - /* Bail out early if we are the release master. - * The release master never schedules any real-time tasks. - */ - if (gsnedf.release_master == entry->cpu) - return NULL; -#endif - - raw_spin_lock(&gsnedf_lock); - - /* sanity checking */ - BUG_ON(entry->scheduled && entry->scheduled != prev); - BUG_ON(entry->scheduled && !is_realtime(prev)); - BUG_ON(is_realtime(prev) && !entry->scheduled); - - /* (0) Determine state */ - exists = entry->scheduled != NULL; - blocks = exists && !is_running(entry->scheduled); - out_of_time = exists && - budget_enforced(entry->scheduled) && - budget_exhausted(entry->scheduled); - np = exists && is_np(entry->scheduled); - sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; - preempt = entry->scheduled != entry->linked; - -#ifdef WANT_ALL_SCHED_EVENTS - TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); -#endif - - if (exists) - TRACE_TASK(prev, - "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " - "state:%d sig:%d\n", - blocks, out_of_time, np, sleep, preempt, - prev->state, signal_pending(prev)); - if (entry->linked && preempt) - TRACE_TASK(prev, "will be preempted by %s/%d\n", - entry->linked->comm, entry->linked->pid); - - - /* If a task blocks we have no choice but to reschedule. - */ - if (blocks) - unlink(entry->scheduled); - - /* Request a sys_exit_np() call if we would like to preempt but cannot. - * We need to make sure to update the link structure anyway in case - * that we are still linked. Multiple calls to request_exit_np() don't - * hurt. 
- */ - if (np && (out_of_time || preempt || sleep)) { - unlink(entry->scheduled); - request_exit_np(entry->scheduled); - } - - /* Any task that is preemptable and either exhausts its execution - * budget or wants to sleep completes. We may have to reschedule after - * this. Don't do a job completion if we block (can't have timers running - * for blocked jobs). Preemption go first for the same reason. - */ - if (!np && (out_of_time || sleep) && !blocks && !preempt) - job_completion(entry->scheduled, !sleep); - - /* Link pending task if we became unlinked. - */ - if (!entry->linked) - link_task_to_cpu(__take_ready(&gsnedf), entry); - - /* The final scheduling decision. Do we need to switch for some reason? - * If linked is different from scheduled, then select linked as next. - */ - if ((!np || blocks) && - entry->linked != entry->scheduled) { - /* Schedule a linked job? */ - if (entry->linked) { - entry->linked->rt_param.scheduled_on = entry->cpu; - next = entry->linked; - } - if (entry->scheduled) { - /* not gonna be scheduled soon */ - entry->scheduled->rt_param.scheduled_on = NO_CPU; - TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); - } - } else - /* Only override Linux scheduler if we have a real-time task - * scheduled that needs to continue. - */ - if (exists) - next = prev; - - sched_state_task_picked(); - - raw_spin_unlock(&gsnedf_lock); - -#ifdef WANT_ALL_SCHED_EVENTS - TRACE("gsnedf_lock released, next=0x%p\n", next); - - if (next) - TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); - else if (exists && !next) - TRACE("becomes idle at %llu.\n", litmus_clock()); -#endif - - - return next; -} - - -/* _finish_switch - we just finished the switch away from prev - */ -static void gsnedf_finish_switch(struct task_struct *prev) -{ - cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); - - entry->scheduled = is_realtime(current) ? current : NULL; -#ifdef WANT_ALL_SCHED_EVENTS - TRACE_TASK(prev, "switched away from\n"); -#endif -} - - -/* Prepare a task for running in RT mode - */ -static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) -{ - unsigned long flags; - cpu_entry_t* entry; - - TRACE("gsn edf: task new %d\n", t->pid); - - raw_spin_lock_irqsave(&gsnedf_lock, flags); - - /* setup job params */ - release_at(t, litmus_clock()); - - if (running) { - entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t)); - BUG_ON(entry->scheduled); - -#ifdef CONFIG_RELEASE_MASTER - if (entry->cpu != gsnedf.release_master) { -#endif - entry->scheduled = t; - tsk_rt(t)->scheduled_on = task_cpu(t); -#ifdef CONFIG_RELEASE_MASTER - } else { - /* do not schedule on release master */ - preempt(entry); /* force resched */ - tsk_rt(t)->scheduled_on = NO_CPU; - } +#ifdef CONFIG_FMLP +static long gsnedf_pi_block(struct pi_semaphore *sem, + struct task_struct *new_waiter); +static long gsnedf_inherit_priority(struct pi_semaphore *sem, + struct task_struct *new_owner); +static long gsnedf_return_priority(struct pi_semaphore *sem); #endif - } else { - t->rt_param.scheduled_on = NO_CPU; - } - t->rt_param.linked_on = NO_CPU; - - gsnedf_job_arrival(t); - raw_spin_unlock_irqrestore(&gsnedf_lock, flags); -} -static void gsnedf_task_wake_up(struct task_struct *task) -{ - unsigned long flags; - lt_t now; - - TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); - - raw_spin_lock_irqsave(&gsnedf_lock, flags); - /* We need to take suspensions because of semaphores into - * account! 
If a job resumes after being suspended due to acquiring - * a semaphore, it should never be treated as a new job release. - */ - if (get_rt_flags(task) == RT_F_EXIT_SEM) { - set_rt_flags(task, RT_F_RUNNING); - } else { - now = litmus_clock(); - if (is_tardy(task, now)) { - /* new sporadic release */ - release_at(task, now); - sched_trace_task_release(task); - } - else { - if (task->rt.time_slice) { - /* came back in time before deadline - */ - set_rt_flags(task, RT_F_RUNNING); - } - } - } - gsnedf_job_arrival(task); - raw_spin_unlock_irqrestore(&gsnedf_lock, flags); -} - -static void gsnedf_task_block(struct task_struct *t) -{ - unsigned long flags; - - TRACE_TASK(t, "block at %llu\n", litmus_clock()); - - /* unlink if necessary */ - raw_spin_lock_irqsave(&gsnedf_lock, flags); - unlink(t); - raw_spin_unlock_irqrestore(&gsnedf_lock, flags); - - BUG_ON(!is_realtime(t)); -} - - -static void gsnedf_task_exit(struct task_struct * t) -{ - unsigned long flags; - - /* unlink if necessary */ - raw_spin_lock_irqsave(&gsnedf_lock, flags); - unlink(t); - if (tsk_rt(t)->scheduled_on != NO_CPU) { - gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; - tsk_rt(t)->scheduled_on = NO_CPU; - } - raw_spin_unlock_irqrestore(&gsnedf_lock, flags); +/* GSN-EDF Plugin object */ +static struct sched_global_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { + .plugin = { + .plugin_name = "GSN-EDF", + .finish_switch = gblv_finish_switch, + .tick = gblv_tick, + .task_new = gblv_task_new, + .complete_job = complete_job, + .task_exit = gblv_task_exit, + .schedule = gblv_schedule, + .task_wake_up = gblv_task_wake_up, + .task_block = gblv_task_block, + #ifdef CONFIG_FMLP + .fmlp_active = 1, + .pi_block = gsnedf_pi_block, + .inherit_priority = gsnedf_inherit_priority, + .return_priority = gsnedf_return_priority, + #endif + .admit_task = gblv_admit_task, + .activate_plugin = gbl_activate_plugin + }, + + .prio_order = edf_higher_prio, + .take_ready = __take_ready, + .add_ready = __add_ready, + .job_arrival = gblv_job_arrival, + .job_completion = gbl_job_completion +}; - BUG_ON(!is_realtime(t)); - TRACE_TASK(t, "RIP\n"); -} #ifdef CONFIG_FMLP - /* Update the queue position of a task that got it's priority boosted via * priority inheritance. */ static void update_queue_position(struct task_struct *holder) @@ -618,13 +90,13 @@ static void update_queue_position(struct task_struct *holder) * We can't use heap_decrease() here since * the cpu_heap is ordered in reverse direction, so * it is actually an increase. 
*/ - bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, - gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); - bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, - gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); + bheap_delete(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap, + gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn); + bheap_insert(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap, + gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn); } else { /* holder may be queued: first stop queue changes */ - raw_spin_lock(&gsnedf.release_lock); + raw_spin_lock(&gsn_edf_plugin.domain.release_lock); if (is_queued(holder)) { TRACE_TASK(holder, "%s: is queued\n", __FUNCTION__); @@ -642,7 +114,7 @@ static void update_queue_position(struct task_struct *holder) TRACE_TASK(holder, "%s: is NOT queued => Done.\n", __FUNCTION__); } - raw_spin_unlock(&gsnedf.release_lock); + raw_spin_unlock(&gsn_edf_plugin.domain.release_lock); /* If holder was enqueued in a release heap, then the following * preemption check is pointless, but we can't easily detect @@ -654,9 +126,9 @@ static void update_queue_position(struct task_struct *holder) /* heap_decrease() hit the top level of the heap: make * sure preemption checks get the right task, not the * potentially stale cache. */ - bheap_uncache_min(edf_ready_order, - &gsnedf.ready_queue); - check_for_preemptions(); + bheap_uncache_min(gbl_ready_order, + &gsn_edf_plugin.domain.ready_queue); + gbl_check_for_preemptions(); } } } @@ -740,8 +212,8 @@ static long gsnedf_return_priority(struct pi_semaphore *sem) t->rt_param.inh_task = NULL; /* Check if rescheduling is necessary */ - unlink(t); - gsnedf_job_arrival(t); + gbl_unlink(t); + gsn_edf_plugin.job_arrival(t); raw_spin_unlock(&gsnedf_lock); } @@ -750,78 +222,24 @@ static long gsnedf_return_priority(struct pi_semaphore *sem) #endif -static long gsnedf_admit_task(struct task_struct* tsk) -{ - return 0; -} - -static long gsnedf_activate_plugin(void) -{ - int cpu; - cpu_entry_t *entry; - - bheap_init(&gsnedf_cpu_heap); -#ifdef CONFIG_RELEASE_MASTER - gsnedf.release_master = atomic_read(&release_master_cpu); -#endif - - for_each_online_cpu(cpu) { - entry = &per_cpu(gsnedf_cpu_entries, cpu); - bheap_node_init(&entry->hn, entry); - entry->linked = NULL; - entry->scheduled = NULL; -#ifdef CONFIG_RELEASE_MASTER - if (cpu != gsnedf.release_master) { -#endif - TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu); - update_cpu_position(entry); -#ifdef CONFIG_RELEASE_MASTER - } else { - TRACE("GSN-EDF: CPU %d is release master.\n", cpu); - } -#endif - } - return 0; -} - -/* Plugin object */ -static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { - .plugin_name = "GSN-EDF", - .finish_switch = gsnedf_finish_switch, - .tick = gsnedf_tick, - .task_new = gsnedf_task_new, - .complete_job = complete_job, - .task_exit = gsnedf_task_exit, - .schedule = gsnedf_schedule, - .task_wake_up = gsnedf_task_wake_up, - .task_block = gsnedf_task_block, -#ifdef CONFIG_FMLP - .fmlp_active = 1, - .pi_block = gsnedf_pi_block, - .inherit_priority = gsnedf_inherit_priority, - .return_priority = gsnedf_return_priority, -#endif - .admit_task = gsnedf_admit_task, - .activate_plugin = gsnedf_activate_plugin, -}; - static int __init init_gsn_edf(void) { int cpu; cpu_entry_t *entry; - bheap_init(&gsnedf_cpu_heap); + bheap_init(&gsn_edf_plugin.cpu_heap); /* initialize CPU state */ for (cpu = 0; cpu < NR_CPUS; cpu++) { entry = &per_cpu(gsnedf_cpu_entries, cpu); - gsnedf_cpus[cpu] = entry; + gsn_edf_plugin.cpus[cpu] = entry; entry->cpu = cpu; - entry->hn = 
&gsnedf_heap_node[cpu]; + entry->hn = &gsn_edf_plugin.heap_node[cpu]; bheap_node_init(&entry->hn, entry); } - edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); - return register_sched_plugin(&gsn_edf_plugin); + gbl_domain_init(&gsn_edf_plugin, NULL, gbl_release_jobs); + + return register_sched_plugin(&gsn_edf_plugin.plugin); } diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index c7d5cf7aa2b3..f2788a962fdb 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -813,7 +813,7 @@ static long pfair_admit_task(struct task_struct* t) return 0; } -static long pfair_activate_plugin(void) +static long pfair_activate_plugin(void* plugin) { int cpu; struct pfair_state* state; diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d912a6494d20..5d14d469d971 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -111,7 +111,7 @@ static long litmus_dummy_complete_job(void) return -ENOSYS; } -static long litmus_dummy_activate_plugin(void) +static long litmus_dummy_activate_plugin(void* plugin) { return 0; } -- cgit v1.2.2
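To illustrate the reuse described in the commit message, here is a sketch (not part of the patch) of how a hypothetical EDZL-style scheduler could sit on top of the new sched_global_plugin architecture. It mirrors the gsn_edf_plugin initializer and init_gsn_edf() above. The names edzl_plugin, edzl_cpu_entries, edzl_higher_prio, and init_edzl are invented for illustration; the struct fields are taken from the initializer shown in this patch, the full type definition is assumed to live in the (unshown) litmus/sched_global_plugin.h header, and the EDZL priority rule is left as a stub that falls back to plain EDF ordering.

/* Illustrative sketch only -- not part of this patch.  Assumes the same
 * #include set as litmus/sched_gsn_edf.c plus the new header that declares
 * struct sched_global_plugin and the generic gbl_ and gblv_ helpers.
 */

DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries);

/* EDZL orders jobs by EDF until a job reaches zero laxity, at which point it
 * gets top priority.  The real test needs per-job laxity state; this stub
 * simply falls back to plain EDF ordering as a placeholder.
 */
static int edzl_higher_prio(struct task_struct* a, struct task_struct* b)
{
	return edf_higher_prio(a, b);
}

static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = {
	.plugin = {
		.plugin_name     = "EDZL",
		.finish_switch   = gblv_finish_switch,
		.tick            = gblv_tick,
		.task_new        = gblv_task_new,
		.complete_job    = complete_job,
		.task_exit       = gblv_task_exit,
		.schedule        = gblv_schedule,
		.task_wake_up    = gblv_task_wake_up,
		.task_block      = gblv_task_block,
		.admit_task      = gblv_admit_task,
		.activate_plugin = gbl_activate_plugin
	},

	/* Only the scheduler-specific hooks differ from GSN-EDF; a real EDZL
	 * plugin would likely also override take_ready/add_ready to manage
	 * zero-laxity timers.
	 */
	.prio_order     = edzl_higher_prio,
	.take_ready     = __take_ready,
	.add_ready      = __add_ready,
	.job_arrival    = gblv_job_arrival,
	.job_completion = gbl_job_completion
};

static int __init init_edzl(void)
{
	int cpu;
	cpu_entry_t* entry;

	bheap_init(&edzl_plugin.cpu_heap);

	/* initialize CPU state, exactly as init_gsn_edf() does above */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		entry = &per_cpu(edzl_cpu_entries, cpu);
		edzl_plugin.cpus[cpu] = entry;
		entry->cpu = cpu;
		entry->hn = &edzl_plugin.heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}

	gbl_domain_init(&edzl_plugin, NULL, gbl_release_jobs);

	return register_sched_plugin(&edzl_plugin.plugin);
}

Apart from the stubbed comparator, everything above is generic code from sched_global_plugin.c, which is the code-reduction argument the commit message makes.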