/*
 * litmus/sched_aedzl.c
 *
 * Implementation of the Adaptive-EDZL scheduling algorithm.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */

/* Note: the targets of the original include list did not survive; the
 * headers below are inferred from the symbols this file uses. */
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/hrtimer.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edzl_common.h>
#include <litmus/sched_trace.h>
#include <litmus/fpmath.h>

#include <litmus/bheap.h>

#include <linux/module.h>

/* Overview of AEDZL operations.
 *
 * link_task_to_cpu(T, cpu)	- Low-level operation to update the linkage
 *				  structure (NOT the actually scheduled
 *				  task). If there is another linked task To
 *				  already, it will set To->linked_on = NO_CPU
 *				  (thereby removing its association with this
 *				  CPU). However, it will not requeue the
 *				  previously linked task (if any). It will set
 *				  T's state to RT_F_RUNNING and check whether
 *				  it is already running somewhere else. If T
 *				  is scheduled somewhere else, it will link
 *				  it to that CPU instead (and pull the linked
 *				  task to cpu). T may be NULL.
 *
 * unlink(T)			- Unlink removes T from all scheduler data
 *				  structures. If it is linked to some CPU, it
 *				  will link NULL to that CPU. If it is
 *				  currently queued in the edzl queue, it will
 *				  be removed from the rt_domain. It is safe to
 *				  call unlink(T) if T is not linked. T may not
 *				  be NULL.
 *
 * requeue(T)			- Requeue will insert T into the appropriate
 *				  queue. If the system is in real-time mode
 *				  and T is released already, it will go into
 *				  the ready queue. If the system is not yet in
 *				  real-time mode, T will go into the release
 *				  queue. If T's release time is in the future,
 *				  it will go into the release queue. That
 *				  means that T's release time/job no/etc. has
 *				  to be updated before requeue(T) is called.
 *				  It is not safe to call requeue(T) when T is
 *				  already queued. T may not be NULL.
 *
 * aedzl_job_arrival(T)		- This is the catch-all function when T enters
 *				  the system after either a suspension or at a
 *				  job release. It will queue T (which means it
 *				  is not safe to call aedzl_job_arrival(T) if
 *				  T is already queued) and then check whether a
 *				  preemption is necessary. If a preemption is
 *				  necessary, it will update the linkage
 *				  accordingly and cause schedule() to be called
 *				  (either with an IPI or need_resched). It is
 *				  safe to call aedzl_job_arrival(T) if T's
 *				  next job has not been actually released yet
 *				  (release time in the future). T will be put
 *				  on the release queue in that case.
 *
 * job_completion(T)		- Take care of everything that needs to be done
 *				  to prepare T for its next release and place
 *				  it in the right queue with
 *				  aedzl_job_arrival().
 *
 *
 * When we know that T is linked to CPU then link_task_to_cpu(NULL, CPU) is
 * equivalent to unlink(T). Note that if you unlink a task from a CPU none of
 * the functions will automatically propagate pending tasks from the ready
 * queue to a linked task. This is the job of the calling function (by means
 * of __take_ready).
 */
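/* Illustrative flow of a job release through the operations above: the
 * release timer fires aedzl_release_jobs(), which merges the released jobs
 * into the ready queue and calls check_for_preemptions(); for each needed
 * preemption, the victim is unlinked (via link_task_to_cpu()) and
 * requeue()d, the higher-priority job is linked in its place, and the
 * victim's CPU is poked so that aedzl_schedule() runs there and picks up
 * its newly linked task.
 */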
/* cpu_entry_t - maintain the linked and scheduled state
 */
typedef struct {
	int			cpu;
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	atomic_t		will_schedule;	/* prevent unneeded IPIs */
	struct bheap_node*	hn;
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, aedzl_cpu_entries);

cpu_entry_t* aedzl_cpus[NR_CPUS];

#define set_will_schedule() \
	(atomic_set(&__get_cpu_var(aedzl_cpu_entries).will_schedule, 1))
#define clear_will_schedule() \
	(atomic_set(&__get_cpu_var(aedzl_cpu_entries).will_schedule, 0))
#define test_will_schedule(cpu) \
	(atomic_read(&per_cpu(aedzl_cpu_entries, cpu).will_schedule))

/* the cpus queue themselves according to priority in here */
static struct bheap_node aedzl_heap_node[NR_CPUS];
static struct bheap      aedzl_cpu_heap;

/* Design choice: use one RT domain for both zero-laxity and regular jobs. */
static rt_domain_t aedzl;
#define aedzl_lock (aedzl.ready_lock)

static const fp_t a_fp = {.val = 104};	/* ~0.102 */
static const fp_t b_fp = {.val = 310};	/* ~0.303 */

/* Uncomment this if you want to see all scheduling decisions in the
 * TRACE() log.
#define WANT_ALL_SCHED_EVENTS
 */
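/* How the gains above decode, assuming the LITMUS^RT fixed-point format
 * with a 10-bit fractional part (FP_SHIFT == 10 -- an assumption about
 * the build configuration, not something this file asserts):
 *
 *	a_fp: 104/1024 ~= 0.1016  (the "0.102" gain)
 *	b_fp: 310/1024 ~= 0.3027  (the "0.303" gain)
 *
 * i.e., the .val fields are expressed in 1024ths, not 1000ths;
 * _frac(104, 1024) and _frac(310, 1024) would construct the same values.
 */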
static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edzl_higher_prio(b->linked, a->linked);
}

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold aedzl_lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio, &aedzl_cpu_heap, entry->hn);
	bheap_insert(cpu_lower_prio, &aedzl_cpu_heap, entry->hn);
}

/* caller must hold aedzl_lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, &aedzl_cpu_heap);
	return hn->value;
}

/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle the case where the task is already scheduled
		 * somewhere! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(aedzl_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold aedzl_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (unlikely(!t)) {
		TRACE_BUG_ON(!t);
		return;
	}

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(aedzl_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked. It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&aedzl, t);
	}
}

static inline void update_exe_estimate(struct task_struct *t, lt_t actual_cost)
{
	fp_t err, new;
	fp_t actual_util = _frac(actual_cost, get_rt_period(t));

	TRACE_TASK(t, "OLD cost: %llu, est cost: %llu, est util: %d.%d, est err: %d.%d\n",
		   tsk_rt(t)->task_params.exec_cost,
		   get_exec_cost_est(t),
		   _fp_to_integer(tsk_rt(t)->zl_util_est), _point(tsk_rt(t)->zl_util_est),
		   _fp_to_integer(tsk_rt(t)->zl_accum_err), _point(tsk_rt(t)->zl_accum_err));

	/* err = observed utilization - current estimate */
	err = _sub(actual_util, tsk_rt(t)->zl_util_est);

	/*
	TRACE_TASK(t, "delta err = actual - est\n"
		   "\t%d.%d = %d.%d - %d.%d\n",
		   _fp_to_integer(err), _point(err),
		   _fp_to_integer(actual_util), _point(actual_util),
		   _fp_to_integer(tsk_rt(t)->zl_util_est), _point(tsk_rt(t)->zl_util_est));
	*/

	new = _add(_mul(get_feedback_a(t), err),
		   _mul(get_feedback_b(t), tsk_rt(t)->zl_accum_err));

	/*
	TRACE_TASK(t, "util est = a*(delta error) + b*(accum error)\n"
		   "\t%d.%d = %d.%d * %d.%d + %d.%d * %d.%d\n",
		   _fp_to_integer(new), _point(new),
		   _fp_to_integer(a_fp), _point(a_fp),
		   _fp_to_integer(err), _point(err),
		   _fp_to_integer(b_fp), _point(b_fp),
		   _fp_to_integer(tsk_rt(t)->zl_accum_err), _point(tsk_rt(t)->zl_accum_err));
	*/

	tsk_rt(t)->zl_util_est = new;
	tsk_rt(t)->zl_accum_err = _add(tsk_rt(t)->zl_accum_err, err);

	TRACE_TASK(t, "cost: %llu, est cost: %llu, est util: %d.%d, est err: %d.%d (new est: %d.%d, delta err: %d.%d)\n",
		   tsk_rt(t)->task_params.exec_cost,
		   get_exec_cost_est(t),
		   _fp_to_integer(tsk_rt(t)->zl_util_est), _point(tsk_rt(t)->zl_util_est),
		   _fp_to_integer(tsk_rt(t)->zl_accum_err), _point(tsk_rt(t)->zl_accum_err),
		   _fp_to_integer(new), _point(new),
		   _fp_to_integer(err), _point(err));
}
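/* Worked example of the update above, with illustrative numbers and the
 * rounded gains a ~= 0.102, b ~= 0.303: suppose get_rt_period(t) = 100ms
 * and the job just ran for actual_cost = 25ms, so actual_util = 0.25.
 * With a previous estimate zl_util_est = 0.20 and zl_accum_err = 0.10:
 *
 *	err = 0.25 - 0.20             = 0.05
 *	new = 0.102*0.05 + 0.303*0.10 ~= 0.0354
 *
 * so zl_util_est becomes ~0.035 and zl_accum_err becomes 0.15. Note that
 * the controller output *replaces* the previous estimate rather than being
 * added to it; the accumulated-error (integral) term is what carries
 * history from job to job.
 */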
static void update_queue_position(struct task_struct *t);

static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer)
{
	unsigned long flags;
	struct task_struct* t;
	lt_t now = litmus_clock();

	TRACE("Zero-laxity timer went off!\n");

	raw_spin_lock_irqsave(&aedzl_lock, flags);

	t = container_of(container_of(timer, struct rt_param, zl_timer),
			 struct task_struct,
			 rt_param);

	TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n",
		   now,
		   get_deadline(t) - budget_remaining_est(t),
		   get_deadline(t) - now);

	tsk_rt(t)->zl_timer_armed = 0;
	set_zerolaxity(t, 1);
	update_queue_position(t);

	raw_spin_unlock_irqrestore(&aedzl_lock, flags);

	return HRTIMER_NORESTART;
}

/* __aedzl_take_ready - calls __take_ready, with the EDZL side-effect of
 *                      canceling a pending zero-laxity timer.
 */
static inline struct task_struct* __aedzl_take_ready(rt_domain_t* rt)
{
	struct task_struct* t = __take_ready(rt);

	if (t) {
		if (get_zerolaxity(t) == 0) {
			if (tsk_rt(t)->zl_timer_armed) {
				int cancel_ret;

				TRACE_TASK(t, "Canceling zero-laxity timer.\n");
				cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer);
				WARN_ON(cancel_ret == 0); /* should never be inactive */
				tsk_rt(t)->zl_timer_armed = 0;
			}
		} else {
			TRACE_TASK(t, "Task already has zero-laxity flagged.\n");
		}
	}

	return t;
}

/* __aedzl_add_ready - calls __add_ready, with the EDZL side-effect of
 *                     setting a zero-laxity timer.
 */
static inline void __aedzl_add_ready(rt_domain_t* rt, struct task_struct *new)
{
	__add_ready(rt, new);

	if (get_zerolaxity(new) == 0) {
		lt_t when_to_fire;

		when_to_fire = get_deadline(new) - budget_remaining_est(new);

		TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, est remaining: %llu)\n",
			   when_to_fire,
			   get_deadline(new),
			   budget_remaining_est(new));

		__hrtimer_start_range_ns(&tsk_rt(new)->zl_timer,
					 ns_to_ktime(when_to_fire),
					 0,
					 HRTIMER_MODE_ABS_PINNED,
					 0);
		tsk_rt(new)->zl_timer_armed = 1;
	} else {
		TRACE_TASK(new, "Already has zero-laxity when added to ready queue. (deadline: %llu, est remaining: %llu)\n",
			   get_deadline(new),
			   budget_remaining_est(new));
	}
}
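/* Numeric sketch of the timer arithmetic above (illustrative values): if a
 * job's absolute deadline is at t = 100ms and its estimated remaining
 * budget is 30ms, then its estimated zero-laxity point is
 *
 *	when_to_fire = 100ms - 30ms = 70ms (absolute),
 *
 * i.e., the last instant at which starting the job still lets it finish by
 * its deadline, according to the execution-time estimate. The timer is
 * armed only while the job sits in the ready queue: __aedzl_take_ready()
 * cancels it when the job is picked, and on_zero_laxity() flags the job
 * and re-evaluates its queue position if the timer fires first.
 */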
static void check_for_preemptions(void);

/* Glenn: Originally written for FMLP prio-inheritance, but we can use it
 * to upgrade the priority of a zero-laxity task.
 */
static void update_queue_position(struct task_struct *t)
{
	/* Assumption: caller holds aedzl_lock */

	int check_preempt = 0;

	if (tsk_rt(t)->linked_on != NO_CPU) {
		TRACE_TASK(t, "%s: linked on %d\n",
			   __FUNCTION__, tsk_rt(t)->linked_on);
		/* Task is linked; need to re-order CPUs.
		 * We can't use heap_decrease() here since
		 * the cpu_heap is ordered in reverse direction, so
		 * it is actually an increase.
		 */
		bheap_delete(cpu_lower_prio, &aedzl_cpu_heap,
			     aedzl_cpus[tsk_rt(t)->linked_on]->hn);
		bheap_insert(cpu_lower_prio, &aedzl_cpu_heap,
			     aedzl_cpus[tsk_rt(t)->linked_on]->hn);
	} else {
		/* task may be queued: first stop queue changes */
		raw_spin_lock(&aedzl.release_lock);
		if (is_queued(t)) {
			TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);
			/* We need to update the position of t in some
			 * heap. Note that this may be a release heap.
			 */
			check_preempt = !bheap_decrease(edzl_ready_order,
							tsk_rt(t)->heap_node);
		} else {
			/* Nothing to do: if it is not queued and not linked
			 * then it is currently being moved by other code
			 * (e.g., a timer interrupt handler) that will use the
			 * correct priority when enqueuing the task.
			 */
			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
				   __FUNCTION__);
		}
		raw_spin_unlock(&aedzl.release_lock);

		/* If t was enqueued in a release heap, then the following
		 * preemption check is pointless, but we can't easily detect
		 * that case. If you want to fix this, then consider that
		 * simply adding a state flag requires O(n) time to update when
		 * releasing n tasks, which conflicts with the goal to have
		 * O(log n) merges.
		 */
		if (check_preempt) {
			/* heap_decrease() hit the top level of the heap: make
			 * sure preemption checks get the right task, not the
			 * potentially stale cache.
			 */
			bheap_uncache_min(edzl_ready_order, &aedzl.ready_queue);
			check_for_preemptions();
		}
	}
}

/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into the aedzl domain.
 *           Caller must hold aedzl_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_released(task, litmus_clock())) {
		__aedzl_add_ready(&aedzl, task);
	} else {
		/* it has got to wait */
		add_release(&aedzl, task);
	}
}

/* check for any necessary preemptions */
static void check_for_preemptions(void)
{
	struct task_struct *task;
	cpu_entry_t* last;

	for (last = lowest_prio_cpu();
	     aedzl_preemption_needed(&aedzl, last->linked);
	     last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __aedzl_take_ready(&aedzl);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);
		if (last->linked)
			requeue(last->linked);
		link_task_to_cpu(task, last);
		preempt(last);
	}
}
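/* Concrete scenario for the loop above (illustrative): on two CPUs, let
 * CPU0 be linked to a job with deadline 50 and CPU1 to one with deadline
 * 80, neither at zero laxity, and let the ready-queue head have deadline
 * 60. lowest_prio_cpu() yields CPU1; since 60 < 80, the head preempts:
 * CPU1's old job is requeued, the head is linked to CPU1, and CPU1 is
 * poked. The loop then re-checks against the new ready-queue head until
 * no further preemption is needed. Under the EDZL priority order
 * (zero-laxity jobs first, EDF otherwise), a zero-laxity job would
 * outrank both linked jobs regardless of deadline.
 */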
/* aedzl_job_arrival: task is either resumed or released */
static noinline void aedzl_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	/* tag zero-laxity at release time */
	set_zerolaxity(task, laxity_remaining_est(task) == 0);

	requeue(task);
	check_for_preemptions();
}

static void aedzl_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&aedzl_lock, flags);

	__merge_ready(rt, tasks);
	check_for_preemptions();

	raw_spin_unlock_irqrestore(&aedzl_lock, flags);
}

/* caller holds aedzl_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);
	sched_trace_task_completion_adaptive(t);

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);

	/* update execution-time estimate */
	update_exe_estimate(t, get_exec_time(t));

	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		aedzl_job_arrival(t);
}

/* aedzl_tick - this function is called for every local timer
 *              interrupt.
 *
 *              checks whether the current task has expired and checks
 *              whether we need to preempt it if it has not expired
 */
static void aedzl_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			/* np tasks will be preempted when they become
			 * preemptable again
			 */
			set_tsk_need_resched(t);
			set_will_schedule();
			TRACE("aedzl_scheduler_tick: "
			      "%d is preemptable "
			      "=> FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("aedzl_scheduler_tick: "
			      "%d is non-preemptable, "
			      "preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}
}

/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include a scheduler_tick() determining that
 * it was necessary, because sys_exit_np() was called, because some Linux
 * subsystem determined so, or even (in the worst case) because there is a bug
 * hidden somewhere. Thus, we must take extreme care to determine what the
 * current state is.
 *
 * The CPU could currently be scheduling a task (or not), be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *	- !is_running(scheduled)	// the job blocks
 *	- scheduled->timeslice == 0	// the job completed (forcefully)
 *	- get_rt_flag() == RT_F_SLEEP	// the job completed (by syscall)
 *	- linked != scheduled		// we need to reschedule (for any reason)
 *	- is_np(scheduled)		// rescheduling must be delayed,
 *					// sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
static struct task_struct* aedzl_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(aedzl_cpu_entries);
	int out_of_time, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (aedzl.release_master == entry->cpu)
		return NULL;
#endif

	raw_spin_lock(&aedzl_lock);
	clear_will_schedule();

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
		      budget_enforced(entry->scheduled) &&
		      budget_exhausted(entry->scheduled);
	np          = exists && is_np(entry->scheduled);
	sleep       = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked aedzl_schedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);

	/* If a task blocks we have no choice but to reschedule. */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers
	 * running for blocked jobs). Preemptions go first for the same reason.
	 */
	if (!np && (out_of_time || sleep) && !blocks && !preempt)
		job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked. */
	if (!entry->linked) {
		link_task_to_cpu(__aedzl_take_ready(&aedzl), entry);
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	raw_spin_unlock(&aedzl_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("aedzl_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif

	return next;
}
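/* Note the division of labor across the context switch: aedzl_schedule()
 * updates rt_param.scheduled_on under aedzl_lock when it picks next, while
 * the per-CPU entry->scheduled field is only updated below, in
 * aedzl_finish_switch(), once the switch has actually completed.
 */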
/* _finish_switch - we just finished the switch away from prev */
static void aedzl_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* entry = &__get_cpu_var(aedzl_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}

/* Prepare a task for running in RT mode
 */
static void aedzl_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long flags;
	cpu_entry_t*  entry;

	TRACE("aedzl: task new %d\n", t->pid);

	raw_spin_lock_irqsave(&aedzl_lock, flags);

	t->rt_param.zl_timer_armed = 0;
	hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	t->rt_param.zl_timer.function = on_zero_laxity;

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(aedzl_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != aedzl.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on = NO_CPU;

	aedzl_job_arrival(t);
	raw_spin_unlock_irqrestore(&aedzl_lock, flags);
}

static void aedzl_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&aedzl_lock, flags);
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		} else {
			if (task->rt.time_slice) {
				/* came back in time before deadline */
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
	aedzl_job_arrival(task);
	raw_spin_unlock_irqrestore(&aedzl_lock, flags);
}

static void aedzl_task_block(struct task_struct *t)
{
	unsigned long flags;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	/* unlink if necessary */
	raw_spin_lock_irqsave(&aedzl_lock, flags);
	unlink(t);
	raw_spin_unlock_irqrestore(&aedzl_lock, flags);

	BUG_ON(!is_realtime(t));
}

static void aedzl_task_exit(struct task_struct * t)
{
	unsigned long flags;

	/* unlink if necessary */
	raw_spin_lock_irqsave(&aedzl_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		aedzl_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	if (tsk_rt(t)->zl_timer_armed) {
		/* should this be a BUG if reached? */
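		/* Caution: hrtimer_cancel() waits for a concurrently
		 * executing callback to finish, and on_zero_laxity()
		 * acquires aedzl_lock, which we hold here -- so if the
		 * timer can still fire at this point, this wait could
		 * deadlock. hrtimer_try_to_cancel(), retried after
		 * dropping the lock, would be the conservative choice.
		 */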
		TRACE_TASK(t, "Canceled armed timer while exiting.\n");
		hrtimer_cancel(&tsk_rt(t)->zl_timer);
		tsk_rt(t)->zl_timer_armed = 0;
	}
	raw_spin_unlock_irqrestore(&aedzl_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}

static long aedzl_admit_task(struct task_struct* tsk)
{
	return 0;
}

static long aedzl_activate_plugin(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&aedzl_cpu_heap);
#ifdef CONFIG_RELEASE_MASTER
	aedzl.release_master = atomic_read(&release_master_cpu);
#endif

	for_each_online_cpu(cpu) {
		entry = &per_cpu(aedzl_cpu_entries, cpu);
		bheap_node_init(&entry->hn, entry);
		atomic_set(&entry->will_schedule, 0);
		entry->linked    = NULL;
		entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
		if (cpu != aedzl.release_master) {
#endif
			TRACE("AEDZL: Initializing CPU #%d.\n", cpu);
			update_cpu_position(entry);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			TRACE("AEDZL: CPU %d is release master.\n", cpu);
		}
#endif
	}
	return 0;
}

/* Plugin object */
static struct sched_plugin aedzl_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "AEDZL",
	.finish_switch		= aedzl_finish_switch,
	.tick			= aedzl_tick,
	.task_new		= aedzl_task_new,
	.complete_job		= complete_job,
	.task_exit		= aedzl_task_exit,
	.schedule		= aedzl_schedule,
	.task_wake_up		= aedzl_task_wake_up,
	.task_block		= aedzl_task_block,
	.admit_task		= aedzl_admit_task,
	.activate_plugin	= aedzl_activate_plugin,
};

static int __init init_aedzl(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&aedzl_cpu_heap);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		entry = &per_cpu(aedzl_cpu_entries, cpu);
		aedzl_cpus[cpu] = entry;
		atomic_set(&entry->will_schedule, 0);
		entry->cpu = cpu;
		entry->hn  = &aedzl_heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}
	edzl_domain_init(&aedzl, NULL, aedzl_release_jobs);
	return register_sched_plugin(&aedzl_plugin);
}

module_init(init_aedzl);