From 1a6154cb07727ae9716de118da15dbdb399983b9 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Fri, 28 Jan 2011 17:29:03 -0500 Subject: Implementation of the EDZL scheduler. Implementation of the EDZL scheduler. Zero-laxity points are tracked by timers while jobs are in the pending state. Locking primitives are not supported. --- include/litmus/edzl_common.h | 14 ++ include/litmus/litmus.h | 21 +++ include/litmus/rt_param.h | 15 +- include/litmus/sched_global_plugin.h | 6 +- litmus/Kconfig | 13 +- litmus/Makefile | 1 + litmus/edzl_common.c | 57 ++++++ litmus/sched_edzl.c | 327 +++++++++++++++++++++++++++++++++++ litmus/sched_global_plugin.c | 106 +++++++++--- litmus/sched_gsn_edf.c | 70 +------- 10 files changed, 537 insertions(+), 93 deletions(-) create mode 100644 include/litmus/edzl_common.h create mode 100644 litmus/edzl_common.c create mode 100644 litmus/sched_edzl.c diff --git a/include/litmus/edzl_common.h b/include/litmus/edzl_common.h new file mode 100644 index 000000000000..d1a89ee08554 --- /dev/null +++ b/include/litmus/edzl_common.h @@ -0,0 +1,14 @@ +/* + * EDZL common data structures and utility functions shared by all EDZL + * based scheduler plugins + */ + +#ifndef __UNC_EDZL_COMMON_H__ +#define __UNC_EDZL_COMMON_H__ + +#include + +int edzl_higher_prio(struct task_struct* first, + struct task_struct* second); + +#endif diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 246483783fc0..3203a0809f96 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h @@ -54,6 +54,12 @@ void litmus_exit_task(struct task_struct *tsk); #define get_release(t) (tsk_rt(t)->job_params.release) #define get_class(t) (tsk_rt(t)->task_params.cls) +#ifdef CONFIG_PLUGIN_EDZL +#define get_zerolaxity(t) (tsk_rt(t)->job_params.zero_laxity) +#define set_zerolaxity(t) (tsk_rt(t)->job_params.zero_laxity=1) +#define clear_zerolaxity(t) (tsk_rt(t)->job_params.zero_laxity=0) +#endif + inline static int budget_exhausted(struct task_struct* t) { return get_exec_time(t) >= get_exec_cost(t); @@ -86,6 +92,21 @@ static inline lt_t litmus_clock(void) return ktime_to_ns(ktime_get()); } +#ifdef CONFIG_PLUGIN_EDZL +inline static lt_t laxity_remaining(struct task_struct* t) +{ + lt_t now = litmus_clock(); + lt_t remaining = budget_remaining(t); + lt_t deadline = get_deadline(t); + + if(lt_before(now + remaining, deadline)) + return (deadline - (now + remaining)); + else + return 0; +} +#endif + + /* A macro to convert from nanoseconds to ktime_t. */ #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index a7a183f34a80..41768f446436 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h @@ -90,6 +90,14 @@ struct rt_job { * Increase this sequence number when a job is released. */ unsigned int job_no; + +#ifdef CONFIG_PLUGIN_EDZL + /* boolean indicating zero-laxity state. We will + set this flag explicitly at zero-laxity detection. + This makes priority comparison operations more + predictable since laxity varies with time */ + unsigned int zero_laxity:1; +#endif }; struct pfair_param; @@ -113,6 +121,11 @@ struct rt_param { /* timing parameters */ struct rt_job job_params; + +#ifdef CONFIG_PLUGIN_EDZL + /* used to trigger zero-laxity detection */ + struct hrtimer zl_timer; +#endif /* task representing the current "inherited" task * priority, assigned by inherit_priority and * could point to self if PI does not result in * an increased task priority.
*/ - struct task_struct* inh_task; + struct task_struct* inh_task; #ifdef CONFIG_NP_SECTION /* For the FMLP under PSN-EDF, it is required to make the task diff --git a/include/litmus/sched_global_plugin.h b/include/litmus/sched_global_plugin.h index cac2de63a3ee..21a91817eac1 100644 --- a/include/litmus/sched_global_plugin.h +++ b/include/litmus/sched_global_plugin.h @@ -29,7 +29,7 @@ typedef struct task_struct* (*take_ready_t)(rt_domain_t* rt); typedef void (*add_ready_t)(rt_domain_t* rt, struct task_struct *new); typedef void (*job_arrival_t)(struct task_struct* task); typedef void (*job_completion_t)(struct task_struct *t, int forced); - +typedef int (*preemption_needed_t)(struct task_struct *t); struct sched_global_plugin { @@ -41,6 +41,7 @@ struct sched_global_plugin { add_ready_t add_ready; job_arrival_t job_arrival; job_completion_t job_completion; + preemption_needed_t preemption_needed; rt_domain_t domain; @@ -60,7 +61,6 @@ extern struct sched_global_plugin* active_gbl_plugin; * * Use prefix "gbl_" (global) */ -int gbl_preemption_needed(struct task_struct *t); int gbl_ready_order(struct bheap_node* a, struct bheap_node* b); int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b); void gbl_update_cpu_position(cpu_entry_t *entry); @@ -69,6 +69,7 @@ void gbl_link_task_to_cpu(struct task_struct* linked, cpu_entry_t *entry); void gbl_unlink(struct task_struct* t); void gbl_preempt(cpu_entry_t *entry); void gbl_requeue(struct task_struct* task); +void gbl_update_queue_position(struct task_struct *task); void gbl_check_for_preemptions(void); void gbl_release_jobs(rt_domain_t* rt, struct bheap* tasks); void gbl_job_completion(struct task_struct *t, int forced); @@ -87,6 +88,7 @@ long gbl_activate_plugin(void* plugin); * Use prefix "gblv_" (global virtual) */ void gblv_job_arrival(struct task_struct* task); +int gblv_preemption_needed(struct task_struct *t); void gblv_tick(struct task_struct* t); struct task_struct* gblv_schedule(struct task_struct * prev); void gblv_finish_switch(struct task_struct *prev); diff --git a/litmus/Kconfig b/litmus/Kconfig index a2f267870f29..1e571af45e72 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -23,6 +23,17 @@ config PLUGIN_PFAIR If unsure, say Yes. +config PLUGIN_EDZL + bool "EDZL" + depends on X86 && SYSFS + default y + help + Include the EDZL (Earliest Deadline, Zero Laxity) plugin in the kernel. + EDZL functions like G-EDF, except jobs with zero laxity are given maximum + priority. + + If unsure, say Yes. + config RELEASE_MASTER bool "Release-master Support" depends on ARCH_HAS_SEND_PULL_TIMERS @@ -32,7 +43,7 @@ config RELEASE_MASTER that services all timer interrupts, but that does not schedule real-time tasks. See RTSS'09 paper for details (http://www.cs.unc.edu/~anderson/papers.html). - Currently only supported by GSN-EDF. + Currently only supported by GSN-EDF and EDZL. 
endmenu diff --git a/litmus/Makefile b/litmus/Makefile index 820deb7f2263..ec4e21106886 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -21,6 +21,7 @@ obj-y = sched_plugin.o litmus.o \ obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o +obj-$(CONFIG_PLUGIN_EDZL) += sched_edzl.o edzl_common.o obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o diff --git a/litmus/edzl_common.c b/litmus/edzl_common.c new file mode 100644 index 000000000000..9e26304a1ea2 --- /dev/null +++ b/litmus/edzl_common.c @@ -0,0 +1,57 @@ +/* + * kernel/edzl_common.c + * + * Common functions for EDZL-based schedulers. + */ + +#include +#include +#include + +#include +#include +#include + +#include +#include + + +int edzl_higher_prio(struct task_struct* first, + struct task_struct* second) +{ + struct task_struct *first_task = first; + struct task_struct *second_task = second; + + /* There is no point in comparing a task to itself. */ + if (first && first == second) { + TRACE_TASK(first, + "WARNING: pointless edzl priority comparison.\n"); + return 0; + } + + + /* Check for inherited priorities. Change task + * used for comparison in such a case. + */ + if (first && first->rt_param.inh_task) + first_task = first->rt_param.inh_task; + if (second && second->rt_param.inh_task) + second_task = second->rt_param.inh_task; + + /* null checks & rt checks */ + if(!first_task) + return 0; + else if(!second_task || !is_realtime(second_task)) + return 1; + + + if(likely(get_zerolaxity(first_task) == get_zerolaxity(second_task))) + { + /* edf order if both tasks have the same laxity state */ + return(edf_higher_prio(first_task, second_task)); + } + else + { + return(get_zerolaxity(first_task)); + } +} diff --git a/litmus/sched_edzl.c b/litmus/sched_edzl.c new file mode 100644 index 000000000000..0664b78e540b --- /dev/null +++ b/litmus/sched_edzl.c @@ -0,0 +1,327 @@ +/* + * litmus/sched_edzl.c + * + * Implementation of the EDZL scheduling algorithm. + * + * This version uses the simple approach and serializes all scheduling + * decisions by the use of a queue lock. This is probably not the + * best way to do it, but it should suffice for now.
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#include + +static struct task_struct* __edzl_take_ready(rt_domain_t* rt); +static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new); +static void edzl_job_arrival(struct task_struct* task); +static void edzl_task_new(struct task_struct * t, int on_rq, int running); +static void edzl_task_wake_up(struct task_struct *task); +static void edzl_task_exit(struct task_struct * t); +static int edzl_preemption_needed(struct task_struct *t); + + +/* EDZL Plugin object */ +static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = { + .plugin = { + .finish_switch = gblv_finish_switch, + .tick = gblv_tick, + .complete_job = complete_job, + .schedule = gblv_schedule, + .task_block = gblv_task_block, + .admit_task = gblv_admit_task, + .activate_plugin = gbl_activate_plugin, + + .plugin_name = "EDZL", + .task_new = edzl_task_new, + .task_wake_up = edzl_task_wake_up, + .task_exit = edzl_task_exit, + }, + + .job_completion = gbl_job_completion, + + .prio_order = edzl_higher_prio, + .take_ready = __edzl_take_ready, + .add_ready = __edzl_add_ready, + .job_arrival = edzl_job_arrival, + .preemption_needed = edzl_preemption_needed +}; + + +#define active_gbl_domain (active_gbl_plugin->domain) +#define active_gbl_domain_lock (active_gbl_domain.ready_lock) + +DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries); + + +static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer) +{ + unsigned long flags; + struct task_struct* t; + + lt_t now = litmus_clock(); + + TRACE("Zero-laxity timer went off!\n"); + + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + + t = container_of(container_of(timer, struct rt_param, zl_timer), + struct task_struct, + rt_param); + + TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n", + now, + get_deadline(t) - budget_remaining(t), + get_deadline(t) - now); + + set_zerolaxity(t); + gbl_update_queue_position(t); + + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); + + return HRTIMER_NORESTART; +} + +/* __edzl_take_ready - calls __take_ready with an EDZL timer-cancellation side-effect. */ +static struct task_struct* __edzl_take_ready(rt_domain_t* rt) +{ + struct task_struct* t = __take_ready(rt); + + if(t) + { + if(get_zerolaxity(t) == 0) + { + if(hrtimer_active(&tsk_rt(t)->zl_timer)) + { + int cancel_ret; + + TRACE_TASK(t, "Canceling zero-laxity timer.\n"); + cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer); + WARN_ON(cancel_ret == 0); /* should never be inactive. */ + } + } + else + { + TRACE_TASK(t, "Task already has zero-laxity flagged.\n"); + } + } + + return t; +} + +/* __edzl_add_ready - calls __add_ready with an EDZL timer-setting side-effect. */ +static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new) +{ + __add_ready(rt, new); + + if(get_zerolaxity(new) == 0) + { + lt_t when_to_fire; + + when_to_fire = get_deadline(new) - budget_remaining(new); + + TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, remaining: %llu)\n", + when_to_fire, + get_deadline(new), + budget_remaining(new)); + + __hrtimer_start_range_ns(&tsk_rt(new)->zl_timer, + ns_to_ktime(when_to_fire), + 0, + HRTIMER_MODE_ABS_PINNED, + 0); + } + else + { + TRACE_TASK(new, "Already has zero-laxity when added to ready queue.
(deadline: %llu, remaining: %llu))\n", + get_deadline(new), + budget_remaining(new)); + } +} + + + +/* edzl_job_arrival: task is either resumed or released */ +static void edzl_job_arrival(struct task_struct* task) +{ + BUG_ON(!task); + + /* clear old laxity flag or tag zero-laxity upon release */ + if(laxity_remaining(task)) + clear_zerolaxity(task); + else + set_zerolaxity(task); + + gbl_requeue(task); + gbl_check_for_preemptions(); +} + + +/* Prepare a task for running in RT mode + */ +static void edzl_task_new(struct task_struct * t, int on_rq, int running) +{ + unsigned long flags; + cpu_entry_t* entry; + + TRACE("edzl: task new %d\n", t->pid); + + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + + hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + t->rt_param.zl_timer.function = on_zero_laxity; + + /* setup job params */ + release_at(t, litmus_clock()); + + if (running) { + entry = active_gbl_plugin->cpus[task_cpu(t)]; + BUG_ON(entry->scheduled); + +#ifdef CONFIG_RELEASE_MASTER + if (entry->cpu != active_gbl_domain.release_master) { +#endif + entry->scheduled = t; + tsk_rt(t)->scheduled_on = task_cpu(t); +#ifdef CONFIG_RELEASE_MASTER + } else { + /* do not schedule on release master */ + gbl_preempt(entry); /* force resched */ + tsk_rt(t)->scheduled_on = NO_CPU; + } +#endif + } else { + t->rt_param.scheduled_on = NO_CPU; + } + t->rt_param.linked_on = NO_CPU; + + active_gbl_plugin->job_arrival(t); + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); +} + + +static void edzl_task_wake_up(struct task_struct *task) +{ + unsigned long flags; + lt_t now; + + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); + + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + /* We need to take suspensions because of semaphores into + * account! If a job resumes after being suspended due to acquiring + * a semaphore, it should never be treated as a new job release. + */ + if (get_rt_flags(task) == RT_F_EXIT_SEM) { + set_rt_flags(task, RT_F_RUNNING); + } else { + now = litmus_clock(); + if (is_tardy(task, now)) { + /* new sporadic release */ + release_at(task, now); + sched_trace_task_release(task); + } + else { + if (task->rt.time_slice) { + /* came back in time before deadline + */ + set_rt_flags(task, RT_F_RUNNING); + } + } + } + active_gbl_plugin->job_arrival(task); + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); +} + + +static void edzl_task_exit(struct task_struct * t) +{ + unsigned long flags; + + /* unlink if necessary */ + raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); + gbl_unlink(t); + if (tsk_rt(t)->scheduled_on != NO_CPU) { + active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; + tsk_rt(t)->scheduled_on = NO_CPU; + } + + if(hrtimer_active(&tsk_rt(t)->zl_timer)) + { + /* BUG if reached? */ + TRACE_TASK(t, "Canceled armed timer while exiting.\n"); + hrtimer_cancel(&tsk_rt(t)->zl_timer); + } + + raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); + + BUG_ON(!is_realtime(t)); + TRACE_TASK(t, "RIP\n"); +} + + +/* need_to_preempt - check whether the task t needs to be preempted + * call only with irqs disabled and with ready_lock acquired + * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! 
+ */ +static int edzl_preemption_needed(struct task_struct *t) +{ + /* we need the read lock for edf_ready_queue */ + /* no need to preempt if there is nothing pending */ + if (!__jobs_pending(&active_gbl_domain)) + return 0; + /* we need to reschedule if t doesn't exist */ + if (!t) + return 1; + /* make sure to get non-rt stuff out of the way */ + if (!is_realtime(t)) + return 1; + + /* NOTE: We cannot check for non-preemptibility since we + * don't know what address space we're currently in. + */ + + /* Detect zero-laxity as needed. Easier to do it here than in tick. + (No timer is used to detect zero-laxity while a job is running.) */ + if(unlikely(!get_zerolaxity(t) && laxity_remaining(t) == 0)) + { + set_zerolaxity(t); + } + + return edzl_higher_prio(__next_ready(&active_gbl_domain), t); +} + + +static int __init init_edzl(void) +{ + int cpu; + cpu_entry_t *entry; + + bheap_init(&edzl_plugin.cpu_heap); + /* initialize CPU state */ + for (cpu = 0; cpu < NR_CPUS; cpu++) { + entry = &per_cpu(edzl_cpu_entries, cpu); + edzl_plugin.cpus[cpu] = entry; + entry->cpu = cpu; + entry->hn = &edzl_plugin.heap_node[cpu]; + bheap_node_init(&entry->hn, entry); + } + gbl_domain_init(&edzl_plugin, NULL, gbl_release_jobs); + + return register_sched_plugin(&edzl_plugin.plugin); +} + + +module_init(init_edzl); diff --git a/litmus/sched_global_plugin.c b/litmus/sched_global_plugin.c index 22dffa7d62fc..e94247b66b59 100644 --- a/litmus/sched_global_plugin.c +++ b/litmus/sched_global_plugin.c @@ -105,31 +105,11 @@ struct sched_global_plugin* active_gbl_plugin; /*********************************************************************/ /* Priority-related functions */ -int gbl_preemption_needed(struct task_struct *t) -{ - /* we need the read lock for active_gbl_domain's ready_queue */ - /* no need to preempt if there is nothing pending */ - if (!__jobs_pending(&active_gbl_domain)) - return 0; - /* we need to reschedule if t doesn't exist */ - if (!t) - return 1; - - /* NOTE: We cannot check for non-preemptibility since we - * don't know what address space we're currently in. - */ - - /* make sure to get non-rt stuff out of the way */ - return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t); -} - int gbl_ready_order(struct bheap_node* a, struct bheap_node* b) { return active_gbl_plugin->prio_order(bheap2task(a), bheap2task(b)); } - - int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) { cpu_entry_t *a, *b; @@ -243,7 +223,6 @@ void gbl_unlink(struct task_struct* t) } } - /* preempt - force a CPU to reschedule */ void gbl_preempt(cpu_entry_t *entry) @@ -268,6 +247,71 @@ void gbl_requeue(struct task_struct* task) } } +/* + * update_queue_position - call after changing the priority of 'task'. + */ +void gbl_update_queue_position(struct task_struct *task) +{ + /* We don't know whether task is in the ready queue. It should, but + * on a budget overrun it may already be in a release queue. Hence, + * calling unlink() is not possible since it assumes that the task is + * not in a release queue. + */ + + /* Assumption: caller holds active_gbl_domain_lock */ + + int check_preempt = 0; + + if (tsk_rt(task)->linked_on != NO_CPU) { + TRACE_TASK(task, "%s: linked on %d\n", + __FUNCTION__, tsk_rt(task)->linked_on); + /* Task is scheduled; need to re-order CPUs. + * We can't use heap_decrease() here since + * the cpu_heap is ordered in reverse direction, so + * it is actually an increase. 
*/ + bheap_delete(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap, + active_gbl_plugin->cpus[tsk_rt(task)->linked_on]->hn); + bheap_insert(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap, + active_gbl_plugin->cpus[tsk_rt(task)->linked_on]->hn); + } else { + /* task may be queued: first stop queue changes */ + raw_spin_lock(&active_gbl_domain.release_lock); + if (is_queued(task)) { + TRACE_TASK(task, "%s: is queued\n", + __FUNCTION__); + /* We need to update the position + * of task in some heap. Note that this + * may be a release heap. */ + check_preempt = + !bheap_decrease(gbl_ready_order, + tsk_rt(task)->heap_node); + } else { + /* Nothing to do: if it is not queued and not linked + * then it is currently being moved by other code + * (e.g., a timer interrupt handler) that will use the + * correct priority when enqueuing the task. */ + TRACE_TASK(task, "%s: is NOT queued => Done.\n", + __FUNCTION__); + } + raw_spin_unlock(&active_gbl_domain.release_lock); + + /* If task was enqueued in a release heap, then the following + * preemption check is pointless, but we can't easily detect + * that case. If you want to fix this, then consider that + * simply adding a state flag requires O(n) time to update when + * releasing n tasks, which conflicts with the goal to have + * O(log n) merges. */ + if (check_preempt) { + /* heap_decrease() hit the top level of the heap: make + * sure preemption checks get the right task, not the + * potentially stale cache. */ + bheap_uncache_min(gbl_ready_order, + &active_gbl_domain.ready_queue); + gbl_check_for_preemptions(); + } + } +} + /* check for any necessary preemptions */ void gbl_check_for_preemptions(void) @@ -276,7 +320,7 @@ void gbl_check_for_preemptions(void) cpu_entry_t* last; for(last = lowest_prio_cpu(); - gbl_preemption_needed(last->linked); + active_gbl_plugin->preemption_needed(last->linked); last = lowest_prio_cpu()) { /* preemption necessary */ @@ -392,6 +436,24 @@ void gblv_job_arrival(struct task_struct* task) gbl_check_for_preemptions(); } +int gblv_preemption_needed(struct task_struct *t) +{ + /* we need the read lock for active_gbl_domain's ready_queue */ + /* no need to preempt if there is nothing pending */ + if (!__jobs_pending(&active_gbl_domain)) + return 0; + /* we need to reschedule if t doesn't exist */ + if (!t) + return 1; + + /* NOTE: We cannot check for non-preemptibility since we + * don't know what address space we're currently in. + */ + + /* make sure to get non-rt stuff out of the way */ + return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t); +} + /* gbl_tick - this function is called for every local timer interrupt. * * checks whether the current task has expired and checks diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 7876d707d939..e4d17da0160a 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -63,76 +63,12 @@ static struct sched_global_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { .take_ready = __take_ready, .add_ready = __add_ready, .job_arrival = gblv_job_arrival, - .job_completion = gbl_job_completion + .job_completion = gbl_job_completion, + .preemption_needed = gblv_preemption_needed }; #ifdef CONFIG_FMLP -/* Update the queue position of a task that got it's priority boosted via - * priority inheritance. */ -static void update_queue_position(struct task_struct *holder) -{ - /* We don't know whether holder is in the ready queue. It should, but - * on a budget overrun it may already be in a release queue. 
Hence, - * calling unlink() is not possible since it assumes that the task is - * not in a release queue. However, we can safely check whether - * sem->holder is currently in a queue or scheduled after locking both - * the release and the ready queue lock. */ - - /* Assumption: caller holds gsnedf_lock */ - - int check_preempt = 0; - - if (tsk_rt(holder)->linked_on != NO_CPU) { - TRACE_TASK(holder, "%s: linked on %d\n", - __FUNCTION__, tsk_rt(holder)->linked_on); - /* Holder is scheduled; need to re-order CPUs. - * We can't use heap_decrease() here since - * the cpu_heap is ordered in reverse direction, so - * it is actually an increase. */ - bheap_delete(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap, - gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn); - bheap_insert(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap, - gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn); - } else { - /* holder may be queued: first stop queue changes */ - raw_spin_lock(&gsn_edf_plugin.domain.release_lock); - if (is_queued(holder)) { - TRACE_TASK(holder, "%s: is queued\n", - __FUNCTION__); - /* We need to update the position - * of holder in some heap. Note that this - * may be a release heap. */ - check_preempt = - !bheap_decrease(edf_ready_order, - tsk_rt(holder)->heap_node); - } else { - /* Nothing to do: if it is not queued and not linked - * then it is currently being moved by other code - * (e.g., a timer interrupt handler) that will use the - * correct priority when enqueuing the task. */ - TRACE_TASK(holder, "%s: is NOT queued => Done.\n", - __FUNCTION__); - } - raw_spin_unlock(&gsn_edf_plugin.domain.release_lock); - - /* If holder was enqueued in a release heap, then the following - * preemption check is pointless, but we can't easily detect - * that case. If you want to fix this, then consider that - * simply adding a state flag requires O(n) time to update when - * releasing n tasks, which conflicts with the goal to have - * O(log n) merges. */ - if (check_preempt) { - /* heap_decrease() hit the top level of the heap: make - * sure preemption checks get the right task, not the - * potentially stale cache. */ - bheap_uncache_min(gbl_ready_order, - &gsn_edf_plugin.domain.ready_queue); - gbl_check_for_preemptions(); - } - } -} - static long gsnedf_pi_block(struct pi_semaphore *sem, struct task_struct *new_waiter) { @@ -158,7 +94,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem, new_waiter->comm, new_waiter->pid); /* let holder inherit */ sem->holder->rt_param.inh_task = new_waiter; - update_queue_position(sem->holder); + gbl_update_queue_position(sem->holder); } raw_spin_unlock(&gsnedf_lock); } -- cgit v1.2.2
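
Note (not part of the patch): the scheduling rule above is compact enough to illustrate outside the kernel. The following standalone sketch is plain userspace C with made-up job parameters; it mirrors the intent of the patch's laxity_remaining() and edzl_higher_prio(): a job's zero-laxity point is its absolute deadline minus its remaining budget, and once a job is flagged zero-laxity it outranks any job that still has slack, while plain EDF ordering applies otherwise. All names and numbers here are illustrative only; the in-kernel code works with lt_t nanosecond timestamps, tsk_rt()/job_params, and hrtimers instead.

/*
 * Illustrative userspace sketch (not kernel code): how EDZL decides
 * priorities. Job parameters below are invented for the example.
 */
#include <stdio.h>

typedef unsigned long long lt_t;        /* time values, as in litmus */

struct job {
	const char *name;
	lt_t deadline;          /* absolute deadline */
	lt_t exec_cost;         /* total budget */
	lt_t exec_time;         /* budget consumed so far */
	int  zero_laxity;       /* set once the zero-laxity timer fires */
};

/* remaining budget of a job */
static lt_t budget_remaining(const struct job *j)
{
	return j->exec_cost - j->exec_time;
}

/* the instant at which remaining budget equals time left to the deadline;
 * this is where the patch arms its per-task hrtimer */
static lt_t zero_laxity_point(const struct job *j)
{
	return j->deadline - budget_remaining(j);
}

/* zero-laxity jobs outrank jobs with slack; otherwise fall back to EDF */
static int edzl_higher_prio(const struct job *a, const struct job *b)
{
	if (a->zero_laxity != b->zero_laxity)
		return a->zero_laxity;
	return a->deadline < b->deadline;
}

int main(void)
{
	struct job t1 = { "T1", 100, 40, 10, 0 };   /* made-up numbers */
	struct job t2 = { "T2",  90, 20,  5, 0 };

	printf("%s reaches zero laxity at t=%llu\n", t1.name, zero_laxity_point(&t1));
	printf("%s reaches zero laxity at t=%llu\n", t2.name, zero_laxity_point(&t2));

	/* before either timer fires, plain EDF order applies */
	printf("EDF winner: %s\n", edzl_higher_prio(&t1, &t2) ? t1.name : t2.name);

	/* once T1 hits its zero-laxity point it preempts T2 despite the later deadline */
	t1.zero_laxity = 1;
	printf("EDZL winner: %s\n", edzl_higher_prio(&t1, &t2) ? t1.name : t2.name);
	return 0;
}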