From c55e411400c13bcfc774d541fc1c9f0f62b644c4 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Fri, 2 May 2008 18:50:44 -0400 Subject: GSN-EDF: add extra debug info --- litmus/sched_gsn_edf.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 5dcc250aa6..c60b6ddd49 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -354,6 +354,9 @@ static void gsnedf_setup_release_job_timer(struct task_struct *task) #endif /* Expiration time of timer is release time of task. */ + TRACE_TASK(task, "prog timer, rel=%llu, at %llu\n", + get_release(task), + litmus_clock()); release_timer(task).expires = ns_to_ktime(get_release(task)); hrtimer_start(&release_timer(task), release_timer(task).expires, @@ -453,6 +456,17 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; preempt = entry->scheduled != entry->linked; + if (exists) + TRACE_TASK(prev, + "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " + "state:%d sig:%d\n", + blocks, out_of_time, np, sleep, preempt, + prev->state, signal_pending(prev)); + if (entry->linked && preempt) + TRACE_TASK(prev, "will be preempted by %s/%d\n", + entry->linked->comm, entry->linked->pid); + + /* If a task blocks we have no choice but to reschedule. */ if (blocks) @@ -497,6 +511,10 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) spin_unlock(&gsnedf_lock); + if (next) + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); + else if (exists && !next) + TRACE("becomes idle at %llu.\n", litmus_clock()); /* don't race with a concurrent switch */ if (next && prev != next) while (next->rt_param.scheduled_on != NO_CPU) @@ -549,6 +567,8 @@ static void gsnedf_task_wake_up(struct task_struct *task) unsigned long flags; lt_t now; + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); + spin_lock_irqsave(&gsnedf_lock, flags); /* We need to take suspensions because of semaphores into * account! If a job resumes after being suspended due to acquiring @@ -576,6 +596,8 @@ static void gsnedf_task_block(struct task_struct *t) { unsigned long flags; + TRACE_TASK(t, "block at %llu\n", litmus_clock()); + /* unlink if necessary */ spin_lock_irqsave(&gsnedf_lock, flags); unlink(t); -- cgit v1.2.2 From 2f030cd48bbdfc6f4155c38684d0e8b98195f4f5 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Fri, 2 May 2008 19:00:27 -0400 Subject: LITMUS: rework job migration code The old version had a significant window for races with interrupt handlers executing on other CPUs. 
--- kernel/sched.c | 3 +- litmus/sched_litmus.c | 77 +++++++++++++++++++++++++++++++++++---------------- 2 files changed, 55 insertions(+), 25 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 00097dea89..170237e3a4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3649,9 +3649,10 @@ need_resched_nonpreemptible: /* do litmus scheduling outside of rq lock, so that we * can do proper migrations for global schedulers */ - litmus_schedule(rq, prev); + spin_lock(&rq->lock); clear_tsk_need_resched(prev); + litmus_schedule(rq, prev); if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely((prev->state & TASK_INTERRUPTIBLE) && diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 16cdf2db59..feb0159033 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -24,46 +24,75 @@ static void litmus_tick(struct rq *rq, struct task_struct *p) static void litmus_schedule(struct rq *rq, struct task_struct *prev) { struct rq* other_rq; - int success = 0; + long prev_state; /* WARNING: rq is _not_ locked! */ if (is_realtime(prev)) update_time_litmus(rq, prev); - while (!success) { - /* let the plugin schedule */ - rq->litmus_next = litmus->schedule(prev); - - /* check if a global plugin pulled a task from a different RQ */ - if (rq->litmus_next && task_rq(rq->litmus_next) != rq) { - /* we need to migrate the task */ - other_rq = task_rq(rq->litmus_next); - double_rq_lock(rq, other_rq); - /* now that we have the lock we need to make sure a - * couple of things still hold: - * - it is still a real-time task - * - it is still runnable (could have been stopped) - */ - if (is_realtime(rq->litmus_next) && - is_running(rq->litmus_next)) { - set_task_cpu(rq->litmus_next, smp_processor_id()); - success = 1; - } /* else something raced, retry */ - double_rq_unlock(rq, other_rq); - } else - success = 1; - } + /* let the plugin schedule */ + rq->litmus_next = litmus->schedule(prev); + + /* check if a global plugin pulled a task from a different RQ */ + if (rq->litmus_next && task_rq(rq->litmus_next) != rq) { + /* we need to migrate the task */ + other_rq = task_rq(rq->litmus_next); + TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu); + + /* while we drop the lock, the prev task could change its + * state + */ + prev_state = prev->state; + spin_unlock(&rq->lock); + double_rq_lock(rq, other_rq); + if (prev->state != prev_state) { + TRACE_TASK(prev, + "state changed while we dropped" + " the lock: now=%d, old=%d", + prev->state, prev_state); + if (prev_state && !prev->state) { + /* prev task became unblocked + * we need to simulate normal sequence of events + * to scheduler plugins. 
*/ + litmus->task_block(prev); + litmus->task_wake_up(prev); + } + } + + set_task_cpu(rq->litmus_next, smp_processor_id()); + + /* now that we have the lock we need to make sure a + * couple of things still hold: + * - it is still a real-time task + * - it is still runnable (could have been stopped) + */ + if (!is_realtime(rq->litmus_next) || + !is_running(rq->litmus_next)) { + /* BAD BAD BAD */ + TRACE_TASK(rq->litmus_next, + "migration invariant FAILED: rt=%d running=%d\n", + is_realtime(rq->litmus_next), + is_running(rq->litmus_next)); + } + /* release the other CPU's runqueue, but keep ours */ + spin_unlock(&other_rq->lock); + } } static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, int wakeup) { if (wakeup) litmus->task_wake_up(p); + else + TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); } static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep) { if (sleep) litmus->task_block(p); + else + TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n"); } static void yield_task_litmus(struct rq *rq) -- cgit v1.2.2 From a1f204ec33f806c6db8a4bfe6cc1a1e6109e8ef8 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Fri, 2 May 2008 20:51:29 -0400 Subject: LITMUS: rework rt_domain to not cause circular locking dependencies --- include/litmus/edf_common.h | 4 +- include/litmus/rt_domain.h | 33 +++++------ litmus/edf_common.c | 4 +- litmus/rt_domain.c | 132 ++++++++++++++++++-------------------- litmus/sched_gsn_edf.c | 97 ++++++++------------------ litmus/sched_psn_edf.c | 78 ++++++++------------------ 6 files changed, 123 insertions(+), 225 deletions(-) diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h index f79bd76e17..37630e5c26 100644 --- a/include/litmus/edf_common.h +++ b/include/litmus/edf_common.h @@ -12,8 +12,8 @@ #include -void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, - release_at_t release); +void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, + release_job_t release); int edf_higher_prio(struct task_struct* first, struct task_struct* second); diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h index fd3c205bcc..7b2a11c0f2 100644 --- a/include/litmus/rt_domain.h +++ b/include/litmus/rt_domain.h @@ -5,14 +5,18 @@ #ifndef __UNC_RT_DOMAIN_H__ #define __UNC_RT_DOMAIN_H__ +#include + struct _rt_domain; typedef int (*check_resched_needed_t)(struct _rt_domain *rt); -typedef void (*release_at_t)(struct task_struct *t); +typedef void (*release_job_t)(struct task_struct *t, struct _rt_domain *rt); typedef struct _rt_domain { + struct tasklet_struct release_tasklet; + /* runnable rt tasks are in here */ - rwlock_t ready_lock; + spinlock_t ready_lock; struct list_head ready_queue; /* real-time tasks waiting for release are in here */ @@ -22,8 +26,8 @@ typedef struct _rt_domain { /* how do we check if we need to kick another CPU? */ check_resched_needed_t check_resched; - /* how do we setup a job release? */ - release_at_t setup_release; + /* how do we release a job? */ + release_job_t release_job; /* how are tasks ordered in the ready queue?
*/ list_cmp_t order; @@ -35,8 +39,9 @@ typedef struct _rt_domain { #define ready_jobs_pending(rt) \ (!list_empty(&(rt)->ready_queue)) -void rt_domain_init(rt_domain_t *rt, check_resched_needed_t f, - release_at_t g, list_cmp_t order); +void rt_domain_init(rt_domain_t *rt, list_cmp_t order, + check_resched_needed_t check, + release_job_t relase); void __add_ready(rt_domain_t* rt, struct task_struct *new); void __add_release(rt_domain_t* rt, struct task_struct *task); @@ -44,16 +49,13 @@ void __add_release(rt_domain_t* rt, struct task_struct *task); struct task_struct* __take_ready(rt_domain_t* rt); struct task_struct* __peek_ready(rt_domain_t* rt); -void try_release_pending(rt_domain_t* rt); -void __release_pending(rt_domain_t* rt); - static inline void add_ready(rt_domain_t* rt, struct task_struct *new) { unsigned long flags; /* first we need the write lock for rt_ready_queue */ - write_lock_irqsave(&rt->ready_lock, flags); + spin_lock_irqsave(&rt->ready_lock, flags); __add_ready(rt, new); - write_unlock_irqrestore(&rt->ready_lock, flags); + spin_unlock_irqrestore(&rt->ready_lock, flags); } static inline struct task_struct* take_ready(rt_domain_t* rt) @@ -61,9 +63,9 @@ static inline struct task_struct* take_ready(rt_domain_t* rt) unsigned long flags; struct task_struct* ret; /* first we need the write lock for rt_ready_queue */ - write_lock_irqsave(&rt->ready_lock, flags); + spin_lock_irqsave(&rt->ready_lock, flags); ret = __take_ready(rt); - write_unlock_irqrestore(&rt->ready_lock, flags); + spin_unlock_irqrestore(&rt->ready_lock, flags); return ret; } @@ -87,11 +89,10 @@ static inline int jobs_pending(rt_domain_t* rt) unsigned long flags; int ret; /* first we need the write lock for rt_ready_queue */ - read_lock_irqsave(&rt->ready_lock, flags); + spin_lock_irqsave(&rt->ready_lock, flags); ret = __jobs_pending(rt); - read_unlock_irqrestore(&rt->ready_lock, flags); + spin_unlock_irqrestore(&rt->ready_lock, flags); return ret; } - #endif diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 0b05194a04..68c6a401af 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c @@ -68,9 +68,9 @@ int edf_ready_order(struct list_head* a, struct list_head* b) } void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, - release_at_t release) + release_job_t release) { - rt_domain_init(rt, resched, release, edf_ready_order); + rt_domain_init(rt, edf_ready_order, resched, release); } /* need_to_preempt - check whether the task t needs to be preempted diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index d29325f232..cbedbac1dc 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c @@ -22,57 +22,80 @@ static int dummy_resched(rt_domain_t *rt) return 0; } -static void dummy_setup_release(struct task_struct *t) +static int dummy_order(struct list_head* a, struct list_head* b) { + return 0; } -static int dummy_order(struct list_head* a, struct list_head* b) +/* default implementation: use default lock */ +static void default_release_job(struct task_struct* t, rt_domain_t* rt) { - return 0; + add_ready(rt, t); } -/* We now set or clear a per_cpu flag indicating if a plugin-specific call - * to setup a timer (that handles a job release) needs to be made. There is - * no need to setup multiple timers for jobs that are released at the same - * time. The actual clearing of this flag is a side effect of the release_order - * comparison function that is used when inserting a task into the - * release queue. 
- */ -DEFINE_PER_CPU(int, call_setup_release) = 1; -int release_order(struct list_head* a, struct list_head* b) +static enum hrtimer_restart release_job_timer(struct hrtimer *timer) { - struct task_struct *task_a = list_entry(a, struct task_struct, rt_list); - struct task_struct *task_b = list_entry(b, struct task_struct, rt_list); + /* call the current plugin */ + return HRTIMER_NORESTART; +} - /* If the release times are equal, clear the flag. */ - if (get_release(task_a) == get_release(task_b)) { - __get_cpu_var(call_setup_release) = 0; - return 0; +static void setup_job_release_timer(struct task_struct *task) +{ + hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + release_timer(task).function = release_job_timer; +#ifdef CONFIG_HIGH_RES_TIMERS + release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; +#endif + /* Expiration time of timer is release time of task. */ + release_timer(task).expires = ns_to_ktime(get_release(task)); + + TRACE_TASK(task, "arming release timer rel=%llu at %llu\n", + get_release(task), litmus_clock()); + + hrtimer_start(&release_timer(task), release_timer(task).expires, + HRTIMER_MODE_ABS); +} + +static void arm_release_timers(unsigned long _rt) +{ + rt_domain_t *rt = (rt_domain_t*) _rt; + unsigned long flags; + struct list_head *pos, *safe; + struct task_struct* t; + + spin_lock_irqsave(&rt->release_lock, flags); + + list_for_each_safe(pos, safe, &rt->release_queue) { + t = list_entry(pos, struct task_struct, rt_list); + list_del(pos); + setup_job_release_timer(t); } - return earlier_release(task_a, task_b); + spin_unlock_irqrestore(&rt->release_lock, flags); } void rt_domain_init(rt_domain_t *rt, - check_resched_needed_t f, - release_at_t g, - list_cmp_t order) + list_cmp_t order, + check_resched_needed_t check, + release_job_t release + ) { BUG_ON(!rt); - if (!f) - f = dummy_resched; - if (!g) - g = dummy_setup_release; + if (!check) + check = dummy_resched; + if (!release) + release = default_release_job; if (!order) order = dummy_order; INIT_LIST_HEAD(&rt->ready_queue); INIT_LIST_HEAD(&rt->release_queue); - rt->ready_lock = RW_LOCK_UNLOCKED; + rt->ready_lock = SPIN_LOCK_UNLOCKED; rt->release_lock = SPIN_LOCK_UNLOCKED; - rt->check_resched = f; - rt->setup_release = g; + rt->check_resched = check; + rt->release_job = release; rt->order = order; + tasklet_init(&rt->release_tasklet, arm_release_timers, (unsigned long) rt); } /* add_ready - add a real-time task to the rt ready queue. It must be runnable. @@ -111,54 +134,7 @@ struct task_struct* __peek_ready(rt_domain_t* rt) */ void __add_release(rt_domain_t* rt, struct task_struct *task) { - TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to release queue\n", - task->comm, task->pid, get_exec_cost(task), get_rt_period(task), - get_release(task)); - - /* Set flag assuming that we will need to setup another timer for - * the release of this job. If it turns out that this is unnecessary - * (because another job is already being released at that time, - * and setting up two timers is redundant and inefficient), then - * we will clear that flag so another release timer isn't setup. - */ - __get_cpu_var(call_setup_release) = 1; - list_insert(&task->rt_list, &rt->release_queue, release_order); - - /* Setup a job release -- this typically involves a timer.
*/ - if (__get_cpu_var(call_setup_release)) - rt->setup_release(task); -} -void __release_pending(rt_domain_t* rt) -{ - struct list_head *pos, *save; - struct task_struct *queued; - lt_t now = litmus_clock(); - list_for_each_safe(pos, save, &rt->release_queue) { - queued = list_entry(pos, struct task_struct, rt_list); - if (likely(is_released(queued, now))) { - /* this one is ready to go*/ - list_del(pos); - set_rt_flags(queued, RT_F_RUNNING); - - sched_trace_job_release(queued); - - /* now it can be picked up */ - barrier(); - add_ready(rt, queued); - } - else - /* the release queue is ordered */ - break; - } + list_add(&task->rt_list, &rt->release_queue); + tasklet_hi_schedule(&rt->release_tasklet); } -void try_release_pending(rt_domain_t* rt) -{ - unsigned long flags; - - if (spin_trylock_irqsave(&rt->release_lock, flags)) { - __release_pending(rt); - spin_unlock_irqrestore(&rt->release_lock, flags); - } -} diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index c60b6ddd49..c988e91e6e 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -109,15 +109,11 @@ DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); #define NO_CPU 0xffffffff -/* The gsnedf_lock is used to serialize all scheduling events. - * It protects - */ -static DEFINE_SPINLOCK(gsnedf_lock); /* the cpus queue themselves according to priority in here */ static LIST_HEAD(gsnedf_cpu_queue); static rt_domain_t gsnedf; - +#define gsnedf_lock (gsnedf.ready_lock) /* update_cpu_position - Move the cpu entry to the correct place to maintain * order in the cpu queue. Caller must hold gsnedf lock. @@ -269,7 +265,7 @@ static noinline void requeue(struct task_struct* task) __add_ready(&gsnedf, task); else { /* it has got to wait */ - __add_release(&gsnedf, task); + add_release(&gsnedf, task); } } else @@ -307,60 +303,37 @@ static noinline void gsnedf_job_arrival(struct task_struct* task) } /* check for current job releases */ -static noinline void gsnedf_release_jobs(void) -{ - struct list_head *pos, *save; - struct task_struct *queued; - lt_t now = litmus_clock(); - - list_for_each_safe(pos, save, &gsnedf.release_queue) { - queued = list_entry(pos, struct task_struct, rt_list); - if (likely(is_released(queued, now))) { - /* this one is ready to go */ - list_del(pos); - set_rt_flags(queued, RT_F_RUNNING); - - sched_trace_job_release(queued); - gsnedf_job_arrival(queued); - } - else - /* the release queue is ordered */ - break; - } -} - -/* handles job releases when a timer expires */ -static enum hrtimer_restart gsnedf_release_job_timer(struct hrtimer *timer) +static void gsnedf_job_release(struct task_struct* t, rt_domain_t* _) { unsigned long flags; spin_lock_irqsave(&gsnedf_lock, flags); - /* Release all pending ready jobs. */ - gsnedf_release_jobs(); + sched_trace_job_release(t); + gsnedf_job_arrival(t); spin_unlock_irqrestore(&gsnedf_lock, flags); - - return HRTIMER_NORESTART; } -/* setup a new job release timer */ -static void gsnedf_setup_release_job_timer(struct task_struct *task) +/* caller holds gsnedf_lock */ +static noinline void job_completion(struct task_struct *t) { - hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - release_timer(task).function = gsnedf_release_job_timer; -#ifdef CONFIG_HIGH_RES_TIMERS - release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART; -#endif - - /* Expiration time of timer is release time of task.
*/ - TRACE_TASK(task, "prog timer, rel=%llu, at %llu\n", - get_release(task), - litmus_clock()); - release_timer(task).expires = ns_to_ktime(get_release(task)); - - hrtimer_start(&release_timer(task), release_timer(task).expires, - HRTIMER_MODE_ABS); + BUG_ON(!t); + + sched_trace_job_completion(t); + + TRACE_TASK(t, "job_completion().\n"); + + /* set flags */ + set_rt_flags(t, RT_F_SLEEP); + /* prepare for next period */ + prepare_for_next_period(t); + /* unlink */ + unlink(t); + /* requeue + * But don't requeue a blocking task. */ + if (is_running(t)) + gsnedf_job_arrival(t); } /* gsnedf_tick - this function is called for every local timer @@ -390,28 +363,6 @@ static void gsnedf_tick(struct task_struct* t) } } -/* caller holds gsnedf_lock */ -static noinline void job_completion(struct task_struct *t) -{ - BUG_ON(!t); - - sched_trace_job_completion(t); - - TRACE_TASK(t, "job_completion().\n"); - - /* set flags */ - set_rt_flags(t, RT_F_SLEEP); - /* prepare for next period */ - prepare_for_next_period(t); - /* unlink */ - unlink(t); - /* requeue - * But don't requeue a blocking task. */ - if (is_running(t)) - gsnedf_job_arrival(t); -} - - /* Getting schedule() right is a bit tricky. schedule() may not make any * assumptions on the state of the current task since it may be called for a * number of reasons. The reasons include a scheduler_tick() determined that it @@ -748,7 +699,7 @@ static int __init init_gsn_edf(void) INIT_LIST_HEAD(&entry->list); } - edf_domain_init(&gsnedf, NULL, gsnedf_setup_release_job_timer); + edf_domain_init(&gsnedf, NULL, gsnedf_job_release); return register_sched_plugin(&gsn_edf_plugin); } diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index d4d01789b0..cc7b09108f 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c @@ -26,9 +26,13 @@ typedef struct { rt_domain_t domain; int cpu; struct task_struct* scheduled; /* only RT tasks */ - spinlock_t lock; /* protects the domain and - * serializes scheduling decisions - */ + +/* scheduling lock + */ +#define slock domain.ready_lock +/* protects the domain and + * serializes scheduling decisions + */ } psnedf_domain_t; DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains); @@ -42,13 +46,12 @@ DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains); static void psnedf_domain_init(psnedf_domain_t* pedf, - check_resched_needed_t check, - release_at_t release, - int cpu) + check_resched_needed_t check, + release_job_t release, + int cpu) { edf_domain_init(&pedf->domain, check, release); pedf->cpu = cpu; - pedf->lock = SPIN_LOCK_UNLOCKED; pedf->scheduled = NULL; } @@ -64,7 +67,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf) if (is_released(t, litmus_clock())) __add_ready(edf, t); else - __add_release(edf, t); /* it has got to wait */ + add_release(edf, t); /* it has got to wait */ } /* we assume the lock is being held */ @@ -100,39 +103,6 @@ static int psnedf_check_resched(rt_domain_t *edf) return ret; } -/* handles job releases when a timer expires */ -static enum hrtimer_restart psnedf_release_job_timer(struct hrtimer *timer) -{ - unsigned long flags; - rt_domain_t *edf = local_edf; - psnedf_domain_t *pedf = local_pedf; - - spin_lock_irqsave(&pedf->lock, flags); - - /* Release all pending ready jobs. 
*/ - __release_pending(edf); - - spin_unlock_irqrestore(&pedf->lock, flags); - - return HRTIMER_NORESTART; -} - -/* setup a new job release timer */ -static void psnedf_setup_release_job_timer(struct task_struct *task) -{ - hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - release_timer(task).function = psnedf_release_job_timer; -#ifdef CONFIG_HIGH_RES_TIMERS - release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART; -#endif - - /* Expiration time of timer is release time of task. */ - release_timer(task).expires = ns_to_ktime(get_release(task)); - - hrtimer_start(&release_timer(task), release_timer(task).expires, - HRTIMER_MODE_ABS); -} - static void psnedf_tick(struct task_struct *t) { psnedf_domain_t *pedf = local_pedf; @@ -171,7 +141,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev) int out_of_time, sleep, preempt, np, exists, blocks, resched; - spin_lock(&pedf->lock); + spin_lock(&pedf->slock); /* sanity checking */ BUG_ON(pedf->scheduled && pedf->scheduled != prev); @@ -234,7 +204,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev) set_rt_flags(next, RT_F_RUNNING); pedf->scheduled = next; - spin_unlock(&pedf->lock); + spin_unlock(&pedf->slock); return next; } @@ -257,7 +227,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running) /* The task should be running in the queue, otherwise signal * code will try to wake it up with fatal consequences. */ - spin_lock_irqsave(&pedf->lock, flags); + spin_lock_irqsave(&pedf->slock, flags); if (running) { /* there shouldn't be anything else running at the time */ BUG_ON(pedf->scheduled); @@ -267,7 +237,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running) /* maybe we have to reschedule */ preempt(pedf); } - spin_unlock_irqrestore(&pedf->lock, flags); + spin_unlock_irqrestore(&pedf->slock, flags); } static void psnedf_task_wake_up(struct task_struct *task) @@ -277,7 +247,7 @@ static void psnedf_task_wake_up(struct task_struct *task) rt_domain_t* edf = task_edf(task); lt_t now; - spin_lock_irqsave(&pedf->lock, flags); + spin_lock_irqsave(&pedf->slock, flags); BUG_ON(in_list(&task->rt_list)); /* We need to take suspensions because of semaphores into * account! 
If a job resumes after being suspended due to acquiring @@ -293,7 +263,7 @@ static void psnedf_task_wake_up(struct task_struct *task) sched_trace_job_release(task); } requeue(task, edf); - spin_unlock_irqrestore(&pedf->lock, flags); + spin_unlock_irqrestore(&pedf->slock, flags); } static void psnedf_task_block(struct task_struct *t) @@ -308,13 +278,13 @@ static void psnedf_task_exit(struct task_struct * t) unsigned long flags; psnedf_domain_t* pedf = task_pedf(t); - spin_lock_irqsave(&pedf->lock, flags); + spin_lock_irqsave(&pedf->slock, flags); if (in_list(&t->rt_list)) /* dequeue */ list_del(&t->rt_list); preempt(pedf); - spin_unlock_irqrestore(&pedf->lock, flags); + spin_unlock_irqrestore(&pedf->slock, flags); } static long psnedf_pi_block(struct pi_semaphore *sem, @@ -333,7 +303,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem, edf = task_edf(new_waiter); /* interrupts already disabled */ - spin_lock(&pedf->lock); + spin_lock(&pedf->slock); /* store new highest-priority task */ sem->hp.cpu_task[cpu] = new_waiter; @@ -357,7 +327,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem, if (edf_preemption_needed(edf, current)) preempt(pedf); - spin_unlock(&pedf->lock); + spin_unlock(&pedf->slock); } return 0; @@ -411,7 +381,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem) TRACE_CUR("return priority of %s/%d\n", current->rt_param.inh_task->comm, current->rt_param.inh_task->pid); - spin_lock(&pedf->lock); + spin_lock(&pedf->slock); /* Reset inh_task to NULL. */ current->rt_param.inh_task = NULL; @@ -420,7 +390,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem) if (edf_preemption_needed(edf, current)) preempt(pedf); - spin_unlock(&pedf->lock); + spin_unlock(&pedf->slock); } else TRACE_CUR(" no priority to return %p\n", sem); @@ -460,7 +430,7 @@ static int __init init_psn_edf(void) { psnedf_domain_init(remote_pedf(i), psnedf_check_resched, - psnedf_setup_release_job_timer, i); + NULL, i); } return register_sched_plugin(&psn_edf_plugin); } -- cgit v1.2.2
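The net effect of the rt_domain rework in the last patch, condensed into one sketch for readability. This is not an additional patch: it only collects the new rt_domain.c code from the hunks above and assumes the existing LITMUS helpers release_timer() and get_release(); the CONFIG_HIGH_RES_TIMERS cb_mode detail is elided.

/* Timer context: in this series the callback is still a stub ("call the
 * current plugin"); plugins instead pass their own release_job callback
 * to rt_domain_init()/edf_domain_init(), e.g. gsnedf_job_release() above. */
static enum hrtimer_restart release_job_timer(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

/* Tasklet context: arm one absolute hrtimer per job waiting for release,
 * holding only the domain's release_lock. */
static void arm_release_timers(unsigned long _rt)
{
	rt_domain_t *rt = (rt_domain_t*) _rt;
	unsigned long flags;
	struct list_head *pos, *safe;
	struct task_struct *t;

	spin_lock_irqsave(&rt->release_lock, flags);
	list_for_each_safe(pos, safe, &rt->release_queue) {
		t = list_entry(pos, struct task_struct, rt_list);
		list_del(pos);
		hrtimer_init(&release_timer(t), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		release_timer(t).function = release_job_timer;
		release_timer(t).expires  = ns_to_ktime(get_release(t));
		hrtimer_start(&release_timer(t), release_timer(t).expires,
			      HRTIMER_MODE_ABS);
	}
	spin_unlock_irqrestore(&rt->release_lock, flags);
}

/* Caller context: __add_release() no longer touches timers itself; it only
 * queues the job and defers the timer work to the tasklet registered by
 * rt_domain_init() via tasklet_init(&rt->release_tasklet, ...). */
void __add_release(rt_domain_t* rt, struct task_struct *task)
{
	list_add(&task->rt_list, &rt->release_queue);
	tasklet_hi_schedule(&rt->release_tasklet);
}

This is where the circular-locking problem of the old code is addressed: timers are armed from a tasklet that takes only release_lock, and the ready queue is now guarded by a plain spinlock (ready_lock) that the plugins reuse directly as their scheduler lock (gsnedf_lock and slock in the hunks above).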