From fab768a4cdc49ad7886cac0d0361f8432965a817 Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Sat, 29 Jan 2011 13:38:24 -0500
Subject: GSN-EDF: re-implement FMLP support

This introduces the global FMLP based on the generic locking layer.
---
 litmus/sched_gsn_edf.c | 322 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 322 insertions(+)

diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 5de0980e3faa..c525d43eb051 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -446,6 +447,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (entry->linked) {
 		entry->linked->rt_param.scheduled_on = entry->cpu;
 		next = entry->linked;
+		TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
 	}
 	if (entry->scheduled) {
 		/* not gonna be scheduled soon */
@@ -600,6 +602,323 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 	return 0;
 }

+#ifdef CONFIG_LITMUS_LOCKING
+
+#include
+
+/* called with IRQs off */
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	int linked_on;
+	int check_preempt = 0;
+
+	raw_spin_lock(&gsnedf_lock);
+
+	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	tsk_rt(t)->inh_task = prio_inh;
+
+	linked_on = tsk_rt(t)->linked_on;
+
+	/* If it is scheduled, then we need to reorder the CPU heap. */
+	if (linked_on != NO_CPU) {
+		TRACE_TASK(t, "%s: linked on %d\n",
+			   __FUNCTION__, linked_on);
+		/* Holder is scheduled; need to re-order CPUs.
+		 * We can't use heap_decrease() here since
+		 * the cpu_heap is ordered in reverse direction, so
+		 * it is actually an increase. */
+		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
+			     gsnedf_cpus[linked_on]->hn);
+		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
+			     gsnedf_cpus[linked_on]->hn);
+	} else {
+		/* holder may be queued: first stop queue changes */
+		raw_spin_lock(&gsnedf.release_lock);
+		if (is_queued(t)) {
+			TRACE_TASK(t, "%s: is queued\n",
+				   __FUNCTION__);
+			/* We need to update the position of holder in some
+			 * heap. Note that this could be a release heap if
+			 * budget enforcement is used and this job overran. */
+			check_preempt =
+				!bheap_decrease(edf_ready_order,
+						tsk_rt(t)->heap_node);
+		} else {
+			/* Nothing to do: if it is not queued and not linked
+			 * then it is either sleeping or currently being moved
+			 * by other code (e.g., a timer interrupt handler) that
+			 * will use the correct priority when enqueuing the
+			 * task. */
+			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
+				   __FUNCTION__);
+		}
+		raw_spin_unlock(&gsnedf.release_lock);
+
+		/* If holder was enqueued in a release heap, then the following
+		 * preemption check is pointless, but we can't easily detect
+		 * that case. If you want to fix this, then consider that
+		 * simply adding a state flag requires O(n) time to update when
+		 * releasing n tasks, which conflicts with the goal to have
+		 * O(log n) merges. */
+		if (check_preempt) {
+			/* heap_decrease() hit the top level of the heap: make
+			 * sure preemption checks get the right task, not the
+			 * potentially stale cache. */
+			bheap_uncache_min(edf_ready_order,
+					  &gsnedf.ready_queue);
+			check_for_preemptions();
+		}
+	}
+
+	raw_spin_unlock(&gsnedf_lock);
+}
+
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+	raw_spin_lock(&gsnedf_lock);
+
+	/* A job only stops inheriting a priority when it releases a
+	 * resource.
+	 * Thus we can make the following assumption. */
+	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
+
+	TRACE_TASK(t, "priority restored\n");
+	tsk_rt(t)->inh_task = NULL;
+
+	/* Check if rescheduling is necessary. We can't use heap_decrease()
+	 * since the priority was effectively lowered. */
+	unlink(t);
+	gsnedf_job_arrival(t);
+
+	raw_spin_unlock(&gsnedf_lock);
+}
+
+
+/* ******************** FMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct fmlp_semaphore {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks */
+	wait_queue_head_t wait;
+};
+
+static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct fmlp_semaphore, litmus_lock);
+}
+
+/* caller is responsible for locking */
+struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
+				   struct task_struct* skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	list_for_each(pos, &sem->wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+	return found;
+}
+
+int gsnedf_fmlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_waitqueue_entry(&wait, t);
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, sem->hp_waiter)) {
+			sem->hp_waiter = t;
+			if (edf_higher_prio(t, sem->owner))
+				set_priority_inheritance(sem->owner, sem->hp_waiter);
+		}
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		/* We depend on the FIFO order. Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake up per release.
+		 */
+
+		schedule();
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(sem->owner != t);
+
+		remove_wait_queue(&sem->wait, &wait);
+	} else {
+		/* it's ours now */
+		sem->owner = t;
+
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+	}
+
+	return 0;
+}
+
+int gsnedf_fmlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* check if there are jobs waiting for this resource */
+	next = waitqueue_first(&sem->wait);
+	if (next) {
+		/* next becomes the resource holder */
+		sem->owner = next;
+		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+
+		/* determine new hp_waiter if necessary */
+		if (next == sem->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit.
+			 * However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			sem->hp_waiter = find_hp_waiter(sem, next);
+			if (sem->hp_waiter)
+				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			set_priority_inheritance(next, sem->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	} else
+		/* becomes available */
+		sem->owner = NULL;
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->inh_task)
+		clear_priority_inheritance(t);
+
+out:
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	return err;
+}
+
+int gsnedf_fmlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	owner = sem->owner == t;
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	if (owner)
+		gsnedf_fmlp_unlock(l);
+
+	return 0;
+}
+
+void gsnedf_fmlp_free(struct litmus_lock* lock)
+{
+	kfree(fmlp_from_lock(lock));
+}
+
+static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
+	.close = gsnedf_fmlp_close,
+	.lock = gsnedf_fmlp_lock,
+	.unlock = gsnedf_fmlp_unlock,
+	.deallocate = gsnedf_fmlp_free,
+};
+
+static struct litmus_lock* gsnedf_new_fmlp(void)
+{
+	struct fmlp_semaphore* sem;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	sem->owner = NULL;
+	sem->hp_waiter = NULL;
+	init_waitqueue_head(&sem->wait);
+	sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops;
+
+	return &sem->litmus_lock;
+}
+
+/* **** lock constructor **** */
+
+
+static long gsnedf_allocate_lock(struct litmus_lock **lock, int type)
+{
+	int err = -ENXIO;
+
+	/* GSN-EDF currently only supports the FMLP for global resources. */
+	switch (type) {
+
+	case FMLP_SEM:
+		/* Flexible Multiprocessor Locking Protocol */
+		*lock = gsnedf_new_fmlp();
+		if (*lock)
+			err = 0;
+		else
+			err = -ENOMEM;
+		break;
+
+	};
+
+	return err;
+}
+
+#endif
+
+
 static long gsnedf_activate_plugin(void)
 {
 	int cpu;
@@ -642,6 +961,9 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 	.task_block = gsnedf_task_block,
 	.admit_task = gsnedf_admit_task,
 	.activate_plugin = gsnedf_activate_plugin,
+#ifdef CONFIG_LITMUS_LOCKING
+	.allocate_lock = gsnedf_allocate_lock,
+#endif
 };
-- 
cgit v1.2.2
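
Addendum (not part of the patch): the sketch below shows how a real-time task might reach the new code paths from user space. It assumes the liblitmus API of the same era (init_litmus(), open_fmlp_sem(), litmus_lock(), litmus_unlock(), od_close()); these names, their signatures, and the lock namespace file used here are assumptions and should be checked against the liblitmus headers that match this kernel. Conceptually, litmus_lock() and litmus_unlock() dispatch through the generic locking layer to gsnedf_fmlp_lock() and gsnedf_fmlp_unlock(), and the semaphore itself is created by gsnedf_allocate_lock() with type FMLP_SEM.

/* Hypothetical user-space usage sketch; all liblitmus calls below are
 * assumptions based on the contemporaneous API and must be verified
 * against the liblitmus version that matches this kernel. */
#include <fcntl.h>
#include <unistd.h>
#include <litmus.h>	/* assumed liblitmus header */

int main(void)
{
	int fd, od, ret;

	/* All tasks sharing the resource open the same namespace file and
	 * use the same resource id, so they obtain the same lock object. */
	fd = open(".fmlp_locks", O_RDONLY | O_CREAT, 0600);
	if (fd < 0)
		return 1;

	init_litmus();
	/* ...set real-time parameters and switch the task to real-time
	 * mode here (omitted for brevity)... */

	/* Create/attach an FMLP semaphore; in the kernel this ends up in
	 * gsnedf_allocate_lock() with type == FMLP_SEM. */
	od = open_fmlp_sem(fd, 0);
	if (od < 0)
		return 1;

	ret = litmus_lock(od);		/* blocks in gsnedf_fmlp_lock() if held */
	/* ...critical section: access the shared resource... */
	ret = litmus_unlock(od);	/* wakes the next FIFO waiter, if any */

	od_close(od);
	close(fd);
	return ret;
}

The example is only meant to make the control flow concrete: FIFO queuing and priority inheritance happen entirely inside the plugin code added by this patch; user space sees an ordinary lock/unlock pair.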