From fc6482bb7a6a638474565c90159997bd59069297 Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Fri, 28 Jan 2011 17:30:14 -0500
Subject: FMLP: remove old implementation

---
 include/litmus/edf_common.h |    2 -
 litmus/Makefile             |    1 -
 litmus/fdso.c               |    3 +-
 litmus/fmlp.c               |  214 --------------------------------------------
 litmus/sched_gsn_edf.c      |  155 --------------------------
 litmus/sched_psn_edf.c      |  126 --------------------------
 6 files changed, 1 insertion(+), 500 deletions(-)
 delete mode 100644 litmus/fmlp.c

diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index 80d4321cc87e..bbaf22ea7f12 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -22,6 +22,4 @@ int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
 
 int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
 
-int edf_set_hp_task(struct pi_semaphore *sem);
-int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu);
 #endif
diff --git a/litmus/Makefile b/litmus/Makefile
index 4e019d4a6e0c..62a20e266eeb 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -13,7 +13,6 @@ obj-y     = sched_plugin.o litmus.o \
 	   fdso.o \
 	   locking.o \
 	   srp.o \
-	   fmlp.o \
 	   bheap.o \
 	   ctrldev.o \
 	   sched_gsn_edf.o \
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 209431f3ce11..b3a95f13d651 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -18,11 +18,10 @@
 
 #include 
 
-extern struct fdso_ops fmlp_sem_ops;
 extern struct fdso_ops generic_lock_ops;
 
 static const struct fdso_ops* fdso_ops[] = {
-	&fmlp_sem_ops,
+	&generic_lock_ops, /* FMLP_SEM */
 	&generic_lock_ops, /* SRP_SEM */
 };
 
diff --git a/litmus/fmlp.c b/litmus/fmlp.c
deleted file mode 100644
index 6e3ddadbc429..000000000000
--- a/litmus/fmlp.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * FMLP implementation.
- * Much of the code here is borrowed from include/asm-i386/semaphore.h
- */
-
-#include 
-
-#include 
-#include 
-#include 
-#include 
-
-#include 
-#include 
-#include 
-
-#include 
-
-#include 
-
-#ifdef CONFIG_FMLP
-
-static void* create_fmlp_semaphore(obj_type_t type)
-{
-	struct pi_semaphore* sem;
-	int i;
-
-	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
-	if (!sem)
-		return NULL;
-	atomic_set(&sem->count, 1);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-	sem->hp.task = NULL;
-	sem->holder = NULL;
-	for (i = 0; i < NR_CPUS; i++)
-		sem->hp.cpu_task[i] = NULL;
-	return sem;
-}
-
-static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg)
-{
-	if (!fmlp_active())
-		return -EBUSY;
-	return 0;
-}
-
-static void destroy_fmlp_semaphore(obj_type_t type, void* sem)
-{
-	/* XXX assert invariants */
-	kfree(sem);
-}
-
-struct fdso_ops fmlp_sem_ops = {
-	.create = create_fmlp_semaphore,
-	.open = open_fmlp_semaphore,
-	.destroy = destroy_fmlp_semaphore
-};
-
-struct wq_pair {
-	struct task_struct* tsk;
-	struct pi_semaphore* sem;
-};
-
-static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
-			 void *key)
-{
-	struct wq_pair* wqp = (struct wq_pair*) wait->private;
-	set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
-	litmus->inherit_priority(wqp->sem, wqp->tsk);
-	TRACE_TASK(wqp->tsk,
-		   "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
-	/* point to task for default_wake_function() */
-	wait->private = wqp->tsk;
-	default_wake_function(wait, mode, sync, key);
-
-	/* Always return true since we know that if we encountered a task
-	 * that was already running the wake_up raced with the schedule in
-	 * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled
-	 * immediately and own the lock. We must not wake up another task in
-	 * any case.
-	 */
-	return 1;
-}
-
-/* caller is responsible for locking */
-int edf_set_hp_task(struct pi_semaphore *sem)
-{
-	struct list_head *tmp, *next;
-	struct task_struct *queued;
-	int ret = 0;
-
-	sem->hp.task = NULL;
-	list_for_each_safe(tmp, next, &sem->wait.task_list) {
-		queued = ((struct wq_pair*)
-			list_entry(tmp, wait_queue_t,
-				   task_list)->private)->tsk;
-
-		/* Compare task prios, find high prio task. */
-		if (edf_higher_prio(queued, sem->hp.task)) {
-			sem->hp.task = queued;
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-/* caller is responsible for locking */
-int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu)
-{
-	struct list_head *tmp, *next;
-	struct task_struct *queued;
-	int ret = 0;
-
-	sem->hp.cpu_task[cpu] = NULL;
-	list_for_each_safe(tmp, next, &sem->wait.task_list) {
-		queued = ((struct wq_pair*)
-			list_entry(tmp, wait_queue_t,
-				   task_list)->private)->tsk;
-
-		/* Compare task prios, find high prio task. */
-		if (get_partition(queued) == cpu &&
-		    edf_higher_prio(queued, sem->hp.cpu_task[cpu])) {
-			sem->hp.cpu_task[cpu] = queued;
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-static int do_fmlp_down(struct pi_semaphore* sem)
-{
-	unsigned long flags;
-	struct task_struct *tsk = current;
-	struct wq_pair pair;
-	int suspended = 1;
-	wait_queue_t wait = {
-		.private = &pair,
-		.func = rt_pi_wake_up,
-		.task_list = {NULL, NULL}
-	};
-
-	pair.tsk = tsk;
-	pair.sem = sem;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-
-	if (atomic_dec_return(&sem->count) < 0 ||
-	    waitqueue_active(&sem->wait)) {
-		/* we need to suspend */
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
-
-		TRACE_CUR("suspends on PI lock %p\n", sem);
-		litmus->pi_block(sem, tsk);
-
-		/* release lock before sleeping */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		TS_PI_DOWN_END;
-		preempt_enable_no_resched();
-
-
-		/* we depend on the FIFO order
-		 * Thus, we don't need to recheck when we wake up, we
-		 * are guaranteed to have the lock since there is only one
-		 * wake up per release
-		 */
-		schedule();
-
-		TRACE_CUR("woke up, now owns PI lock %p\n", sem);
-
-		/* try_to_wake_up() set our state to TASK_RUNNING,
-		 * all we need to do is to remove our wait queue entry
-		 */
-		remove_wait_queue(&sem->wait, &wait);
-	} else {
-		/* no priority inheritance necessary, since there are no queued
-		 * tasks.
-		 */
-		suspended = 0;
-		TRACE_CUR("acquired PI lock %p, no contention\n", sem);
-		sem->holder = tsk;
-
-		/* don't know if we're global or partitioned. */
-		sem->hp.task = tsk;
-		sem->hp.cpu_task[get_partition(tsk)] = tsk;
-
-		litmus->inherit_priority(sem, tsk);
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-	}
-	return suspended;
-}
-
-static void do_fmlp_up(struct pi_semaphore* sem)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-
-	TRACE_CUR("releases PI lock %p\n", sem);
-	litmus->return_priority(sem);
-	sem->holder = NULL;
-	if (atomic_inc_return(&sem->count) < 1)
-		/* there is a task queued */
-		wake_up_locked(&sem->wait);
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-}
-
-#else
-
-struct fdso_ops fmlp_sem_ops = {};
-
-#endif
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 4ad95dba4a04..5de0980e3faa 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -594,161 +594,6 @@ static void gsnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP\n");
 }
 
-#if 0
-
-/* Update the queue position of a task that got it's priority boosted via
- * priority inheritance. */
-static void update_queue_position(struct task_struct *holder)
-{
-	/* We don't know whether holder is in the ready queue. It should, but
-	 * on a budget overrun it may already be in a release queue. Hence,
-	 * calling unlink() is not possible since it assumes that the task is
-	 * not in a release queue. However, we can safely check whether
-	 * sem->holder is currently in a queue or scheduled after locking both
-	 * the release and the ready queue lock. */
-
-	/* Assumption: caller holds gsnedf_lock */
-
-	int check_preempt = 0;
-
-	if (tsk_rt(holder)->linked_on != NO_CPU) {
-		TRACE_TASK(holder, "%s: linked on %d\n",
-			   __FUNCTION__, tsk_rt(holder)->linked_on);
-		/* Holder is scheduled; need to re-order CPUs.
-		 * We can't use heap_decrease() here since
-		 * the cpu_heap is ordered in reverse direction, so
-		 * it is actually an increase. */
-		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
-			     gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
-		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
-			     gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
-	} else {
-		/* holder may be queued: first stop queue changes */
-		raw_spin_lock(&gsnedf.release_lock);
-		if (is_queued(holder)) {
-			TRACE_TASK(holder, "%s: is queued\n",
-				   __FUNCTION__);
-			/* We need to update the position
-			 * of holder in some heap. Note that this
-			 * may be a release heap. */
-			check_preempt =
-				!bheap_decrease(edf_ready_order,
-						tsk_rt(holder)->heap_node);
-		} else {
-			/* Nothing to do: if it is not queued and not linked
-			 * then it is currently being moved by other code
-			 * (e.g., a timer interrupt handler) that will use the
-			 * correct priority when enqueuing the task. */
-			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
-				   __FUNCTION__);
-		}
-		raw_spin_unlock(&gsnedf.release_lock);
-
-		/* If holder was enqueued in a release heap, then the following
-		 * preemption check is pointless, but we can't easily detect
-		 * that case. If you want to fix this, then consider that
-		 * simply adding a state flag requires O(n) time to update when
-		 * releasing n tasks, which conflicts with the goal to have
-		 * O(log n) merges. */
-		if (check_preempt) {
-			/* heap_decrease() hit the top level of the heap: make
-			 * sure preemption checks get the right task, not the
-			 * potentially stale cache. */
-			bheap_uncache_min(edf_ready_order,
-					  &gsnedf.ready_queue);
-			check_for_preemptions();
-		}
-	}
-}
-
-static long gsnedf_pi_block(struct pi_semaphore *sem,
-			    struct task_struct *new_waiter)
-{
-	/* This callback has to handle the situation where a new waiter is
-	 * added to the wait queue of the semaphore.
-	 *
-	 * We must check if has a higher priority than the currently
-	 * highest-priority task, and then potentially reschedule.
-	 */
-
-	BUG_ON(!new_waiter);
-
-	if (edf_higher_prio(new_waiter, sem->hp.task)) {
-		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
-		/* called with IRQs disabled */
-		raw_spin_lock(&gsnedf_lock);
-		/* store new highest-priority task */
-		sem->hp.task = new_waiter;
-		if (sem->holder) {
-			TRACE_TASK(sem->holder,
-				   " holds %p and will inherit from %s/%d\n",
-				   sem,
-				   new_waiter->comm, new_waiter->pid);
-			/* let holder inherit */
-			sem->holder->rt_param.inh_task = new_waiter;
-			update_queue_position(sem->holder);
-		}
-		raw_spin_unlock(&gsnedf_lock);
-	}
-
-	return 0;
-}
-
-static long gsnedf_inherit_priority(struct pi_semaphore *sem,
-				    struct task_struct *new_owner)
-{
-	/* We don't need to acquire the gsnedf_lock since at the time of this
-	 * call new_owner isn't actually scheduled yet (it's still sleeping)
-	 * and since the calling function already holds sem->wait.lock, which
-	 * prevents concurrent sem->hp.task changes.
-	 */
-
-	if (sem->hp.task && sem->hp.task != new_owner) {
-		new_owner->rt_param.inh_task = sem->hp.task;
-		TRACE_TASK(new_owner, "inherited priority from %s/%d\n",
-			   sem->hp.task->comm, sem->hp.task->pid);
-	} else
-		TRACE_TASK(new_owner,
-			   "cannot inherit priority, "
-			   "no higher priority job waits.\n");
-	return 0;
-}
-
-/* This function is called on a semaphore release, and assumes that
- * the current task is also the semaphore holder.
- */
-static long gsnedf_return_priority(struct pi_semaphore *sem)
-{
-	struct task_struct* t = current;
-	int ret = 0;
-
-	/* Find new highest-priority semaphore task
-	 * if holder task is the current hp.task.
-	 *
-	 * Calling function holds sem->wait.lock.
-	 */
-	if (t == sem->hp.task)
-		edf_set_hp_task(sem);
-
-	TRACE_CUR("gsnedf_return_priority for lock %p\n", sem);
-
-	if (t->rt_param.inh_task) {
-		/* interrupts already disabled by PI code */
-		raw_spin_lock(&gsnedf_lock);
-
-		/* Reset inh_task to NULL. */
-		t->rt_param.inh_task = NULL;
-
-		/* Check if rescheduling is necessary */
-		unlink(t);
-		gsnedf_job_arrival(t);
-		raw_spin_unlock(&gsnedf_lock);
-	}
-
-	return ret;
-}
-
-#endif
 
 static long gsnedf_admit_task(struct task_struct* tsk)
 {
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index c1e27960576b..fc64c1722ae9 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -309,132 +309,6 @@ static void psnedf_task_exit(struct task_struct * t)
 	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
-#if 0
-static long psnedf_pi_block(struct pi_semaphore *sem,
-			    struct task_struct *new_waiter)
-{
-	psnedf_domain_t* pedf;
-	rt_domain_t* edf;
-	struct task_struct* t;
-	int cpu = get_partition(new_waiter);
-
-	BUG_ON(!new_waiter);
-
-	if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) {
-		TRACE_TASK(new_waiter, " boosts priority\n");
-		pedf = task_pedf(new_waiter);
-		edf = task_edf(new_waiter);
-
-		/* interrupts already disabled */
-		raw_spin_lock(&pedf->slock);
-
-		/* store new highest-priority task */
-		sem->hp.cpu_task[cpu] = new_waiter;
-		if (sem->holder &&
-		    get_partition(sem->holder) == get_partition(new_waiter)) {
-			/* let holder inherit */
-			sem->holder->rt_param.inh_task = new_waiter;
-			t = sem->holder;
-			if (is_queued(t)) {
-				/* queued in domain*/
-				remove(edf, t);
-				/* readd to make priority change take place */
-				/* FIXME: this looks outdated */
-				if (is_released(t, litmus_clock()))
-					__add_ready(edf, t);
-				else
-					add_release(edf, t);
-			}
-		}
-
-		/* check if we need to reschedule */
-		if (edf_preemption_needed(edf, current))
-			preempt(pedf);
-
-		raw_spin_unlock(&pedf->slock);
-	}
-
-	return 0;
-}
-
-static long psnedf_inherit_priority(struct pi_semaphore *sem,
-				    struct task_struct *new_owner)
-{
-	int cpu = get_partition(new_owner);
-
-	new_owner->rt_param.inh_task = sem->hp.cpu_task[cpu];
-	if (sem->hp.cpu_task[cpu] && new_owner != sem->hp.cpu_task[cpu]) {
-		TRACE_TASK(new_owner,
-			   "inherited priority from %s/%d\n",
-			   sem->hp.cpu_task[cpu]->comm,
-			   sem->hp.cpu_task[cpu]->pid);
-	} else
-		TRACE_TASK(new_owner,
-			   "cannot inherit priority: "
-			   "no higher priority job waits on this CPU!\n");
-	/* make new owner non-preemptable as required by FMLP under
-	 * PSN-EDF.
-	 */
-	make_np(new_owner);
-	return 0;
-}
-
-
-/* This function is called on a semaphore release, and assumes that
- * the current task is also the semaphore holder.
- */
-static long psnedf_return_priority(struct pi_semaphore *sem)
-{
-	struct task_struct* t = current;
-	psnedf_domain_t* pedf = task_pedf(t);
-	rt_domain_t* edf = task_edf(t);
-	int ret = 0;
-	int cpu = get_partition(current);
-	int still_np;
-
-
-	/* Find new highest-priority semaphore task
-	 * if holder task is the current hp.cpu_task[cpu].
-	 *
-	 * Calling function holds sem->wait.lock.
-	 */
-	if (t == sem->hp.cpu_task[cpu])
-		edf_set_hp_cpu_task(sem, cpu);
-
-	still_np = take_np(current);
-
-	/* Since we don't nest resources, this
-	 * should always be zero */
-	BUG_ON(still_np);
-
-	if (current->rt_param.inh_task) {
-		TRACE_CUR("return priority of %s/%d\n",
-			  current->rt_param.inh_task->comm,
-			  current->rt_param.inh_task->pid);
-	} else
-		TRACE_CUR(" no priority to return %p\n", sem);
-
-
-	/* Always check for delayed preemptions that might have become
-	 * necessary due to non-preemptive execution.
-	 */
-	raw_spin_lock(&pedf->slock);
-
-	/* Reset inh_task to NULL. */
-	current->rt_param.inh_task = NULL;
-
-	/* check if we need to reschedule */
-	if (edf_preemption_needed(edf, current))
-		preempt(pedf);
-
-	raw_spin_unlock(&pedf->slock);
-
-
-	return ret;
-}
-
-#endif
-
 #ifdef CONFIG_LITMUS_LOCKING
 
 #include 
-- 
cgit v1.2.2