From 8973214f010cf55fbf18cb88471d6c99ed6ff575 Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Thu, 22 Mar 2012 14:45:39 -0400
Subject: Introduction of basic nesting foundations.

---
 include/litmus/locking.h  |  6 ++++++
 include/litmus/rt_param.h | 12 +++++++++++-
 litmus/Kconfig            |  7 +++++++
 litmus/edf_common.c       | 10 +++++-----
 litmus/litmus.c           | 11 +++++++++--
 litmus/locking.c          |  4 +++-
 litmus/sched_gsn_edf.c    | 22 +++++++++++++---------
 7 files changed, 54 insertions(+), 18 deletions(-)

diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4d7b870cb443..27eafd002556 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -1,6 +1,8 @@
 #ifndef LITMUS_LOCKING_H
 #define LITMUS_LOCKING_H
 
+#include <linux/list.h>
+
 struct litmus_lock_ops;
 
 /* Generic base struct for LITMUS^RT userspace semaphores.
@@ -9,6 +11,10 @@ struct litmus_lock_ops;
 struct litmus_lock {
 	struct litmus_lock_ops *ops;
 	int type;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	struct list_head lock_chain;
+#endif
 };
 
 struct litmus_lock_ops {
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index d6d799174160..3a054db8ee07 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -133,7 +133,17 @@
 	 * could point to self if PI does not result in
 	 * an increased task priority.
 	 */
-	struct task_struct*	inh_task;
+	struct task_struct*	eff_prio;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	struct task_struct*	local_prio;
+	struct task_struct*	trans_prio;
+
+	/* pointer to the last lock acquired */
+	struct litmus_lock*	last_lock;
+#endif
+
+
 
 #ifdef CONFIG_NP_SECTION
 	/* For the FMLP under PSN-EDF, it is required to make the task
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 94b48e199577..841a7e4e9723 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -60,6 +60,13 @@ config LITMUS_LOCKING
 	  Say Yes if you want to include locking protocols such as the FMLP and
 	  Baker's SRP.
 
+config LITMUS_NESTED_LOCKING
+	bool "Support for nested inheritance in locking protocols"
+	depends on LITMUS_LOCKING
+	default n
+	help
+	  Enable nested priority inheritance.
+
 endmenu
 
 menu "Performance Enhancements"
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 9b44dc2d8d1e..54cfada586be 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -42,10 +42,10 @@ int edf_higher_prio(struct task_struct* first,
 	/* Check for inherited priorities. Change task
 	 * used for comparison in such a case.
 	 */
-	if (unlikely(first->rt_param.inh_task))
-		first_task = first->rt_param.inh_task;
-	if (unlikely(second->rt_param.inh_task))
-		second_task = second->rt_param.inh_task;
+	if (unlikely(first->rt_param.eff_prio))
+		first_task = first->rt_param.eff_prio;
+	if (unlikely(second->rt_param.eff_prio))
+		second_task = second->rt_param.eff_prio;
 
 	/* Check for priority boosting. Tie-break by start of boosting.
 	 */
@@ -81,7 +81,7 @@ int edf_higher_prio(struct task_struct* first,
 	 * priority wins.
 	 */
 	(first_task->pid == second_task->pid &&
-	 !second->rt_param.inh_task)));
+	 !second->rt_param.eff_prio)));
 }
 
 int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 301390148d02..40340dfa9d67 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -302,7 +302,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	/* We probably should not be inheriting any task's priority
 	 * at this point in time.
 	 */
-	WARN_ON(p->rt_param.inh_task);
+	WARN_ON(p->rt_param.eff_prio);
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	WARN_ON(p->rt_param.local_prio);
+	WARN_ON(p->rt_param.trans_prio);
+#endif
 
 	/* Cleanup everything else. */
 	memset(&p->rt_param, 0, sizeof(p->rt_param));
@@ -468,7 +473,9 @@ void litmus_exec(void)
 	struct task_struct* p = current;
 
 	if (is_realtime(p)) {
-		WARN_ON(p->rt_param.inh_task);
+		WARN_ON(p->rt_param.eff_prio);
+		WARN_ON(p->rt_param.local_prio);
+		WARN_ON(p->rt_param.trans_prio);
 		if (tsk_rt(p)->ctrl_page) {
 			free_page((unsigned long) tsk_rt(p)->ctrl_page);
 			tsk_rt(p)->ctrl_page = NULL;
diff --git a/litmus/locking.c b/litmus/locking.c
index 0c1aa6aa40b7..5897beb941cf 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -34,8 +34,10 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
 	struct litmus_lock* lock;
 	int err;
 
 	err = litmus->allocate_lock(&lock, type, arg);
-	if (err == 0)
+	if (err == 0) {
+		INIT_LIST_HEAD(&lock->lock_chain);
 		*obj_ref = lock;
+	}
 	return err;
 }
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6ed504f4750e..677a932e08be 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -645,7 +645,7 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 #include <litmus/fdso.h>
 
 /* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	int linked_on;
 	int check_preempt = 0;
@@ -653,7 +653,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 	raw_spin_lock(&gsnedf_lock);
 
 	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
-	tsk_rt(t)->inh_task = prio_inh;
+	tsk_rt(t)->eff_prio = prio_inh;
 
 	linked_on = tsk_rt(t)->linked_on;
 
@@ -712,7 +712,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 }
 
 /* called with IRQs off */
-static void clear_priority_inheritance(struct task_struct* t)
+static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	raw_spin_lock(&gsnedf_lock);
 
@@ -720,8 +720,12 @@ static void clear_priority_inheritance(struct task_struct* t)
 	 * resource. Thus we can make the following assumption.*/
 	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
 
-	TRACE_TASK(t, "priority restored\n");
-	tsk_rt(t)->inh_task = NULL;
+	if(prio_inh)
+		TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid);
+	else
+		TRACE_TASK(t, "base priority restored.\n");
+
+	tsk_rt(t)->eff_prio = prio_inh;
 
 	/* Check if rescheduling is necessary. We can't use heap_decrease()
 	 * since the priority was effectively lowered. */
@@ -797,7 +801,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		if (edf_higher_prio(t, sem->hp_waiter)) {
 			sem->hp_waiter = t;
 			if (edf_higher_prio(t, sem->owner))
-				set_priority_inheritance(sem->owner, sem->hp_waiter);
+				increase_priority_inheritance(sem->owner, sem->hp_waiter);
 		}
 
 		TS_LOCK_SUSPEND;
@@ -865,7 +869,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 			/* Well, if next is not the highest-priority waiter,
 			 * then it ought to inherit the highest-priority
 			 * waiter's priority. */
-			set_priority_inheritance(next, sem->hp_waiter);
+			increase_priority_inheritance(next, sem->hp_waiter);
 		}
 
 		/* wake up next */
@@ -875,8 +879,8 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 		sem->owner = NULL;
 
 	/* we lose the benefit of priority inheritance (if any) */
-	if (tsk_rt(t)->inh_task)
-		clear_priority_inheritance(t);
+	if (tsk_rt(t)->eff_prio)
+		decrease_priority_inheritance(t, NULL);
 
 out:
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
-- 
cgit v1.2.2