author    Glenn Elliott <gelliott@cs.unc.edu>    2012-03-22 14:45:39 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>    2012-03-22 14:45:39 -0400
commit    8973214f010cf55fbf18cb88471d6c99ed6ff575
tree      80fe28857305bb6cfaaa130206967282759511e7
parent    6a00f206debf8a5c8899055726ad127dbeeed098

Introduction of basic nesting foundations.
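
The sketch below is not part of this patch; it only illustrates, under stated
assumptions, how the new per-task fields might eventually be combined once
nested inheritance is built on these foundations. The helper name
effective_priority() and the "pick the higher of the local and transitive
donors" rule are assumptions, not code from this commit.

	/* Hypothetical helper (not in this commit): derive a task's effective
	 * priority from its two donor pointers.  local_prio is assumed to be a
	 * donor blocked on a lock t holds directly, trans_prio a donor
	 * inherited transitively through nested locks.
	 */
	static struct task_struct* effective_priority(struct task_struct* t)
	{
		struct task_struct* local = tsk_rt(t)->local_prio;
		struct task_struct* trans = tsk_rt(t)->trans_prio;

		if (local && trans)
			return edf_higher_prio(local, trans) ? local : trans;

		/* may return NULL: the task then runs at its base priority */
		return local ? local : trans;
	}

The result would presumably be stored in eff_prio, which replaces the old
inh_task pointer throughout the plugin code below.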
 include/litmus/locking.h  |  6
 include/litmus/rt_param.h | 12
 litmus/Kconfig            |  7
 litmus/edf_common.c       | 10
 litmus/litmus.c           | 11
 litmus/locking.c          |  4
 litmus/sched_gsn_edf.c    | 22
 7 files changed, 54 insertions(+), 18 deletions(-)
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4d7b870cb443..27eafd002556 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -1,6 +1,8 @@
 #ifndef LITMUS_LOCKING_H
 #define LITMUS_LOCKING_H
 
+#include <linux/list.h>
+
 struct litmus_lock_ops;
 
 /* Generic base struct for LITMUS^RT userspace semaphores.
@@ -9,6 +11,10 @@ struct litmus_lock_ops;
 struct litmus_lock {
 	struct litmus_lock_ops *ops;
 	int type;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	struct list_head lock_chain;
+#endif
 };
 
 struct litmus_lock_ops {
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index d6d799174160..3a054db8ee07 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -133,7 +133,17 @@ struct rt_param {
 	 * could point to self if PI does not result in
 	 * an increased task priority.
 	 */
-	struct task_struct* inh_task;
+	struct task_struct* eff_prio;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	struct task_struct* local_prio;
+	struct task_struct* trans_prio;
+
+	/* pointer to the last lock acquired */
+	struct litmus_lock* last_lock;
+#endif
+
+
 
 #ifdef CONFIG_NP_SECTION
 	/* For the FMLP under PSN-EDF, it is required to make the task
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 94b48e199577..841a7e4e9723 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -60,6 +60,13 @@ config LITMUS_LOCKING
 	  Say Yes if you want to include locking protocols such as the FMLP and
 	  Baker's SRP.
 
+config LITMUS_NESTED_LOCKING
+	bool "Support for nested inheritance in locking protocols"
+	depends on LITMUS_LOCKING
+	default n
+	help
+	  Enable nested priority inheritance.
+
 endmenu
 
 menu "Performance Enhancements"
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 9b44dc2d8d1e..54cfada586be 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -42,10 +42,10 @@ int edf_higher_prio(struct task_struct* first,
 	/* Check for inherited priorities. Change task
 	 * used for comparison in such a case.
 	 */
-	if (unlikely(first->rt_param.inh_task))
-		first_task = first->rt_param.inh_task;
-	if (unlikely(second->rt_param.inh_task))
-		second_task = second->rt_param.inh_task;
+	if (unlikely(first->rt_param.eff_prio))
+		first_task = first->rt_param.eff_prio;
+	if (unlikely(second->rt_param.eff_prio))
+		second_task = second->rt_param.eff_prio;
 
 	/* Check for priority boosting. Tie-break by start of boosting.
 	 */
@@ -81,7 +81,7 @@ int edf_higher_prio(struct task_struct* first,
 	 * priority wins.
 	 */
 	 (first_task->pid == second_task->pid &&
-	  !second->rt_param.inh_task)));
+	  !second->rt_param.eff_prio)));
 }
 
 int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 301390148d02..40340dfa9d67 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -302,7 +302,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	/* We probably should not be inheriting any task's priority
 	 * at this point in time.
 	 */
-	WARN_ON(p->rt_param.inh_task);
+	WARN_ON(p->rt_param.eff_prio);
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	WARN_ON(p->rt_param.local_prio);
+	WARN_ON(p->rt_param.trans_prio);
+#endif
 
 	/* Cleanup everything else. */
 	memset(&p->rt_param, 0, sizeof(p->rt_param));
@@ -468,7 +473,9 @@ void litmus_exec(void)
 	struct task_struct* p = current;
 
 	if (is_realtime(p)) {
-		WARN_ON(p->rt_param.inh_task);
+		WARN_ON(p->rt_param.eff_prio);
+		WARN_ON(p->rt_param.local_prio);
+		WARN_ON(p->rt_param.trans_prio);
 		if (tsk_rt(p)->ctrl_page) {
 			free_page((unsigned long) tsk_rt(p)->ctrl_page);
 			tsk_rt(p)->ctrl_page = NULL;
diff --git a/litmus/locking.c b/litmus/locking.c
index 0c1aa6aa40b7..5897beb941cf 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -34,8 +34,10 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
 	int err;
 
 	err = litmus->allocate_lock(&lock, type, arg);
-	if (err == 0)
+	if (err == 0) {
+		INIT_LIST_HEAD(&lock->lock_chain);
 		*obj_ref = lock;
+	}
 	return err;
 }
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6ed504f4750e..677a932e08be 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -645,7 +645,7 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 #include <litmus/fdso.h>
 
 /* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	int linked_on;
 	int check_preempt = 0;
@@ -653,7 +653,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 	raw_spin_lock(&gsnedf_lock);
 
 	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
-	tsk_rt(t)->inh_task = prio_inh;
+	tsk_rt(t)->eff_prio = prio_inh;
 
 	linked_on = tsk_rt(t)->linked_on;
 
@@ -712,7 +712,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 }
 
 /* called with IRQs off */
-static void clear_priority_inheritance(struct task_struct* t)
+static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	raw_spin_lock(&gsnedf_lock);
 
@@ -720,8 +720,12 @@ static void clear_priority_inheritance(struct task_struct* t)
 	 * resource. Thus we can make the following assumption.*/
 	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
 
-	TRACE_TASK(t, "priority restored\n");
-	tsk_rt(t)->inh_task = NULL;
+	if(prio_inh)
+		TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid);
+	else
+		TRACE_TASK(t, "base priority restored.\n");
+
+	tsk_rt(t)->eff_prio = prio_inh;
 
 	/* Check if rescheduling is necessary. We can't use heap_decrease()
 	 * since the priority was effectively lowered. */
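
Illustration only, not part of the patch: the new prio_inh argument to
decrease_priority_inheritance() lets a caller lower a task to an intermediate
donor rather than always falling back to its base priority. The fragment
below is a hypothetical unlock-path caller; remaining_hp_waiter is an assumed
variable, not one from this commit.

	/* Hypothetical caller: t released an inner lock but still holds an
	 * outer one, so lower its inherited priority to the remaining
	 * highest-priority waiter instead of clearing it entirely. */
	if (remaining_hp_waiter)
		decrease_priority_inheritance(t, remaining_hp_waiter);
	else
		decrease_priority_inheritance(t, NULL); /* back to base priority */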
@@ -797,7 +801,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		if (edf_higher_prio(t, sem->hp_waiter)) {
 			sem->hp_waiter = t;
 			if (edf_higher_prio(t, sem->owner))
-				set_priority_inheritance(sem->owner, sem->hp_waiter);
+				increase_priority_inheritance(sem->owner, sem->hp_waiter);
 		}
 
 		TS_LOCK_SUSPEND;
@@ -865,7 +869,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 			/* Well, if next is not the highest-priority waiter,
 			 * then it ought to inherit the highest-priority
 			 * waiter's priority. */
-			set_priority_inheritance(next, sem->hp_waiter);
+			increase_priority_inheritance(next, sem->hp_waiter);
 		}
 
 		/* wake up next */
@@ -875,8 +879,8 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 		sem->owner = NULL;
 
 	/* we lose the benefit of priority inheritance (if any) */
-	if (tsk_rt(t)->inh_task)
-		clear_priority_inheritance(t);
+	if (tsk_rt(t)->eff_prio)
+		decrease_priority_inheritance(t, NULL);
 
 out:
 	spin_unlock_irqrestore(&sem->wait.lock, flags);