author     Glenn Elliott <gelliott@cs.unc.edu>    2012-03-30 16:43:52 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-03-30 16:43:52 -0400
commit     62f2907f445b08f958acf1cc1a0c29736d4ba206 (patch)
tree       a11743eddcc125c9c3ac0c527078338e3d01b295
parent     d0961e328a2a4c026c884c768b798cb882922708 (diff)
Nested inheritance with fine-grained locking.

A minor hack to lockdep was required to allow the inheritance
propagation locking logic to work.
-rw-r--r--  include/litmus/binheap.h     |  13
-rw-r--r--  include/litmus/edf_common.h  |  17
-rw-r--r--  include/litmus/litmus.h      |   7
-rw-r--r--  include/litmus/locking.h     |  20
-rw-r--r--  include/litmus/preempt.h     |   1
-rw-r--r--  include/litmus/rt_param.h    |  12
-rw-r--r--  kernel/lockdep.c             |   4
-rw-r--r--  litmus/edf_common.c          |  71
-rw-r--r--  litmus/litmus.c              |  31
-rw-r--r--  litmus/locking.c             |  54
-rw-r--r--  litmus/sched_gsn_edf.c       | 741
11 files changed, 737 insertions(+), 234 deletions(-)
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h index fd4c0a318b96..412a4c7bd4bc 100644 --- a/include/litmus/binheap.h +++ b/include/litmus/binheap.h | |||
@@ -57,6 +57,9 @@ struct binheap_handle { | |||
57 | }; | 57 | }; |
58 | 58 | ||
59 | 59 | ||
60 | #define BINHEAP_POISON ((void*)(0xdeadbeef)) | ||
61 | |||
62 | |||
60 | /** | 63 | /** |
61 | * binheap_entry - get the struct for this heap node. | 64 | * binheap_entry - get the struct for this heap node. |
62 | * Only valid when called upon heap nodes other than the root handle. | 65 | * Only valid when called upon heap nodes other than the root handle. |
@@ -329,7 +332,7 @@ static inline void __binheap_bubble_up( | |||
329 | { | 332 | { |
330 | /* Note: NULL data pointers are used internally for arbitrary delete */ | 333 | /* Note: NULL data pointers are used internally for arbitrary delete */ |
331 | while((node->parent != NULL) && | 334 | while((node->parent != NULL) && |
332 | ((node->data == NULL) /* let null data bubble to the top */ || | 335 | ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ || |
333 | handle->compare(node, node->parent))) { | 336 | handle->compare(node, node->parent))) { |
334 | __binheap_swap(node->parent, node); | 337 | __binheap_swap(node->parent, node); |
335 | node = node->parent; | 338 | node = node->parent; |
@@ -370,12 +373,6 @@ static inline void __binheap_add(struct binheap_node *new_node, | |||
370 | struct binheap_handle *handle, | 373 | struct binheap_handle *handle, |
371 | void *data) | 374 | void *data) |
372 | { | 375 | { |
373 | /* NULL data pointers are used internally */ | ||
374 | if(!data) { | ||
375 | WARN_ON(!data); | ||
376 | return; | ||
377 | } | ||
378 | |||
379 | new_node->data = data; | 376 | new_node->data = data; |
380 | new_node->ref = new_node; | 377 | new_node->ref = new_node; |
381 | new_node->ref_ptr = &(new_node->ref); | 378 | new_node->ref_ptr = &(new_node->ref); |
@@ -513,7 +510,7 @@ static inline void __binheap_delete( | |||
513 | void *temp_data = target->data; | 510 | void *temp_data = target->data; |
514 | 511 | ||
515 | /* temporarily set data to null to allow node to bubble up to the top. */ | 512 | /* temporarily set data to null to allow node to bubble up to the top. */ |
516 | target->data = NULL; | 513 | target->data = BINHEAP_POISON; |
517 | 514 | ||
518 | __binheap_bubble_up(handle, target); | 515 | __binheap_bubble_up(handle, target); |
519 | __binheap_delete_root(handle, node_to_delete); | 516 | __binheap_delete_root(handle, node_to_delete); |
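Note: the hunk above swaps the old NULL-data convention for an explicit BINHEAP_POISON sentinel. To delete an arbitrary node, its data pointer is temporarily set to the sentinel, the bubble-up pass floats it past every comparison to the root, and the ordinary delete-root path then removes it. The standalone sketch below demonstrates the same poison-and-bubble technique on a plain array-backed max-heap; it is an illustration only, and names such as `poison_delete` and `HEAP_POISON` are not the kernel's.

```c
#include <stdio.h>

#define HEAP_POISON  (-1)   /* sentinel: treated as "higher than everything" */
#define HEAP_MAX     64

typedef struct {
    int val[HEAP_MAX];
    int size;
} heap_t;

/* returns 1 if element i belongs above element j (max-heap order),
 * letting the poison sentinel bubble past any real value */
static int above(const heap_t *h, int i, int j)
{
    if (h->val[i] == HEAP_POISON)
        return 1;
    return h->val[i] > h->val[j];
}

static void swap(heap_t *h, int i, int j)
{
    int tmp = h->val[i];
    h->val[i] = h->val[j];
    h->val[j] = tmp;
}

static void bubble_up(heap_t *h, int i)
{
    while (i > 0 && above(h, i, (i - 1) / 2)) {
        swap(h, i, (i - 1) / 2);
        i = (i - 1) / 2;
    }
}

static void bubble_down(heap_t *h, int i)
{
    for (;;) {
        int l = 2 * i + 1, r = 2 * i + 2, best = i;
        if (l < h->size && h->val[l] > h->val[best]) best = l;
        if (r < h->size && h->val[r] > h->val[best]) best = r;
        if (best == i) break;
        swap(h, i, best);
        i = best;
    }
}

static void heap_add(heap_t *h, int v)
{
    h->val[h->size] = v;
    bubble_up(h, h->size++);
}

/* delete an arbitrary slot: poison it, float it to the root, drop the root */
static void poison_delete(heap_t *h, int i)
{
    h->val[i] = HEAP_POISON;
    bubble_up(h, i);                /* sentinel rises to index 0 */
    h->val[0] = h->val[--h->size];  /* standard delete-root */
    bubble_down(h, 0);
}

int main(void)
{
    heap_t h = { .size = 0 };
    int i;

    for (i = 1; i <= 7; i++)
        heap_add(&h, i * 10);
    poison_delete(&h, 4);           /* remove some interior element */
    printf("max after delete: %d\n", h.val[0]);
    return 0;
}
```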
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h index bbaf22ea7f12..aacfab14a254 100644 --- a/include/litmus/edf_common.h +++ b/include/litmus/edf_common.h | |||
@@ -20,6 +20,23 @@ int edf_higher_prio(struct task_struct* first, | |||
20 | 20 | ||
21 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b); | 21 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b); |
22 | 22 | ||
23 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
24 | /* binheap_nodes must be embedded within 'struct litmus_lock' */ | ||
25 | int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b); | ||
26 | int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b); | ||
27 | int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b); | ||
28 | int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b); | ||
29 | |||
30 | typedef enum | ||
31 | { | ||
32 | BASE, | ||
33 | INHERITED | ||
34 | } comparison_mode_t; | ||
35 | |||
36 | int __edf_higher_prio(struct task_struct* first, comparison_mode_t first_mode, | ||
37 | struct task_struct* second, comparison_mode_t second_mode); | ||
38 | #endif | ||
39 | |||
23 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); | 40 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); |
24 | 41 | ||
25 | #endif | 42 | #endif |
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 0b071fd359f9..9a7334f1eb00 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -53,12 +53,16 @@ void litmus_exit_task(struct task_struct *tsk); | |||
53 | #define get_rt_phase(t) (tsk_rt(t)->task_params.phase) | 53 | #define get_rt_phase(t) (tsk_rt(t)->task_params.phase) |
54 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) | 54 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) |
55 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) | 55 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) |
56 | #define get_period(t) (tsk_rt(t)->task_params.period) | ||
56 | #define get_release(t) (tsk_rt(t)->job_params.release) | 57 | #define get_release(t) (tsk_rt(t)->job_params.release) |
57 | #define get_class(t) (tsk_rt(t)->task_params.cls) | 58 | #define get_class(t) (tsk_rt(t)->task_params.cls) |
58 | 59 | ||
59 | #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) | 60 | #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) |
60 | #define get_boost_start(t) (tsk_rt(t)->boost_start_time) | 61 | #define get_boost_start(t) (tsk_rt(t)->boost_start_time) |
61 | 62 | ||
63 | #define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task) | ||
64 | #define base_priority(t) (t) | ||
65 | |||
62 | inline static int budget_exhausted(struct task_struct* t) | 66 | inline static int budget_exhausted(struct task_struct* t) |
63 | { | 67 | { |
64 | return get_exec_time(t) >= get_exec_cost(t); | 68 | return get_exec_time(t) >= get_exec_cost(t); |
@@ -114,6 +118,9 @@ static inline lt_t litmus_clock(void) | |||
114 | #define earlier_deadline(a, b) (lt_before(\ | 118 | #define earlier_deadline(a, b) (lt_before(\ |
115 | (a)->rt_param.job_params.deadline,\ | 119 | (a)->rt_param.job_params.deadline,\ |
116 | (b)->rt_param.job_params.deadline)) | 120 | (b)->rt_param.job_params.deadline)) |
121 | #define shorter_period(a, b) (lt_before(\ | ||
122 | (a)->rt_param.task_params.period,\ | ||
123 | (b)->rt_param.task_params.period)) | ||
117 | #define earlier_release(a, b) (lt_before(\ | 124 | #define earlier_release(a, b) (lt_before(\ |
118 | (a)->rt_param.job_params.release,\ | 125 | (a)->rt_param.job_params.release,\ |
119 | (b)->rt_param.job_params.release)) | 126 | (b)->rt_param.job_params.release)) |
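Note: the two new macros make explicit which task should be used in a priority comparison: `base_priority(t)` is always the task itself, while `effective_priority(t)` resolves to the donor recorded in `inh_task` when inheritance is active. The toy below mirrors just that resolution in user space; `struct task` and its `deadline` field are stand-ins for `task_struct`/`rt_param`, not the kernel types.

```c
#include <stdio.h>

/* Toy model of the new macros: effective_priority() resolves to the donor
 * task while inheritance is active, base_priority() to the task itself. */
struct task {
    const char  *name;
    long         deadline;
    struct task *inh_task;   /* NULL when not inheriting */
};

#define effective_priority(t) ((!((t)->inh_task)) ? (t) : (t)->inh_task)
#define base_priority(t)      (t)

int main(void)
{
    struct task donor  = { "donor",  100, NULL };
    struct task holder = { "holder", 500, &donor };

    printf("holder base deadline:      %ld\n", base_priority(&holder)->deadline);
    printf("holder effective deadline: %ld\n", effective_priority(&holder)->deadline);
    return 0;
}
```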
diff --git a/include/litmus/locking.h b/include/litmus/locking.h index a01890153fa1..57d2abfe2a12 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h | |||
@@ -13,13 +13,19 @@ struct litmus_lock { | |||
13 | int type; | 13 | int type; |
14 | 14 | ||
15 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 15 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
16 | struct list_head lock_chain; | 16 | struct task_struct *hp_waiter_eff_prio; |
17 | #endif | 17 | struct task_struct **hp_waiter_ptr; |
18 | }; | 18 | struct binheap_node hp_binheap_node; |
19 | |||
20 | int ident; | ||
21 | |||
22 | //#ifdef CONFIG_DEBUG_SPINLOCK | ||
23 | char cheat_lockdep[2]; | ||
24 | struct lock_class_key key; | ||
25 | //#endif | ||
19 | 26 | ||
20 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
21 | void nest_lock(struct litmus_lock *l, struct task_struct *t); | ||
22 | #endif | 27 | #endif |
28 | }; | ||
23 | 29 | ||
24 | struct litmus_lock_ops { | 30 | struct litmus_lock_ops { |
25 | /* Current task tries to obtain / drop a reference to a lock. | 31 | /* Current task tries to obtain / drop a reference to a lock. |
@@ -35,8 +41,8 @@ struct litmus_lock_ops { | |||
35 | void (*deallocate)(struct litmus_lock*); | 41 | void (*deallocate)(struct litmus_lock*); |
36 | 42 | ||
37 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 43 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
38 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t); | 44 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); |
39 | void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t); | 45 | void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); |
40 | #endif | 46 | #endif |
41 | }; | 47 | }; |
42 | 48 | ||
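Note: the widened `propagate_*_inheritance()` signatures hand the callee the caller's still-held spinlock (`to_unlock`) along with the saved IRQ flags. Judging from how the callbacks are used in the sched_gsn_edf.c hunks below, the implied contract is: take your own lock first, then release `to_unlock`, and restore `irqflags` only once the chain ends. The fragment below sketches that contract under those assumptions; `struct my_mutex` and `my_mutex_from_lock()` are hypothetical, not from the patch.

```c
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <litmus/locking.h>

/* Hypothetical protocol-side state; only the embedded spinlock matters here. */
struct my_mutex {
	struct litmus_lock litmus_lock;
	raw_spinlock_t lock;
};

static inline struct my_mutex *my_mutex_from_lock(struct litmus_lock *l)
{
	return container_of(l, struct my_mutex, litmus_lock);
}

/* Sketch of the implied contract (assumption, not code from the patch):
 * called with 'to_unlock' held and IRQs disabled. */
static void example_propagate_increase_inheritance(struct litmus_lock *l,
						    struct task_struct *t,
						    raw_spinlock_t *to_unlock,
						    unsigned long irqflags)
{
	struct my_mutex *mutex = my_mutex_from_lock(l);

	raw_spin_lock(&mutex->lock);	/* take the next lock in the chain... */
	raw_spin_unlock(to_unlock);	/* ...then drop the one we were handed */

	/* ... update hp_waiter bookkeeping for 't'; either recurse further,
	 * passing &mutex->lock as the new to_unlock, or stop here ... */

	raw_spin_unlock_irqrestore(&mutex->lock, irqflags);	/* end of chain */
}
```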
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h index 380b886d78ff..61f4db18e49d 100644 --- a/include/litmus/preempt.h +++ b/include/litmus/preempt.h | |||
@@ -31,7 +31,6 @@ const char* sched_state_name(int s); | |||
31 | cpu, (x), sched_state_name(x), \ | 31 | cpu, (x), sched_state_name(x), \ |
32 | (y), sched_state_name(y)) | 32 | (y), sched_state_name(y)) |
33 | 33 | ||
34 | |||
35 | typedef enum scheduling_state { | 34 | typedef enum scheduling_state { |
36 | TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that | 35 | TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that |
37 | * should be scheduled, and the processor does not | 36 | * should be scheduled, and the processor does not |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 5239d4a6f508..8947dd0c7eb2 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -75,6 +75,8 @@ struct control_page { | |||
75 | /* don't export internal data structures to user space (liblitmus) */ | 75 | /* don't export internal data structures to user space (liblitmus) */ |
76 | #ifdef __KERNEL__ | 76 | #ifdef __KERNEL__ |
77 | 77 | ||
78 | #include <litmus/binheap.h> | ||
79 | |||
78 | struct _rt_domain; | 80 | struct _rt_domain; |
79 | struct bheap_node; | 81 | struct bheap_node; |
80 | struct release_heap; | 82 | struct release_heap; |
@@ -133,14 +135,12 @@ struct rt_param { | |||
133 | * could point to self if PI does not result in | 135 | * could point to self if PI does not result in |
134 | * an increased task priority. | 136 | * an increased task priority. |
135 | */ | 137 | */ |
136 | struct task_struct* eff_prio; | 138 | struct task_struct* inh_task; |
137 | 139 | ||
138 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 140 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
139 | struct task_struct* local_prio; | 141 | raw_spinlock_t hp_blocked_tasks_lock; |
140 | struct task_struct* trans_prio; | 142 | struct binheap_handle hp_blocked_tasks; |
141 | 143 | ||
142 | /* pointer to the last lock acquired */ | ||
143 | struct litmus_lock* last_lock; | ||
144 | 144 | ||
145 | /* pointer to lock upon which is currently blocked */ | 145 | /* pointer to lock upon which is currently blocked */ |
146 | struct litmus_lock* blocked_lock; | 146 | struct litmus_lock* blocked_lock; |
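Note: the per-task state replaces the old `local_prio`/`trans_prio` pair with a spinlock-protected heap, `hp_blocked_tasks`, holding one node per lock the task currently owns, ordered by each lock's highest-priority waiter; the task's inherited priority is then whatever sits at the top of that heap. The standalone toy below captures the same bookkeeping with a linear scan in place of a binheap (smaller deadline means higher priority; all names are illustrative).

```c
#include <stdio.h>

/* Toy model: a task owns several locks; each lock records the effective
 * priority (here: a deadline, smaller = higher) of its highest-priority
 * waiter.  The owner's inherited priority is the best of those. */
struct toy_lock {
    const char *name;
    long        hp_waiter_deadline;   /* 0 means: no waiters */
};

static long inherited_deadline(const struct toy_lock *held, int n)
{
    long best = 0;                    /* 0 = no inheritance */
    int i;

    for (i = 0; i < n; i++) {
        long d = held[i].hp_waiter_deadline;
        if (d && (!best || d < best))
            best = d;
    }
    return best;
}

int main(void)
{
    struct toy_lock held[] = {
        { "A", 800 },   /* waiter with deadline 800 blocked on A */
        { "B",   0 },   /* no waiters on B */
        { "C", 300 },   /* waiter with deadline 300 blocked on C */
    };

    /* With base deadline 1000, the owner runs with the inherited
     * deadline 300 until it releases lock C. */
    printf("inherited deadline: %ld\n", inherited_deadline(held, 3));
    return 0;
}
```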
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 298c9276dfdb..4b3107a244fa 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -583,6 +583,10 @@ static int static_obj(void *obj) | |||
583 | end = (unsigned long) &_end, | 583 | end = (unsigned long) &_end, |
584 | addr = (unsigned long) obj; | 584 | addr = (unsigned long) obj; |
585 | 585 | ||
586 | // GLENN | ||
587 | return 1; | ||
588 | |||
589 | |||
586 | /* | 590 | /* |
587 | * static variable? | 591 | * static variable? |
588 | */ | 592 | */ |
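Note: the two added lines short-circuit lockdep's `static_obj()` so that every object passes the "does this live in static storage?" test. As far as this patch shows, the motivation is the `lock_class_key` embedded in `struct litmus_lock` above: each dynamically allocated lock carries its own class key, which stock lockdep would reject. The sketch below illustrates the situation that trips the unpatched check; the struct and function names are hypothetical, not from the patch.

```c
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Hypothetical example: a per-instance lock class key embedded in a
 * dynamically allocated object rather than in static storage. */
struct nested_lock {
	raw_spinlock_t        lock;
	struct lock_class_key key;	/* lives on the heap, not in .data */
};

static void nested_lock_init(struct nested_lock *nl)
{
	raw_spin_lock_init(&nl->lock);

	/* Registering a heap-allocated key: with an unmodified static_obj(),
	 * lockdep complains about a non-static key the first time the lock
	 * is acquired.  Returning 1 unconditionally (the hack above)
	 * silences that check. */
	lockdep_set_class(&nl->lock, &nl->key);
}
```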
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 54cfada586be..7a3a9a02a703 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
@@ -12,15 +12,25 @@ | |||
12 | #include <litmus/sched_plugin.h> | 12 | #include <litmus/sched_plugin.h> |
13 | #include <litmus/sched_trace.h> | 13 | #include <litmus/sched_trace.h> |
14 | 14 | ||
15 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
16 | #include <litmus/locking.h> | ||
17 | #endif | ||
18 | |||
15 | #include <litmus/edf_common.h> | 19 | #include <litmus/edf_common.h> |
16 | 20 | ||
21 | |||
17 | /* edf_higher_prio - returns true if first has a higher EDF priority | 22 | /* edf_higher_prio - returns true if first has a higher EDF priority |
18 | * than second. Deadline ties are broken by PID. | 23 | * than second. Deadline ties are broken by PID. |
19 | * | 24 | * |
20 | * both first and second may be NULL | 25 | * both first and second may be NULL |
21 | */ | 26 | */ |
22 | int edf_higher_prio(struct task_struct* first, | 27 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
23 | struct task_struct* second) | 28 | int __edf_higher_prio( |
29 | struct task_struct* first, comparison_mode_t first_mode, | ||
30 | struct task_struct* second, comparison_mode_t second_mode) | ||
31 | #else | ||
32 | int edf_higher_prio(struct task_struct* first, struct task_struct* second) | ||
33 | #endif | ||
24 | { | 34 | { |
25 | struct task_struct *first_task = first; | 35 | struct task_struct *first_task = first; |
26 | struct task_struct *second_task = second; | 36 | struct task_struct *second_task = second; |
@@ -38,14 +48,23 @@ int edf_higher_prio(struct task_struct* first, | |||
38 | return first && !second; | 48 | return first && !second; |
39 | 49 | ||
40 | #ifdef CONFIG_LITMUS_LOCKING | 50 | #ifdef CONFIG_LITMUS_LOCKING |
41 | |||
42 | /* Check for inherited priorities. Change task | 51 | /* Check for inherited priorities. Change task |
43 | * used for comparison in such a case. | 52 | * used for comparison in such a case. |
44 | */ | 53 | */ |
45 | if (unlikely(first->rt_param.eff_prio)) | 54 | if (unlikely(first->rt_param.inh_task) |
46 | first_task = first->rt_param.eff_prio; | 55 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
47 | if (unlikely(second->rt_param.eff_prio)) | 56 | && (first_mode == INHERITED) |
48 | second_task = second->rt_param.eff_prio; | 57 | #endif |
58 | ) { | ||
59 | first_task = first->rt_param.inh_task; | ||
60 | } | ||
61 | if (unlikely(second->rt_param.inh_task) | ||
62 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
63 | && (second_mode == INHERITED) | ||
64 | #endif | ||
65 | ) { | ||
66 | second_task = second->rt_param.inh_task; | ||
67 | } | ||
49 | 68 | ||
50 | /* Check for priority boosting. Tie-break by start of boosting. | 69 | /* Check for priority boosting. Tie-break by start of boosting. |
51 | */ | 70 | */ |
@@ -63,7 +82,6 @@ int edf_higher_prio(struct task_struct* first, | |||
63 | 82 | ||
64 | #endif | 83 | #endif |
65 | 84 | ||
66 | |||
67 | return !is_realtime(second_task) || | 85 | return !is_realtime(second_task) || |
68 | 86 | ||
69 | /* is the deadline of the first task earlier? | 87 | /* is the deadline of the first task earlier? |
@@ -81,9 +99,44 @@ int edf_higher_prio(struct task_struct* first, | |||
81 | * priority wins. | 99 | * priority wins. |
82 | */ | 100 | */ |
83 | (first_task->pid == second_task->pid && | 101 | (first_task->pid == second_task->pid && |
84 | !second->rt_param.eff_prio))); | 102 | !second->rt_param.inh_task))); |
103 | } | ||
104 | |||
105 | |||
106 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
107 | int edf_higher_prio(struct task_struct* first, struct task_struct* second) | ||
108 | { | ||
109 | return __edf_higher_prio(first, INHERITED, second, INHERITED); | ||
85 | } | 110 | } |
86 | 111 | ||
112 | int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b) | ||
113 | { | ||
114 | struct litmus_lock *l_a = (struct litmus_lock *)binheap_entry(a, struct litmus_lock, hp_binheap_node); | ||
115 | struct litmus_lock *l_b = (struct litmus_lock *)binheap_entry(b, struct litmus_lock, hp_binheap_node); | ||
116 | |||
117 | return __edf_higher_prio(l_a->hp_waiter_eff_prio, INHERITED, l_b->hp_waiter_eff_prio, INHERITED); | ||
118 | } | ||
119 | |||
120 | int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b) | ||
121 | { | ||
122 | return edf_max_heap_order(b, a); // swap comparison | ||
123 | } | ||
124 | |||
125 | int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
126 | { | ||
127 | struct litmus_lock *l_a = (struct litmus_lock *)binheap_entry(a, struct litmus_lock, hp_binheap_node); | ||
128 | struct litmus_lock *l_b = (struct litmus_lock *)binheap_entry(b, struct litmus_lock, hp_binheap_node); | ||
129 | |||
130 | return __edf_higher_prio(l_a->hp_waiter_eff_prio, BASE, l_b->hp_waiter_eff_prio, BASE); | ||
131 | } | ||
132 | |||
133 | int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
134 | { | ||
135 | return edf_max_heap_base_priority_order(b, a); // swap comparison | ||
136 | } | ||
137 | #endif | ||
138 | |||
139 | |||
87 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b) | 140 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b) |
88 | { | 141 | { |
89 | return edf_higher_prio(bheap2task(a), bheap2task(b)); | 142 | return edf_higher_prio(bheap2task(a), bheap2task(b)); |
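Note: with the mode arguments, a single comparator answers both questions the protocol needs: `INHERITED` follows `inh_task` before comparing (the old `edf_higher_prio()` behaviour, preserved by the INHERITED/INHERITED wrapper above), while `BASE` uses the task's own parameters. The helper below is illustrative only and is not added by the patch, but the same mixed-mode guard appears verbatim inside `increase_priority_inheritance()` in the sched_gsn_edf.c hunk further down.

```c
/* Illustrative helper (hypothetical, not added by the patch): would
 * inheriting from 'donor' actually raise 'holder' above the priority it
 * is already running at? */
static int donor_would_raise(struct task_struct *donor,
			     struct task_struct *holder)
{
	/* donor's base priority vs. holder's current effective
	 * (possibly already inherited) priority */
	return __edf_higher_prio(donor, BASE, holder, INHERITED);
}
```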
diff --git a/litmus/litmus.c b/litmus/litmus.c index 40340dfa9d67..9d4fbd2f8a65 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -291,7 +291,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
291 | { | 291 | { |
292 | struct rt_task user_config = {}; | 292 | struct rt_task user_config = {}; |
293 | void* ctrl_page = NULL; | 293 | void* ctrl_page = NULL; |
294 | 294 | ||
295 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
296 | binheap_order_t prio_order = NULL; | ||
297 | #endif | ||
298 | |||
299 | |||
295 | if (restore) { | 300 | if (restore) { |
296 | /* Safe user-space provided configuration data. | 301 | /* Safe user-space provided configuration data. |
297 | * and allocated page. */ | 302 | * and allocated page. */ |
@@ -299,24 +304,33 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
299 | ctrl_page = p->rt_param.ctrl_page; | 304 | ctrl_page = p->rt_param.ctrl_page; |
300 | } | 305 | } |
301 | 306 | ||
307 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
308 | prio_order = p->rt_param.hp_blocked_tasks.compare; | ||
309 | #endif | ||
310 | |||
302 | /* We probably should not be inheriting any task's priority | 311 | /* We probably should not be inheriting any task's priority |
303 | * at this point in time. | 312 | * at this point in time. |
304 | */ | 313 | */ |
305 | WARN_ON(p->rt_param.eff_prio); | 314 | WARN_ON(p->rt_param.inh_task); |
306 | 315 | ||
307 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 316 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
308 | WARN_ON(p->rt_param.local_prio); | 317 | WARN_ON(p->rt_param.blocked_lock); |
309 | WARN_ON(p->rt_param.trans_prio); | 318 | WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks)); |
310 | #endif | 319 | #endif |
311 | 320 | ||
312 | /* Cleanup everything else. */ | 321 | /* Cleanup everything else. */ |
313 | memset(&p->rt_param, 0, sizeof(p->rt_param)); | 322 | memset(&p->rt_param, 0, sizeof(p->rt_param)); |
314 | 323 | ||
315 | /* Restore preserved fields. */ | 324 | /* Restore preserved fields. */ |
316 | if (restore) { | 325 | if (restore) { |
317 | p->rt_param.task_params = user_config; | 326 | p->rt_param.task_params = user_config; |
318 | p->rt_param.ctrl_page = ctrl_page; | 327 | p->rt_param.ctrl_page = ctrl_page; |
319 | } | 328 | } |
329 | |||
330 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
331 | INIT_BINHEAP_HANDLE(&p->rt_param.hp_blocked_tasks, prio_order); | ||
332 | raw_spin_lock_init(&p->rt_param.hp_blocked_tasks_lock); | ||
333 | #endif | ||
320 | } | 334 | } |
321 | 335 | ||
322 | long litmus_admit_task(struct task_struct* tsk) | 336 | long litmus_admit_task(struct task_struct* tsk) |
@@ -363,6 +377,9 @@ long litmus_admit_task(struct task_struct* tsk) | |||
363 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); | 377 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); |
364 | } | 378 | } |
365 | 379 | ||
380 | tsk_rt(tsk)->blocked_lock = NULL; | ||
381 | raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock); | ||
382 | |||
366 | retval = litmus->admit_task(tsk); | 383 | retval = litmus->admit_task(tsk); |
367 | 384 | ||
368 | if (!retval) { | 385 | if (!retval) { |
@@ -473,9 +490,7 @@ void litmus_exec(void) | |||
473 | struct task_struct* p = current; | 490 | struct task_struct* p = current; |
474 | 491 | ||
475 | if (is_realtime(p)) { | 492 | if (is_realtime(p)) { |
476 | WARN_ON(p->rt_param.eff_prio); | 493 | WARN_ON(p->rt_param.inh_task); |
477 | WARN_ON(p->rt_param.local_prio); | ||
478 | WARN_ON(p->rt_param.trans_prio); | ||
479 | if (tsk_rt(p)->ctrl_page) { | 494 | if (tsk_rt(p)->ctrl_page) { |
480 | free_page((unsigned long) tsk_rt(p)->ctrl_page); | 495 | free_page((unsigned long) tsk_rt(p)->ctrl_page); |
481 | tsk_rt(p)->ctrl_page = NULL; | 496 | tsk_rt(p)->ctrl_page = NULL; |
diff --git a/litmus/locking.c b/litmus/locking.c index f3fa273314fb..78d609409536 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -29,6 +29,11 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry) | |||
29 | return (struct litmus_lock*) entry->obj->obj; | 29 | return (struct litmus_lock*) entry->obj->obj; |
30 | } | 30 | } |
31 | 31 | ||
32 | |||
33 | atomic_t lock_id_gen = ATOMIC_INIT(0); | ||
34 | raw_spinlock_t rsm_global_lock; | ||
35 | |||
36 | |||
32 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) | 37 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) |
33 | { | 38 | { |
34 | struct litmus_lock* lock; | 39 | struct litmus_lock* lock; |
@@ -36,7 +41,18 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar | |||
36 | 41 | ||
37 | err = litmus->allocate_lock(&lock, type, arg); | 42 | err = litmus->allocate_lock(&lock, type, arg); |
38 | if (err == 0) { | 43 | if (err == 0) { |
39 | INIT_LIST_HEAD(&lock->lock_chain); | 44 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
45 | lock->hp_waiter_eff_prio = NULL; | ||
46 | |||
47 | INIT_BINHEAP_NODE(&lock->hp_binheap_node); | ||
48 | WARN_ON(!(lock->hp_waiter_ptr)); | ||
49 | |||
50 | lock->ident = atomic_inc_return(&lock_id_gen); | ||
51 | |||
52 | if(lock->ident == 1) { | ||
53 | raw_spin_lock_init(&rsm_global_lock); | ||
54 | } | ||
55 | #endif | ||
40 | *obj_ref = lock; | 56 | *obj_ref = lock; |
41 | } | 57 | } |
42 | return err; | 58 | return err; |
@@ -125,24 +141,24 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) | |||
125 | } | 141 | } |
126 | 142 | ||
127 | 143 | ||
128 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 144 | //#ifdef CONFIG_LITMUS_NESTED_LOCKING |
129 | /* not "lock_nest" ... get it? */ | 145 | ///* not "lock_nest" ... get it? */ |
130 | void nest_lock(struct litmus_lock *l, struct task_struct *t) | 146 | //void nest_lock(struct litmus_lock *l, struct task_struct *t) |
131 | { | 147 | //{ |
132 | if(tsk_rt(t)->last_lock) { | 148 | // if(tsk_rt(t)->last_lock) { |
133 | /* push new lock to front of old lock */ | 149 | // /* push new lock to front of old lock */ |
134 | struct litmus_lock *old = tsk_rt(t)->last_lock; | 150 | // struct litmus_lock *old = tsk_rt(t)->last_lock; |
135 | 151 | // | |
136 | list_add(&l->lock_chain, &old->lock_chain); | 152 | // list_add(&l->lock_chain, &old->lock_chain); |
137 | } | 153 | // } |
138 | 154 | // | |
139 | tsk_rt(t)->last_lock = l; | 155 | // tsk_rt(t)->last_lock = l; |
140 | 156 | // | |
141 | // local inh now becomes transitive inh | 157 | // // local inh now becomes transitive inh |
142 | tsk_rt(t)->trans_prio = tsk_rt(t)->local_prio; // what about old transitive prio??? | 158 | // tsk_rt(t)->trans_prio = tsk_rt(t)->local_prio; // what about old transitive prio??? |
143 | tsk_rt(t)->local_prio = NULL; | 159 | // tsk_rt(t)->local_prio = NULL; |
144 | } | 160 | //} |
145 | #endif | 161 | //#endif |
146 | 162 | ||
147 | 163 | ||
148 | 164 | ||
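Note: `create_generic_lock()` now hands every lock a unique `ident` from a global counter, initialises its heap node, and warns if `hp_waiter_ptr` was left unset, which suggests a plugin's `allocate_lock()` implementation is expected to point `hp_waiter_ptr` at its protocol-level `hp_waiter` field before returning. The abbreviated sketch below is hypothetical; the `litmus_lock` member name and the exact `rsm_mutex` layout are guesses based on the RSM mutex code later in this patch.

```c
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <litmus/locking.h>

/* Hypothetical plugin-side allocation (signature abbreviated; the real
 * allocate_lock() callback also receives a type and a user argument). */
static struct litmus_lock *example_allocate_rsm_mutex(void)
{
	struct rsm_mutex *mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);

	if (!mutex)
		return NULL;

	mutex->owner = NULL;
	mutex->hp_waiter = NULL;
	init_waitqueue_head(&mutex->wait);
	raw_spin_lock_init(&mutex->lock);

	/* Required by the generic layer: if this is left NULL, the
	 * WARN_ON() in create_generic_lock() above fires. */
	mutex->litmus_lock.hp_waiter_ptr = &mutex->hp_waiter;

	return &mutex->litmus_lock;
}
```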
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 6edcc339ea5e..99c93ae65e06 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -638,11 +638,17 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
638 | 638 | ||
639 | static long gsnedf_admit_task(struct task_struct* tsk) | 639 | static long gsnedf_admit_task(struct task_struct* tsk) |
640 | { | 640 | { |
641 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
642 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, edf_max_heap_base_priority_order); | ||
643 | #endif | ||
644 | |||
641 | return 0; | 645 | return 0; |
642 | } | 646 | } |
643 | 647 | ||
644 | #ifdef CONFIG_LITMUS_LOCKING | 648 | #ifdef CONFIG_LITMUS_LOCKING |
645 | 649 | ||
650 | extern raw_spinlock_t rsm_global_lock; | ||
651 | |||
646 | #include <litmus/fdso.h> | 652 | #include <litmus/fdso.h> |
647 | 653 | ||
648 | /* called with IRQs off */ | 654 | /* called with IRQs off */ |
@@ -653,60 +659,71 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
653 | 659 | ||
654 | raw_spin_lock(&gsnedf_lock); | 660 | raw_spin_lock(&gsnedf_lock); |
655 | 661 | ||
656 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 662 | /* this sanity check allows for weaker locking in protocols */ |
657 | tsk_rt(t)->eff_prio = prio_inh; | 663 | if(__edf_higher_prio(prio_inh, BASE, t, INHERITED)) { |
658 | 664 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | |
659 | linked_on = tsk_rt(t)->linked_on; | 665 | tsk_rt(t)->inh_task = prio_inh; |
660 | 666 | ||
661 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 667 | linked_on = tsk_rt(t)->linked_on; |
662 | if (linked_on != NO_CPU) { | 668 | |
663 | TRACE_TASK(t, "%s: linked on %d\n", | 669 | /* If it is scheduled, then we need to reorder the CPU heap. */ |
664 | __FUNCTION__, linked_on); | 670 | if (linked_on != NO_CPU) { |
665 | /* Holder is scheduled; need to re-order CPUs. | 671 | TRACE_TASK(t, "%s: linked on %d\n", |
666 | * We can't use heap_decrease() here since | 672 | __FUNCTION__, linked_on); |
667 | * the cpu_heap is ordered in reverse direction, so | 673 | /* Holder is scheduled; need to re-order CPUs. |
668 | * it is actually an increase. */ | 674 | * We can't use heap_decrease() here since |
669 | binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap); | 675 | * the cpu_heap is ordered in reverse direction, so |
670 | binheap_add(&gsnedf_cpus[linked_on]->hn, | 676 | * it is actually an increase. */ |
671 | &gsnedf_cpu_heap, cpu_entry_t, hn); | 677 | binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap); |
672 | } else { | 678 | binheap_add(&gsnedf_cpus[linked_on]->hn, |
673 | /* holder may be queued: first stop queue changes */ | 679 | &gsnedf_cpu_heap, cpu_entry_t, hn); |
674 | raw_spin_lock(&gsnedf.release_lock); | ||
675 | if (is_queued(t)) { | ||
676 | TRACE_TASK(t, "%s: is queued\n", | ||
677 | __FUNCTION__); | ||
678 | /* We need to update the position of holder in some | ||
679 | * heap. Note that this could be a release heap if we | ||
680 | * budget enforcement is used and this job overran. */ | ||
681 | check_preempt = | ||
682 | !bheap_decrease(edf_ready_order, | ||
683 | tsk_rt(t)->heap_node); | ||
684 | } else { | 680 | } else { |
685 | /* Nothing to do: if it is not queued and not linked | 681 | /* holder may be queued: first stop queue changes */ |
686 | * then it is either sleeping or currently being moved | 682 | raw_spin_lock(&gsnedf.release_lock); |
687 | * by other code (e.g., a timer interrupt handler) that | 683 | if (is_queued(t)) { |
688 | * will use the correct priority when enqueuing the | 684 | TRACE_TASK(t, "%s: is queued\n", |
689 | * task. */ | 685 | __FUNCTION__); |
690 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 686 | /* We need to update the position of holder in some |
691 | __FUNCTION__); | 687 | * heap. Note that this could be a release heap if we |
692 | } | 688 | * budget enforcement is used and this job overran. */ |
693 | raw_spin_unlock(&gsnedf.release_lock); | 689 | check_preempt = |
694 | 690 | !bheap_decrease(edf_ready_order, | |
695 | /* If holder was enqueued in a release heap, then the following | 691 | tsk_rt(t)->heap_node); |
696 | * preemption check is pointless, but we can't easily detect | 692 | } else { |
697 | * that case. If you want to fix this, then consider that | 693 | /* Nothing to do: if it is not queued and not linked |
698 | * simply adding a state flag requires O(n) time to update when | 694 | * then it is either sleeping or currently being moved |
699 | * releasing n tasks, which conflicts with the goal to have | 695 | * by other code (e.g., a timer interrupt handler) that |
700 | * O(log n) merges. */ | 696 | * will use the correct priority when enqueuing the |
701 | if (check_preempt) { | 697 | * task. */ |
702 | /* heap_decrease() hit the top level of the heap: make | 698 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", |
703 | * sure preemption checks get the right task, not the | 699 | __FUNCTION__); |
704 | * potentially stale cache. */ | 700 | } |
705 | bheap_uncache_min(edf_ready_order, | 701 | raw_spin_unlock(&gsnedf.release_lock); |
706 | &gsnedf.ready_queue); | 702 | |
707 | check_for_preemptions(); | 703 | /* If holder was enqueued in a release heap, then the following |
704 | * preemption check is pointless, but we can't easily detect | ||
705 | * that case. If you want to fix this, then consider that | ||
706 | * simply adding a state flag requires O(n) time to update when | ||
707 | * releasing n tasks, which conflicts with the goal to have | ||
708 | * O(log n) merges. */ | ||
709 | if (check_preempt) { | ||
710 | /* heap_decrease() hit the top level of the heap: make | ||
711 | * sure preemption checks get the right task, not the | ||
712 | * potentially stale cache. */ | ||
713 | bheap_uncache_min(edf_ready_order, | ||
714 | &gsnedf.ready_queue); | ||
715 | check_for_preemptions(); | ||
716 | } | ||
708 | } | 717 | } |
709 | } | 718 | } |
719 | else { | ||
720 | TRACE_TASK(t, "Spurious invalid priority increase. " | ||
721 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
722 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
723 | t->comm, t->pid, | ||
724 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
725 | prio_inh->comm, prio_inh->pid); | ||
726 | } | ||
710 | 727 | ||
711 | raw_spin_unlock(&gsnedf_lock); | 728 | raw_spin_unlock(&gsnedf_lock); |
712 | } | 729 | } |
@@ -715,23 +732,52 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
715 | static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 732 | static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
716 | { | 733 | { |
717 | raw_spin_lock(&gsnedf_lock); | 734 | raw_spin_lock(&gsnedf_lock); |
718 | 735 | ||
719 | /* A job only stops inheriting a priority when it releases a | 736 | if(__edf_higher_prio(t, INHERITED, prio_inh, BASE)) { |
720 | * resource. Thus we can make the following assumption.*/ | 737 | /* A job only stops inheriting a priority when it releases a |
721 | //BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | 738 | * resource. Thus we can make the following assumption.*/ |
722 | 739 | if(prio_inh) | |
723 | if(prio_inh) | 740 | TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid); |
724 | TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid); | 741 | else |
725 | else | 742 | TRACE_TASK(t, "base priority restored.\n"); |
726 | TRACE_TASK(t, "base priority restored.\n"); | ||
727 | 743 | ||
728 | tsk_rt(t)->eff_prio = prio_inh; | 744 | tsk_rt(t)->inh_task = prio_inh; |
729 | 745 | ||
730 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 746 | if(tsk_rt(t)->scheduled_on != NO_CPU) { |
731 | * since the priority was effectively lowered. */ | 747 | TRACE_TASK(t, "is scheduled.\n"); |
732 | unlink(t); | 748 | |
733 | gsnedf_job_arrival(t); | 749 | /* Check if rescheduling is necessary. We can't use heap_decrease() |
750 | * since the priority was effectively lowered. */ | ||
751 | unlink(t); | ||
752 | gsnedf_job_arrival(t); | ||
753 | } | ||
754 | else { | ||
755 | /* task is queued */ | ||
756 | raw_spin_lock(&gsnedf.release_lock); | ||
757 | if (is_queued(t)) { | ||
758 | TRACE_TASK(t, "is queued.\n"); | ||
759 | |||
760 | /* decrease in priority, so we have to re-add to binomial heap */ | ||
761 | unlink(t); | ||
762 | gsnedf_job_arrival(t); | ||
763 | } | ||
764 | else { | ||
765 | TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n"); | ||
766 | } | ||
767 | raw_spin_unlock(&gsnedf.release_lock); | ||
768 | } | ||
769 | } | ||
770 | else { | ||
771 | TRACE_TASK(t, "Spurious invalid priority decrease. " | ||
772 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
773 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
774 | t->comm, t->pid, | ||
775 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
776 | (prio_inh) ? prio_inh->comm : "nil", | ||
777 | (prio_inh) ? prio_inh->pid : -1); | ||
778 | } | ||
734 | 779 | ||
780 | |||
735 | raw_spin_unlock(&gsnedf_lock); | 781 | raw_spin_unlock(&gsnedf_lock); |
736 | } | 782 | } |
737 | 783 | ||
@@ -741,110 +787,113 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
741 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 787 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
742 | 788 | ||
743 | 789 | ||
744 | 790 | void print_hp_waiters(struct binheap_node* n, int depth) | |
745 | /* called with IRQs off */ | ||
746 | static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
747 | { | 791 | { |
748 | increase_priority_inheritance(t, prio_inh); // increase our prio. | 792 | struct litmus_lock* l; |
793 | char padding[81] = " "; | ||
794 | struct task_struct *hp = NULL; | ||
795 | struct task_struct *hp_eff = NULL; | ||
796 | struct task_struct *node_prio = NULL; | ||
797 | |||
749 | 798 | ||
750 | // beware: recursion | 799 | if(n == NULL) { |
751 | if(tsk_rt(t)->blocked_lock && | 800 | TRACE("+-> %p\n", NULL); |
752 | tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance) { | 801 | return; |
753 | TRACE_TASK(mutex->hp_waiter, "Inheritor is blocked. Checking lock %p.", l); | ||
754 | tsk_rt(t)->blocked_lock->ops->propagate_increase_inheritance(tsk_rt(t)->blocked_lock, t); | ||
755 | } | 802 | } |
803 | |||
804 | l = binheap_entry(n, struct litmus_lock, hp_binheap_node); | ||
805 | |||
806 | if(depth*2 <= 80) | ||
807 | padding[depth*2] = '\0'; | ||
808 | |||
809 | if(*(l->hp_waiter_ptr)) { | ||
810 | hp = *(l->hp_waiter_ptr); | ||
811 | |||
812 | if(tsk_rt(hp)->inh_task) { | ||
813 | hp_eff = tsk_rt(hp)->inh_task; | ||
814 | } | ||
815 | } | ||
816 | |||
817 | node_prio = l->hp_waiter_eff_prio; | ||
818 | |||
819 | TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n", | ||
820 | padding, | ||
821 | (node_prio) ? node_prio->comm : "nil", | ||
822 | (node_prio) ? node_prio->pid : -1, | ||
823 | (hp) ? hp->comm : "nil", | ||
824 | (hp) ? hp->pid : -1, | ||
825 | (hp_eff) ? hp_eff->comm : "nil", | ||
826 | (hp_eff) ? hp_eff->pid : -1, | ||
827 | l->ident); | ||
828 | |||
829 | if(n->left) print_hp_waiters(n->left, depth+1); | ||
830 | if(n->right) print_hp_waiters(n->right, depth+1); | ||
756 | } | 831 | } |
757 | 832 | ||
833 | |||
758 | /* called with IRQs off */ | 834 | /* called with IRQs off */ |
759 | static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 835 | /* preconditions: |
836 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
837 | (2) The lock 'to_unlock' is held. | ||
838 | */ | ||
839 | static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | ||
760 | { | 840 | { |
761 | decrease_priority_inheritance(t, prio_inh); | 841 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
762 | |||
763 | // beware: recursion | ||
764 | if(tsk_rt(t)->blocked_lock && tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance) { | ||
765 | TRACE_TASK(mutex->hp_waiter, "Inheritor is blocked. Checking lock %p.", l); | ||
766 | tsk_rt(t)->blocked_lock->ops->propagate_decrease_inheritance(tsk_rt(t)->blocked_lock, t); | ||
767 | } | ||
768 | } | ||
769 | 842 | ||
843 | increase_priority_inheritance(t, prio_inh); // increase our prio. | ||
770 | 844 | ||
771 | void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | 845 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
772 | struct task_struct* t) | ||
773 | { | ||
774 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
775 | unsigned long flags; | ||
776 | 846 | ||
777 | spin_lock_irqsave(&mutex->wait.lock, flags); | ||
778 | 847 | ||
779 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 848 | if(blocked_lock) { |
780 | if(t != mutex->hp_waiter) { | 849 | if(blocked_lock->ops->propagate_increase_inheritance) { |
781 | if(edf_higher_prio(t, mutex->hp_waiter)) { | 850 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); |
782 | mutex->hp_waiter = t; | ||
783 | |||
784 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter by propagation.\n"); | ||
785 | } | ||
786 | else { | ||
787 | goto EXIT; // HP waiter has greater prio than us. bail out. | ||
788 | } | ||
789 | } | ||
790 | 851 | ||
791 | if(edf_higher_prio(mutex->hp_waiter, mutex->owner)) { | 852 | // beware: recursion |
792 | struct task_struct* prio = (tsk_rt(t)->eff_prio) ? tsk_rt(t)->eff_prio : t; | 853 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, t, to_unlock, irqflags); |
793 | 854 | } | |
794 | TRACE_TASK(mutex->hp_waiter, "Propagating inheritance to holder of %p.\n", l); | 855 | else { |
795 | 856 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); | |
796 | nested_increase_priority_inheritance(mutex->owner, prio); | 857 | raw_spin_unlock_irqrestore(to_unlock, irqflags); |
797 | } | 858 | } |
798 | } | 859 | } |
799 | 860 | else { | |
800 | EXIT: | 861 | TRACE_TASK(t, "is not blocked. No propagation.\n"); |
801 | 862 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | |
802 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 863 | } |
803 | } | 864 | } |
804 | 865 | ||
805 | void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | 866 | /* called with IRQs off */ |
806 | struct task_struct* t) | 867 | /* preconditions: |
868 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
869 | (2) The lock 'to_unlock' is held. | ||
870 | */ | ||
871 | static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | ||
807 | { | 872 | { |
808 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 873 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
809 | unsigned long flags; | 874 | decrease_priority_inheritance(t, prio_inh); |
810 | 875 | ||
811 | spin_lock_irqsave(&mutex->wait.lock, flags); | 876 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
812 | 877 | ||
813 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 878 | if(blocked_lock) { |
814 | if(t == mutex->hp_waiter) { | 879 | if(blocked_lock->ops->propagate_decrease_inheritance) { |
815 | struct task_struct* prio; | 880 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); |
816 | |||
817 | TRACE_TASK(t, "was highest-prio waiter\n"); | ||
818 | /* next has the highest priority --- it doesn't need to | ||
819 | * inherit. However, we need to make sure that the | ||
820 | * next-highest priority in the queue is reflected in | ||
821 | * hp_waiter. */ | ||
822 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); | ||
823 | |||
824 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | ||
825 | |||
826 | TRACE_TASK(mutex->hp_waiter, "Propagating inheritance to holder of %p.\n", l); | ||
827 | 881 | ||
828 | 882 | // beware: recursion | |
829 | // lower eff_prio of owner to new hp if needed. | 883 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, to_unlock, irqflags); |
830 | if(t == mutex->owner->eff_prio) | 884 | } |
831 | { | 885 | else { |
832 | 886 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); | |
833 | } | 887 | raw_spin_unlock_irqrestore(to_unlock, irqflags); |
834 | |||
835 | |||
836 | nested_increase_priority_inheritance(mutex->owner, prio); | ||
837 | } | 888 | } |
838 | } | 889 | } |
839 | 890 | else { | |
840 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 891 | TRACE_TASK(t, "is not blocked. No propagation.\n"); |
892 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | ||
893 | } | ||
841 | } | 894 | } |
842 | 895 | ||
843 | 896 | ||
844 | |||
845 | |||
846 | |||
847 | |||
848 | /* ******************** RSM MUTEX ********************** */ | 897 | /* ******************** RSM MUTEX ********************** */ |
849 | 898 | ||
850 | /* struct for semaphore with priority inheritance */ | 899 | /* struct for semaphore with priority inheritance */ |
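Note: `nested_increase/decrease_priority_inheritance()` together with the propagate callbacks walk the blocking chain relay-style ("hand-over-hand"): each step acquires the next lock before dropping the one it was handed, so at most two of the fine-grained locks are held at any moment while the inheritance update travels from lock to lock. The user-space toy below reproduces only that traversal pattern, with pthread mutexes standing in for raw spinlocks and all names invented for the example.

```c
#include <pthread.h>
#include <stdio.h>

/* Toy model of relay-style propagation: each node's lock is taken before
 * the previous node's lock is dropped, so at most two locks are held at
 * any point while walking the chain. */
struct chain_node {
    pthread_mutex_t    lock;
    int                prio;   /* stand-in for the inherited priority */
    struct chain_node *next;   /* next hop in the blocking chain */
};

static void propagate_increase(struct chain_node *n, int new_prio,
                               pthread_mutex_t *to_unlock)
{
    pthread_mutex_lock(&n->lock);     /* take the next hop... */
    pthread_mutex_unlock(to_unlock);  /* ...then release the previous one */

    if (new_prio > n->prio)
        n->prio = new_prio;           /* apply the inheritance update */

    if (n->next)
        propagate_increase(n->next, new_prio, &n->lock);  /* beware: recursion */
    else
        pthread_mutex_unlock(&n->lock);
}

int main(void)
{
    struct chain_node c = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };
    struct chain_node b = { PTHREAD_MUTEX_INITIALIZER, 2, &c };
    struct chain_node a = { PTHREAD_MUTEX_INITIALIZER, 3, &b };
    pthread_mutex_t entry = PTHREAD_MUTEX_INITIALIZER;

    pthread_mutex_lock(&entry);
    propagate_increase(&a, 7, &entry);              /* entry lock released inside */
    printf("%d %d %d\n", a.prio, b.prio, c.prio);   /* prints: 7 7 7 */
    return 0;
}
```

Built with `-pthread`, this prints `7 7 7`: the update reaches the end of the chain even though the caller's entry lock is already released after the first hop.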
@@ -858,7 +907,11 @@ struct rsm_mutex { | |||
858 | struct task_struct *hp_waiter; | 907 | struct task_struct *hp_waiter; |
859 | 908 | ||
860 | /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ | 909 | /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ |
861 | wait_queue_head_t wait; | 910 | wait_queue_head_t wait; |
911 | |||
912 | /* we do some nesting within spinlocks, so we can't use the normal | ||
913 | sleeplocks found in wait_queue_head_t. */ | ||
914 | raw_spinlock_t lock; | ||
862 | }; | 915 | }; |
863 | 916 | ||
864 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) | 917 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) |
@@ -884,30 +937,38 @@ struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, | |||
884 | return found; | 937 | return found; |
885 | } | 938 | } |
886 | 939 | ||
887 | 940 | static inline struct task_struct* top_priority(struct binheap_handle* handle) { | |
888 | 941 | if(!binheap_empty(handle)) { | |
942 | return (struct task_struct*)(binheap_top_entry(handle, struct litmus_lock, hp_binheap_node)->hp_waiter_eff_prio); | ||
943 | } | ||
944 | return NULL; | ||
945 | } | ||
889 | 946 | ||
890 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | 947 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) |
891 | { | 948 | { |
892 | struct task_struct* t = current; | 949 | struct task_struct *t = current; |
950 | struct task_struct *owner; | ||
893 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 951 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); |
894 | wait_queue_t wait; | 952 | wait_queue_t wait; |
895 | unsigned long flags; | 953 | unsigned long flags; |
896 | 954 | ||
897 | if (!is_realtime(t)) | 955 | if (!is_realtime(t)) |
898 | return -EPERM; | 956 | return -EPERM; |
899 | 957 | ||
900 | spin_lock_irqsave(&mutex->wait.lock, flags); | 958 | raw_spin_lock_irqsave(&mutex->lock, flags); |
901 | 959 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | |
960 | |||
902 | if (mutex->owner) { | 961 | if (mutex->owner) { |
962 | TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); | ||
963 | |||
903 | /* resource is not free => must suspend and wait */ | 964 | /* resource is not free => must suspend and wait */ |
965 | |||
966 | owner = mutex->owner; | ||
904 | 967 | ||
905 | init_waitqueue_entry(&wait, t); | 968 | init_waitqueue_entry(&wait, t); |
906 | 969 | ||
907 | // TODO: inheritance propagation from another thread may not finish | ||
908 | // before I check local inheritance... | ||
909 | tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ | 970 | tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ |
910 | mb(); | 971 | mb(); // needed? |
911 | 972 | ||
912 | /* FIXME: interruptible would be nice some day */ | 973 | /* FIXME: interruptible would be nice some day */ |
913 | set_task_state(t, TASK_UNINTERRUPTIBLE); | 974 | set_task_state(t, TASK_UNINTERRUPTIBLE); |
@@ -916,17 +977,67 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | |||
916 | 977 | ||
917 | /* check if we need to activate priority inheritance */ | 978 | /* check if we need to activate priority inheritance */ |
918 | if (edf_higher_prio(t, mutex->hp_waiter)) { | 979 | if (edf_higher_prio(t, mutex->hp_waiter)) { |
980 | |||
981 | struct task_struct *old_max_eff_prio; | ||
982 | struct task_struct *new_max_eff_prio; | ||
983 | struct task_struct *new_prio = NULL; | ||
984 | |||
985 | if(mutex->hp_waiter) | ||
986 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
987 | else | ||
988 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
989 | |||
990 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
991 | |||
992 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
993 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
994 | |||
995 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
996 | |||
919 | mutex->hp_waiter = t; | 997 | mutex->hp_waiter = t; |
920 | if (edf_higher_prio(t, mutex->owner)) { | 998 | l->hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); |
921 | struct task_struct* prio = (tsk_rt(t)->eff_prio) ? tsk_rt(t)->eff_prio : t; | 999 | |
922 | nested_increase_priority_inheritance(mutex->owner, prio); | 1000 | binheap_decrease(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); |
1001 | |||
1002 | //TRACE_TASK(owner, "Heap After:\n"); | ||
1003 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1004 | |||
1005 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1006 | |||
1007 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1008 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
1009 | |||
1010 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1011 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))){ | ||
1012 | new_prio = new_max_eff_prio; | ||
1013 | } | ||
1014 | } | ||
1015 | else { | ||
1016 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
1017 | } | ||
1018 | |||
1019 | //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1020 | |||
1021 | if(new_prio) { | ||
1022 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | ||
923 | } | 1023 | } |
1024 | else { | ||
1025 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1026 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
1027 | } | ||
1028 | |||
1029 | } | ||
1030 | else { | ||
1031 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
1032 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
924 | } | 1033 | } |
925 | 1034 | ||
1035 | |||
926 | TS_LOCK_SUSPEND; | 1036 | TS_LOCK_SUSPEND; |
927 | 1037 | ||
928 | /* release lock before sleeping */ | 1038 | /* release lock before sleeping */ |
929 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1039 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); |
1040 | //raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
930 | 1041 | ||
931 | /* We depend on the FIFO order. Thus, we don't need to recheck | 1042 | /* We depend on the FIFO order. Thus, we don't need to recheck |
932 | * when we wake up; we are guaranteed to have the lock since | 1043 | * when we wake up; we are guaranteed to have the lock since |
@@ -934,44 +1045,99 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | |||
934 | */ | 1045 | */ |
935 | 1046 | ||
936 | schedule(); | 1047 | schedule(); |
937 | 1048 | ||
938 | TS_LOCK_RESUME; | 1049 | TS_LOCK_RESUME; |
939 | 1050 | ||
940 | /* Since we hold the lock, no other task will change | 1051 | /* Since we hold the lock, no other task will change |
941 | * ->owner. We can thus check it without acquiring the spin | 1052 | * ->owner. We can thus check it without acquiring the spin |
942 | * lock. */ | 1053 | * lock. */ |
943 | BUG_ON(mutex->owner != t); | 1054 | BUG_ON(mutex->owner != t); |
1055 | |||
1056 | TRACE_TASK(t, "Acquired lock %d.\n", l->ident); | ||
1057 | |||
944 | } else { | 1058 | } else { |
1059 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
1060 | |||
945 | /* it's ours now */ | 1061 | /* it's ours now */ |
946 | mutex->owner = t; | 1062 | mutex->owner = t; |
947 | |||
948 | nest_lock(l, t); | ||
949 | 1063 | ||
950 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1064 | raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); |
1065 | binheap_add(&l->hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node); | ||
1066 | raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
1067 | |||
1068 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
1069 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | ||
951 | } | 1070 | } |
952 | 1071 | ||
953 | return 0; | 1072 | return 0; |
954 | } | 1073 | } |
955 | 1074 | ||
1075 | |||
956 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | 1076 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) |
957 | { | 1077 | { |
958 | struct task_struct *t = current, *next; | 1078 | struct task_struct *t = current, *next; |
959 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 1079 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); |
960 | unsigned long flags; | 1080 | unsigned long flags; |
1081 | |||
1082 | struct task_struct *old_max_eff_prio; | ||
1083 | |||
1084 | |||
961 | int err = 0; | 1085 | int err = 0; |
962 | 1086 | ||
963 | spin_lock_irqsave(&mutex->wait.lock, flags); | 1087 | raw_spin_lock_irqsave(&mutex->lock, flags); |
1088 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | ||
1089 | |||
964 | 1090 | ||
965 | if (mutex->owner != t) { | 1091 | if (mutex->owner != t) { |
966 | err = -EINVAL; | 1092 | err = -EINVAL; |
967 | goto out; | 1093 | goto out; |
968 | } | 1094 | } |
969 | 1095 | ||
970 | /* we lose the benefit of priority inheritance (if any) */ | 1096 | |
971 | if (tsk_rt(t)->local_prio) { | 1097 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); |
972 | nested_decrease_priority_inheritance(t, NULL); | 1098 | |
1099 | TRACE_TASK(t, "Freeing lock %d\n", l->ident); | ||
1100 | //TRACE_TASK(t, "Heap Before:\n"); | ||
1101 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1102 | |||
1103 | //old_max_hp_waiter = *(binheap_top_entry(&tsk_rt(t)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node)->hp_waiter_ptr); | ||
1104 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1105 | binheap_delete(&l->hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); | ||
1106 | |||
1107 | //raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1108 | |||
1109 | //TRACE_TASK(t, "Heap After:\n"); | ||
1110 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1111 | |||
1112 | if(tsk_rt(t)->inh_task){ | ||
1113 | struct task_struct *new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1114 | |||
1115 | if((new_max_eff_prio == NULL) || | ||
1116 | ( (new_max_eff_prio != old_max_eff_prio) /* there was a change in eff prio */ && | ||
1117 | (effective_priority(t) == old_max_eff_prio) /* and owner had the old eff prio */) ) | ||
1118 | { | ||
1119 | // old_max_eff_prio > new_max_eff_prio | ||
1120 | |||
1121 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, INHERITED)) { | ||
1122 | TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", | ||
1123 | new_max_eff_prio->comm, new_max_eff_prio->pid, | ||
1124 | t->comm, t->pid, tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1125 | WARN_ON(1); | ||
1126 | } | ||
1127 | |||
1128 | decrease_priority_inheritance(t, new_max_eff_prio); | ||
1129 | } | ||
973 | } | 1130 | } |
974 | 1131 | ||
1132 | if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && tsk_rt(t)->inh_task != NULL) | ||
1133 | { | ||
1134 | WARN_ON(tsk_rt(t)->inh_task != NULL); | ||
1135 | TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", | ||
1136 | tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1137 | } | ||
1138 | |||
1139 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1140 | |||
975 | 1141 | ||
976 | /* check if there are jobs waiting for this resource */ | 1142 | /* check if there are jobs waiting for this resource */ |
977 | next = __waitqueue_remove_first(&mutex->wait); | 1143 | next = __waitqueue_remove_first(&mutex->wait); |
@@ -980,28 +1146,70 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | |||
980 | mutex->owner = next; | 1146 | mutex->owner = next; |
981 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | 1147 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); |
982 | 1148 | ||
1149 | |||
983 | tsk_rt(next)->blocked_lock = NULL; | 1150 | tsk_rt(next)->blocked_lock = NULL; |
984 | nest_lock(l, next); // moves local_prio to trans_prio | 1151 | |
985 | 1152 | ||
986 | /* determine new hp_waiter if necessary */ | 1153 | /* determine new hp_waiter if necessary */ |
987 | if (next == mutex->hp_waiter) { | 1154 | if (next == mutex->hp_waiter) { |
1155 | |||
988 | TRACE_TASK(next, "was highest-prio waiter\n"); | 1156 | TRACE_TASK(next, "was highest-prio waiter\n"); |
989 | /* next has the highest priority --- it doesn't need to | 1157 | /* next has the highest priority --- it doesn't need to |
990 | * inherit. However, we need to make sure that the | 1158 | * inherit. However, we need to make sure that the |
991 | * next-highest priority in the queue is reflected in | 1159 | * next-highest priority in the queue is reflected in |
992 | * hp_waiter. */ | 1160 | * hp_waiter. */ |
993 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); | 1161 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); |
1162 | l->hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1163 | |||
994 | if (mutex->hp_waiter) | 1164 | if (mutex->hp_waiter) |
995 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | 1165 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); |
996 | else | 1166 | else |
997 | TRACE("no further waiters\n"); | 1167 | TRACE("no further waiters\n"); |
1168 | |||
1169 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1170 | |||
1171 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1172 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1173 | |||
1174 | binheap_add(&l->hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node); | ||
1175 | |||
1176 | //TRACE_TASK(next, "Heap After:\n"); | ||
1177 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1178 | |||
1179 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
998 | } else { | 1180 | } else { |
999 | /* Well, if next is not the highest-priority waiter, | 1181 | /* Well, if 'next' is not the highest-priority waiter, |
1000 | * then it ought to inherit the highest-priority | 1182 | * then it (probably) ought to inherit the highest-priority |
1001 | * waiter's priority. */ | 1183 | * waiter's priority. */ |
1002 | increase_priority_inheritance(next, mutex->hp_waiter); | 1184 | TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); |
1185 | |||
1186 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1187 | |||
1188 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1189 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1190 | |||
1191 | binheap_add(&l->hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, | ||
1192 | struct litmus_lock, hp_binheap_node); | ||
1193 | |||
1194 | //TRACE_TASK(next, "Heap After:\n"); | ||
1195 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1196 | |||
1197 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
1198 | * because that update hasn't yet executed (update operation is | ||
1199 | * probably blocked on mutex->lock). So only inherit if the top of | ||
1200 | * 'next's heap is indeed the effective priority of hp_waiter. | ||
1201 | * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) | ||
1202 | * since the effective priority of hp_waiter can change (and the | ||
1203 | * update has not made it to this lock).) | ||
1204 | */ | ||
1205 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->hp_waiter_eff_prio)) | ||
1206 | { | ||
1207 | increase_priority_inheritance(next, l->hp_waiter_eff_prio); | ||
1208 | } | ||
1209 | |||
1210 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1003 | } | 1211 | } |
1004 | 1212 | ||
1005 | /* wake up next */ | 1213 | /* wake up next */ |
1006 | wake_up_process(next); | 1214 | wake_up_process(next); |
1007 | } | 1215 | } |
@@ -1011,11 +1219,178 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | |||
1011 | } | 1219 | } |
1012 | 1220 | ||
1013 | out: | 1221 | out: |
1014 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1222 | raw_spin_unlock_irqrestore(&mutex->lock, flags); |
1223 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | ||
1015 | 1224 | ||
1016 | return err; | 1225 | return err; |
1017 | } | 1226 | } |
1018 | 1227 | ||
1228 | |||
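The unlock path above re-attaches the lock's hp_binheap_node to the new owner's hp_blocked_tasks heap and then inherits from the lock's cached hp_waiter_eff_prio only if that cached value is still the maximum across everything the new owner holds. Below is a minimal userspace sketch of that guard; all names are hypothetical, integer priorities stand in for EDF deadlines, and a linear scan stands in for the binomial heap.

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for task_struct / litmus_lock state. */
    struct task { int base_prio; struct task *inh_from; };   /* larger = higher priority */
    struct lock { struct task *owner; struct task *cached_hp_waiter_prio; };

    #define MAX_HELD 8
    struct held_set { struct lock *locks[MAX_HELD]; int n; };

    /* Highest cached waiter priority among all locks a task holds
     * (models top_priority(&tsk_rt(t)->hp_blocked_tasks)). */
    static struct task *top_priority(struct held_set *h)
    {
        struct task *top = NULL;
        int i;
        for (i = 0; i < h->n; i++) {
            struct task *w = h->locks[i]->cached_hp_waiter_prio;
            if (w && (!top || w->base_prio > top->base_prio))
                top = w;
        }
        return top;
    }

    /* On unlock, the new owner adds the lock to its held set and inherits the
     * lock's cached hp-waiter priority only if that cached value is still the
     * maximum over everything it holds -- mirroring the
     * top_priority(...) == l->hp_waiter_eff_prio check in the patch. */
    static void grant_lock(struct held_set *next_held, struct lock *l, struct task *next)
    {
        l->owner = next;
        next_held->locks[next_held->n++] = l;

        if (top_priority(next_held) == l->cached_hp_waiter_prio)
            next->inh_from = l->cached_hp_waiter_prio;
        else
            printf("cached hp_waiter priority is stale; skip inheritance for now\n");
    }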
1229 | void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | ||
1230 | struct task_struct* t, | ||
1231 | raw_spinlock_t* to_unlock, | ||
1232 | unsigned long irqflags) | ||
1233 | { | ||
1234 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1235 | |||
1236 | // relay-style locking | ||
1237 | raw_spin_lock(&mutex->lock); | ||
1238 | raw_spin_unlock(to_unlock); | ||
1239 | |||
1240 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked_lock | ||
1241 | struct task_struct *owner = mutex->owner; | ||
1242 | |||
1243 | struct task_struct *old_max_eff_prio; | ||
1244 | struct task_struct *new_max_eff_prio; | ||
1245 | |||
1246 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1247 | |||
1248 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1249 | |||
1250 | if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { | ||
1251 | TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); | ||
1252 | mutex->hp_waiter = t; | ||
1253 | } | ||
1254 | if(t == mutex->hp_waiter) { | ||
1255 | // reflect the raised priority of the new hp_waiter in the owner's heap node. | ||
1256 | l->hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1257 | binheap_decrease(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1258 | } | ||
1259 | |||
1260 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1261 | |||
1262 | |||
1263 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1264 | // new_max_eff_prio > old_max_eff_prio holds. | ||
1265 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1266 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, INHERITED))) { | ||
1267 | |||
1268 | TRACE_TASK(new_max_eff_prio, "Propagating inheritance to holder of lock %d.\n", l->ident); | ||
1269 | |||
1270 | // beware: recursion | ||
1271 | nested_increase_priority_inheritance(owner, new_max_eff_prio, &mutex->lock, irqflags); // unlocks mutex->lock | ||
1272 | } | ||
1273 | else { | ||
1274 | TRACE_TASK(new_max_eff_prio, "Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid); | ||
1275 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1276 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1277 | } | ||
1278 | } | ||
1279 | else { | ||
1280 | TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n"); | ||
1281 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1282 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1283 | } | ||
1284 | } | ||
1285 | else { | ||
1286 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1287 | |||
1288 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1289 | if(still_blocked) { | ||
1290 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1291 | if(still_blocked->ops->propagate_increase_inheritance) { | ||
1292 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1293 | we know that task 't' has not released any locks behind us in this | ||
1294 | chain. Propagation just needs to catch up with task 't'. */ | ||
1295 | still_blocked->ops->propagate_increase_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1296 | } | ||
1297 | else { | ||
1298 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1299 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1300 | } | ||
1301 | } | ||
1302 | else { | ||
1303 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1304 | } | ||
1305 | } | ||
1306 | } | ||
1307 | |||
1308 | |||
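Both propagate_* callbacks rely on the relay-style (hand-over-hand) locking noted in the comments: take the next lock in the blocking chain before dropping the one passed in via to_unlock, so no task in the chain can release a lock "behind" the walker. A minimal userspace sketch of the pattern, using pthread mutexes and hypothetical names rather than the kernel's raw_spinlock_t:

    #include <pthread.h>
    #include <stddef.h>

    /* One hop in a blocking chain: a lock plus a pointer to the next lock
     * that the current holder is itself blocked on (if any). */
    struct hop {
        pthread_mutex_t lock;
        struct hop *next;
        int inherited_prio;    /* stand-in for the state being propagated */
    };

    /* Walk the chain hand-over-hand: acquire the next hop's lock before
     * releasing the current one, so the chain cannot change behind us. */
    static void propagate(struct hop *h, int new_prio)
    {
        pthread_mutex_lock(&h->lock);
        while (h) {
            struct hop *nxt = h->next;

            h->inherited_prio = new_prio;         /* update this hop while it is locked */

            if (nxt)
                pthread_mutex_lock(&nxt->lock);   /* acquire B ... */
            pthread_mutex_unlock(&h->lock);       /* ... then release A */
            h = nxt;
        }
    }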
1309 | void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
1310 | struct task_struct* t, | ||
1311 | raw_spinlock_t* to_unlock, | ||
1312 | unsigned long irqflags) | ||
1313 | { | ||
1314 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1315 | |||
1316 | // relay-style locking | ||
1317 | raw_spin_lock(&mutex->lock); | ||
1318 | raw_spin_unlock(to_unlock); | ||
1319 | |||
1320 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked_lock | ||
1321 | if(t == mutex->hp_waiter) { | ||
1322 | struct task_struct *owner = mutex->owner; | ||
1323 | |||
1324 | struct task_struct *old_max_eff_prio; | ||
1325 | struct task_struct *new_max_eff_prio; | ||
1326 | |||
1327 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1328 | |||
1329 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1330 | |||
1331 | binheap_delete(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1332 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); | ||
1333 | l->hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1334 | binheap_add(&l->hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct litmus_lock, hp_binheap_node); | ||
1335 | |||
1336 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1337 | |||
1338 | if((old_max_eff_prio != new_max_eff_prio) && | ||
1339 | (effective_priority(owner) == old_max_eff_prio)) | ||
1340 | { | ||
1341 | // Need to set new effective_priority for owner | ||
1342 | |||
1343 | struct task_struct *decreased_prio; | ||
1344 | |||
1345 | TRACE_TASK(new_max_eff_prio, "Propagating decreased inheritance to holder of lock %d.\n", l->ident); | ||
1346 | |||
1347 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
1348 | TRACE_TASK(new_max_eff_prio, "has greater base priority than base priority of owner of lock %d.\n", l->ident); | ||
1349 | decreased_prio = new_max_eff_prio; | ||
1350 | } | ||
1351 | else { | ||
1352 | TRACE_TASK(new_max_eff_prio, "has lesser base priority than base priority of owner of lock %d.\n", l->ident); | ||
1353 | decreased_prio = NULL; | ||
1354 | } | ||
1355 | |||
1356 | // beware: recursion | ||
1357 | nested_decrease_priority_inheritance(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock | ||
1358 | } | ||
1359 | else { | ||
1360 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1361 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1362 | } | ||
1363 | } | ||
1364 | else { | ||
1365 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); | ||
1366 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1367 | } | ||
1368 | } | ||
1369 | else { | ||
1370 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1371 | |||
1372 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1373 | if(still_blocked) { | ||
1374 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1375 | if(still_blocked->ops->propagate_decrease_inheritance) { | ||
1376 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1377 | we know that task 't' has not released any locks behind us in this | ||
1378 | chain. Propagation just needs to catch up with task 't'. */ | ||
1379 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1380 | } | ||
1381 | else { | ||
1382 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1383 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1384 | } | ||
1385 | } | ||
1386 | else { | ||
1387 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1388 | } | ||
1389 | } | ||
1390 | } | ||
1391 | |||
1392 | |||
1393 | |||
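After the decrease path deletes and re-adds the lock's node in the owner's heap, the owner's inheritance changes only when the old heap top differs from the new top and was the source of the owner's current effective priority; the replacement is the new top if it still beats the owner's base priority, otherwise the owner reverts to its base. A compact sketch of just that decision, with hypothetical types and integer priorities standing in for __edf_higher_prio():

    /* Hypothetical stand-in for a task priority; larger value = higher priority. */
    struct prio_task { int prio; };

    enum inh_change { NO_CHANGE, SET_INHERITANCE, CLEAR_INHERITANCE };

    static enum inh_change
    pick_new_inheritance(const struct prio_task *old_top,
                         const struct prio_task *new_top,
                         const struct prio_task *owner_base,
                         const struct prio_task *owner_eff,
                         const struct prio_task **inherit_from)
    {
        /* Only act if the heap top changed AND the owner's current effective
         * priority actually came from the old top (mirrors the
         * old_max_eff_prio != new_max_eff_prio &&
         * effective_priority(owner) == old_max_eff_prio test above). */
        if (old_top == new_top || owner_eff != old_top)
            return NO_CHANGE;

        if (new_top && new_top->prio > owner_base->prio) {
            *inherit_from = new_top;    /* keep inheriting, but from the new top */
            return SET_INHERITANCE;
        }
        return CLEAR_INHERITANCE;       /* revert to the owner's base priority */
    }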
1019 | int gsnedf_rsm_mutex_close(struct litmus_lock* l) | 1394 | int gsnedf_rsm_mutex_close(struct litmus_lock* l) |
1020 | { | 1395 | { |
1021 | struct task_struct *t = current; | 1396 | struct task_struct *t = current; |
@@ -1024,11 +1399,13 @@ int gsnedf_rsm_mutex_close(struct litmus_lock* l) | |||
1024 | 1399 | ||
1025 | int owner; | 1400 | int owner; |
1026 | 1401 | ||
1027 | spin_lock_irqsave(&mutex->wait.lock, flags); | 1402 | raw_spin_lock_irqsave(&mutex->lock, flags); |
1403 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | ||
1028 | 1404 | ||
1029 | owner = (mutex->owner == t); | 1405 | owner = (mutex->owner == t); |
1030 | 1406 | ||
1031 | spin_unlock_irqrestore(&mutex->wait.lock, flags); | 1407 | raw_spin_unlock_irqrestore(&mutex->lock, flags); |
1408 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | ||
1032 | 1409 | ||
1033 | if (owner) | 1410 | if (owner) |
1034 | gsnedf_rsm_mutex_unlock(l); | 1411 | gsnedf_rsm_mutex_unlock(l); |
@@ -1061,7 +1438,19 @@ static struct litmus_lock* gsnedf_new_rsm_mutex(void) | |||
1061 | mutex->owner = NULL; | 1438 | mutex->owner = NULL; |
1062 | mutex->hp_waiter = NULL; | 1439 | mutex->hp_waiter = NULL; |
1063 | init_waitqueue_head(&mutex->wait); | 1440 | init_waitqueue_head(&mutex->wait); |
1441 | |||
1442 | |||
1443 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
1444 | { | ||
1445 | __raw_spin_lock_init(&mutex->lock, ((struct litmus_lock*)mutex)->cheat_lockdep, &((struct litmus_lock*)mutex)->key); | ||
1446 | } | ||
1447 | #else | ||
1448 | raw_spin_lock_init(&mutex->lock); | ||
1449 | #endif | ||
1450 | |||
1064 | mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops; | 1451 | mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops; |
1452 | |||
1453 | ((struct litmus_lock*)mutex)->hp_waiter_ptr = &mutex->hp_waiter; | ||
1065 | 1454 | ||
1066 | return &mutex->litmus_lock; | 1455 | return &mutex->litmus_lock; |
1067 | } | 1456 | } |
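The CONFIG_DEBUG_SPINLOCK branch above bypasses the stock raw_spin_lock_init() macro, which in kernels of this vintage uses one static lock_class_key per call site and therefore puts every rsm_mutex's internal spinlock in a single lockdep class. Under the relay-style pattern, holding one mutex->lock while acquiring another of the same class would be reported as recursive locking; passing the per-instance name and key stored in struct litmus_lock gives each mutex its own class instead. The stock-macro sketch below is paraphrased from include/linux/spinlock.h of that era and is a reference snippet, not a standalone program; treat the details as approximate.

    /* Approximate stock definition under CONFIG_DEBUG_SPINLOCK: one static key
     * per call site, i.e. one lockdep class shared by every lock initialized here. */
    #define raw_spin_lock_init(lock)                          \
    do {                                                      \
            static struct lock_class_key __key;               \
            __raw_spin_lock_init((lock), #lock, &__key);      \
    } while (0)

    /* The patch instead passes a per-instance name and key (fields added to
     * struct litmus_lock), so each rsm_mutex gets its own lockdep class: */
    __raw_spin_lock_init(&mutex->lock,
                         ((struct litmus_lock*)mutex)->cheat_lockdep,
                         &((struct litmus_lock*)mutex)->key);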
@@ -1223,7 +1612,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) | |||
1223 | sem->owner = NULL; | 1612 | sem->owner = NULL; |
1224 | 1613 | ||
1225 | /* we lose the benefit of priority inheritance (if any) */ | 1614 | /* we lose the benefit of priority inheritance (if any) */ |
1226 | if (tsk_rt(t)->eff_prio) | 1615 | if (tsk_rt(t)->inh_task) |
1227 | decrease_priority_inheritance(t, NULL); | 1616 | decrease_priority_inheritance(t, NULL); |
1228 | 1617 | ||
1229 | out: | 1618 | out: |