-rw-r--r--  include/litmus/bheap.h   |  11
-rw-r--r--  include/litmus/budget.h  |   4
-rw-r--r--  litmus/bheap.c           |  20
-rw-r--r--  litmus/fifo_lock.c       |  13
-rw-r--r--  litmus/sched_cedf.c      | 187
5 files changed, 147 insertions, 88 deletions
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
index 4fded5724b28..72dec0cc0240 100644
--- a/include/litmus/bheap.h
+++ b/include/litmus/bheap.h
@@ -24,8 +24,6 @@ struct bheap {
  * This speeds up repeated peek operations.
  */
 	struct bheap_node* min;
-
-//	unsigned int size;
 };
 
 typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b);
@@ -43,14 +41,9 @@ static inline int bheap_empty(struct bheap* heap)
 	return heap->head == NULL && heap->min == NULL;
 }
 
-//static inline unsigned int bheap_size(struct bheap* heap)
-//{
-//	return heap->size;
-//}
-
-typedef void (*bheap_for_all_t)(struct bheap_node* node, void* args);
+typedef void (*bheap_for_each_t)(struct bheap_node* node, void* args);
 
-void bheap_for_all(struct bheap* heap, bheap_for_all_t fn, void* args);
+void bheap_for_each(struct bheap* heap, bheap_for_each_t fn, void* args);
 
 /* insert (and reinitialize) a node into the heap */
 void bheap_insert(bheap_prio_t higher_prio,
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 08d5e0970d1d..bcdbf3d82f7b 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -80,12 +80,8 @@ void simple_on_exit(struct task_struct* t);
  *
  * Limitation: Quantum budget tracking is unsupported.
  */
-//void sobliv_on_scheduled(struct task_struct* t);
 void sobliv_on_blocked(struct task_struct* t);
 void sobliv_on_wakeup(struct task_struct* t);
-//void sobliv_on_sleep(struct task_struct* t);
-//void sobliv_on_preempt(struct task_struct* t);
-/* Use the DRAIN_SIMPLE implementations */
 #define sobliv_on_exit simple_on_exit
 void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh);
 void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh);
diff --git a/litmus/bheap.c b/litmus/bheap.c
index 403c09cc9e81..c69d75c28aaf 100644
--- a/litmus/bheap.c
+++ b/litmus/bheap.c
@@ -5,8 +5,6 @@ void bheap_init(struct bheap* heap)
 {
 	heap->head = NULL;
 	heap->min = NULL;
-
-//	heap->size = 0;
 }
 
 void bheap_node_init(struct bheap_node** _h, void* value)
@@ -21,19 +19,19 @@ void bheap_node_init(struct bheap_node** _h, void* value)
 }
 
 
-static void __bheap_for_all(struct bheap_node *h, bheap_for_all_t fn, void* args)
+static void __bheap_for_each(struct bheap_node *h, bheap_for_each_t fn, void* args)
 {
 	/* pre-order */
 	fn(h, args);
 
 	/* depth-first */
 	if (h->child)
-		__bheap_for_all(h->child, fn, args);
+		__bheap_for_each(h->child, fn, args);
 	if (h->next)
-		__bheap_for_all(h->next, fn, args);
+		__bheap_for_each(h->next, fn, args);
 }
 
-void bheap_for_all(struct bheap* heap, bheap_for_all_t fn, void* args)
+void bheap_for_each(struct bheap* heap, bheap_for_each_t fn, void* args)
 {
 	struct bheap_node *head;
 
@@ -41,7 +39,7 @@ void bheap_for_all(struct bheap* heap, bheap_for_all_t fn, void* args)
 	BUG_ON(!fn);
 
 	head = heap->head;
-	__bheap_for_all(head, fn, args);
+	__bheap_for_each(head, fn, args);
 }
 
 /* make child a subtree of root */
@@ -198,8 +196,6 @@ void bheap_insert(bheap_prio_t higher_prio, struct bheap* heap,
 		heap->min = node;
 	} else
 		__bheap_union(higher_prio, heap, node);
-
-//	++heap->size;
 }
 
 void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap)
@@ -222,8 +218,6 @@ void bheap_union(bheap_prio_t higher_prio,
 	__bheap_union(higher_prio, target, addition->head);
 	/* this is a destructive merge */
 	addition->head = NULL;
-
-//	target->size += addition->size;
 }
 
 struct bheap_node* bheap_peek(bheap_prio_t higher_prio,
@@ -245,8 +239,6 @@ struct bheap_node* bheap_take(bheap_prio_t higher_prio,
 	if (node)
 		node->degree = NOT_IN_HEAP;
 
-//	--heap->size;
-
 	return node;
 }
 
@@ -320,8 +312,6 @@ void bheap_delete(bheap_prio_t higher_prio, struct bheap* heap,
 		heap->min = NULL;
 
 	node->degree = NOT_IN_HEAP;
-
-//	--heap->size;
 }
 
 /* allocate a heap node for value and insert into the heap */
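
Note on the renamed traversal API: a caller supplies a bheap_for_each_t callback plus an opaque args pointer, and the callback is invoked once per node in pre-order (sched_cedf.c below uses this in cedf_release_jobs()). A minimal sketch of the calling convention, assuming only the patched <litmus/bheap.h>; bheap_count() and count_node() are illustrative names, not part of this patch:

/* Illustrative only -- not part of this patch: count the nodes of a heap
 * by folding a counter through the bheap_for_each() callback. */
static void count_node(struct bheap_node* node, void* args)
{
	++*(unsigned int*)args;
}

static unsigned int bheap_count(struct bheap* heap)
{
	unsigned int n = 0;
	if (!bheap_empty(heap))
		bheap_for_each(heap, count_node, &n);
	return n;
}
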
diff --git a/litmus/fifo_lock.c b/litmus/fifo_lock.c
index ed637044c948..e3a4420851b2 100644
--- a/litmus/fifo_lock.c
+++ b/litmus/fifo_lock.c
@@ -735,20 +735,11 @@ void fifo_mutex_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
 	struct fifo_mutex *mutex = fifo_mutex_from_lock(l);
 	unsigned long flags = 0;
 
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-	unsigned long dglirqflags;
-	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
-	lock_global_irqsave(dgl_lock, dglirqflags);
-#endif
-
+	/* DGL lock must already be held on this code path */
 	lock_fine_irqsave(&mutex->lock, flags);
 
-	// unlocks mutex->lock
+	/* unlocks mutex->lock */
 	__fifo_mutex_propagate_decrease_inheritance(&mutex->litmus_lock, t, flags, 1);
-
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-	unlock_global_irqrestore(dgl_lock, dglirqflags);
-#endif
 }
 
 
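
With this change fifo_mutex_budget_exhausted() no longer takes the DGL spinlock itself; its caller must hold it already. In this patch the actual caller (cedf_sobliv_on_exhausted() below) takes cluster->dgl_lock directly, but as a sketch of the new precondition, a hypothetical wrapper using the same helpers the removed lines used might look like this (budget_exhausted_with_dgl is an illustrative name, not code from this patch):

/* Illustrative only -- not part of this patch. */
static void budget_exhausted_with_dgl(struct litmus_lock* l, struct task_struct* t)
{
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	unsigned long dglirqflags;
	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);

	lock_global_irqsave(dgl_lock, dglirqflags);
#endif
	fifo_mutex_budget_exhausted(l, t);	/* now assumes the DGL lock is held */
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	unlock_global_irqrestore(dgl_lock, dglirqflags);
#endif
}
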
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index fc174c464a17..f6fa8a339d48 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -87,6 +87,11 @@
 #include <litmus/gpu_affinity.h>
 #endif
 
+
+/* TODO: Move this to litmus/Kconfig */
+#define RECURSIVE_READY_QUEUE_LOCK
+
+
 /* Reference configuration variable. Determines which cache level is used to
  * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
  * all CPUs form a single cluster (just like GSN-EDF).
@@ -133,8 +138,10 @@ typedef struct clusterdomain {
 	cpumask_var_t cpu_map;
 	/* the cpus queue themselves according to priority in here */
 	struct binheap cpu_heap;
-	/* lock for this cluster */
-#define cluster_lock domain.ready_lock
+#ifdef RECURSIVE_READY_QUEUE_LOCK
+	int recursive_depth;
+	atomic_t owner_cpu;
+#endif
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 	struct tasklet_head pending_tasklets;
@@ -150,6 +157,82 @@ typedef struct clusterdomain {
 
 } cedf_domain_t;
 
+
+#ifdef RECURSIVE_READY_QUEUE_LOCK
+#define lock_readyq_irqsave(cluster, flags) \
+do { \
+	if (unlikely(irqs_disabled() && atomic_read(&cluster->owner_cpu) == smp_processor_id())) { \
+		local_irq_save(flags); /* useless. makes compiler happy though */ \
+		++cluster->recursive_depth; \
+	} \
+	else { \
+		raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); \
+		atomic_set(&cluster->owner_cpu, smp_processor_id()); \
+		BUG_ON(cluster->recursive_depth != 0); \
+	} \
+}while(0)
+
+#define lock_readyq(cluster) \
+do { \
+	if (unlikely(irqs_disabled() && atomic_read(&cluster->owner_cpu) == smp_processor_id())) \
+		++cluster->recursive_depth; \
+	else { \
+		raw_spin_lock(&cluster->domain.ready_lock); \
+		atomic_set(&cluster->owner_cpu, smp_processor_id()); \
+		BUG_ON(cluster->recursive_depth != 0); \
+	} \
+}while(0)
+
+#define unlock_readyq_irqrestore(cluster, flags) \
+do { \
+	BUG_ON(!raw_spin_is_locked(&cluster->domain.ready_lock)); \
+	BUG_ON(atomic_read(&cluster->owner_cpu) != smp_processor_id()); \
+	if (unlikely(cluster->recursive_depth > 0)) { \
+		--cluster->recursive_depth; \
+		local_irq_restore(flags); /* useless. makes compiler happy though */ \
+	} \
+	else { \
+		atomic_set(&cluster->owner_cpu, NO_CPU); \
+		raw_spin_unlock_irqrestore(&cluster->domain.ready_lock, flags); \
+	} \
+}while(0)
+
+#define unlock_readyq(cluster) \
+do { \
+	BUG_ON(!raw_spin_is_locked(&cluster->domain.ready_lock)); \
+	if (unlikely(cluster->recursive_depth > 0)) { \
+		BUG_ON(atomic_read(&cluster->owner_cpu) != smp_processor_id()); \
+		--cluster->recursive_depth; \
+	} \
+	else { \
+		atomic_set(&cluster->owner_cpu, NO_CPU); \
+		raw_spin_unlock(&cluster->domain.ready_lock); \
+	} \
+}while(0)
+
+#else
+#define lock_readyq_irqsave(cluster, flags) \
+do {\
+	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); \
+}while(0)
+
+#define lock_readyq(cluster) \
+do {\
+	raw_spin_lock(&cluster->domain.ready_lock); \
+}while(0)
+
+#define unlock_readyq_irqrestore(cluster, flags) \
+do {\
+	raw_spin_unlock_irqrestore(&cluster->domain.ready_lock, flags); \
+}while(0)
+
+#define unlock_readyq(cluster) \
+do {\
+	raw_spin_unlock(&cluster->domain.ready_lock); \
+}while(0)
+#endif
+
+
 /* a cedf_domain per cluster; allocation is done at init/activation time */
 cedf_domain_t *cedf;
 
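
The macros above amount to a hand-rolled recursive spinlock: owner_cpu records which CPU currently holds domain.ready_lock, and recursive_depth counts nested acquisitions by that owner, so the budget-exhaustion path further down can re-enter the ready-queue lock without self-deadlocking. A minimal user-space analogue of the same idea, illustrative only and not LITMUS code (pthreads instead of raw_spinlock_t, a Linux thread id instead of smp_processor_id()):

/* Illustrative only -- not part of this patch. */
#include <pthread.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

#define NO_OWNER (-1L)

struct rec_lock {
	pthread_spinlock_t lock;
	atomic_long owner;	/* analogous to owner_cpu */
	int depth;		/* analogous to recursive_depth */
};

static void rec_lock_init(struct rec_lock* l)
{
	pthread_spin_init(&l->lock, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&l->owner, NO_OWNER);
	l->depth = 0;
}

static void rec_lock_acquire(struct rec_lock* l)
{
	long self = syscall(SYS_gettid);	/* Linux-specific thread id */
	if (atomic_load(&l->owner) == self) {
		++l->depth;			/* already held by us: just nest */
	} else {
		pthread_spin_lock(&l->lock);
		atomic_store(&l->owner, self);	/* depth stays 0 for the outer hold */
	}
}

static void rec_lock_release(struct rec_lock* l)
{
	if (l->depth > 0) {
		--l->depth;			/* inner release: keep the lock held */
	} else {
		atomic_store(&l->owner, NO_OWNER);
		pthread_spin_unlock(&l->lock);
	}
}

Only the current owner ever stores its own id, so a non-owner can never mistake itself for the holder; the kernel macros additionally require irqs_disabled() before trusting owner_cpu, presumably because a legitimate recursive acquisition can only happen while the outer, interrupt-disabled critical section is still running on the same CPU.
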
@@ -292,7 +375,7 @@ static void cedf_untrack_in_top_m(struct task_struct *t)
 					&cluster->top_m,
 					struct budget_tracker, top_m_node);
 		bt_flag_set(to_move, BTF_IS_TOP_M);
-		budget_state_machine(t,on_enter_top_m);
+		budget_state_machine(to_move,on_enter_top_m);
 	}
 	else {
 		--cluster->top_m_size;
@@ -561,14 +644,14 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	lock_readyq_irqsave(cluster, flags);
 
-	bheap_for_all(tasks, cedf_track_on_release, NULL);
+	bheap_for_each(tasks, cedf_track_on_release, NULL);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+	unlock_readyq_irqrestore(cluster, flags);
 }
 
 /* caller holds cluster_lock */
@@ -755,7 +838,19 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 		/* force job completion */
 		TRACE_TASK(t, "blocked, postponing deadline\n");
 
-		raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+		/* Outermost lock of the cluster. Recursive lock calls are
+		 * possible on this code path. This should be the _ONLY_
+		 * scenario where recursive calls are made. */
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+		/* Unfortunately, we _might_ need to grab the DGL lock, so we
+		 * must grab it every time since it must be taken before the
+		 * cluster lock. */
+		raw_spin_lock_irqsave(&cluster->dgl_lock, flags);
+		lock_readyq(cluster);
+#else
+		lock_readyq_irqsave(cluster, flags);
+#endif
+
 		job_completion(t, 1); /* refreshes budget and pushes out deadline */
 
 #ifdef CONFIG_LITMUS_LOCKING
@@ -792,7 +887,7 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 		}
 	}
 #endif
-		raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+//		unlock_readyq_irqrestore(cluster, flags);
 
 #ifdef CONFIG_LITMUS_LOCKING
 		/* Check our inheritance and propagate any changes forward. */
@@ -810,9 +905,17 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 
 #ifdef CONFIG_LITMUS_LOCKING
 		/* double-check that everything is okay */
-		raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+//		lock_readyq_irqsave(cluster, flags);
 		check_for_preemptions(cluster);
-		raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+//		unlock_readyq_irqrestore(cluster, flags);
+#endif
+
+		/* should be the outermost unlock call */
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+		unlock_readyq(cluster);
+		raw_spin_unlock_irqrestore(&cluster->dgl_lock, flags);
+#else
+		unlock_readyq_irqrestore(cluster, flags);
 #endif
 
 		/* we need to set up the budget timer since we're within the callback. */
@@ -890,7 +993,7 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 
 		TS_NV_SCHED_BOTISR_START;
 
-		raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+		lock_readyq_irqsave(cluster, flags);
 
 		if(cluster->pending_tasklets.head != NULL) {
 			// remove tasklet at head.
@@ -932,7 +1035,7 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
 		}
 
-		raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+		unlock_readyq_irqrestore(cluster, flags);
 
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
@@ -1040,7 +1143,7 @@ static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 
 	cluster = task_cpu_cluster(tasklet->owner);
 
-	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	lock_readyq_irqsave(cluster, flags);
 
 	thisCPU = smp_processor_id();
 
@@ -1091,7 +1194,7 @@ static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 		__add_pai_tasklet(tasklet, cluster);
 	}
 
-	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+	unlock_readyq_irqrestore(cluster, flags);
 
 
 	if (runLocal /*&& runNow */) { // runNow == 1 is implied
@@ -1128,14 +1231,14 @@ static void cedf_change_prio_pai_tasklet(struct task_struct *old_prio,
 		cluster = task_cpu_cluster(probe);
 
 		if(cluster->pending_tasklets.head != NULL) {
-			raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+			lock_readyq_irqsave(cluster, flags);
 			for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
 				if(step->owner == old_prio) {
 					TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
 					step->owner = new_prio;
 				}
 			}
-			raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+			unlock_readyq_irqrestore(cluster, flags);
 		}
 	}
 	else {
@@ -1184,7 +1287,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	}
 #endif
 
-	raw_spin_lock(&cluster->cluster_lock);
+	lock_readyq(cluster);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -1307,27 +1410,8 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 out_set_state:
 #endif
 
-//#ifdef CONFIG_LITMUS_LOCKING
-//	/* Update priority inheritance linkbacks.
-//	 * A blocked task may have multiple tasks that inherit from it, but only
-//	 * one of those tasks should be runnable. Provide a link-back between the
-//	 * blocked task and the one that inherits from it. */
-//
-//	/* TODO: Support klmirqd and aux tasks */
-//	/* TODO: MOVE THESE CALLS TO __increase AND __decrease TO CATCH ALL CASES.
-//	   PAY ATTENTION TO RUN-STATE OF INHERITOR & INHERITEE */
-//	if (next != prev) {
-//		if (prev && tsk_rt(prev)->inh_task) {
-//			clear_inh_task_linkback(prev, tsk_rt(prev)->inh_task);
-//		}
-//		if (next && tsk_rt(next)->inh_task) {
-//			set_inh_task_linkback(next, tsk_rt(next)->inh_task);
-//		}
-//	}
-//#endif
-
 	sched_state_task_picked();
-	raw_spin_unlock(&cluster->cluster_lock);
+	unlock_readyq(cluster);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cluster_lock released, next=0x%p\n", next);
@@ -1368,7 +1452,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	lock_readyq_irqsave(cluster, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -1400,7 +1484,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 		cedf_job_arrival(t);
 	}
 
-	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
+	unlock_readyq_irqrestore(cluster, flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *t)
@@ -1411,7 +1495,7 @@ static void cedf_task_wake_up(struct task_struct *t)
 
 	cluster = task_cpu_cluster(t);
 
-	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	lock_readyq_irqsave(cluster, flags);
 
 	now = litmus_clock();
 	TRACE_TASK(t, "wake_up at %llu\n", now);
@@ -1443,7 +1527,7 @@ static void cedf_task_wake_up(struct task_struct *t)
 	budget_state_machine(t,on_wakeup);
 	cedf_job_arrival(t);
 
-	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+	unlock_readyq_irqrestore(cluster, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -1456,7 +1540,7 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	lock_readyq_irqsave(cluster, flags);
 
 	unlink(t);
 
@@ -1476,7 +1560,7 @@ static void cedf_task_block(struct task_struct *t)
 	}
 #endif
 
-	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+	unlock_readyq_irqrestore(cluster, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -1492,7 +1576,7 @@ static void cedf_task_exit(struct task_struct * t)
 #endif
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	lock_readyq_irqsave(cluster, flags);
 
 	if (tsk_rt(t)->inh_task) {
 		WARN_ON(1);
@@ -1528,7 +1612,7 @@ static void cedf_task_exit(struct task_struct * t)
 		cpu->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+	unlock_readyq_irqrestore(cluster, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -1792,13 +1876,13 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 {
 	cedf_domain_t* cluster = task_cpu_cluster(t);
 
-	raw_spin_lock(&cluster->cluster_lock);
+	lock_readyq(cluster);
 
 	TRACE_TASK(t, "to inherit from %s/%d\n", prio_inh->comm, prio_inh->pid);
 
 	__increase_priority_inheritance(t, prio_inh);
 
-	raw_spin_unlock(&cluster->cluster_lock);
+	unlock_readyq(cluster);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
@@ -1962,7 +2046,7 @@ static void decrease_priority_inheritance(struct task_struct* t,
 {
 	cedf_domain_t* cluster = task_cpu_cluster(t);
 
-	raw_spin_lock(&cluster->cluster_lock);
+	lock_readyq(cluster);
 
 	TRACE_TASK(t, "to inherit from %s/%d (decrease)\n",
 		(prio_inh) ? prio_inh->comm : "null",
@@ -1970,7 +2054,7 @@ static void decrease_priority_inheritance(struct task_struct* t,
 
 	__decrease_priority_inheritance(t, prio_inh, budget_tiggered);
 
-	raw_spin_unlock(&cluster->cluster_lock);
+	unlock_readyq(cluster);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
@@ -2438,6 +2522,11 @@ static long cedf_activate_plugin(void)
 		raw_spin_lock_init(&cedf[i].dgl_lock);
 #endif
 
+#ifdef RECURSIVE_READY_QUEUE_LOCK
+		cedf[i].recursive_depth = 0;
+		atomic_set(&cedf[i].owner_cpu, NO_CPU);
+#endif
+
 		cedf[i].top_m_size = 0;
 		INIT_BINHEAP_HANDLE(&cedf[i].top_m, cedf_min_heap_base_priority_order);
 		INIT_BINHEAP_HANDLE(&cedf[i].not_top_m, cedf_max_heap_base_priority_order);