author    Glenn Elliott <gelliott@cs.unc.edu>  2013-04-05 18:01:21 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2013-04-05 18:01:21 -0400
commit    bf7f892f22a6a6804f09168256226cc6c2bc230c (patch)
tree      673a806a5a632a0016eb1ef78be01437a49a2a5c
parent    c4954ee90811cde64e11cc71bd01404871126206 (diff)

    resolve rebase issues

-rw-r--r--  litmus/sched_cedf.c     | 153
-rw-r--r--  litmus/sched_gsn_edf.c  |  44
-rw-r--r--  litmus/sched_pfp.c      |  40
-rw-r--r--  litmus/sched_psn_edf.c  |  24

4 files changed, 83 insertions(+), 178 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index f6fa8a339d48..2837f12953ea 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -87,11 +87,6 @@
 #include <litmus/gpu_affinity.h>
 #endif
 
-
-/* TODO: Move this to litmus/Kconfig */
-#define RECURSIVE_READY_QUEUE_LOCK
-
-
 /* Reference configuration variable. Determines which cache level is used to
  * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
  * all CPUs form a single cluster (just like GSN-EDF).
@@ -138,10 +133,8 @@ typedef struct clusterdomain {
 	cpumask_var_t cpu_map;
 	/* the cpus queue themselves according to priority in here */
 	struct binheap cpu_heap;
-#ifdef RECURSIVE_READY_QUEUE_LOCK
-	int recursive_depth;
-	atomic_t owner_cpu;
-#endif
+
+#define cluster_lock domain.ready_lock
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 	struct tasklet_head pending_tasklets;
@@ -158,81 +151,6 @@ typedef struct clusterdomain {
 } cedf_domain_t;
 
 
-#ifdef RECURSIVE_READY_QUEUE_LOCK
-#define lock_readyq_irqsave(cluster, flags) \
-do { \
-	if (unlikely(irqs_disabled() && atomic_read(&cluster->owner_cpu) == smp_processor_id())) { \
-		local_irq_save(flags); /* useless. makes compiler happy though */ \
-		++cluster->recursive_depth; \
-	} \
-	else { \
-		raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); \
-		atomic_set(&cluster->owner_cpu, smp_processor_id()); \
-		BUG_ON(cluster->recursive_depth != 0); \
-	} \
-}while(0)
-
-#define lock_readyq(cluster) \
-do { \
-	if (unlikely(irqs_disabled() && atomic_read(&cluster->owner_cpu) == smp_processor_id())) \
-		++cluster->recursive_depth; \
-	else { \
-		raw_spin_lock(&cluster->domain.ready_lock); \
-		atomic_set(&cluster->owner_cpu, smp_processor_id()); \
-		BUG_ON(cluster->recursive_depth != 0); \
-	} \
-}while(0)
-
-#define unlock_readyq_irqrestore(cluster, flags) \
-do { \
-	BUG_ON(!raw_spin_is_locked(&cluster->domain.ready_lock)); \
-	BUG_ON(atomic_read(&cluster->owner_cpu) != smp_processor_id()); \
-	if (unlikely(cluster->recursive_depth > 0)) { \
-		--cluster->recursive_depth; \
-		local_irq_restore(flags); /* useless. makes compiler happy though */ \
-	} \
-	else { \
-		atomic_set(&cluster->owner_cpu, NO_CPU); \
-		raw_spin_unlock_irqrestore(&cluster->domain.ready_lock, flags); \
-	} \
-}while(0)
-
-#define unlock_readyq(cluster) \
-do { \
-	BUG_ON(!raw_spin_is_locked(&cluster->domain.ready_lock)); \
-	if (unlikely(cluster->recursive_depth > 0)) { \
-		BUG_ON(atomic_read(&cluster->owner_cpu) != smp_processor_id()); \
-		--cluster->recursive_depth; \
-	} \
-	else { \
-		atomic_set(&cluster->owner_cpu, NO_CPU); \
-		raw_spin_unlock(&cluster->domain.ready_lock); \
-	} \
-}while(0)
-
-#else
-#define lock_readyq_irqsave(cluster, flags) \
-do {\
-	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); \
-}while(0)
-
-#define lock_readyq(cluster) \
-do {\
-	raw_spin_lock(&cluster->domain.ready_lock); \
-}while(0)
-
-#define unlock_readyq_irqrestore(cluster, flags) \
-do {\
-	raw_spin_unlock_irqrestore(&cluster->domain.ready_lock, flags); \
-}while(0)
-
-#define unlock_readyq(cluster) \
-do {\
-	raw_spin_unlock(&cluster->domain.ready_lock); \
-}while(0)
-#endif
-
-
 /* a cedf_domain per cluster; allocation is done at init/activation time */
 cedf_domain_t *cedf;
 
@@ -644,14 +562,14 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	lock_readyq_irqsave(cluster, flags);
+	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
 	bheap_for_each(tasks, cedf_track_on_release, NULL);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	unlock_readyq_irqrestore(cluster, flags);
+	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 /* caller holds cluster_lock */
@@ -803,7 +721,6 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 	if (budget_enforced(t) && !bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) {
 		if (is_np(t) && is_user_np(t)) {
 			TRACE_TASK(t, "is non-preemptable, preemption delayed.\n");
-
 			bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
 			request_exit_np(t);
 		}
@@ -846,9 +763,9 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 	 * must grab it every time since it must be take before the
 	 * cluster lock. */
 	raw_spin_lock_irqsave(&cluster->dgl_lock, flags);
-	lock_readyq(cluster);
+	raw_readyq_lock(&cluster->cluster_lock);
 #else
-	lock_readyq_irqsave(cluster, flags);
+	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 #endif
 
 	job_completion(t, 1); /* refreshes budget and pushes out deadline */
@@ -887,7 +804,6 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 		}
 	}
 #endif
-//	unlock_readyq_irqrestore(cluster, flags);
 
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Check our inheritance and propagate any changes forward. */
@@ -905,17 +821,15 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 
 #ifdef CONFIG_LITMUS_LOCKING
 	/* double-check that everything is okay */
-//	lock_readyq_irqsave(cluster, flags);
 	check_for_preemptions(cluster);
-//	unlock_readyq_irqrestore(cluster, flags);
 #endif
 
 	/* should be the outermost unlock call */
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
-	unlock_readyq(cluster);
+	raw_readyq_unlock(&cluster->cluster_lock);
 	raw_spin_unlock_irqrestore(&cluster->dgl_lock, flags);
 #else
-	unlock_readyq_irqrestore(cluster, flags);
+	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 #endif
 
 	/* we need to set up the budget timer since we're within the callback. */
@@ -993,7 +907,7 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 
 		TS_NV_SCHED_BOTISR_START;
 
-		lock_readyq_irqsave(cluster, flags);
+		raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
 		if(cluster->pending_tasklets.head != NULL) {
 			// remove tasklet at head.
@@ -1035,7 +949,7 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
 		}
 
-		unlock_readyq_irqrestore(cluster, flags);
+		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
@@ -1143,7 +1057,7 @@ static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 
 	cluster = task_cpu_cluster(tasklet->owner);
 
-	lock_readyq_irqsave(cluster, flags);
+	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
 	thisCPU = smp_processor_id();
 
@@ -1194,7 +1108,7 @@ static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 		__add_pai_tasklet(tasklet, cluster);
 	}
 
-	unlock_readyq_irqrestore(cluster, flags);
+	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 
 	if (runLocal /*&& runNow */) { // runNow == 1 is implied
@@ -1231,14 +1145,14 @@ static void cedf_change_prio_pai_tasklet(struct task_struct *old_prio,
 		cluster = task_cpu_cluster(probe);
 
 		if(cluster->pending_tasklets.head != NULL) {
-			lock_readyq_irqsave(cluster, flags);
+			raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 			for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
 				if(step->owner == old_prio) {
 					TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
 					step->owner = new_prio;
 				}
 			}
-			unlock_readyq_irqrestore(cluster, flags);
+			raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 		}
 	}
 	else {
@@ -1287,7 +1201,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	}
 #endif
 
-	lock_readyq(cluster);
+	raw_readyq_lock(&cluster->cluster_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -1411,7 +1325,7 @@ out_set_state:
 #endif
 
 	sched_state_task_picked();
-	unlock_readyq(cluster);
+	raw_readyq_unlock(&cluster->cluster_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cluster_lock released, next=0x%p\n", next);
@@ -1452,7 +1366,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	lock_readyq_irqsave(cluster, flags);
+	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -1484,7 +1398,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 		cedf_job_arrival(t);
 	}
 
-	unlock_readyq_irqrestore(cluster, flags);
+	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *t)
@@ -1495,7 +1409,7 @@ static void cedf_task_wake_up(struct task_struct *t)
 
 	cluster = task_cpu_cluster(t);
 
-	lock_readyq_irqsave(cluster, flags);
+	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
 	now = litmus_clock();
 	TRACE_TASK(t, "wake_up at %llu\n", now);
@@ -1527,7 +1441,7 @@ static void cedf_task_wake_up(struct task_struct *t)
 	budget_state_machine(t,on_wakeup);
 	cedf_job_arrival(t);
 
-	unlock_readyq_irqrestore(cluster, flags);
+	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -1540,7 +1454,7 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	lock_readyq_irqsave(cluster, flags);
+	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
 	unlink(t);
 
@@ -1560,7 +1474,7 @@ static void cedf_task_block(struct task_struct *t)
 	}
 #endif
 
-	unlock_readyq_irqrestore(cluster, flags);
+	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -1576,7 +1490,7 @@ static void cedf_task_exit(struct task_struct * t)
 #endif
 
 	/* unlink if necessary */
-	lock_readyq_irqsave(cluster, flags);
+	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
 	if (tsk_rt(t)->inh_task) {
 		WARN_ON(1);
@@ -1612,7 +1526,7 @@ static void cedf_task_exit(struct task_struct * t)
 		cpu->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	unlock_readyq_irqrestore(cluster, flags);
+	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -1702,8 +1616,6 @@ static long cedf_admit_task(struct task_struct* tsk)
 
 #include <litmus/fdso.h>
 
-
-
 /* called with IRQs off */
 static int __increase_priority_inheritance(struct task_struct* t,
 					   struct task_struct* prio_inh)
@@ -1876,13 +1788,13 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 {
 	cedf_domain_t* cluster = task_cpu_cluster(t);
 
-	lock_readyq(cluster);
+	raw_readyq_lock(&cluster->cluster_lock);
 
 	TRACE_TASK(t, "to inherit from %s/%d\n", prio_inh->comm, prio_inh->pid);
 
 	__increase_priority_inheritance(t, prio_inh);
 
-	unlock_readyq(cluster);
+	raw_readyq_unlock(&cluster->cluster_lock);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
@@ -2046,7 +1958,7 @@ static void decrease_priority_inheritance(struct task_struct* t,
 {
 	cedf_domain_t* cluster = task_cpu_cluster(t);
 
-	lock_readyq(cluster);
+	raw_readyq_lock(&cluster->cluster_lock);
 
 	TRACE_TASK(t, "to inherit from %s/%d (decrease)\n",
 		(prio_inh) ? prio_inh->comm : "null",
@@ -2054,7 +1966,7 @@ static void decrease_priority_inheritance(struct task_struct* t,
 
 	__decrease_priority_inheritance(t, prio_inh, budget_tiggered);
 
-	unlock_readyq(cluster);
+	raw_readyq_unlock(&cluster->cluster_lock);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
@@ -2093,7 +2005,6 @@ static void nested_increase_priority_inheritance(struct task_struct* t,
 
 	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap.
 
-
 	if(blocked_lock) {
 		if(blocked_lock->ops->supports_nesting) {
 			TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n",
@@ -2136,7 +2047,6 @@ static void nested_decrease_priority_inheritance(struct task_struct* t,
 		if(blocked_lock->ops->supports_nesting) {
 			TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n",
 					   blocked_lock->ident);
-
 			// beware: recursion
 			blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t,
 					to_unlock,
@@ -2254,10 +2164,6 @@ static struct litmus_lock* cedf_new_ikglp(void* __user arg)
 	return ikglp_new(cluster_size, &cedf_ikglp_lock_ops, arg);
 }
 
-#endif /* CONFIG_LITMUS_NESTED_LOCKING */
-
-
-
 
 /* ******************** KFMLP support ********************** */
 
@@ -2382,8 +2288,7 @@ UNSUPPORTED_AFF_OBS:
 	return err;
 }
 #endif
-
-
+#endif // CONFIG_LITMUS_AFFINITY_LOCKING
 
 
 #ifdef VERBOSE_INIT
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index ab97d59c9587..3084f377a5f1 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -389,13 +389,13 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
 	__merge_ready(rt, tasks);
 
 	check_for_preemptions();
 
-	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 /* caller holds gsnedf_lock */
@@ -509,7 +509,7 @@ static void do_lit_tasklets(struct task_struct* sched_task)
 		TS_NV_SCHED_BOTISR_START;
 
 		// execute one tasklet that has higher priority
-		raw_spin_lock_irqsave(&gsnedf_lock, flags);
+		raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
 		if(gsnedf_pending_tasklets.head != NULL) {
 			struct tasklet_struct *prev = NULL;
@@ -540,7 +540,7 @@ static void do_lit_tasklets(struct task_struct* sched_task)
 			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
 		}
 
-		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+		raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
@@ -683,7 +683,7 @@ static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 	}
 
 
-	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
 	thisCPU = smp_processor_id();
 
@@ -738,7 +738,7 @@ static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 		__add_pai_tasklet(tasklet);
 	}
 
-	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 
 
 	if (runLocal /*&& runNow */) { // runNow == 1 is implied
@@ -763,14 +763,14 @@ static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio,
 	unsigned long flags;
 
 	if(gsnedf_pending_tasklets.head != NULL) {
-		raw_spin_lock_irqsave(&gsnedf_lock, flags);
+		raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 		for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) {
 			if(step->owner == old_prio) {
 				TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
 				step->owner = new_prio;
 			}
 		}
-		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+		raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 	}
 }
 
@@ -814,7 +814,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	}
 #endif
 
-	raw_spin_lock(&gsnedf_lock);
+	raw_readyq_lock(&gsnedf_lock);
 
 	/* sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
@@ -930,7 +930,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 
 	sched_state_task_picked();
 
-	raw_spin_unlock(&gsnedf_lock);
+	raw_readyq_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("gsnedf_lock released, next=0x%p\n", next);
@@ -968,7 +968,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("gsn edf: task new = %d on_rq = %d running = %d\n", t->pid, on_rq, running);
 
-	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -995,7 +995,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 		t->rt_param.linked_on = NO_CPU;
 
 	gsnedf_job_arrival(t);
-	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_wake_up(struct task_struct *task)
@@ -1005,7 +1005,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
 #if 0
 	/* sporadic task model. will increment job numbers automatically */
@@ -1042,7 +1042,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 #endif
 
 	gsnedf_job_arrival(task);
-	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_block(struct task_struct *t)
@@ -1052,7 +1052,7 @@ static void gsnedf_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
 	unlink(t);
 
@@ -1072,7 +1072,7 @@ static void gsnedf_task_block(struct task_struct *t)
 	}
 #endif
 
-	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -1087,7 +1087,7 @@ static void gsnedf_task_exit(struct task_struct * t)
 #endif
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
 	/* disable budget enforcement */
 	if (tsk_rt(t)->budget.ops)
@@ -1115,7 +1115,7 @@ static void gsnedf_task_exit(struct task_struct * t)
 		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -1275,11 +1275,11 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 {
 	int success;
 
-	raw_spin_lock(&gsnedf_lock);
+	raw_readyq_lock(&gsnedf_lock);
 
 	success = __increase_priority_inheritance(t, prio_inh);
 
-	raw_spin_unlock(&gsnedf_lock);
+	raw_readyq_unlock(&gsnedf_lock);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
@@ -1385,11 +1385,11 @@ static void decrease_priority_inheritance(struct task_struct* t,
 {
 	int success;
 
-	raw_spin_lock(&gsnedf_lock);
+	raw_readyq_lock(&gsnedf_lock);
 
 	success = __decrease_priority_inheritance(t, prio_inh, budget_triggered);
 
-	raw_spin_unlock(&gsnedf_lock);
+	raw_readyq_unlock(&gsnedf_lock);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 3ff131e58553..ad2b8d81f29f 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -76,7 +76,7 @@ static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	struct task_struct* t;
 	struct bheap_node* hn;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 	while (!bheap_empty(tasks)) {
 		hn = bheap_take(fp_ready_order, tasks);
@@ -92,7 +92,7 @@ static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 			preempt(pfp);
 	}
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 static void pfp_preempt_check(pfp_domain_t *pfp)
@@ -186,7 +186,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 
 	int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate;
 
-	raw_spin_lock(&pfp->slock);
+	raw_readyq_lock(&pfp->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -292,7 +292,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 
 	pfp->scheduled = next;
 	sched_state_task_picked();
-	raw_spin_unlock(&pfp->slock);
+	raw_readyq_unlock(&pfp->slock);
 
 	return next;
 }
@@ -312,14 +312,14 @@ static void pfp_finish_switch(struct task_struct *prev)
 
 		to = task_pfp(prev);
 
-		raw_spin_lock(&to->slock);
+		raw_readyq_lock(&to->slock);
 
 		TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu);
 		requeue(prev, to);
 		if (fp_preemption_needed(&to->ready_queue, to->scheduled))
 			preempt(to);
 
-		raw_spin_unlock(&to->slock);
+		raw_readyq_unlock(&to->slock);
 
 	}
 }
@@ -342,7 +342,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pfp->scheduled);
@@ -352,7 +352,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		pfp_preempt_check(pfp);
 	}
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 static void pfp_task_wake_up(struct task_struct *task)
@@ -362,7 +362,7 @@ static void pfp_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Should only be queued when processing a fake-wake up due to a
@@ -404,7 +404,7 @@ static void pfp_task_wake_up(struct task_struct *task)
 #ifdef CONFIG_LITMUS_LOCKING
 out_unlock:
 #endif
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -433,7 +433,7 @@ static void pfp_task_exit(struct task_struct * t)
 	pfp_domain_t* pfp = task_pfp(t);
 	rt_domain_t* dom;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 	/* disable budget enforcement */
 	if (tsk_rt(t)->budget.ops)
@@ -451,7 +451,7 @@ static void pfp_task_exit(struct task_struct * t)
 	}
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 #ifdef CONFIG_LITMUS_LOCKING
@@ -514,7 +514,7 @@ static void boost_priority(struct task_struct* t, lt_t priority_point)
 	unsigned long flags;
 	pfp_domain_t* pfp = task_pfp(t);
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 
 	TRACE_TASK(t, "priority boosted at %llu\n", litmus_clock());
@@ -528,7 +528,7 @@ static void boost_priority(struct task_struct* t, lt_t priority_point)
 	 * part of lock acquisitions. */
 	BUG_ON(pfp->scheduled != t);
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 static void unboost_priority(struct task_struct* t)
@@ -537,7 +537,7 @@ static void unboost_priority(struct task_struct* t)
 	pfp_domain_t* pfp = task_pfp(t);
 	lt_t now;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 	now = litmus_clock();
 
 	/* assumption: this only happens when the job is scheduled */
@@ -555,7 +555,7 @@ static void unboost_priority(struct task_struct* t)
 	if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
 		preempt(pfp);
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 /* ******************** SRP support ************************ */
@@ -1127,7 +1127,7 @@ static void pcp_priority_inheritance(void)
 	blocker = ceiling ? ceiling->owner : NULL;
 	blocked = __get_cpu_var(pcp_state).hp_waiter;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 	/* Current is no longer inheriting anything by default. This should be
 	 * the currently scheduled job, and hence not currently queued. */
@@ -1153,7 +1153,7 @@ static void pcp_priority_inheritance(void)
 	    fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
 		preempt(pfp);
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 /* called with preemptions off */
@@ -1457,12 +1457,12 @@ static void pfp_migrate_to(int target_cpu)
 	/* lock both pfp domains in order of address */
 	from = task_pfp(t);
 
-	raw_spin_lock(&from->slock);
+	raw_readyq_lock(&from->slock);
 
 	/* switch partitions */
 	tsk_rt(t)->task_params.cpu = target_cpu;
 
-	raw_spin_unlock(&from->slock);
+	raw_readyq_unlock(&from->slock);
 
 	/* Don't trace scheduler costs as part of
 	 * locking overhead. Scheduling costs are accounted for
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 720a733aff9e..dffaeb425abf 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -81,7 +81,7 @@ static void boost_priority(struct task_struct* t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	lt_t now;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	now = litmus_clock();
 
 	TRACE_TASK(t, "priority boosted at %llu\n", now);
@@ -101,7 +101,7 @@ static void boost_priority(struct task_struct* t)
 		raw_spin_unlock(&pedf->domain.release_lock);
 	} /* else: nothing to do since the job is not queued while scheduled */
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void unboost_priority(struct task_struct* t)
@@ -110,7 +110,7 @@ static void unboost_priority(struct task_struct* t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	lt_t now;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	now = litmus_clock();
 
 	/* assumption: this only happens when the job is scheduled */
@@ -128,7 +128,7 @@ static void unboost_priority(struct task_struct* t)
 	if (edf_preemption_needed(&pedf->domain, pedf->scheduled))
 		preempt(pedf);
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #endif
@@ -219,7 +219,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 
 	int out_of_time, sleep, preempt, np, exists, blocks, resched;
 
-	raw_spin_lock(&pedf->slock);
+	raw_readyq_lock(&pedf->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -304,7 +304,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 
 	pedf->scheduled = next;
 	sched_state_task_picked();
-	raw_spin_unlock(&pedf->slock);
+	raw_readyq_unlock(&pedf->slock);
 
 	return next;
 }
@@ -327,7 +327,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -337,7 +337,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		psnedf_preempt_check(pedf);
 	}
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -348,7 +348,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	now = litmus_clock();
 	if (is_sporadic(task) && is_tardy(task, now)
@@ -376,7 +376,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 		psnedf_preempt_check(pedf);
 	}
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -395,7 +395,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	rt_domain_t* edf;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 
 	/* disable budget enforcement */
 	if (tsk_rt(t)->budget.ops)
@@ -412,7 +412,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
 	preempt(pedf);
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_LITMUS_LOCKING