author    Glenn Elliott <gelliott@cs.unc.edu>  2013-10-09 15:45:23 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2013-10-09 15:47:26 -0400
commit    7fb54decc59fa5855cd273bfda908fd443a37e67 (patch)
tree      5941c299884d77e9ecd6be7e4041d94f00e01ef0
parent    b5b69edba68805ab2d4bdeae58fe783710111bfc (diff)
Remove PAI support
Support for process-aware interrupt (PAI) handling broke when the tasklet ownership model changed in GPUSync, post-ECRTS12. Remove it for now; we will want to re-add support later.
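
For reference, PAI hooked into the scheduler through three plugin callbacks plus a
deferred-execution point in schedule(). The sketch below is assembled from the hunks
in this patch; the myplugin_* names are placeholders, not functions that exist anywhere:

    /* Hook declarations removed from struct sched_plugin
     * (include/litmus/sched_plugin.h): */
    #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
    	enqueue_pai_tasklet_t     enqueue_pai_tasklet;     /* queue a tasklet by owner priority */
    	change_prio_pai_tasklet_t change_prio_pai_tasklet; /* re-tag queued tasklets on PI changes */
    	run_tasklets_t            run_tasklets;            /* drain eligible tasklets at sched points */
    #endif

    /* A plugin that re-adds support would wire them up like so
     * (hypothetical myplugin_* implementations): */
    #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
    	.enqueue_pai_tasklet = myplugin_enqueue_pai_tasklet,
    	.change_prio_pai_tasklet = myplugin_change_prio_pai_tasklet,
    	.run_tasklets = myplugin_run_tasklets,
    #endif

    /* ...and restore the call site removed from schedule() (kernel/sched.c):
     *     litmus->run_tasklets(prev);
     */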
-rw-r--r--  include/linux/interrupt.h      |    2
-rw-r--r--  include/litmus/sched_plugin.h  |    5
-rw-r--r--  include/litmus/trace.h         |    5
-rw-r--r--  kernel/sched.c                 |    4
-rw-r--r--  kernel/softirq.c               |   16
-rw-r--r--  litmus/Kconfig                 |   11
-rw-r--r--  litmus/Makefile                |    1
-rw-r--r--  litmus/litmus_pai_softirq.c    |   64
-rw-r--r--  litmus/nvidia_info.c           |  195
-rw-r--r--  litmus/sched_cedf.c            |  343
-rw-r--r--  litmus/sched_crm.c             |  343
-rw-r--r--  litmus/sched_gsn_edf.c         |  356
-rw-r--r--  litmus/sched_plugin.c          |   30
13 files changed, 4 insertions, 1371 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index af52b5c497b9..7ffc79559866 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -502,7 +502,7 @@ struct tasklet_struct
 	void (*func)(unsigned long);
 	unsigned long data;
 #if 0
-#if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD)
+#if defined(CONFIG_LITMUS_SOFTIRQD)
 	struct task_struct *owner;
 #endif
 #endif
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 82e62e8283e9..0982f5ff2631 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -165,11 +165,6 @@ struct sched_plugin {
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	allocate_affinity_observer_t allocate_aff_obs;
 #endif
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	enqueue_pai_tasklet_t enqueue_pai_tasklet;
-	change_prio_pai_tasklet_t change_prio_pai_tasklet;
-	run_tasklets_t run_tasklets;
-#endif
 
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
 	default_cpu_for_gpu_t map_gpu_to_cpu;
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index f10d8a0311cc..6410ab91f42d 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -160,11 +160,6 @@ feather_callback void save_timestamp_hide_irq(unsigned long event);
 #define TS_NV_RELEASE_BOTISR_END	TIMESTAMP(205)
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-#define TS_NV_SCHED_BOTISR_START	TIMESTAMP(206)
-#define TS_NV_SCHED_BOTISR_END		TIMESTAMP(207)
-#endif
-
 #define TS_RELEASE_LATENCY(when)	LTIMESTAMP(208, &(when))
 
 #define TS_UPDATE_GPU_EST_START		TIMESTAMP(210)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7d27a3851a4d..a784777a2779 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4450,10 +4450,6 @@ litmus_need_resched_nonpreemptible:
 	if (need_resched())
 		goto need_resched;
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	litmus->run_tasklets(prev);
-#endif
-
 	srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 561d60ce1f65..fb2947ed6e51 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -487,8 +487,7 @@ void __tasklet_schedule(struct tasklet_struct *t)
 {
 #if defined(CONFIG_LITMUS_NVIDIA) && ( \
 	defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS) || \
-	defined(CONFIG_LITMUS_SOFTIRQD) || \
-	defined(CONFIG_LITMUS_PAI_SOFTIRQD))
+	defined(CONFIG_LITMUS_SOFTIRQD))
 	if(is_nvidia_func(t->func))
 	{
 #if defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS)
@@ -502,8 +501,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
 		return;
 	else
 		goto default_linux_handling;
-#elif defined(CONFIG_LITMUS_PAI_SOFTIRQD)
-	/* broken at the moment! */
 #endif
 	}
 
@@ -532,8 +529,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
 #if defined(CONFIG_LITMUS_NVIDIA) && ( \
 	defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS) || \
-	defined(CONFIG_LITMUS_SOFTIRQD) || \
-	defined(CONFIG_LITMUS_PAI_SOFTIRQD))
+	defined(CONFIG_LITMUS_SOFTIRQD))
 	if(is_nvidia_func(t->func))
 	{
 #if defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS)
@@ -547,8 +543,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 		return;
 	else
 		goto default_linux_handling;
-#elif defined(CONFIG_LITMUS_PAI_SOFTIRQD)
-	/* broken at the moment! */
 #endif
 	}
 
@@ -576,8 +570,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 	BUG_ON(!irqs_disabled());
 #if defined(CONFIG_LITMUS_NVIDIA) && ( \
 	defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS) || \
-	defined(CONFIG_LITMUS_SOFTIRQD) || \
-	defined(CONFIG_LITMUS_PAI_SOFTIRQD))
+	defined(CONFIG_LITMUS_SOFTIRQD))
 	if(is_nvidia_func(t->func))
 	{
 #if defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS)
@@ -589,9 +582,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 		return;
 	else
 		goto default_linux_handling;
-#elif defined(CONFIG_LITMUS_PAI_SOFTIRQD)
-	/* broken at the moment! */
-#endif
 	}
 
 default_linux_handling:
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 32c1c92cb56f..03f31157abc7 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -451,17 +451,6 @@ config LITMUS_SOFTIRQD
 
 	  G-EDF, C-EDF ONLY for now!
 
-
-#config LITMUS_PAI_SOFTIRQD
-#	bool "Defer tasklets to context switch points."
-#	help
-#	  Only execute scheduled tasklet bottom halves at
-#	  scheduling points. Trades context switch overhead
-#	  at the cost of non-preemptive durations of bottom half
-#	  processing.
-#
-#	  G-EDF, C-EDF ONLY for now!
-
 endchoice
 
 
diff --git a/litmus/Makefile b/litmus/Makefile
index 60794ac3e2bc..08ed4a663d8f 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -37,7 +37,6 @@ obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
 obj-$(CONFIG_LITMUS_LOCKING) += kfmlp_lock.o
 obj-$(CONFIG_LITMUS_NESTED_LOCKING) += fifo_lock.o prioq_lock.o ikglp_lock.o
 obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o
-obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o
 obj-$(CONFIG_LITMUS_NVIDIA) += nvidia_info.o sched_trace_external.o
 
 obj-$(CONFIG_LITMUS_AFFINITY_LOCKING) += kexclu_affinity.o gpu_affinity.o
diff --git a/litmus/litmus_pai_softirq.c b/litmus/litmus_pai_softirq.c
deleted file mode 100644
index 300571a81bbd..000000000000
--- a/litmus/litmus_pai_softirq.c
+++ /dev/null
@@ -1,64 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/cpu.h>
-#include <linux/kthread.h>
-#include <linux/ftrace.h>
-#include <linux/smp.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-
-#include <linux/sched.h>
-#include <linux/cpuset.h>
-
-#include <litmus/litmus.h>
-#include <litmus/sched_trace.h>
-#include <litmus/jobs.h>
-#include <litmus/sched_plugin.h>
-#include <litmus/litmus_softirq.h>
-
-
-
-int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
-{
-	int ret = 0; /* assume failure */
-	if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
-	{
-		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
-		BUG();
-	}
-
-	ret = litmus->enqueue_pai_tasklet(t);
-
-	return(ret);
-}
-
-EXPORT_SYMBOL(__litmus_tasklet_schedule);
-
-
-
-// failure causes default Linux handling.
-int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
-{
-	int ret = 0; /* assume failure */
-	return(ret);
-}
-EXPORT_SYMBOL(__litmus_tasklet_hi_schedule);
-
-
-// failure causes default Linux handling.
-int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id)
-{
-	int ret = 0; /* assume failure */
-	return(ret);
-}
-EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
-
-
-// failure causes default Linux handling.
-int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
-{
-	int ret = 0; /* assume failure */
-	return(ret);
-}
-EXPORT_SYMBOL(__litmus_schedule_work);
-
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index 6a101882e615..096480c53fad 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -1223,198 +1223,3 @@ int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t)
 
 	return(ret);
 }
-
-
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-//void pai_check_priority_increase(struct task_struct *t, int reg_device_id)
-//{
-//	unsigned long flags;
-//	nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
-//
-//
-//
-//	if(reg->max_prio_owner != t) {
-//
-//		raw_spin_lock_irqsave(&reg->lock, flags);
-//
-//		if(reg->max_prio_owner != t) {
-//			if(litmus->compare(t, reg->max_prio_owner)) {
-//				litmus->change_prio_pai_tasklet(reg->max_prio_owner, t);
-//				reg->max_prio_owner = t;
-//			}
-//		}
-//
-//		raw_spin_unlock_irqrestore(&reg->lock, flags);
-//	}
-//}
-//
-//
-//void pai_check_priority_decrease(struct task_struct *t, int reg_device_id)
-//{
-//	unsigned long flags;
-//	nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
-//
-//	if(reg->max_prio_owner == t) {
-//
-//		raw_spin_lock_irqsave(&reg->lock, flags);
-//
-//		if(reg->max_prio_owner == t) {
-//			reg->max_prio_owner = find_hp_owner(reg, NULL);
-//			if(reg->max_prio_owner != t) {
-//				litmus->change_prio_pai_tasklet(t, reg->max_prio_owner);
-//			}
-//		}
-//
-//		raw_spin_unlock_irqrestore(&reg->lock, flags);
-//	}
-//}
-#endif
-
-
-
-
-
-//static int __reg_nv_device(int reg_device_id, struct task_struct *t)
-//{
-//	int ret = 0;
-//	int i;
-//	struct task_struct *old_max = NULL;
-//
-//
-//	raw_spin_lock_irqsave(&reg->lock, flags);
-//
-//	if(reg->nr_owners < NV_MAX_SIMULT_USERS) {
-//		TRACE_TASK(t, "registers GPU %d\n", reg_device_id);
-//		for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) {
-//			if(reg->owners[i] == NULL) {
-//				reg->owners[i] = t;
-//
-//				//if(edf_higher_prio(t, reg->max_prio_owner)) {
-//				if(litmus->compare(t, reg->max_prio_owner)) {
-//					old_max = reg->max_prio_owner;
-//					reg->max_prio_owner = t;
-//
-//#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-//					litmus->change_prio_pai_tasklet(old_max, t);
-//#endif
-//				}
-//
-//#ifdef CONFIG_LITMUS_SOFTIRQD
-//				down_and_set_stat(t, HELD, &tsk_rt(t)->klmirqd_sem);
-//#endif
-//				++(reg->nr_owners);
-//
-//				break;
-//			}
-//		}
-//	}
-//	else
-//	{
-//		TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id);
-//		//ret = -EBUSY;
-//	}
-//
-//	raw_spin_unlock_irqrestore(&reg->lock, flags);
-//
-//	__set_bit(reg_device_id, &tsk_rt(t)->held_gpus);
-//
-//	return(ret);
-//}
-//
-//static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t)
-//{
-//	int ret = 0;
-//	int i;
-//	unsigned long flags;
-//	nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id];
-//
-//#ifdef CONFIG_LITMUS_SOFTIRQD
-//	struct task_struct* klmirqd_th = get_klmirqd(de_reg_device_id);
-//#endif
-//
-//	if(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)) {
-//		return ret;
-//	}
-//
-//	raw_spin_lock_irqsave(&reg->lock, flags);
-//
-//	TRACE_TASK(t, "unregisters GPU %d\n", de_reg_device_id);
-//
-//	for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) {
-//		if(reg->owners[i] == t) {
-//#ifdef CONFIG_LITMUS_SOFTIRQD
-//			flush_pending(klmirqd_th, t);
-//#endif
-//			if(reg->max_prio_owner == t) {
-//				reg->max_prio_owner = find_hp_owner(reg, t);
-//#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-//				litmus->change_prio_pai_tasklet(t, reg->max_prio_owner);
-//#endif
-//			}
-//
-//#ifdef CONFIG_LITMUS_SOFTIRQD
-//			up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klmirqd_sem);
-//#endif
-//
-//			reg->owners[i] = NULL;
-//			--(reg->nr_owners);
-//
-//			break;
-//		}
-//	}
-//
-//	raw_spin_unlock_irqrestore(&reg->lock, flags);
-//
-//	__clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus);
-//
-//	return(ret);
-//}
-//
-//
-//int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t)
-//{
-//	int ret;
-//
-//	if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0))
-//	{
-//		if(reg_action)
-//			ret = __reg_nv_device(reg_device_id, t);
-//		else
-//			ret = __clear_reg_nv_device(reg_device_id, t);
-//	}
-//	else
-//	{
-//		ret = -ENODEV;
-//	}
-//
-//	return(ret);
-//}
-
-
-
-//void lock_nv_registry(u32 target_device_id, unsigned long* flags)
-//{
-//	BUG_ON(target_device_id >= NV_DEVICE_NUM);
-//
-//	if(in_interrupt())
-//		TRACE("Locking registry for %d.\n", target_device_id);
-//	else
-//		TRACE_CUR("Locking registry for %d.\n", target_device_id);
-//
-//	raw_spin_lock_irqsave(&NV_DEVICE_REG[target_device_id].lock, *flags);
-//}
-//
-//void unlock_nv_registry(u32 target_device_id, unsigned long* flags)
-//{
-//	BUG_ON(target_device_id >= NV_DEVICE_NUM);
-//
-//	if(in_interrupt())
-//		TRACE("Unlocking registry for %d.\n", target_device_id);
-//	else
-//		TRACE_CUR("Unlocking registry for %d.\n", target_device_id);
-//
-//	raw_spin_unlock_irqrestore(&NV_DEVICE_REG[target_device_id].lock, *flags);
-//}
-
-
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 00dabbf65af7..69bba98dc664 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -75,10 +75,6 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-#include <linux/interrupt.h>
-#endif
-
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -136,10 +132,6 @@ typedef struct clusterdomain {
 
 #define cluster_lock domain.ready_lock
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	struct tasklet_head pending_tasklets;
-#endif
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	raw_spinlock_t dgl_lock;
 #endif
@@ -1103,303 +1095,6 @@ static void cedf_tick(struct task_struct* t)
 	}
 }
 
-
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-
-static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
-{
-	if (!atomic_read(&tasklet->count)) {
-		if(tasklet->owner) {
-			sched_trace_tasklet_begin(tasklet->owner);
-		}
-
-		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
-		{
-			BUG();
-		}
-		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
-			__FUNCTION__,
-			(tasklet->owner) ? tasklet->owner->pid : 0,
-			(tasklet->owner) ? 0 : 1);
-		tasklet->func(tasklet->data);
-		tasklet_unlock(tasklet);
-
-		if(tasklet->owner) {
-			sched_trace_tasklet_end(tasklet->owner, flushed);
-		}
-	}
-	else {
-		BUG();
-	}
-}
-
-
-static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
-{
-	int work_to_do = 1;
-	struct tasklet_struct *tasklet = NULL;
-	unsigned long flags;
-
-	while(work_to_do) {
-
-		TS_NV_SCHED_BOTISR_START;
-
-		raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-
-		if(cluster->pending_tasklets.head != NULL) {
-			// remove tasklet at head.
-			struct tasklet_struct *prev = NULL;
-			tasklet = cluster->pending_tasklets.head;
-
-			// find a tasklet with prio to execute; skip ones where
-			// sched_task has a higher priority.
-			// We use the '!edf' test instead of swaping function arguments since
-			// both sched_task and owner could be NULL. In this case, we want to
-			// still execute the tasklet.
-			while(tasklet && !edf_higher_prio(tasklet->owner, sched_task)) {
-				prev = tasklet;
-				tasklet = tasklet->next;
-			}
-
-			if(tasklet) { // found something to execuite
-				// remove the tasklet from the queue
-				if(prev) {
-					prev->next = tasklet->next;
-					if(prev->next == NULL) {
-						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-						cluster->pending_tasklets.tail = &(prev);
-					}
-				}
-				else {
-					cluster->pending_tasklets.head = tasklet->next;
-					if(tasklet->next == NULL) {
-						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-						cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
-					}
-				}
-			}
-			else {
-				TRACE("%s: No tasklets with eligible priority.\n", __FUNCTION__);
-			}
-		}
-		else {
-			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
-		}
-
-		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-
-		if(tasklet) {
-			__do_lit_tasklet(tasklet, 0ul);
-			tasklet = NULL;
-		}
-		else {
-			work_to_do = 0;
-		}
-
-		TS_NV_SCHED_BOTISR_END;
-	}
-}
-
-static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
-{
-	struct tasklet_struct* step;
-
-	tasklet->next = NULL;  // make sure there are no old values floating around
-
-	step = cluster->pending_tasklets.head;
-	if(step == NULL) {
-		TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
-		// insert at tail.
-		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &(tasklet->next);
-	}
-	else if((*(cluster->pending_tasklets.tail) != NULL) &&
-			edf_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
-		// insert at tail.
-		TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
-
-		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &(tasklet->next);
-	}
-	else {
-
-		// insert the tasklet somewhere in the middle.
-
-		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
-
-		while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
-			step = step->next;
-		}
-
-		// insert tasklet right before step->next.
-
-		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
-			tasklet->owner->pid,
-			(step->owner) ?
-				step->owner->pid :
-				-1,
-			(step->next) ?
-				((step->next->owner) ?
-					step->next->owner->pid :
-					-1) :
-				-1);
-
-		tasklet->next = step->next;
-		step->next = tasklet;
-
-		// patch up the head if needed.
-		if(cluster->pending_tasklets.head == step)
-		{
-			TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
-			cluster->pending_tasklets.head = tasklet;
-		}
-	}
-}
-
-static void cedf_run_tasklets(struct task_struct* sched_task)
-{
-	cedf_domain_t* cluster;
-
-	preempt_disable();
-
-	cluster = (is_realtime(sched_task)) ?
-		task_cpu_cluster(sched_task) :
-		remote_cluster(smp_processor_id());
-
-	if(cluster && cluster->pending_tasklets.head != NULL) {
-		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
-		do_lit_tasklets(cluster, sched_task);
-	}
-
-	preempt_enable_no_resched();
-}
-
-
-
-static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
-{
-#if 0
-	cedf_domain_t *cluster = NULL;
-	cpu_entry_t *targetCPU = NULL;
-	int thisCPU;
-	int runLocal = 0;
-	int runNow = 0;
-	unsigned long flags;
-
-	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
-	{
-		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
-		return 0;
-	}
-
-	cluster = task_cpu_cluster(tasklet->owner);
-
-	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-
-	thisCPU = smp_processor_id();
-
-#ifdef CONFIG_SCHED_CPU_AFFINITY
-	{
-		cpu_entry_t* affinity = NULL;
-
-		// use this CPU if it is in our cluster and isn't running any RT work.
-		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cedf_cpu_entries).linked == NULL)) {
-			affinity = &(__get_cpu_var(cedf_cpu_entries));
-		}
-		else {
-			// this CPU is busy or shouldn't run tasklet in this cluster.
-			// look for available near by CPUs.
-			// NOTE: Affinity towards owner and not this CPU. Is this right?
-			affinity =
-				cedf_get_nearest_available_cpu(cluster,
-					&per_cpu(cedf_cpu_entries, task_cpu(tasklet->owner)));
-		}
-
-		targetCPU = affinity;
-	}
-#endif
-
-	if (targetCPU == NULL) {
-		targetCPU = lowest_prio_cpu(cluster);
-	}
-
-	if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
-		if (thisCPU == targetCPU->cpu) {
-			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
-			runLocal = 1;
-			runNow = 1;
-		}
-		else {
-			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
-			runLocal = 0;
-			runNow = 1;
-		}
-	}
-	else {
-		runLocal = 0;
-		runNow = 0;
-	}
-
-	if(!runLocal) {
-		// enqueue the tasklet
-		__add_pai_tasklet(tasklet, cluster);
-	}
-
-	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-
-
-	if (runLocal /*&& runNow */) {  // runNow == 1 is implied
-		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-		__do_lit_tasklet(tasklet, 0ul);
-	}
-	else if (runNow /*&& !runLocal */) {  // runLocal == 0 is implied
-		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
-		preempt(targetCPU);  // need to be protected by cluster_lock?
-	}
-	else {
-		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
-	}
-#else
-	TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-	__do_lit_tasklet(tasklet, 0ul);
-#endif
-	return(1);  // success
-}
-
-static void cedf_change_prio_pai_tasklet(struct task_struct *old_prio,
-	struct task_struct *new_prio)
-{
-	struct tasklet_struct* step;
-	unsigned long flags;
-	cedf_domain_t *cluster;
-	struct task_struct *probe;
-
-	// identify the cluster by the assignment of these tasks. one should
-	// be non-NULL.
-	probe = (old_prio) ? old_prio : new_prio;
-
-	if(probe) {
-		cluster = task_cpu_cluster(probe);
-
-		if(cluster->pending_tasklets.head != NULL) {
-			raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-			for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
-				if(step->owner == old_prio) {
-					TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
-					step->owner = new_prio;
-				}
-			}
-			raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-		}
-	}
-	else {
-		TRACE("%s: Both priorities were NULL\n");
-	}
-}
-
-#endif  // PAI
-
 #ifdef CONFIG_LITMUS_LOCKING
 static int __increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh);
 #endif
@@ -1791,10 +1486,6 @@ static void cedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	cedf_change_prio_pai_tasklet(t, NULL);
-#endif
-
 	/* unlink if necessary */
 	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
@@ -2120,17 +1811,6 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 	__increase_priority_inheritance(t, prio_inh);
 
 	raw_readyq_unlock(&cluster->cluster_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_increase(t, i);
-		}
-	}
-#endif
 }
 
 /* called with IRQs off */
@@ -2294,17 +1974,6 @@ static void decrease_priority_inheritance(struct task_struct* t,
 	__decrease_priority_inheritance(t, prio_inh, budget_tiggered);
 
 	raw_readyq_unlock(&cluster->cluster_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_decrease(t, i);
-		}
-	}
-#endif
 }
 
 
@@ -2740,13 +2409,6 @@ static long cedf_activate_plugin(void)
 		INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio);
 		edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
 
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-		cedf[i].pending_tasklets.head = NULL;
-		cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head);
-#endif
-
-
 		if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 #ifdef CONFIG_RELEASE_MASTER
@@ -2864,11 +2526,6 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	.allocate_aff_obs = cedf_allocate_affinity_observer,
 #endif
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	.enqueue_pai_tasklet = cedf_enqueue_pai_tasklet,
-	.change_prio_pai_tasklet = cedf_change_prio_pai_tasklet,
-	.run_tasklets = cedf_run_tasklets,
-#endif
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
 	.map_gpu_to_cpu = cedf_map_gpu_to_cpu,
 #endif
diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c
index a60f819138d8..791b9979190e 100644
--- a/litmus/sched_crm.c
+++ b/litmus/sched_crm.c
@@ -75,10 +75,6 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-#include <linux/interrupt.h>
-#endif
-
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -136,10 +132,6 @@ typedef struct clusterdomain {
 
 #define cluster_lock domain.ready_lock
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	struct tasklet_head pending_tasklets;
-#endif
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	raw_spinlock_t dgl_lock;
 #endif
@@ -1103,303 +1095,6 @@ static void crm_tick(struct task_struct* t)
 	}
 }
 
-
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-
-static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
-{
-	if (!atomic_read(&tasklet->count)) {
-		if(tasklet->owner) {
-			sched_trace_tasklet_begin(tasklet->owner);
-		}
-
-		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
-		{
-			BUG();
-		}
-		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
-			__FUNCTION__,
-			(tasklet->owner) ? tasklet->owner->pid : 0,
-			(tasklet->owner) ? 0 : 1);
-		tasklet->func(tasklet->data);
-		tasklet_unlock(tasklet);
-
-		if(tasklet->owner) {
-			sched_trace_tasklet_end(tasklet->owner, flushed);
-		}
-	}
-	else {
-		BUG();
-	}
-}
-
-
-static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_task)
-{
-	int work_to_do = 1;
-	struct tasklet_struct *tasklet = NULL;
-	unsigned long flags;
-
-	while(work_to_do) {
-
-		TS_NV_SCHED_BOTISR_START;
-
-		raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-
-		if(cluster->pending_tasklets.head != NULL) {
-			// remove tasklet at head.
-			struct tasklet_struct *prev = NULL;
-			tasklet = cluster->pending_tasklets.head;
-
-			// find a tasklet with prio to execute; skip ones where
-			// sched_task has a higher priority.
-			// We use the '!edf' test instead of swaping function arguments since
-			// both sched_task and owner could be NULL. In this case, we want to
-			// still execute the tasklet.
-			while(tasklet && !rm_higher_prio(tasklet->owner, sched_task)) {
-				prev = tasklet;
-				tasklet = tasklet->next;
-			}
-
-			if(tasklet) { // found something to execuite
-				// remove the tasklet from the queue
-				if(prev) {
-					prev->next = tasklet->next;
-					if(prev->next == NULL) {
-						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-						cluster->pending_tasklets.tail = &(prev);
-					}
-				}
-				else {
-					cluster->pending_tasklets.head = tasklet->next;
-					if(tasklet->next == NULL) {
-						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-						cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
-					}
-				}
-			}
-			else {
-				TRACE("%s: No tasklets with eligible priority.\n", __FUNCTION__);
-			}
-		}
-		else {
-			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
-		}
-
-		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-
-		if(tasklet) {
-			__do_lit_tasklet(tasklet, 0ul);
-			tasklet = NULL;
-		}
-		else {
-			work_to_do = 0;
-		}
-
-		TS_NV_SCHED_BOTISR_END;
-	}
-}
-
-static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_domain_t* cluster)
-{
-	struct tasklet_struct* step;
-
-	tasklet->next = NULL;  // make sure there are no old values floating around
-
-	step = cluster->pending_tasklets.head;
-	if(step == NULL) {
-		TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
-		// insert at tail.
-		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &(tasklet->next);
-	}
-	else if((*(cluster->pending_tasklets.tail) != NULL) &&
-			rm_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
-		// insert at tail.
-		TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
-
-		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &(tasklet->next);
-	}
-	else {
-
-		// insert the tasklet somewhere in the middle.
-
-		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
-
-		while(step->next && rm_higher_prio(step->next->owner, tasklet->owner)) {
-			step = step->next;
-		}
-
-		// insert tasklet right before step->next.
-
-		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
-			tasklet->owner->pid,
-			(step->owner) ?
-				step->owner->pid :
-				-1,
-			(step->next) ?
-				((step->next->owner) ?
-					step->next->owner->pid :
-					-1) :
-				-1);
-
-		tasklet->next = step->next;
-		step->next = tasklet;
-
-		// patch up the head if needed.
-		if(cluster->pending_tasklets.head == step)
-		{
-			TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
-			cluster->pending_tasklets.head = tasklet;
-		}
-	}
-}
-
-static void crm_run_tasklets(struct task_struct* sched_task)
-{
-	crm_domain_t* cluster;
-
-	preempt_disable();
-
-	cluster = (is_realtime(sched_task)) ?
-		task_cpu_cluster(sched_task) :
-		remote_cluster(smp_processor_id());
-
-	if(cluster && cluster->pending_tasklets.head != NULL) {
-		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
-		do_lit_tasklets(cluster, sched_task);
-	}
-
-	preempt_enable_no_resched();
-}
-
-
-
-static int crm_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
-{
-#if 0
-	crm_domain_t *cluster = NULL;
-	cpu_entry_t *targetCPU = NULL;
-	int thisCPU;
-	int runLocal = 0;
-	int runNow = 0;
-	unsigned long flags;
-
-	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
-	{
-		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
-		return 0;
-	}
-
-	cluster = task_cpu_cluster(tasklet->owner);
-
-	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-
-	thisCPU = smp_processor_id();
-
-#ifdef CONFIG_SCHED_CPU_AFFINITY
-	{
-		cpu_entry_t* affinity = NULL;
-
-		// use this CPU if it is in our cluster and isn't running any RT work.
-		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_cpu_entries).linked == NULL)) {
-			affinity = &(__get_cpu_var(crm_cpu_entries));
-		}
-		else {
-			// this CPU is busy or shouldn't run tasklet in this cluster.
-			// look for available near by CPUs.
-			// NOTE: Affinity towards owner and not this CPU. Is this right?
-			affinity =
-				crm_get_nearest_available_cpu(cluster,
-					&per_cpu(crm_cpu_entries, task_cpu(tasklet->owner)));
-		}
-
-		targetCPU = affinity;
-	}
-#endif
-
-	if (targetCPU == NULL) {
-		targetCPU = lowest_prio_cpu(cluster);
-	}
-
-	if (rm_higher_prio(tasklet->owner, targetCPU->linked)) {
-		if (thisCPU == targetCPU->cpu) {
-			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
-			runLocal = 1;
-			runNow = 1;
-		}
-		else {
-			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
-			runLocal = 0;
-			runNow = 1;
-		}
-	}
-	else {
-		runLocal = 0;
-		runNow = 0;
-	}
-
-	if(!runLocal) {
-		// enqueue the tasklet
-		__add_pai_tasklet(tasklet, cluster);
-	}
-
-	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-
-
-	if (runLocal /*&& runNow */) {  // runNow == 1 is implied
-		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-		__do_lit_tasklet(tasklet, 0ul);
-	}
-	else if (runNow /*&& !runLocal */) {  // runLocal == 0 is implied
-		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
-		preempt(targetCPU);  // need to be protected by cluster_lock?
-	}
-	else {
-		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
-	}
-#else
-	TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-	__do_lit_tasklet(tasklet, 0ul);
-#endif
-	return(1);  // success
-}
-
-static void crm_change_prio_pai_tasklet(struct task_struct *old_prio,
-	struct task_struct *new_prio)
-{
-	struct tasklet_struct* step;
-	unsigned long flags;
-	crm_domain_t *cluster;
-	struct task_struct *probe;
-
-	// identify the cluster by the assignment of these tasks. one should
-	// be non-NULL.
-	probe = (old_prio) ? old_prio : new_prio;
-
-	if(probe) {
-		cluster = task_cpu_cluster(probe);
-
-		if(cluster->pending_tasklets.head != NULL) {
-			raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-			for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
-				if(step->owner == old_prio) {
-					TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
-					step->owner = new_prio;
-				}
-			}
-			raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-		}
-	}
-	else {
-		TRACE("%s: Both priorities were NULL\n");
-	}
-}
-
-#endif  // PAI
-
 #ifdef CONFIG_LITMUS_LOCKING
 static int __increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh);
 #endif
@@ -1791,10 +1486,6 @@ static void crm_task_exit(struct task_struct * t)
 	unsigned long flags;
 	crm_domain_t *cluster = task_cpu_cluster(t);
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	crm_change_prio_pai_tasklet(t, NULL);
-#endif
-
 	/* unlink if necessary */
 	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
@@ -2120,17 +1811,6 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 	__increase_priority_inheritance(t, prio_inh);
 
 	raw_readyq_unlock(&cluster->cluster_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_increase(t, i);
-		}
-	}
-#endif
 }
 
 /* called with IRQs off */
@@ -2294,17 +1974,6 @@ static void decrease_priority_inheritance(struct task_struct* t,
 	__decrease_priority_inheritance(t, prio_inh, budget_tiggered);
 
 	raw_readyq_unlock(&cluster->cluster_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_decrease(t, i);
-		}
-	}
-#endif
 }
 
 
@@ -2741,13 +2410,6 @@ static long crm_activate_plugin(void)
 		INIT_BINHEAP_HANDLE(&(crm[i].cpu_heap), cpu_lower_prio);
 		rm_domain_init(&(crm[i].domain), NULL, crm_release_jobs);
 
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-		crm[i].pending_tasklets.head = NULL;
-		crm[i].pending_tasklets.tail = &(crm[i].pending_tasklets.head);
-#endif
-
-
 		if(!zalloc_cpumask_var(&crm[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 #ifdef CONFIG_RELEASE_MASTER
@@ -2865,11 +2527,6 @@ static struct sched_plugin crm_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	.allocate_aff_obs = crm_allocate_affinity_observer,
 #endif
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	.enqueue_pai_tasklet = crm_enqueue_pai_tasklet,
-	.change_prio_pai_tasklet = crm_change_prio_pai_tasklet,
-	.run_tasklets = crm_run_tasklets,
-#endif
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
 	.map_gpu_to_cpu = crm_map_gpu_to_cpu,
 #endif
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c652048f8df1..a624a0df3acb 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -49,10 +49,6 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-#include <linux/interrupt.h>
-#endif
-
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -155,11 +151,6 @@ static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t)
 }
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-struct tasklet_head gsnedf_pending_tasklets;
-#endif
-
-
 /* Uncomment this if you want to see all scheduling decisions in the
  * TRACE() log.
 #define WANT_ALL_SCHED_EVENTS
@@ -465,317 +456,6 @@ static void gsnedf_tick(struct task_struct* t)
 	}
 }
 
-
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-
-
-static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
-{
-	if (!atomic_read(&tasklet->count)) {
-		if(tasklet->owner) {
-			sched_trace_tasklet_begin(tasklet->owner);
-		}
-
-		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
-		{
-			BUG();
-		}
-		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
-			__FUNCTION__,
-			(tasklet->owner) ? tasklet->owner->pid : 0,
-			(tasklet->owner) ? 0 : 1);
-		tasklet->func(tasklet->data);
-		tasklet_unlock(tasklet);
-
-		if(tasklet->owner) {
-			sched_trace_tasklet_end(tasklet->owner, flushed);
-		}
-	}
-	else {
-		BUG();
-	}
-}
-
-static void do_lit_tasklets(struct task_struct* sched_task)
-{
-	int work_to_do = 1;
-	struct tasklet_struct *tasklet = NULL;
-	unsigned long flags;
-
-	while(work_to_do) {
-
-		TS_NV_SCHED_BOTISR_START;
-
-		// execute one tasklet that has higher priority
-		raw_readyq_lock_irqsave(&gsnedf_lock, flags);
-
-		if(gsnedf_pending_tasklets.head != NULL) {
-			struct tasklet_struct *prev = NULL;
-			tasklet = gsnedf_pending_tasklets.head;
-
-			while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) {
-				prev = tasklet;
-				tasklet = tasklet->next;
-			}
-
-			// remove the tasklet from the queue
-			if(prev) {
-				prev->next = tasklet->next;
-				if(prev->next == NULL) {
-					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-					gsnedf_pending_tasklets.tail = &(prev);
-				}
-			}
-			else {
-				gsnedf_pending_tasklets.head = tasklet->next;
-				if(tasklet->next == NULL) {
-					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-					gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
-				}
-			}
-		}
-		else {
-			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
-		}
-
-		raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
-
-		if(tasklet) {
-			__do_lit_tasklet(tasklet, 0ul);
-			tasklet = NULL;
-		}
-		else {
-			work_to_do = 0;
-		}
-
-		TS_NV_SCHED_BOTISR_END;
-	}
-}
-
-//static void do_lit_tasklets(struct task_struct* sched_task)
-//{
-//	int work_to_do = 1;
-//	struct tasklet_struct *tasklet = NULL;
-//	//struct tasklet_struct *step;
-//	unsigned long flags;
-//
-//	while(work_to_do) {
-//
-//		TS_NV_SCHED_BOTISR_START;
-//
-//		// remove tasklet at head of list if it has higher priority.
-//		raw_spin_lock_irqsave(&gsnedf_lock, flags);
-//
-//		if(gsnedf_pending_tasklets.head != NULL) {
-//			// remove tasklet at head.
-//			tasklet = gsnedf_pending_tasklets.head;
-//
-//			if(edf_higher_prio(tasklet->owner, sched_task)) {
-//
-//				if(NULL == tasklet->next) {
-//					// tasklet is at the head, list only has one element
-//					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-//					gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
-//				}
-//
-//				// remove the tasklet from the queue
-//				gsnedf_pending_tasklets.head = tasklet->next;
-//
-//				TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-//			}
-//			else {
-//				TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
-//				tasklet = NULL;
-//			}
-//		}
-//		else {
-//			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
-//		}
-//
-//		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
-//
-//		TS_NV_SCHED_BOTISR_END;
-//
-//		if(tasklet) {
-//			__do_lit_tasklet(tasklet, 0ul);
-//			tasklet = NULL;
-//		}
-//		else {
-//			work_to_do = 0;
-//		}
-//	}
-//
-//	//TRACE("%s: exited.\n", __FUNCTION__);
-//}
-
-static void __add_pai_tasklet(struct tasklet_struct* tasklet)
-{
-	struct tasklet_struct* step;
-
-	tasklet->next = NULL;  // make sure there are no old values floating around
-
-	step = gsnedf_pending_tasklets.head;
-	if(step == NULL) {
-		TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
-		// insert at tail.
-		*(gsnedf_pending_tasklets.tail) = tasklet;
-		gsnedf_pending_tasklets.tail = &(tasklet->next);
-	}
-	else if((*(gsnedf_pending_tasklets.tail) != NULL) &&
-			edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) {
-		// insert at tail.
-		TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
-
-		*(gsnedf_pending_tasklets.tail) = tasklet;
-		gsnedf_pending_tasklets.tail = &(tasklet->next);
-	}
-	else {
-		// insert the tasklet somewhere in the middle.
-
-		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
-
-		while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
-			step = step->next;
-		}
-
-		// insert tasklet right before step->next.
-
-		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : 0);
-
-		tasklet->next = step->next;
-		step->next = tasklet;
-
-		// patch up the head if needed.
-		if(gsnedf_pending_tasklets.head == step)
-		{
-			TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
-			gsnedf_pending_tasklets.head = tasklet;
-		}
-	}
-}
-
-static void gsnedf_run_tasklets(struct task_struct* sched_task)
-{
-	preempt_disable();
-
-	if(gsnedf_pending_tasklets.head != NULL) {
-		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
-		do_lit_tasklets(sched_task);
-	}
-
-	preempt_enable_no_resched();
-}
-
-static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
-{
-	cpu_entry_t *targetCPU = NULL;
-	int thisCPU;
-	int runLocal = 0;
-	int runNow = 0;
-	unsigned long flags;
-
-	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
-	{
-		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
-		return 0;
-	}
-
-
-	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
-
-	thisCPU = smp_processor_id();
-
-#ifdef CONFIG_SCHED_CPU_AFFINITY
-	{
-		cpu_entry_t* affinity = NULL;
-
-		// use this CPU if it is in our cluster and isn't running any RT work.
-		if(
-#ifdef CONFIG_RELEASE_MASTER
-			(thisCPU != gsnedf.release_master) &&
-#endif
-			(__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) {
-			affinity = &(__get_cpu_var(gsnedf_cpu_entries));
-		}
-		else {
-			// this CPU is busy or shouldn't run tasklet in this cluster.
-			// look for available near by CPUs.
-			// NOTE: Affinity towards owner and not this CPU. Is this right?
-			affinity =
-				gsnedf_get_nearest_available_cpu(
-					&per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner)));
-		}
-
-		targetCPU = affinity;
-	}
-#endif
-
-	if (targetCPU == NULL) {
-		targetCPU = lowest_prio_cpu();
-	}
-
-	if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
-		if (thisCPU == targetCPU->cpu) {
-			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
-			runLocal = 1;
-			runNow = 1;
-		}
-		else {
-			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
-			runLocal = 0;
-			runNow = 1;
-		}
-	}
-	else {
-		runLocal = 0;
-		runNow = 0;
-	}
-
-	if(!runLocal) {
-		// enqueue the tasklet
-		__add_pai_tasklet(tasklet);
-	}
-
-	raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
-
-
-	if (runLocal /*&& runNow */) {  // runNow == 1 is implied
-		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-		__do_lit_tasklet(tasklet, 0ul);
-	}
-	else if (runNow /*&& !runLocal */) {  // runLocal == 0 is implied
-		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
-		preempt(targetCPU);  // need to be protected by cedf_lock?
-	}
-	else {
-		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
-	}
-
-	return(1);  // success
-}
-
-static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio,
-	struct task_struct *new_prio)
-{
-	struct tasklet_struct* step;
-	unsigned long flags;
-
-	if(gsnedf_pending_tasklets.head != NULL) {
-		raw_readyq_lock_irqsave(&gsnedf_lock, flags);
-		for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) {
-			if(step->owner == old_prio) {
-				TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
-				step->owner = new_prio;
-			}
-		}
-		raw_readyq_unlock_irqrestore(&gsnedf_lock, flags);
-	}
-}
-
-#endif  // end PAI
-
-
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -1072,10 +752,6 @@ static void gsnedf_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	gsnedf_change_prio_pai_tasklet(t, NULL);
-#endif
-
 	/* unlink if necessary */
 	raw_readyq_lock_irqsave(&gsnedf_lock, flags);
 
@@ -1275,17 +951,6 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 	success = __increase_priority_inheritance(t, prio_inh);
 
 	raw_readyq_unlock(&gsnedf_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_increase(t, i);
-		}
-	}
-#endif
 }
 
 
@@ -1392,17 +1057,6 @@ static void decrease_priority_inheritance(struct task_struct* t,
 	success = __decrease_priority_inheritance(t, prio_inh, budget_triggered);
 
 	raw_readyq_unlock(&gsnedf_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_decrease(t, i);
-		}
-	}
-#endif
 }
 
 
@@ -1906,11 +1560,6 @@ static long gsnedf_activate_plugin(void)
 #endif
 	}
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	gsnedf_pending_tasklets.head = NULL;
-	gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
-#endif
-
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	init_klmirqd();
 #endif
@@ -1954,11 +1603,6 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	.allocate_aff_obs = gsnedf_allocate_affinity_observer,
 #endif
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	.enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet,
-	.change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet,
-	.run_tasklets = gsnedf_run_tasklets,
-#endif
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
 	.map_gpu_to_cpu = gsnedf_map_gpu_to_cpu,
 #endif
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 35a98eb806a2..f82272f64403 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -167,26 +167,6 @@ static int litmus_dummy___decrease_prio(struct task_struct* t, struct task_struc
 }
 #endif
 
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t)
-{
-	TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
-	return(0); // failure.
-}
-
-static void litmus_dummy_change_prio_pai_tasklet(struct task_struct *old_prio,
-	struct task_struct *new_prio)
-{
-	TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
-}
-
-static void litmus_dummy_run_tasklets(struct task_struct* t)
-{
-	//TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
-}
-#endif
-
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 static void litmus_dummy_nested_increase_prio(struct task_struct* t, struct task_struct* prio_inh,
 	raw_spinlock_t *to_unlock, unsigned long irqflags)
@@ -258,11 +238,6 @@ struct sched_plugin linux_sched_plugin = {
 	.nested_decrease_prio = litmus_dummy_nested_decrease_prio,
 	.__compare = litmus_dummy___compare,
 #endif
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	.enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet,
-	.change_prio_pai_tasklet = litmus_dummy_change_prio_pai_tasklet,
-	.run_tasklets = litmus_dummy_run_tasklets,
-#endif
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	.get_dgl_spinlock = litmus_dummy_get_dgl_spinlock,
 #endif
@@ -320,11 +295,6 @@ int register_sched_plugin(struct sched_plugin* plugin)
 	CHECK(nested_decrease_prio);
 	CHECK(__compare);
 #endif
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	CHECK(enqueue_pai_tasklet);
-	CHECK(change_prio_pai_tasklet);
-	CHECK(run_tasklets);
-#endif
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	CHECK(get_dgl_spinlock);
 #endif