path: root/litmus/sched_crm.c
author	Glenn Elliott <gelliott@cs.unc.edu>	2013-10-09 15:45:23 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-10-09 15:47:26 -0400
commit	7fb54decc59fa5855cd273bfda908fd443a37e67 (patch)
tree	5941c299884d77e9ecd6be7e4041d94f00e01ef0 /litmus/sched_crm.c
parent	b5b69edba68805ab2d4bdeae58fe783710111bfc (diff)
Remove PAI support
Support for process-aware interrupt handling was broken when the tasklet ownership model changed in GPUSync, post-ECRTS12. We will want to re-add support later, but remove it for now.
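For context on what is being removed: PAI keeps a per-cluster list of pending tasklets sorted by the priority of each tasklet's owning real-time task, and drains it only while an owner outranks the task currently scheduled on the CPU (see do_lit_tasklets() and __add_pai_tasklet() in the hunks below). The following user-space sketch illustrates that idea only; it is not LITMUS^RT code, and the names (lit_tasklet, owner_prio, pai_enqueue, pai_run) and the lower-number-is-higher-priority integer model are illustrative assumptions.

#include <stdio.h>

struct lit_tasklet {
	int owner_prio;              /* lower value = higher priority (illustrative) */
	void (*func)(void *data);    /* deferred bottom-half work */
	void *data;
	struct lit_tasklet *next;
};

/* Keep the pending list sorted from highest- to lowest-priority owner. */
static void pai_enqueue(struct lit_tasklet **head, struct lit_tasklet *t)
{
	struct lit_tasklet **pos = head;

	while (*pos && (*pos)->owner_prio <= t->owner_prio)
		pos = &(*pos)->next;
	t->next = *pos;
	*pos = t;
}

/* Run queued tasklets whose owners outrank the currently scheduled task;
 * stop at the first one that does not, since the list is sorted. */
static void pai_run(struct lit_tasklet **head, int sched_prio)
{
	while (*head && (*head)->owner_prio < sched_prio) {
		struct lit_tasklet *t = *head;

		*head = t->next;
		t->func(t->data);
	}
}

static void work(void *data)
{
	printf("ran tasklet owned by prio %d\n", *(int *)data);
}

int main(void)
{
	int p_hi = 5, p_lo = 20;
	struct lit_tasklet a = { .owner_prio = p_hi, .func = work, .data = &p_hi };
	struct lit_tasklet b = { .owner_prio = p_lo, .func = work, .data = &p_lo };
	struct lit_tasklet *pending = NULL;

	pai_enqueue(&pending, &b);
	pai_enqueue(&pending, &a);	/* sorts ahead of b */
	pai_run(&pending, 10);		/* only the prio-5 owner outranks prio 10 */
	return 0;
}

In the real plugin the comparison is rm_higher_prio() on the owners' task_structs and the queue lives in each crm_domain_t, as the removed hunks show.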
Diffstat (limited to 'litmus/sched_crm.c')
-rw-r--r--	litmus/sched_crm.c	343
1 file changed, 0 insertions(+), 343 deletions(-)
diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c
index a60f819138d8..791b9979190e 100644
--- a/litmus/sched_crm.c
+++ b/litmus/sched_crm.c
@@ -75,10 +75,6 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-#include <linux/interrupt.h>
-#endif
-
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -136,10 +132,6 @@ typedef struct clusterdomain {
 
 #define cluster_lock domain.ready_lock
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	struct tasklet_head pending_tasklets;
-#endif
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	raw_spinlock_t dgl_lock;
 #endif
@@ -1103,303 +1095,6 @@ static void crm_tick(struct task_struct* t)
 	}
 }
 
-
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-
-static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
-{
-	if (!atomic_read(&tasklet->count)) {
-		if(tasklet->owner) {
-			sched_trace_tasklet_begin(tasklet->owner);
-		}
-
-		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
-		{
-			BUG();
-		}
-		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
-			__FUNCTION__,
-			(tasklet->owner) ? tasklet->owner->pid : 0,
-			(tasklet->owner) ? 0 : 1);
-		tasklet->func(tasklet->data);
-		tasklet_unlock(tasklet);
-
-		if(tasklet->owner) {
-			sched_trace_tasklet_end(tasklet->owner, flushed);
-		}
-	}
-	else {
-		BUG();
-	}
-}
-
-
-static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_task)
-{
-	int work_to_do = 1;
-	struct tasklet_struct *tasklet = NULL;
-	unsigned long flags;
-
-	while(work_to_do) {
-
-		TS_NV_SCHED_BOTISR_START;
-
-		raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-
-		if(cluster->pending_tasklets.head != NULL) {
-			// remove tasklet at head.
-			struct tasklet_struct *prev = NULL;
-			tasklet = cluster->pending_tasklets.head;
-
-			// find a tasklet with prio to execute; skip ones where
-			// sched_task has a higher priority.
-			// We use the '!edf' test instead of swaping function arguments since
-			// both sched_task and owner could be NULL. In this case, we want to
-			// still execute the tasklet.
-			while(tasklet && !rm_higher_prio(tasklet->owner, sched_task)) {
-				prev = tasklet;
-				tasklet = tasklet->next;
-			}
-
-			if(tasklet) { // found something to execuite
-				// remove the tasklet from the queue
-				if(prev) {
-					prev->next = tasklet->next;
-					if(prev->next == NULL) {
-						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-						cluster->pending_tasklets.tail = &(prev);
-					}
-				}
-				else {
-					cluster->pending_tasklets.head = tasklet->next;
-					if(tasklet->next == NULL) {
-						TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-						cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
-					}
-				}
-			}
-			else {
-				TRACE("%s: No tasklets with eligible priority.\n", __FUNCTION__);
-			}
-		}
-		else {
-			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
-		}
-
-		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-
-		if(tasklet) {
-			__do_lit_tasklet(tasklet, 0ul);
-			tasklet = NULL;
-		}
-		else {
-			work_to_do = 0;
-		}
-
-		TS_NV_SCHED_BOTISR_END;
-	}
-}
-
-static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_domain_t* cluster)
-{
-	struct tasklet_struct* step;
-
-	tasklet->next = NULL; // make sure there are no old values floating around
-
-	step = cluster->pending_tasklets.head;
-	if(step == NULL) {
-		TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
-		// insert at tail.
-		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &(tasklet->next);
-	}
-	else if((*(cluster->pending_tasklets.tail) != NULL) &&
-		rm_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
-		// insert at tail.
-		TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
-
-		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &(tasklet->next);
-	}
-	else {
-
-		// insert the tasklet somewhere in the middle.
-
-		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
-
-		while(step->next && rm_higher_prio(step->next->owner, tasklet->owner)) {
-			step = step->next;
-		}
-
-		// insert tasklet right before step->next.
-
-		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
-			tasklet->owner->pid,
-			(step->owner) ?
-				step->owner->pid :
-				-1,
-			(step->next) ?
-				((step->next->owner) ?
-					step->next->owner->pid :
-					-1) :
-				-1);
-
-		tasklet->next = step->next;
-		step->next = tasklet;
-
-		// patch up the head if needed.
-		if(cluster->pending_tasklets.head == step)
-		{
-			TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
-			cluster->pending_tasklets.head = tasklet;
-		}
-	}
-}
-
-static void crm_run_tasklets(struct task_struct* sched_task)
-{
-	crm_domain_t* cluster;
-
-	preempt_disable();
-
-	cluster = (is_realtime(sched_task)) ?
-		task_cpu_cluster(sched_task) :
-		remote_cluster(smp_processor_id());
-
-	if(cluster && cluster->pending_tasklets.head != NULL) {
-		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
-		do_lit_tasklets(cluster, sched_task);
-	}
-
-	preempt_enable_no_resched();
-}
-
-
-
-static int crm_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
-{
-#if 0
-	crm_domain_t *cluster = NULL;
-	cpu_entry_t *targetCPU = NULL;
-	int thisCPU;
-	int runLocal = 0;
-	int runNow = 0;
-	unsigned long flags;
-
-	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
-	{
-		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
-		return 0;
-	}
-
-	cluster = task_cpu_cluster(tasklet->owner);
-
-	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-
-	thisCPU = smp_processor_id();
-
-#ifdef CONFIG_SCHED_CPU_AFFINITY
-	{
-		cpu_entry_t* affinity = NULL;
-
-		// use this CPU if it is in our cluster and isn't running any RT work.
-		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_cpu_entries).linked == NULL)) {
-			affinity = &(__get_cpu_var(crm_cpu_entries));
-		}
-		else {
-			// this CPU is busy or shouldn't run tasklet in this cluster.
-			// look for available near by CPUs.
-			// NOTE: Affinity towards owner and not this CPU. Is this right?
-			affinity =
-				crm_get_nearest_available_cpu(cluster,
-					&per_cpu(crm_cpu_entries, task_cpu(tasklet->owner)));
-		}
-
-		targetCPU = affinity;
-	}
-#endif
-
-	if (targetCPU == NULL) {
-		targetCPU = lowest_prio_cpu(cluster);
-	}
-
-	if (rm_higher_prio(tasklet->owner, targetCPU->linked)) {
-		if (thisCPU == targetCPU->cpu) {
-			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
-			runLocal = 1;
-			runNow = 1;
-		}
-		else {
-			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
-			runLocal = 0;
-			runNow = 1;
-		}
-	}
-	else {
-		runLocal = 0;
-		runNow = 0;
-	}
-
-	if(!runLocal) {
-		// enqueue the tasklet
-		__add_pai_tasklet(tasklet, cluster);
-	}
-
-	raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-
-
-	if (runLocal /*&& runNow */) { // runNow == 1 is implied
-		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-		__do_lit_tasklet(tasklet, 0ul);
-	}
-	else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
-		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
-		preempt(targetCPU); // need to be protected by cluster_lock?
-	}
-	else {
-		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
-	}
-#else
-	TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-	__do_lit_tasklet(tasklet, 0ul);
-#endif
-	return(1); // success
-}
-
-static void crm_change_prio_pai_tasklet(struct task_struct *old_prio,
-				struct task_struct *new_prio)
-{
-	struct tasklet_struct* step;
-	unsigned long flags;
-	crm_domain_t *cluster;
-	struct task_struct *probe;
-
-	// identify the cluster by the assignment of these tasks. one should
-	// be non-NULL.
-	probe = (old_prio) ? old_prio : new_prio;
-
-	if(probe) {
-		cluster = task_cpu_cluster(probe);
-
-		if(cluster->pending_tasklets.head != NULL) {
-			raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
-			for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
-				if(step->owner == old_prio) {
-					TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
-					step->owner = new_prio;
-				}
-			}
-			raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
-		}
-	}
-	else {
-		TRACE("%s: Both priorities were NULL\n");
-	}
-}
-
-#endif // PAI
-
 #ifdef CONFIG_LITMUS_LOCKING
 static int __increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh);
 #endif
@@ -1791,10 +1486,6 @@ static void crm_task_exit(struct task_struct * t)
 	unsigned long flags;
 	crm_domain_t *cluster = task_cpu_cluster(t);
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	crm_change_prio_pai_tasklet(t, NULL);
-#endif
-
 	/* unlink if necessary */
 	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
@@ -2120,17 +1811,6 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 	__increase_priority_inheritance(t, prio_inh);
 
 	raw_readyq_unlock(&cluster->cluster_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_increase(t, i);
-		}
-	}
-#endif
 }
 
 /* called with IRQs off */
@@ -2294,17 +1974,6 @@ static void decrease_priority_inheritance(struct task_struct* t,
 	__decrease_priority_inheritance(t, prio_inh, budget_tiggered);
 
 	raw_readyq_unlock(&cluster->cluster_lock);
-
-#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
-	if(tsk_rt(t)->held_gpus) {
-		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
-			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
-			pai_check_priority_decrease(t, i);
-		}
-	}
-#endif
 }
 
 
@@ -2741,13 +2410,6 @@ static long crm_activate_plugin(void)
 		INIT_BINHEAP_HANDLE(&(crm[i].cpu_heap), cpu_lower_prio);
 		rm_domain_init(&(crm[i].domain), NULL, crm_release_jobs);
 
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-		crm[i].pending_tasklets.head = NULL;
-		crm[i].pending_tasklets.tail = &(crm[i].pending_tasklets.head);
-#endif
-
-
 		if(!zalloc_cpumask_var(&crm[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 #ifdef CONFIG_RELEASE_MASTER
@@ -2865,11 +2527,6 @@ static struct sched_plugin crm_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	.allocate_aff_obs = crm_allocate_affinity_observer,
 #endif
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	.enqueue_pai_tasklet = crm_enqueue_pai_tasklet,
-	.change_prio_pai_tasklet = crm_change_prio_pai_tasklet,
-	.run_tasklets = crm_run_tasklets,
-#endif
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
 	.map_gpu_to_cpu = crm_map_gpu_to_cpu,
 #endif