path: root/litmus/sched_cedf.c
author	Glenn Elliott <gelliott@cs.unc.edu>	2012-12-11 22:01:01 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-12-12 14:14:41 -0500
commit	c8483ef0959672310bf4ebb72e1a308b00543f74 (patch)
tree	9cb306009b01c5226178f69172738026431d37f2 /litmus/sched_cedf.c
parent	fbd9574e298157b54c38f82f536e5cea8f766dff (diff)
make klmirqd work like aux tasks. checkpoint.
this code is untested!
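Note: this checkpoint retires the plugin-level klmirqd proxy-thread plumbing and routes everything through the same owner-tracking interface the aux-task code uses. The calls the diff below relies on are init_klmirqd(), enable_gpu_owner(), disable_gpu_owner(), gpu_owner_increase_priority(), and gpu_owner_decrease_priority(). A rough sketch of the assumed interface follows (prototypes inferred from the call sites in this diff, not copied from a LITMUS^RT header):

/* Assumed shape of the klmirqd/GPU-owner interface this patch calls.
 * Names are taken from the diff; exact signatures and return values
 * are guesses based on the call sites. */
void init_klmirqd(void);                       /* one-time setup; replaces per-plugin spawn_klmirqd() */
void enable_gpu_owner(struct task_struct *t);  /* t suspended: klmirqd threads for t's GPUs may inherit from t */
void disable_gpu_owner(struct task_struct *t); /* t runnable or exiting: stop inheriting from t */
void gpu_owner_increase_priority(struct task_struct *t); /* push a priority increase through to klmirqd */
void gpu_owner_decrease_priority(struct task_struct *t); /* push a priority decrease through to klmirqd */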
Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r--	litmus/sched_cedf.c	165
1 file changed, 38 insertions(+), 127 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 44c8336c5061..84aafca78cde 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -44,6 +44,7 @@
 
 #include <litmus/bheap.h>
 #include <litmus/binheap.h>
+#include <litmus/trace.h>
 
 #ifdef CONFIG_LITMUS_LOCKING
 #include <litmus/kfmlp_lock.h>
@@ -75,7 +76,6 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include <linux/interrupt.h>
-#include <litmus/trace.h>
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -118,14 +118,6 @@ DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries);
 #define test_will_schedule(cpu) \
 	(atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule))
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-struct tasklet_head
-{
-	struct tasklet_struct *head;
-	struct tasklet_struct **tail;
-};
-#endif
-
 /*
  * In C-EDF there is a cedf domain _per_ cluster
  * The number of clusters is dynamically determined accordingly to the
@@ -1038,6 +1030,13 @@ static void cedf_task_wake_up(struct task_struct *task)
 	}
 #endif
 
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (tsk_rt(task)->held_gpus && !tsk_rt(task)->hide_from_gpu) {
+		TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", task->comm, task->pid);
+		disable_gpu_owner(task);
+	}
+#endif
+
 	cedf_job_arrival(task);
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
@@ -1064,6 +1063,14 @@ static void cedf_task_block(struct task_struct *t)
 	}
 #endif
 
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) {
+
+		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
+		enable_gpu_owner(t);
+	}
+#endif
+
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
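The wake-up and block hooks above are deliberate mirror images: while the GPU holder is suspended, its effective priority is donated to the klmirqd threads doing GPU work on its behalf, and the donation is revoked the moment it becomes runnable again. A minimal sketch of the pattern (gpu_owner_update() is a hypothetical helper; held_gpus and hide_from_gpu are the fields used in the hunks above):

/* Hypothetical condensation of the wake/block symmetry added above. */
static void gpu_owner_update(struct task_struct *t, int blocked)
{
	if (!tsk_rt(t)->held_gpus || tsk_rt(t)->hide_from_gpu)
		return;
	if (blocked)
		enable_gpu_owner(t);   /* t cannot run; let klmirqd inherit its priority */
	else
		disable_gpu_owner(t);  /* t competes directly; inheritance would be wrong */
}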
@@ -1092,6 +1099,13 @@ static void cedf_task_exit(struct task_struct * t)
 	}
 #endif
 
+#ifdef CONFIG_LITMUS_NVIDIA
+	/* make sure we clean up on our way out */
+	if(tsk_rt(t)->held_gpus) {
+		disable_gpu_owner(t);
+	}
+#endif
+
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cpu_entry_t *cpu;
@@ -1208,6 +1222,13 @@ static int __increase_priority_inheritance(struct task_struct* t,
 			aux_task_owner_increase_priority(t);
 		}
 #endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+		/* propagate to gpu klmirqd */
+		if (tsk_rt(t)->held_gpus) {
+			gpu_owner_increase_priority(t);
+		}
+#endif
 	}
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	}
@@ -1237,16 +1258,6 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 
 	__increase_priority_inheritance(t, prio_inh);
 
-#ifdef CONFIG_LITMUS_SOFTIRQD
-	if(tsk_rt(t)->cur_klmirqd != NULL)
-	{
-		TRACE_TASK(t, "%s/%d inherits a new priority!\n",
-				   tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid);
-
-		__increase_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh);
-	}
-#endif
-
 	raw_spin_unlock(&cluster->cluster_lock);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
@@ -1320,6 +1331,13 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 		}
 #endif
 
+#ifdef CONFIG_LITMUS_NVIDIA
+		/* propagate to gpu */
+		if (tsk_rt(t)->held_gpus) {
+			gpu_owner_decrease_priority(t);
+		}
+#endif
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	}
 	else {
@@ -1346,16 +1364,6 @@ static void decrease_priority_inheritance(struct task_struct* t,
 	raw_spin_lock(&cluster->cluster_lock);
 	__decrease_priority_inheritance(t, prio_inh);
 
-#ifdef CONFIG_LITMUS_SOFTIRQD
-	if(tsk_rt(t)->cur_klmirqd != NULL)
-	{
-		TRACE_TASK(t, "%s/%d decreases in priority!\n",
-				   tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid);
-
-		__decrease_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh);
-	}
-#endif
-
 	raw_spin_unlock(&cluster->cluster_lock);
 
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
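With the two hunks above, klmirqd priority propagation moves from the outer wrappers, which could only follow the single tsk_rt(t)->cur_klmirqd pointer, into __increase_priority_inheritance()/__decrease_priority_inheritance() themselves, keyed off held_gpus. The practical difference is fan-out: a task can hold several GPUs, each potentially serviced by its own klmirqd thread. A sketch of the assumed propagation (the loop structure, iterator, and lookup are guesses; only the entry points appear in this diff):

/* Assumed fan-out inside gpu_owner_increase_priority(); the real
 * implementation lives outside this file and is not shown here. */
void gpu_owner_increase_priority(struct task_struct *t)
{
	int gpu;
	for_each_gpu_held(t, gpu) {                   /* hypothetical iterator */
		struct task_struct *k = klmirqd_for(gpu); /* hypothetical lookup   */
		if (k)
			increase_priority_inheritance(k, effective_priority(t));
	}
}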
@@ -1371,73 +1379,6 @@ static void decrease_priority_inheritance(struct task_struct* t,
 }
 
 
-
-
-
-#ifdef CONFIG_LITMUS_SOFTIRQD
-/* called with IRQs off */
-static void increase_priority_inheritance_klmirqd(struct task_struct* klmirqd,
-				struct task_struct* old_owner,
-				struct task_struct* new_owner)
-{
-	cedf_domain_t* cluster = task_cpu_cluster(klmirqd);
-
-	BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread));
-
-	raw_spin_lock(&cluster->cluster_lock);
-
-	if(old_owner != new_owner)
-	{
-		if(old_owner)
-		{
-			// unreachable?
-			tsk_rt(old_owner)->cur_klmirqd = NULL;
-		}
-
-		TRACE_TASK(klmirqd, "giving ownership to %s/%d.\n",
-				   new_owner->comm, new_owner->pid);
-
-		tsk_rt(new_owner)->cur_klmirqd = klmirqd;
-	}
-
-	__decrease_priority_inheritance(klmirqd, NULL);  // kludge to clear out cur prio.
-
-	__increase_priority_inheritance(klmirqd,
-			(tsk_rt(new_owner)->inh_task == NULL) ?
-				new_owner :
-				tsk_rt(new_owner)->inh_task);
-
-	raw_spin_unlock(&cluster->cluster_lock);
-}
-
-
-/* called with IRQs off */
-static void decrease_priority_inheritance_klmirqd(struct task_struct* klmirqd,
-				struct task_struct* old_owner,
-				struct task_struct* new_owner)
-{
-	cedf_domain_t* cluster = task_cpu_cluster(klmirqd);
-
-	BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread));
-
-	raw_spin_lock(&cluster->cluster_lock);
-
-	TRACE_TASK(klmirqd, "priority restored\n");
-
-	__decrease_priority_inheritance(klmirqd, new_owner);
-
-	tsk_rt(old_owner)->cur_klmirqd = NULL;
-
-	raw_spin_unlock(&cluster->cluster_lock);
-}
-#endif  // CONFIG_LITMUS_SOFTIRQD
-
-
-
-
-
-
-
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 
 /* called with IRQs off */
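The deleted pair above was the old model: each owner task tracked at most one proxy via tsk_rt(owner)->cur_klmirqd, and re-basing the proxy's priority required the decrease(NULL)-then-increase kludge the comment admits to. Condensed for contrast, the removed handoff was equivalent to:

/* Old handoff (removed above), condensed: one klmirqd proxy inherits
 * from exactly one owner at a time. */
if (old_owner)
	tsk_rt(old_owner)->cur_klmirqd = NULL;      /* drop stale pairing       */
tsk_rt(new_owner)->cur_klmirqd = klmirqd;       /* record new pairing       */
__decrease_priority_inheritance(klmirqd, NULL); /* kludge: reset base prio  */
__increase_priority_inheritance(klmirqd,
	tsk_rt(new_owner)->inh_task ?
		tsk_rt(new_owner)->inh_task : new_owner); /* adopt owner's effective prio */

Presumably the gpu-owner layer keeps this bookkeeping per GPU instead of per task, which is what lets the per-plugin callbacks go away entirely (see the plugin-ops hunk at the end of this diff).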
@@ -1836,33 +1777,7 @@ static long cedf_activate_plugin(void)
 	}
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-	{
-		/* distribute the daemons evenly across the clusters. */
-		int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC);
-		int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters;
-		int left_over = NR_LITMUS_SOFTIRQD % num_clusters;
-
-		int daemon = 0;
-		for(i = 0; i < num_clusters; ++i)
-		{
-			int num_on_this_cluster = num_daemons_per_cluster;
-			if(left_over)
-			{
-				++num_on_this_cluster;
-				--left_over;
-			}
-
-			for(j = 0; j < num_on_this_cluster; ++j)
-			{
-				// first CPU of this cluster
-				affinity[daemon++] = i*cluster_size;
-			}
-		}
-
-		spawn_klmirqd(affinity);
-
-		kfree(affinity);
-	}
+	init_klmirqd();
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
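The open-coded placement loop collapses into a single init_klmirqd() call, so the spawning and affinity policy presumably now lives in the klmirqd core rather than in every plugin. For reference, the removed policy spread NR_LITMUS_SOFTIRQD daemons across clusters, giving the remainder to the lowest-numbered clusters and pinning each daemon to the first CPU of its cluster. A standalone restatement of that computation (userspace, for illustration only; e.g. 5 daemons over 2 clusters of 4 CPUs land on CPUs 0, 0, 0, 4, 4):

#include <stdio.h>

/* Re-statement of the placement policy deleted above, runnable in
 * userspace. Behavior inferred from the removed block. */
int main(void)
{
	int nr_daemons = 5, num_clusters = 2, cluster_size = 4;
	int per_cluster = nr_daemons / num_clusters;
	int left_over = nr_daemons % num_clusters;
	int daemon = 0;

	for (int i = 0; i < num_clusters; ++i) {
		int n = per_cluster;
		if (left_over) { ++n; --left_over; }
		while (n--)  /* pin to first CPU of cluster i */
			printf("daemon %d -> CPU %d\n", daemon++, i * cluster_size);
	}
	return 0;
}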
@@ -1906,10 +1821,6 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	.allocate_aff_obs = cedf_allocate_affinity_observer,
 #endif
-#ifdef CONFIG_LITMUS_SOFTIRQD
-	.increase_prio_klmirqd = increase_priority_inheritance_klmirqd,
-	.decrease_prio_klmirqd = decrease_priority_inheritance_klmirqd,
-#endif
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 	.enqueue_pai_tasklet = cedf_enqueue_pai_tasklet,
 	.change_prio_pai_tasklet = cedf_change_prio_pai_tasklet,