author     Glenn Elliott <gelliott@cs.unc.edu>  2013-02-20 20:01:23 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2013-02-20 20:01:23 -0500
commit     ea9a56dc3198669da2e96b8e7c09bb855249f48b (patch)
tree       c5938786114ad30bc4aa0e945c7b013c85641f06
parent     6987dcbc8945509db8a11d0408a9e2cc5036cbce (diff)

Fix partitioned C-EDF

-rw-r--r--  kernel/softirq.c     | 15
-rw-r--r--  litmus/Kconfig       |  2
-rw-r--r--  litmus/nvidia_info.c |  2
-rw-r--r--  litmus/sched_cedf.c  | 68
4 files changed, 66 insertions, 21 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ef70dbe92a97..9c20a9ec06b2 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -485,7 +485,10 @@ static int __klmirqd_nv_tasklet_schedule(struct tasklet_struct *t,
 
 void __tasklet_schedule(struct tasklet_struct *t)
 {
-#ifdef CONFIG_LITMUS_NVIDIA
+#if defined(CONFIG_LITMUS_NVIDIA) && ( \
+	defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS) || \
+	defined(CONFIG_LITMUS_SOFTIRQD) || \
+	defined(CONFIG_LITMUS_PAI_SOFTIRQD))
 	if(is_nvidia_func(t->func))
 	{
 #if defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS)
@@ -527,7 +530,10 @@ EXPORT_SYMBOL(___tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
-#ifdef CONFIG_LITMUS_NVIDIA
+#if defined(CONFIG_LITMUS_NVIDIA) && ( \
+	defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS) || \
+	defined(CONFIG_LITMUS_SOFTIRQD) || \
+	defined(CONFIG_LITMUS_PAI_SOFTIRQD))
 	if(is_nvidia_func(t->func))
 	{
 #if defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS)
@@ -568,7 +574,10 @@ EXPORT_SYMBOL(___tasklet_hi_schedule);
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
-#ifdef CONFIG_LITMUS_NVIDIA
+#if defined(CONFIG_LITMUS_NVIDIA) && ( \
+	defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS) || \
+	defined(CONFIG_LITMUS_SOFTIRQD) || \
+	defined(CONFIG_LITMUS_PAI_SOFTIRQD))
 	if(is_nvidia_func(t->func))
 	{
 #if defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS)
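
The three kernel/softirq.c hunks above apply the same change: the NVIDIA tasklet-interception path is now compiled only when CONFIG_LITMUS_NVIDIA is set together with at least one mechanism that can actually handle the intercepted tasklet (non-split interrupts, klmirqd, or PAI softirq handling). The sketch below is a minimal userspace illustration of that compound preprocessor guard; the stubbed #defines, schedule_tasklet() and main() are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Stand-ins for the Kconfig symbols named in the patch; in the kernel these
 * come from the generated autoconf headers, not from local defines. */
#define CONFIG_LITMUS_NVIDIA 1
#define CONFIG_LITMUS_SOFTIRQD 1
/* #define CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS 1 */
/* #define CONFIG_LITMUS_PAI_SOFTIRQD 1 */

static void schedule_tasklet(const char *name)
{
	/* Same compound guard as the patched __tasklet_schedule(): the NVIDIA
	 * interception path exists only if LITMUS_NVIDIA is enabled AND at
	 * least one handling mechanism is configured. */
#if defined(CONFIG_LITMUS_NVIDIA) && ( \
	defined(CONFIG_LITMUS_NVIDIA_NONSPLIT_INTERRUPTS) || \
	defined(CONFIG_LITMUS_SOFTIRQD) || \
	defined(CONFIG_LITMUS_PAI_SOFTIRQD))
	printf("%s: NVIDIA interception path compiled in\n", name);
#else
	printf("%s: plain tasklet path only\n", name);
#endif
}

int main(void)
{
	schedule_tasklet("__tasklet_schedule");
	return 0;
}
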
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 4bd371d20a1a..644d2d55bfb6 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -489,7 +489,7 @@ config LITMUS_AFFINITY_AWARE_GPU_ASSINGMENT
 
 config NV_DEVICE_NUM
 	int "Number of NVIDIA GPUs."
-	depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD
+	depends on LITMUS_NVIDIA
 	range 1 16
 	default "1"
 	help
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index fc4d5297fe6f..e87e56542a23 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -474,7 +474,6 @@ static int gpu_owner_max_priority_order(struct binheap_node *a,
 static int init_nv_device_reg(void)
 {
 	int i;
-	char name[MAX_KLMIRQD_NAME_LEN+1];
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	if (!klmirqd_is_ready()) {
@@ -498,6 +497,7 @@ static int init_nv_device_reg(void)
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	{
+		char name[MAX_KLMIRQD_NAME_LEN+1];
 		int default_cpu = litmus->map_gpu_to_cpu(i);
 
 		/* spawn the interrupt thread */
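
The nvidia_info.c change narrows the scope of the name buffer to the CONFIG_LITMUS_SOFTIRQD block that actually uses it, so builds without klmirqd no longer declare an unused local. A minimal sketch of the same scope-narrowing pattern, using placeholder names (MAX_NAME_LEN, WANT_IRQ_THREADS, register_device) rather than the LITMUS^RT ones:

#include <stdio.h>

#define MAX_NAME_LEN 31		/* placeholder for MAX_KLMIRQD_NAME_LEN */
#define WANT_IRQ_THREADS 1	/* placeholder for CONFIG_LITMUS_SOFTIRQD */

static int register_device(int i)
{
	/* Before the patch the buffer lived at function scope and was flagged
	 * as unused whenever the interrupt-thread support was compiled out. */
#if WANT_IRQ_THREADS
	{
		char name[MAX_NAME_LEN + 1];

		snprintf(name, sizeof(name), "klmirqd_nv%d", i);
		printf("would spawn interrupt thread %s\n", name);
	}
#endif
	return 0;
}

int main(void)
{
	return register_device(0);
}
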
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index fc8f277a1958..efb11bb8c656 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -326,8 +326,7 @@ static noinline void requeue(struct task_struct* task)
 #endif
 		__add_ready(&cluster->domain, task);
 	else {
-		/* it has got to wait */
-		add_release(&cluster->domain, task);
+		TRACE_TASK(task, "not requeueing non-yet-released job\n");
 	}
 }
 
@@ -412,6 +411,7 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 /* caller holds cluster_lock */
 static noinline void job_completion(struct task_struct *t, int forced)
 {
+	lt_t now;
 	BUG_ON(!t);
 
 	sched_trace_task_completion(t, forced);
@@ -426,14 +426,23 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	tsk_rt(t)->completed = 1;
 	/* prepare for next period */
 	prepare_for_next_period(t);
-	if (is_released(t, litmus_clock()))
+
+	now = litmus_clock();
+	if (is_released(t, now)) {
+		/* log here to capture overheads */
 		sched_trace_task_release(t);
-	/* unlink */
+	}
+
 	unlink(t);
-	/* requeue
-	 * But don't requeue a blocking task. */
-	if (is_running(t))
-		cedf_job_arrival(t);
+
+	/* release or arm next job */
+	tsk_rt(t)->completed = 0;
+	if (is_running(t)) {
+		if (!is_released(t, now))
+			add_release(&task_cpu_cluster(t)->domain, t);
+		else
+			cedf_job_arrival(t);
+	}
 }
 
 /* cedf_tick - this function is called for every local timer
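
Taken together with the requeue() hunk above, the job_completion() rewrite moves the released-or-not decision to completion time: if the next job's release time has already passed, the task goes through cedf_job_arrival(); otherwise it is armed on the release queue via add_release(). Below is a small userspace model of that decision; the toy_task struct and the add_release()/job_arrival() stubs are simplified stand-ins for illustration, not the LITMUS^RT definitions.

#include <stdio.h>

typedef unsigned long long lt_t;	/* LITMUS-style time type */

struct toy_task {
	const char *name;
	lt_t release;	/* release time of the next job */
	int running;	/* still runnable (not blocked)? */
};

static int is_released(const struct toy_task *t, lt_t now)
{
	return t->release <= now;
}

/* Stand-ins for add_release()/cedf_job_arrival(): just report the decision. */
static void add_release(struct toy_task *t) { printf("%s -> release queue\n", t->name); }
static void job_arrival(struct toy_task *t) { printf("%s -> ready queue\n", t->name); }

/* Models the patched completion path: an unreleased next job is armed on the
 * release queue, an already-released one is treated as having arrived. */
static void complete_job(struct toy_task *t, lt_t now)
{
	if (!t->running)
		return;		/* blocked tasks are not requeued */
	if (!is_released(t, now))
		add_release(t);
	else
		job_arrival(t);
}

int main(void)
{
	struct toy_task released_job = { "released job", 100, 1 };
	struct toy_task future_job   = { "future job",   500, 1 };
	lt_t now = 200;

	complete_job(&released_job, now);	/* already released -> ready queue */
	complete_job(&future_job, now);		/* not yet released -> release queue */
	return 0;
}
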
@@ -1191,6 +1200,18 @@ static int __increase_priority_inheritance(struct task_struct* t,
 
 	cluster = task_cpu_cluster(t);
 
+#if 0
+	if (prio_inh && task_cpu_cluster(prio_inh) != cluster) {
+		WARN_ONCE(1, "Illegal to inherit between clusters. " \
+				"target (%s/%d) on cluster w/ CPU %d and " \
+				"inh_prio (%s/%d) on w/ CPU %d\n", \
+				t->comm, t->pid, cluster->cpus[0]->cpu, \
+				prio_inh->comm, prio_inh->pid, \
+				task_cpu_cluster(prio_inh)->cpus[0]->cpu);
+		return 1;
+	}
+#endif
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	/* this sanity check allows for weaker locking in protocols */
 	/* TODO (klmirqd): Skip this check if 't' is a proxy thread (???) */
@@ -1316,6 +1337,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 static int __decrease_priority_inheritance(struct task_struct* t,
 					   struct task_struct* prio_inh)
 {
+	cedf_domain_t* cluster;
 	int success = 1;
 
 	if (prio_inh == tsk_rt(t)->inh_task) {
@@ -1347,6 +1369,20 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 #endif
 	}
 
+	cluster = task_cpu_cluster(t);
+
+#if 0
+	if (prio_inh && task_cpu_cluster(prio_inh) != cluster) {
+		WARN_ONCE(1, "Illegal to inherit between clusters. " \
+				"target (%s/%d) on cluster w/ CPU %d and " \
+				"inh_prio (%s/%d) on w/ CPU %d\n", \
+				t->comm, t->pid, cluster->cpus[0]->cpu, \
+				prio_inh->comm, prio_inh->pid, \
+				task_cpu_cluster(prio_inh)->cpus[0]->cpu);
+		return 1;
+	}
+#endif
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
 #endif
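
Both __increase_priority_inheritance() and __decrease_priority_inheritance() gain a cross-cluster sanity check, currently disabled with #if 0, that would warn and bail out when the inheriting task and the priority donor live in different clusters. A hedged standalone sketch of the same check follows; toy_cluster, toy_task and check_same_cluster() are simplified stand-ins, not the LITMUS^RT types.

#include <stdio.h>

struct toy_cluster { int first_cpu; };
struct toy_task {
	const char *comm;
	int pid;
	struct toy_cluster *cluster;
};

/* Mirrors the intent of the #if 0'd WARN_ONCE(): inheritance between tasks
 * assigned to different clusters is reported and rejected. */
static int check_same_cluster(const struct toy_task *t, const struct toy_task *prio_inh)
{
	if (prio_inh && prio_inh->cluster != t->cluster) {
		fprintf(stderr,
			"Illegal to inherit between clusters: %s/%d (CPU %d) vs %s/%d (CPU %d)\n",
			t->comm, t->pid, t->cluster->first_cpu,
			prio_inh->comm, prio_inh->pid, prio_inh->cluster->first_cpu);
		return 0;
	}
	return 1;
}

int main(void)
{
	struct toy_cluster c0 = { 0 }, c1 = { 4 };
	struct toy_task waiter = { "waiter", 101, &c0 };
	struct toy_task donor  = { "donor", 202, &c1 };

	if (!check_same_cluster(&waiter, &donor))
		printf("inheritance rejected\n");
	return 0;
}
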
@@ -1371,15 +1407,16 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 			cedf_job_arrival(t);
 		}
 		else {
-			cedf_domain_t* cluster = task_cpu_cluster(t);
 			/* task is queued */
 			raw_spin_lock(&cluster->domain.release_lock);
 			if (is_queued(t)) {
 				TRACE_TASK(t, "is queued.\n");
 
-				/* decrease in priority, so we have to re-add to binomial heap */
 				unlink(t);
-				cedf_job_arrival(t);
+				if (is_released(t, litmus_clock()))
+					cedf_job_arrival(t);
+				else
+					add_release(&cluster->domain, t);
 			}
 			else {
 				TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n");
@@ -1912,16 +1949,15 @@ static long cedf_activate_plugin(void)
 
 			entry = &per_cpu(cedf_cpu_entries, ccpu);
 			cedf[i].cpus[cpu_count] = entry;
-			atomic_set(&entry->will_schedule, 0);
+
+			memset(entry, 0, sizeof(*entry));
 			entry->cpu = ccpu;
 			entry->cluster = &cedf[i];
-
 			INIT_BINHEAP_NODE(&entry->hn);
+			mb();
 
-			cpu_count++;
+			++cpu_count;
 
-			entry->linked = NULL;
-			entry->scheduled = NULL;
 #ifdef CONFIG_RELEASE_MASTER
 			/* only add CPUs that should schedule jobs */
 			if (entry->cpu != entry->cluster->domain.release_master)
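
The cedf_activate_plugin() hunk replaces piecemeal resets of the per-CPU entry (atomic_set of will_schedule, NULLing linked and scheduled) with a single memset() of the whole structure before the live fields are filled in, followed by a memory barrier. The sketch below illustrates that reinitialization pattern in userspace; toy_cpu_entry, toy_mb() and init_entry() are assumptions for illustration, not the kernel definitions.

#include <stdio.h>
#include <string.h>

struct toy_cpu_entry {
	int cpu;
	void *cluster;
	void *linked;		/* task linked to this CPU */
	void *scheduled;	/* task currently scheduled here */
	int will_schedule;
};

/* Userspace stand-in for the kernel's mb(); a plain compiler barrier here. */
#define toy_mb() __asm__ __volatile__("" ::: "memory")

/* Mirrors the patched loop body: zero the entire entry so every field starts
 * from a known state, then set only the fields that are actually live. */
static void init_entry(struct toy_cpu_entry *entry, int cpu, void *cluster)
{
	memset(entry, 0, sizeof(*entry));
	entry->cpu = cpu;
	entry->cluster = cluster;
	toy_mb();	/* publish the initialized entry before it is used */
}

int main(void)
{
	struct toy_cpu_entry entry;
	int fake_cluster;

	init_entry(&entry, 3, &fake_cluster);
	printf("cpu=%d linked=%p will_schedule=%d\n",
	       entry.cpu, entry.linked, entry.will_schedule);
	return 0;
}
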