author		Glenn Elliott <gelliott@cs.unc.edu>	2014-01-17 00:21:41 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2014-01-17 00:21:41 -0500
commit		d7e35a002b94ad52304a4c400d8195e462e0308a (patch)
tree		a0baec755d5939d9c1b2b554e8baec1d6fe82155
parent		7e1db96d01247ca7a3b23ca8ac40fc195ef4a149 (diff)
Don't unlink np tasks that block preemptions.
This patch prevents non-preemptive tasks from being unlinked upon a blocked preemption. Apparently this was leading to lost tasks: they would suspend and never be heard from again.
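In outline, the fix changes how cflsplit_schedule() handles a non-preemptive (np) task: a job that is ending (out of time, or sleeping) is still unlinked, but a job that is merely being preempted or moved stays linked and is only asked to leave its non-preemptive section. A simplified sketch of the resulting decision logic, with explanatory comments added (names follow the patched function; see the full diff below):

	if (np && (out_of_time || sleep)) {
		/* The job is completing anyway, so it is safe to unlink it
		 * and ask it to leave its non-preemptive section. */
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}
	else if (np && (needs_move || preempt)) {
		/* A preemption (or deadline move) is pending. Do NOT unlink:
		 * if the np task suspends while unlinked, it can be lost.
		 * Only request that it exit its non-preemptive section. */
		request_exit_np(entry->scheduled);
	}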
-rw-r--r--	litmus/sched_cfl_split.c	45
1 file changed, 33 insertions(+), 12 deletions(-)
diff --git a/litmus/sched_cfl_split.c b/litmus/sched_cfl_split.c
index b6dde71cdf84..6fd1591aec5a 100644
--- a/litmus/sched_cfl_split.c
+++ b/litmus/sched_cfl_split.c
@@ -394,11 +394,16 @@ static cpu_entry_t* cflsplit_get_nearest_available_cpu(
 #endif
 		);
 
-	/* make sure CPU is in our cluster */
-	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
-		return(affinity);
-	else
-		return(NULL);
+	if (affinity) {
+		/* make sure CPU is in our cluster */
+		if(cpu_isset(affinity->cpu, *cluster->cpu_map)) {
+			return(affinity);
+		}
+		else {
+			TRACE("CPU %d is not in our cluster.\n", affinity->cpu);
+		}
+	}
+	return(NULL);
 }
 #endif
 
@@ -414,8 +419,8 @@ static void check_for_preemptions(cflsplit_domain_t *cluster)
 	     last = lowest_prio_cpu(cluster)) {
 		/* preemption necessary */
 		task = __take_ready(&cluster->domain);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
+		TRACE("check_for_preemptions: attempting to link task %s/%d to %d\n",
+		      task->comm, task->pid, last->cpu);
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 		{
 			cpu_entry_t *affinity =
@@ -584,9 +589,11 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 	if (exists)
 		TRACE_TASK(prev,
 			   "blocks:%d out_of_time:%d needs_move: %d np:%d"
-			   " sleep:%d preempt:%d state:%d sig:%d\n",
+			   " sleep:%d preempt:%d state:%d sig:%d boosted:%d\n",
 			   blocks, out_of_time, needs_move, np, sleep, preempt,
-			   prev->state, signal_pending(prev));
+			   prev->state, signal_pending(prev),
+			   is_priority_boosted(entry->scheduled));
+
 	if (entry->linked && preempt)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
@@ -612,6 +619,13 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 		if (likely(!blocks)) {
 			unlink(entry->scheduled);
 			cflsplit_job_arrival(entry->scheduled);
+			/* we may regain the processor */
+			if (preempt) {
+				preempt = entry->scheduled != entry->linked;
+				if (!preempt) {
+					TRACE_TASK(entry->scheduled, "blocked preemption by lazy boosting.\n");
+				}
+			}
 		}
 	}
 }
@@ -628,6 +642,13 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 			/* recheck priority */
 			unlink(entry->scheduled);
 			cflsplit_job_arrival(entry->scheduled);
+			/* we may lose the processor */
+			if (!preempt) {
+				preempt = entry->scheduled != entry->linked;
+				if (preempt) {
+					TRACE_TASK(entry->scheduled, "preempted by lazy unboosting.\n");
+				}
+			}
 		}
 	}
 }
@@ -647,11 +668,12 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 	 *
 	 * Job deadline moves handled similarly
 	 */
-	if (np && (out_of_time || preempt || sleep)) {
+//	if (np && (out_of_time || preempt || sleep)) {
+	if (np && (out_of_time || sleep)) {
 		unlink(entry->scheduled);
 		request_exit_np(entry->scheduled);
 	}
-	else if (np && needs_move) {
+	else if (np && (needs_move || preempt)) {
 		request_exit_np(entry->scheduled);
 	}
 
@@ -713,7 +735,6 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 		TRACE("becomes idle at %llu.\n", litmus_clock());
 #endif
 
-
 	return next;
 }
 
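The two new re-check blocks in cflsplit_schedule() share one pattern: after unlink() and cflsplit_job_arrival() re-run the linking decision, the pending-preemption flag is recomputed by comparing what is scheduled against what is linked. A minimal sketch of that pattern, factored into a hypothetical helper (recheck_preempt() is not part of the patch; it only illustrates the test done inline):

	/* Hypothetical helper: a preemption is pending iff the task holding
	 * the CPU is no longer the task linked to it. */
	static int recheck_preempt(cpu_entry_t *entry)
	{
		return entry->scheduled != entry->linked;
	}

After lazy boosting, the scheduled task may have regained its link, so a pending preemption is cancelled; after lazy unboosting, it may have lost its link, so a preemption becomes pending.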