diff options
author    Jonathan Herman <hermanjl@cs.unc.edu>  2011-10-10 22:45:18 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2011-10-10 22:45:18 -0400
commit    fc509f9abd444f6a75b1d0e27c6bec5eadecd823 (patch)
tree      cfb350e9faddd90a7962e7849fe78788e766ca56
parent    980c85219653614ebc0f6bb072e97dc09e0ef90f (diff)
Fixed issue where long delay in VM could lose global tasks.
-rw-r--r--  litmus/sched_mc.c | 14 ++++++++++----
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 9edf038ef164..3d311505f437 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -41,6 +41,7 @@
 struct cpu_entry {
 	int cpu;
 	struct task_struct* scheduled;
+	struct task_struct* will_schedule;
 	struct task_struct* linked;
 	raw_spinlock_t lock;
 	struct crit_entry crit_entries[NUM_CRIT_LEVELS];
@@ -892,12 +893,18 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	QT_START;
 
 	local_irq_save(flags);
-	raw_spin_lock(&entry->lock);
 
-	if (entry->scheduled && entry->scheduled != prev) {
-		TRACE_TASK(entry->scheduled, " and prev is %d\n", prev->pid);
+	/* Litmus gave up because it couldn't access the stack of the CPU
+	 * on which will_schedule was migrating from. Requeue it.
+	 * This really only happens in VMs.
+	 */
+	if (entry->will_schedule && entry->will_schedule != prev) {
+		entry->will_schedule->rt_param.scheduled_on = NO_CPU;
+		low_prio_arrival(entry->will_schedule);
 	}
 
+	raw_spin_lock(&entry->lock);
+
 	/* Sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
 	BUG_ON(entry->scheduled && !is_realtime(prev));
@@ -988,6 +995,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	next = entry->linked;
 	if (entry->linked)
 		entry->linked->rt_param.scheduled_on = entry->cpu;
+	entry->will_schedule = entry->linked;
 	sched_state_task_picked();
 
 	raw_spin_unlock(&entry->lock);