author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-07 18:20:37 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-07 18:20:37 -0400
commit    4f196c1854e8e51d33fc0c68ec7249f54886ae83 (patch)
tree      8e7c1e1cbd0b1be118464005662b60893099aee9
parent    3b5a238604b535d22cd458dfbbe74d2946bc0b8c (diff)
Removed race condition in lock_acquired code.
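
The patch below moves the lock_acquired handling inside the entry->lock critical section: previously the signal was consumed, and a preemption check run, before entry->lock was taken, leaving a window in which another CPU could change the entry's linked state. A minimal pthreads sketch of that discipline follows; struct entry, signal_pending, and linked here are illustrative stand-ins, not the scheduler's types:

	#include <pthread.h>
	#include <stdbool.h>

	/* Illustrative per-CPU state record; not the scheduler's
	 * struct cpu_entry. */
	struct entry {
		pthread_mutex_t lock;
		bool signal_pending;	/* set by the lock-acquisition path */
		int linked;		/* state the signal handler rewrites */
	};

	/* Racy shape (analogous to the old code): the pending signal is
	 * tested and consumed before taking entry->lock, so the state
	 * the decision was based on can change before we act on it. */
	static void handle_signal_racy(struct entry *e)
	{
		if (e->signal_pending) {
			e->signal_pending = false;	/* decision made unlocked */
			pthread_mutex_lock(&e->lock);
			e->linked = 1;			/* ...applied too late */
			pthread_mutex_unlock(&e->lock);
		}
	}

	/* Fixed shape (analogous to the new code): test, act, and clear
	 * under the same lock that protects the state being modified. */
	static void handle_signal_fixed(struct entry *e)
	{
		pthread_mutex_lock(&e->lock);
		if (e->signal_pending) {
			e->signal_pending = false;
			e->linked = 1;
		}
		pthread_mutex_unlock(&e->lock);
	}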
 litmus/sched_mc.c | 146
 1 file changed, 82 insertions(+), 64 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index d0a56fc664c..8028ce85e2d 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -91,7 +91,6 @@ static int interrupt_cpu;
 static void acquire_resources(struct task_struct *t)
 {
 	int cpu;
-	struct task_struct *sched;
 
 	/* Can't contend for resources if not logically running */
 	BUG_ON(tsk_rt(t)->linked_on == NO_CPU);
@@ -463,7 +462,7 @@ static void preempt_cpu(struct cpu_entry *entry, struct task_struct *t)
 }
 
 /**
- * preempt() - Preempt a logically running task with a higher priority one.
+ * preempt_crit() - Preempt a logically running task with a higher priority one.
  * @dom Domain from which to draw higher priority task
  * @ce CPU criticality level to preempt
  *
@@ -493,9 +492,7 @@ static void preempt_crit(struct domain *dom, struct crit_entry *ce)
 	if (!is_ghost(task)) {
 		preempt_cpu(entry, task);
 	} else if (old && old == entry->linked) {
-		/* Preempted a running task with a ghost job. Null needs to be
-		 * running.
-		 */
+		/* Preempted running task with ghost job. Nothing should run */
 		preempt_cpu(entry, NULL);
 	}
 }
@@ -993,17 +990,63 @@ out:
 	return ret;
 }
 
+void pick_next_task(struct cpu_entry *entry)
+{
+	int i;
+	struct crit_entry *ce;
+	struct domain *dom;
+	struct task_struct *dtask, *ready_task;
+
+	for (i = 0; i < NUM_CRIT_LEVELS && !entry->linked; i++) {
+		ce = &entry->crit_entries[i];
+		dom = ce->domain;
+
+		/* Swap locks. We cannot acquire a domain lock while
+		 * holding an entry lock or deadlocks will happen.
+		 */
+		raw_spin_unlock(&entry->lock);
+		raw_spin_lock(dom->lock);
+
+		/* Do domain stuff before grabbing CPU locks */
+		dtask = dom->peek_ready(dom);
+		fix_crit_position(ce);
+
+		raw_spin_lock(&entry->lock);
+
+		ready_task = NULL;
+		if (!entry->linked && can_use(ce)) {
+			if (ce->linked) {
+				ready_task = ce->linked;
+			} else if (dtask) {
+				/* Need a new task */
+				dom->take_ready(dom);
+				ready_task = dtask;
+
+				link_task_to_crit(ce, dtask);
+				update_crit_position(ce);
+			}
+		}
+		if (ready_task && !is_ghost(ready_task)) {
+			link_task_to_cpu(entry, ready_task);
+			raw_spin_unlock(dom->lock);
+			update_crit_levels(entry);
+			raw_spin_lock(&entry->lock);
+			continue;
+		}
+		raw_spin_unlock(dom->lock);
+	}
+}
+
 /**
  * mc_schedule() - Return next task which should be scheduled.
  */
 static struct task_struct* mc_schedule(struct task_struct* prev)
 {
 	unsigned long flags;
-	struct domain *dom;
 	struct crit_entry *ce;
 	struct cpu_entry* entry = &__get_cpu_var(cpus);
-	int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
-	struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
+	int out_of_time, sleep, preempt, exists, blocks, global, lower, update;
+	struct task_struct *next = NULL;
 
 	local_irq_save(flags);
 
@@ -1034,8 +1077,8 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	lower = exists && preempt && entry->linked &&
 		tsk_mc_crit(entry->scheduled) > tsk_mc_crit(entry->linked);
 
-	TRACE(TS " blocks:%d out_of_time:%d sleep:%d preempt:%d\n",
-	      TA(prev), blocks, out_of_time, sleep, preempt);
+	TRACE(TS " blocks:%d out_of_time:%d sleep:%d preempt:%d, now: %llu\n",
+	      TA(prev), blocks, out_of_time, sleep, preempt, litmus_clock());
 
 	if (exists)
 		prev->rt_param.scheduled_on = NO_CPU;
@@ -1052,79 +1095,54 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	if (blocks)
 		remove_from_all(entry->scheduled);
 	/* Any task which exhausts its budget or sleeps waiting for its next
-	 * period completes unless its execution has been forcibly stopped.
+	 * period completes unless its execution has been forcibly stopped
 	 */
-	if ((out_of_time || sleep) && !blocks)/* && !preempt)*/
+	else if (out_of_time || sleep)/* && !preempt)*/
 		job_completion(entry->scheduled, !sleep);
 	/* Global scheduled tasks must wait for a deschedule before they
-	 * can rejoin the global state. Rejoin them here.
+	 * can rejoin the global state. Rejoin them here
 	 */
-	else if (global && preempt && !blocks) {
+	else if (global && preempt) {
 		if (lower)
 			low_prio_arrival(entry->scheduled);
 		else
 			job_arrival(entry->scheduled);
 	}
 
-	/* Signal sent by lock acquisition */
-	if (entry->lock_acquired < NUM_CRIT_LEVELS) {
-		STRACE("Lock acquired for %d\n", entry->lock_acquired);
-		dom = entry->crit_entries[entry->lock_acquired].domain;
-		raw_spin_lock(dom->lock);
-		check_for_preempt(dom);
-		raw_spin_unlock(dom->lock);
-		entry->lock_acquired = NUM_CRIT_LEVELS;
-	}
-
-	/* Pick next task if none is linked */
 	raw_spin_lock(&entry->lock);
-	for (i = 0; i < NUM_CRIT_LEVELS && !entry->linked; i++) {
-		ce = &entry->crit_entries[i];
-		dom = ce->domain;
 
-		/* Swap locks. We cannot acquire a domain lock while
-		 * holding an entry lock or deadlocks will happen.
-		 */
-		raw_spin_unlock(&entry->lock);
-		raw_spin_lock(dom->lock);
-
-		/* Do domain stuff before grabbing CPU locks */
-		dtask = dom->peek_ready(dom);
-		fix_crit_position(ce);
-
-		raw_spin_lock(&entry->lock);
-
-		ready_task = NULL;
-		if (!entry->linked && can_use(ce)) {
-			if (ce->linked) {
-				ready_task = ce->linked;
-			} else if (dtask) {
-				/* Need a new task */
-				dom->take_ready(dom);
-				ready_task = dtask;
-
-				link_task_to_crit(ce, dtask);
-				update_crit_position(ce);
-			}
-		}
-		if (ready_task && !is_ghost(ready_task)) {
-			link_task_to_cpu(entry, ready_task);
-			raw_spin_unlock(dom->lock);
-			update_crit_levels(entry);
-			raw_spin_lock(&entry->lock);
-			continue;
-		}
-		raw_spin_unlock(dom->lock);
-	}
+	/* Pick next task if none is linked */
+	if (!entry->linked)
+		pick_next_task(entry);
+
+	/* Set this now so that any reschedule signals received after this
+	 * point will cause another reschedule
+	 */
+	sched_state_task_picked();
+
+	/* Ghost task acquired lock, is no longer ghost */
+	update = 0;
+	if (entry->lock_acquired < NUM_CRIT_LEVELS) {
+		ce = &entry->crit_entries[entry->lock_acquired];
+		if (ce->linked && !is_ghost(ce->linked) &&
+		    ce->linked != entry->linked) {
+			link_task_to_cpu(entry, ce->linked);
+			update = 1;
+		}
+		entry->lock_acquired = NUM_CRIT_LEVELS;
+	}
 
 	/* Schedule next task */
 	next = entry->linked;
-	if (entry->linked)
-		entry->linked->rt_param.scheduled_on = entry->cpu;
-	entry->will_schedule = entry->linked;
-	sched_state_task_picked();
+	if (next)
+		next->rt_param.scheduled_on = entry->cpu;
+	entry->will_schedule = next;
+
+	if (update)
+		update_crit_levels(entry); /* Will release lock */
+	else
+		raw_spin_unlock(&entry->lock);
 
-	raw_spin_unlock(&entry->lock);
 	local_irq_restore(flags);
 	if (next) {
 		BUG_ON(!get_rt_job(next));
@@ -1222,7 +1240,7 @@ out:
 
 static void mc_release_ts(lt_t time)
 {
-	int i, cpu, base_id = 0, cont_id = -1;
+	int cpu, cont_id = -1;
 	char name[TASK_COMM_LEN];
 	enum crit_level level;
 	struct cpu_entry *entry;
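
The new pick_next_task() helper preserves the loop's lock-swap discipline stated in its own comment: a domain lock is never acquired while an entry lock is held. The entry lock is dropped, the domain lock taken, then the entry lock retaken, imposing a fixed domain-before-entry order; the !entry->linked recheck afterwards revalidates state that may have changed while no lock was held. A standalone sketch of the same ordering rule, using hypothetical pthreads names rather than the scheduler's raw spinlocks:

	#include <pthread.h>

	static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t entry_lock  = PTHREAD_MUTEX_INITIALIZER;

	/* Called with entry_lock held, needing domain_lock as well.
	 * Taking domain_lock directly here could deadlock against a
	 * thread acquiring the locks in the opposite order, so swap:
	 * release the inner lock, then take both in the fixed
	 * domain-before-entry order. */
	static void lock_domain_then_entry(void)
	{
		pthread_mutex_unlock(&entry_lock);
		pthread_mutex_lock(&domain_lock);
		pthread_mutex_lock(&entry_lock);
		/* The caller must now revalidate anything it read while
		 * it previously held entry_lock; that state may have
		 * changed in the window where no lock was held. */
	}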