Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	69
1 file changed, 60 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 49be34e1f0b8..a7475913b009 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1159,21 +1159,72 @@ void wait_task_inactive(struct task_struct *p)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int preempted;
+	struct prio_array *array;
+	int running;
 
 repeat:
+	/*
+	 * We do the initial early heuristics without holding
+	 * any task-queue locks at all. We'll only try to get
+	 * the runqueue lock when things look like they will
+	 * work out!
+	 */
+	rq = task_rq(p);
+
+	/*
+	 * If the task is actively running on another CPU
+	 * still, just relax and busy-wait without holding
+	 * any locks.
+	 *
+	 * NOTE! Since we don't hold any locks, it's not
+	 * even sure that "rq" stays as the right runqueue!
+	 * But we don't care, since "task_running()" will
+	 * return false if the runqueue has changed and p
+	 * is actually now running somewhere else!
+	 */
+	while (task_running(rq, p))
+		cpu_relax();
+
+	/*
+	 * Ok, time to look more closely! We need the rq
+	 * lock now, to be *sure*. If we're wrong, we'll
+	 * just go back and repeat.
+	 */
 	rq = task_rq_lock(p, &flags);
-	/* Must be off runqueue entirely, not preempted. */
-	if (unlikely(p->array || task_running(rq, p))) {
-		/* If it's preempted, we yield. It could be a while. */
-		preempted = !task_running(rq, p);
-		task_rq_unlock(rq, &flags);
+	running = task_running(rq, p);
+	array = p->array;
+	task_rq_unlock(rq, &flags);
+
+	/*
+	 * Was it really running after all now that we
+	 * checked with the proper locks actually held?
+	 *
+	 * Oops. Go back and try again..
+	 */
+	if (unlikely(running)) {
 		cpu_relax();
-		if (preempted)
-			yield();
 		goto repeat;
 	}
-	task_rq_unlock(rq, &flags);
+
+	/*
+	 * It's not enough that it's not actively running,
+	 * it must be off the runqueue _entirely_, and not
+	 * preempted!
+	 *
+	 * So if it wa still runnable (but just not actively
+	 * running right now), it's preempted, and we should
+	 * yield - it could be a while.
+	 */
+	if (unlikely(array)) {
+		yield();
+		goto repeat;
+	}
+
+	/*
+	 * Ahh, all good. It wasn't running, and it wasn't
+	 * runnable, which means that it will never become
+	 * running in the future either. We're all done!
+	 */
 }
 
 /***
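
For illustration only: the shape of the new code is "spin on a cheap lockless check while the task still looks busy, then take the lock just long enough to confirm both facts (running and queued) before deciding". The sketch below is a hypothetical userspace analogue of that pattern using pthreads and C11 atomics; every name in it (struct worker, wait_worker_idle, and so on) is invented here, and only the control flow mirrors the patch, not any kernel API.

/*
 * Hypothetical userspace sketch of the "check without the lock,
 * then confirm under the lock" pattern from the patch above.
 * All identifiers are invented for illustration.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

struct worker {
	pthread_mutex_t lock;	/* protects ->queued */
	atomic_int running;	/* cheap flag, safe to read without the lock */
	int queued;		/* still "on the runqueue", in kernel terms */
};

static void wait_worker_idle(struct worker *w)
{
	int running, queued;

repeat:
	/* Early heuristic without the lock: spin while it looks busy. */
	while (atomic_load(&w->running))
		sched_yield();

	/* Take the lock and snapshot both facts to be sure. */
	pthread_mutex_lock(&w->lock);
	running = atomic_load(&w->running);
	queued = w->queued;
	pthread_mutex_unlock(&w->lock);

	/* It raced back onto a CPU: go around again. */
	if (running)
		goto repeat;

	/* Idle but still queued ("preempted"): yield and retry. */
	if (queued) {
		sched_yield();
		goto repeat;
	}
	/* Neither running nor queued: it will stay quiescent. */
}

int main(void)
{
	struct worker w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.running = 0,
		.queued = 0,
	};

	wait_worker_idle(&w);	/* returns immediately for an idle worker */
	puts("worker is quiescent");
	return 0;
}

The point, as the patch's own comments put it, is that the waiter does the busy-wait without taking any lock at all and only grabs the lock briefly for the final check, so a long-running target cannot keep the waiter repeatedly acquiring the runqueue lock.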