author     Nick Piggin <nickpiggin@yahoo.com.au>        2005-06-25 17:57:23 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 19:24:43 -0400
commit     4866cde064afbb6c2a488c265e696879de616daa (patch)
tree       6effad1ab6271129fc607b98273086409876563a /kernel
parent     48c08d3f8ff94fa118187e4d8d4a5707bb85e59d (diff)
[PATCH] sched: cleanup context switch locking
Instead of requiring architecture code to interact with the scheduler's
locking implementation, provide two defines, __ARCH_WANT_UNLOCKED_CTXSW and
__ARCH_WANT_INTERRUPTS_ON_CTXSW, that an architecture can use to request
runqueue-unlocked context switches and to ask for interrupts to be enabled
across the context switch.
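
As an illustration (not part of this patch), an architecture that wants both
behaviours would add the opt-in defines to one of its own headers before
kernel/sched.c is compiled; the asm-foo header name below is made up:

	/* Hypothetical include/asm-foo/system.h fragment -- a sketch only. */
	#define __ARCH_WANT_UNLOCKED_CTXSW	/* switch with the runqueue unlocked */
	#define __ARCH_WANT_INTERRUPTS_ON_CTXSW	/* and with interrupts enabled */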
Also replace the "switch_lock" used by these architectures with an ->oncpu
flag (note: a plain int, not a potentially slow bitflag).  This eliminates
one bus-locked memory operation per context switch and simplifies the
task_running() function.
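
As a minimal sketch of that simplification, mirroring the helper this patch
adds under __ARCH_WANT_UNLOCKED_CTXSW (the !SMP fallback still compares
rq->curr):

	static inline int task_running(runqueue_t *rq, task_t *p)
	{
	#ifdef CONFIG_SMP
		return p->oncpu;	/* plain load of the flag, no locked RMW */
	#else
		return rq->curr == p;
	#endif
	}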
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c   132
1 file changed, 108 insertions, 24 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 98bf1c091da5..b1410577f9a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -268,14 +268,71 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-/*
- * Default context-switch locking:
- */
 #ifndef prepare_arch_switch
-# define prepare_arch_switch(rq, next)	do { } while (0)
-# define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
-# define task_running(rq, p)		((rq)->curr == (p))
+# define prepare_arch_switch(next)	do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev)	do { } while (0)
+#endif
+
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+	return rq->curr == p;
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+	spin_unlock_irq(&rq->lock);
+}
+
+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+static inline int task_running(runqueue_t *rq, task_t *p)
+{
+#ifdef CONFIG_SMP
+	return p->oncpu;
+#else
+	return rq->curr == p;
+#endif
+}
+
+static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->oncpu = 1;
+#endif
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	spin_unlock_irq(&rq->lock);
+#else
+	spin_unlock(&rq->lock);
 #endif
+}
+
+static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->oncpu = 0;
+#endif
+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif
+}
+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
  * task_rq_lock - lock the runqueue a given task resides on and disable
@@ -1196,17 +1253,14 @@ void fastcall sched_fork(task_t *p)
 	p->state = TASK_RUNNING;
 	INIT_LIST_HEAD(&p->run_list);
 	p->array = NULL;
-	spin_lock_init(&p->switch_lock);
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	p->oncpu = 0;
+#endif
 #ifdef CONFIG_PREEMPT
-	/*
-	 * During context-switch we hold precisely one spinlock, which
-	 * schedule_tail drops. (in the common case it's this_rq()->lock,
-	 * but it also can be p->switch_lock.) So we compensate with a count
-	 * of 1. Also, we want to start with kernel preemption disabled.
-	 */
+	/* Want to start with kernel preemption disabled. */
 	p->thread_info->preempt_count = 1;
 #endif
 	/*
@@ -1388,22 +1442,40 @@ void fastcall sched_exit(task_t * p)
 }
 
 /**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+ * @next: the task we are going to switch to.
+ *
+ * This is called with the rq lock held and interrupts off. It must
+ * be paired with a subsequent finish_task_switch after the context
+ * switch.
+ *
+ * prepare_task_switch sets up locking and calls architecture specific
+ * hooks.
+ */
+static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
+{
+	prepare_lock_switch(rq, next);
+	prepare_arch_switch(next);
+}
+
+/**
  * finish_task_switch - clean up after a task-switch
  * @prev: the thread we just switched away from.
  *
- * We enter this with the runqueue still locked, and finish_arch_switch()
- * will unlock it along with doing any other architecture-specific cleanup
- * actions.
+ * finish_task_switch must be called after the context switch, paired
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
  *
  * Note that we may have delayed dropping an mm in context_switch(). If
  * so, we finish that here outside of the runqueue lock. (Doing it
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(task_t *prev)
+static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
 	__releases(rq->lock)
 {
-	runqueue_t *rq = this_rq();
 	struct mm_struct *mm = rq->prev_mm;
 	unsigned long prev_task_flags;
 
@@ -1421,7 +1493,8 @@ static inline void finish_task_switch(task_t *prev)
 	 * Manfred Spraul <manfred@colorfullife.com>
 	 */
 	prev_task_flags = prev->flags;
-	finish_arch_switch(rq, prev);
+	finish_arch_switch(prev);
+	finish_lock_switch(rq, prev);
 	if (mm)
 		mmdrop(mm);
 	if (unlikely(prev_task_flags & PF_DEAD))
@@ -1435,8 +1508,12 @@ static inline void finish_task_switch(task_t *prev)
 asmlinkage void schedule_tail(task_t *prev)
 	__releases(rq->lock)
 {
-	finish_task_switch(prev);
-
+	runqueue_t *rq = this_rq();
+	finish_task_switch(rq, prev);
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
+	/* In this case, finish_task_switch does not reenable preemption */
+	preempt_enable();
+#endif
 	if (current->set_child_tid)
 		put_user(current->pid, current->set_child_tid);
 }
@@ -2816,11 +2893,15 @@ switch_tasks:
 		rq->curr = next;
 		++*switch_count;
 
-		prepare_arch_switch(rq, next);
+		prepare_task_switch(rq, next);
 		prev = context_switch(rq, prev, next);
 		barrier();
-
-		finish_task_switch(prev);
+		/*
+		 * this_rq must be evaluated again because prev may have moved
+		 * CPUs since it called schedule(), thus the 'rq' on its stack
+		 * frame will be invalid.
+		 */
+		finish_task_switch(this_rq(), prev);
 	} else
 		spin_unlock_irq(&rq->lock);
 
@@ -4085,6 +4166,9 @@ void __devinit init_idle(task_t *idle, int cpu)
 
 	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+	idle->oncpu = 1;
+#endif
 	set_tsk_need_resched(idle);
 	spin_unlock_irqrestore(&rq->lock, flags);
 