author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-05 11:23:57 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-04-14 02:52:40 -0400
commit     c05fbafba1c5482bee399b360288fa405415e126 (patch)
tree       d693cfca73670d17e755fa785e108e65842223c3 /kernel/sched.c
parent     23f41eeb42ce7f6f1210904e49e84718f02cb61c (diff)
sched: Restructure ttwu() some more
Factor out helper functions to make the inner workings of try_to_wake_up() more obvious; this also allows for adding remote queues.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152729.475848012@chello.nl
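To make the new shape of the wakeup path easier to follow before reading the hunks, here is a small userspace sketch of the control flow this patch creates: ttwu_remote() as the "task is still on its runqueue" fast path, and ttwu_queue()/ttwu_do_activate() as the path that takes rq->lock and activates the task. Everything here (the model_ prefix, the pthread locks, the simplified struct task and struct rq) is a stand-in invented for the sketch; only the branch structure mirrors the patched try_to_wake_up().

/* ttwu_model.c: toy, userspace-only model of the control flow in this patch.
 * Build with: cc -pthread ttwu_model.c
 */
#include <pthread.h>
#include <stdio.h>

#define TASK_RUNNING         0
#define TASK_UNINTERRUPTIBLE 2

struct rq {
        pthread_mutex_t lock;           /* stands in for rq->lock */
        int nr_uninterruptible;
};

struct task {
        pthread_mutex_t pi_lock;        /* stands in for p->pi_lock */
        int state;
        int on_rq;
        struct rq *rq;                  /* stands in for cpu_rq(task_cpu(p)) */
};

/* Fast path (ttwu_remote): the task never left its runqueue, so waking it
 * only means flipping ->state back to TASK_RUNNING under rq->lock. */
static int model_ttwu_remote(struct task *p)
{
        int ret = 0;

        pthread_mutex_lock(&p->rq->lock);
        if (p->on_rq) {
                p->state = TASK_RUNNING;        /* ttwu_do_wakeup() */
                ret = 1;
        }
        pthread_mutex_unlock(&p->rq->lock);
        return ret;
}

/* Slow path (ttwu_queue -> ttwu_do_activate): lock the chosen runqueue,
 * adjust load accounting and enqueue the task. */
static void model_ttwu_queue(struct task *p, struct rq *rq)
{
        pthread_mutex_lock(&rq->lock);
        if (p->state == TASK_UNINTERRUPTIBLE)
                rq->nr_uninterruptible--;       /* sched_contributes_to_load */
        p->on_rq = 1;                           /* ttwu_activate() */
        p->state = TASK_RUNNING;                /* ttwu_do_wakeup() */
        p->rq = rq;
        pthread_mutex_unlock(&rq->lock);
}

/* After the patch, try_to_wake_up() itself only holds p->pi_lock and
 * delegates all rq->lock work to the two helpers above. */
static int model_try_to_wake_up(struct task *p, int state, struct rq *target)
{
        int success = 0;

        pthread_mutex_lock(&p->pi_lock);
        if (!(p->state & state))
                goto out;

        success = 1;                            /* we will change ->state */
        if (p->on_rq && model_ttwu_remote(p))
                goto out;                       /* 'stat:' in the real code */

        model_ttwu_queue(p, target);            /* select_task_rq() elided */
out:
        pthread_mutex_unlock(&p->pi_lock);
        return success;
}

int main(void)
{
        struct rq rq = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct task p = { PTHREAD_MUTEX_INITIALIZER, TASK_UNINTERRUPTIBLE, 0, &rq };

        /* Slow path: fully descheduled task gets queued and set running. */
        printf("slow path: woken=%d on_rq=%d\n",
               model_try_to_wake_up(&p, TASK_UNINTERRUPTIBLE, &rq), p.on_rq);

        /* Fast path: task still on_rq, only ->state is flipped. */
        p.state = TASK_UNINTERRUPTIBLE;
        printf("fast path: woken=%d state=%d\n",
               model_try_to_wake_up(&p, TASK_UNINTERRUPTIBLE, &rq), p.state);
        return 0;
}

The design point visible in both the sketch and the diff below is that every rq->lock section now lives in a helper while try_to_wake_up() keeps only p->pi_lock, and ttwu_queue() becomes the single hook where, as the message notes, remote wakeup queues can later be added.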
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  91
1 file changed, 58 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e309dbad2038..7d8b85fcdf06 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2483,6 +2483,48 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 #endif
 }
 
+static void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+{
+#ifdef CONFIG_SMP
+        if (p->sched_contributes_to_load)
+                rq->nr_uninterruptible--;
+#endif
+
+        ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
+        ttwu_do_wakeup(rq, p, wake_flags);
+}
+
+/*
+ * Called in case the task @p isn't fully descheduled from its runqueue,
+ * in this case we must do a remote wakeup. Its a 'light' wakeup though,
+ * since all we need to do is flip p->state to TASK_RUNNING, since
+ * the task is still ->on_rq.
+ */
+static int ttwu_remote(struct task_struct *p, int wake_flags)
+{
+        struct rq *rq;
+        int ret = 0;
+
+        rq = __task_rq_lock(p);
+        if (p->on_rq) {
+                ttwu_do_wakeup(rq, p, wake_flags);
+                ret = 1;
+        }
+        __task_rq_unlock(rq);
+
+        return ret;
+}
+
+static void ttwu_queue(struct task_struct *p, int cpu)
+{
+        struct rq *rq = cpu_rq(cpu);
+
+        raw_spin_lock(&rq->lock);
+        ttwu_do_activate(rq, p, 0);
+        raw_spin_unlock(&rq->lock);
+}
+
 /**
  * try_to_wake_up - wake up a thread
  * @p: the thread to be awakened
@@ -2501,27 +2543,25 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 static int
 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 {
-        int cpu, this_cpu, success = 0;
         unsigned long flags;
-        struct rq *rq;
-
-        this_cpu = get_cpu();
 
         smp_wmb();
+        int cpu, success = 0;
         raw_spin_lock_irqsave(&p->pi_lock, flags);
         if (!(p->state & state))
                 goto out;
 
+        success = 1; /* we're going to change ->state */
         cpu = task_cpu(p);
 
-        if (p->on_rq) {
-                rq = __task_rq_lock(p);
-                if (p->on_rq)
-                        goto out_running;
-                __task_rq_unlock(rq);
-        }
+        if (p->on_rq && ttwu_remote(p, wake_flags))
+                goto stat;
 
 #ifdef CONFIG_SMP
+        /*
+         * If the owning (remote) cpu is still in the middle of schedule() with
+         * this task as prev, wait until its done referencing the task.
+         */
         while (p->on_cpu) {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
                 /*
@@ -2530,8 +2570,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                  * to spin on ->on_cpu if p is current, since that would
                  * deadlock.
                  */
-                if (p == current)
-                        goto out_activate;
+                if (p == current) {
+                        ttwu_queue(p, cpu);
+                        goto stat;
+                }
 #endif
                 cpu_relax();
         }
@@ -2547,32 +2589,15 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                 p->sched_class->task_waking(p);
 
         cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-out_activate:
-#endif
-#endif /* CONFIG_SMP */
-
-        rq = cpu_rq(cpu);
-        raw_spin_lock(&rq->lock);
-
-#ifdef CONFIG_SMP
-        if (cpu != task_cpu(p))
+        if (task_cpu(p) != cpu)
                 set_task_cpu(p, cpu);
+#endif /* CONFIG_SMP */
 
-        if (p->sched_contributes_to_load)
-                rq->nr_uninterruptible--;
-#endif
-
-        ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
-out_running:
-        ttwu_do_wakeup(rq, p, wake_flags);
-        success = 1;
-        __task_rq_unlock(rq);
-
+        ttwu_queue(p, cpu);
+stat:
         ttwu_stat(p, cpu, wake_flags);
 out:
         raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-        put_cpu();
 
         return success;
 }