author     Oleg Nesterov <oleg@redhat.com>          2010-05-19 08:57:11 -0400
committer  Ingo Molnar <mingo@elte.hu>              2010-06-09 04:34:50 -0400
commit     246d86b51845063e4b06b27579990492dc5fa317
tree       3f60fc00b8cc587826cc76e31a1afb92353d08dd /kernel/sched.c
parent     c676329abb2b8359d9a5d734dec0c81779823fd6
sched: Simplify the reacquire_kernel_lock() logic
- Contrary to what commit 6d558c3a says, there is no need to reload
  prev = rq->curr after the context switch. You always schedule back
  to where you came from, so prev must be equal to current even if
  the cpu/rq has changed.

- This also means reacquire_kernel_lock() can use prev instead of
  current.

- There is no need to reassign switch_count if reacquire_kernel_lock()
  reports need_resched(); we can simply move the initial assignment
  down, under the "need_resched_nonpreemptible:" label.

- Also update the comment after context_switch().
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100519125711.GA30199@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
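[Editor's note] For orientation, here is a condensed sketch of how the tail of
schedule() reads once the hunks below are applied. It is assembled from the
diff itself; the elisions and the surrounding control flow (preempt_disable(),
the prev != next branch) are paraphrased from the 2.6.35-era scheduler and are
not part of this patch, so treat it as an illustration rather than a
compilable excerpt:

/*
 * Condensed paraphrase of the patched schedule() tail. Elided code is
 * marked with "..."; everything outside the hunks is reconstructed
 * from the scheduler of that era, not introduced by this patch.
 */
asmlinkage void __sched schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;
	/* switch_count is no longer assigned here ... */

	release_kernel_lock(prev);
need_resched_nonpreemptible:
	raw_spin_lock_irq(&rq->lock);
	clear_tsk_need_resched(prev);

	/* ... it is (re)initialized on every pass through the label */
	switch_count = &prev->nivcsw;
	/* ... pick next, bump *switch_count for voluntary switches ... */

	if (likely(prev != next)) {
		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * We resume here on prev's own stack, so prev == current
		 * still holds; only cpu and rq may have changed.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	/* prev == current here, so prev can be passed instead */
	if (unlikely(reacquire_kernel_lock(prev)))
		goto need_resched_nonpreemptible;

	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}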
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3abd8f780dae..f37a9618fac3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3636,7 +3636,6 @@ need_resched:
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
-	switch_count = &prev->nivcsw;
 
 	release_kernel_lock(prev);
 need_resched_nonpreemptible:
@@ -3649,6 +3648,7 @@ need_resched_nonpreemptible:
 	raw_spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
 
+	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
@@ -3689,8 +3689,10 @@ need_resched_nonpreemptible:
 
 		context_switch(rq, prev, next); /* unlocks the rq */
 		/*
-		 * the context switch might have flipped the stack from under
-		 * us, hence refresh the local variables.
+		 * The context switch have flipped the stack from under us
+		 * and restored the local variables which were saved when
+		 * this task called schedule() in the past. prev == current
+		 * is still correct, but it can be moved to another cpu/rq.
 		 */
 		cpu = smp_processor_id();
 		rq = cpu_rq(cpu);
@@ -3699,11 +3701,8 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(current) < 0)) {
-		prev = rq->curr;
-		switch_count = &prev->nivcsw;
+	if (unlikely(reacquire_kernel_lock(prev)))
 		goto need_resched_nonpreemptible;
-	}
 
 	preempt_enable_no_resched();
 	if (need_resched())