author     Lai Jiangshan <laijs@cn.fujitsu.com>    2009-03-06 06:40:20 -0500
committer  Ingo Molnar <mingo@elte.hu>             2009-03-06 06:48:55 -0500
commit     5ed0cec0ac5f1b3759bdbe4d9df32ee4ff8afb5a
tree       a804da35b296f278865b194661cc6e75bfdaf11f
parent     7fc07d84108d54c5b94625c0e168f31b2d66976e
sched: TIF_NEED_RESCHED -> need_resched() cleanup
Impact: cleanup
Use test_tsk_need_resched(), set_tsk_need_resched(), need_resched()
instead of using TIF_NEED_RESCHED.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <49B10BA4.9070209@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/sched.c    | 10 +++++-----
 lib/kernel_lock.c |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)
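For reference, the helpers this patch switches to are thin wrappers around the thread-flag operations. A minimal sketch of their definitions, assuming the include/linux/sched.h of this era (paraphrased for illustration, not part of the patch):

/* Sketch of the helpers used by this patch (include/linux/sched.h, ~2.6.29,
 * paraphrased). Note that the unlikely() branch hint is folded into the
 * test helpers, which is why it disappears from the call sites below.
 */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

Because the hint lives inside test_tsk_need_resched() and need_resched(), the call sites can drop their open-coded unlikely() without changing the generated branch hints.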
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b92f40c147d..e0fa739a441b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1189,10 +1189,10 @@ static void resched_task(struct task_struct *p)
 
 	assert_spin_locked(&task_rq(p)->lock);
 
-	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+	if (test_tsk_need_resched(p))
 		return;
 
-	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+	set_tsk_need_resched(p);
 
 	cpu = task_cpu(p);
 	if (cpu == smp_processor_id())
@@ -1248,7 +1248,7 @@ void wake_up_idle_cpu(int cpu)
 	 * lockless. The worst case is that the other CPU runs the
 	 * idle task through an additional NOOP schedule()
 	 */
-	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+	set_tsk_need_resched(rq->idle);
 
 	/* NEED_RESCHED must be visible before we test polling */
 	smp_mb();
@@ -4740,7 +4740,7 @@ asmlinkage void __sched preempt_schedule(void)
 		 * between schedule and now.
 		 */
 		barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
 
@@ -4769,7 +4769,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 		 * between schedule and now.
 		 */
 		barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 
 #endif /* CONFIG_PREEMPT */
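For context on the smp_mb() retained in the wake_up_idle_cpu() hunk, a sketch of the lines that follow it in that function, assuming the implementation of this era (not part of this diff):

/* Paraphrased tail of wake_up_idle_cpu() (~2.6.29). The barrier orders the
 * need_resched store against the polling test: a polling idle task notices
 * TIF_NEED_RESCHED on its own, while a non-polling one must be kicked with
 * a reschedule IPI.
 */
smp_mb();
if (!tsk_is_polling(rq->idle))
	smp_send_reschedule(cpu);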
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 01a3c22c1b5a..39f1029e3525 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 int __lockfunc __reacquire_kernel_lock(void)
 {
 	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
+		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
 	}
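The -EAGAIN path above exists so that a task spinning to retake the big kernel lock backs off when a reschedule is pending. A sketch of the caller side, assuming the BKL plumbing of this era (reacquire_kernel_lock() in include/linux/smp_lock.h and its use in schedule(); paraphrased, names as in ~2.6.29):

/* Only tasks that held the BKL across schedule() need to retake it. */
static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return __reacquire_kernel_lock();
	return 0;
}

/* In schedule(): a failed reacquire loops back through the scheduler
 * instead of busy-waiting on the lock while need_resched() is set.
 */
if (unlikely(reacquire_kernel_lock(current) < 0))
	goto need_resched_nonpreemptible;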
