 kernel/sched.c    | 10 +++++-----
 lib/kernel_lock.c |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)
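The patch below converts open-coded TIF_NEED_RESCHED thread-flag operations in the scheduler and the big kernel lock to the dedicated helpers. For context, here is a sketch of how those helpers were defined in include/linux/sched.h in this era (reconstructed for illustration, not quoted from this tree); note that the unlikely() branch hint the old call sites spelled out is folded into the test helpers:

/* Sketch of the include/linux/sched.h helpers used by this patch. */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	/* The branch hint lives here, not at the call site. */
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int need_resched(void)
{
	/* Same test, against the current thread's flags. */
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}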
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b92f40c147d..e0fa739a441b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1189,10 +1189,10 @@ static void resched_task(struct task_struct *p)
 
 	assert_spin_locked(&task_rq(p)->lock);
 
-	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+	if (test_tsk_need_resched(p))
 		return;
 
-	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+	set_tsk_need_resched(p);
 
 	cpu = task_cpu(p);
 	if (cpu == smp_processor_id())
@@ -1248,7 +1248,7 @@ void wake_up_idle_cpu(int cpu)
 	 * lockless. The worst case is that the other CPU runs the
 	 * idle task through an additional NOOP schedule()
 	 */
-	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+	set_tsk_need_resched(rq->idle);
 
 	/* NEED_RESCHED must be visible before we test polling */
 	smp_mb();
@@ -4740,7 +4740,7 @@ asmlinkage void __sched preempt_schedule(void)
 		 * between schedule and now.
 		 */
 		barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
 
@@ -4769,7 +4769,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 		 * between schedule and now.
 		 */
 		barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 
 #endif /* CONFIG_PREEMPT */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 01a3c22c1b5a..39f1029e3525 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 int __lockfunc __reacquire_kernel_lock(void)
 {
 	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
+		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
 	}
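Because the test helpers embed unlikely() themselves, dropping the explicit hint at the converted call sites should not change the generated branch layout; the patch reads as a pure cleanup, and polling loops such as __reacquire_kernel_lock() above become easier to follow.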