about summary refs log tree commit diff stats
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
author: Peter Zijlstra <peterz@infradead.org> 2013-09-17 03:30:55 -0400
committer: Ingo Molnar <mingo@kernel.org> 2013-09-25 07:53:08 -0400
commitb021fe3e25094fbec22d0eff846d2adeee1b9736 (patch)
tree6193cc7952f980625e9f77c76f650fb2a7844071 /kernel/sched/core.c
parent0c44c2d0f459cd7e275242b72f500137c4fa834d (diff)
sched, rcu: Make RCU use resched_cpu()
We're going to deprecate and remove set_need_resched() for it will do the wrong thing. Make an exception for RCU and allow it to use resched_cpu() which will do the right thing. Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Paul McKenney <paulmck@linux.vnet.ibm.com> Link: http://lkml.kernel.org/n/tip-2eywnacjl1nllctl1nszqa5w@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  10
1 file changed, 2 insertions, 8 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ac5796783c49..242da0c03aba 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -513,12 +513,11 @@ static inline void init_hrtick(void)
513 * might also involve a cross-CPU call to trigger the scheduler on 513 * might also involve a cross-CPU call to trigger the scheduler on
514 * the target CPU. 514 * the target CPU.
515 */ 515 */
516#ifdef CONFIG_SMP
517void resched_task(struct task_struct *p) 516void resched_task(struct task_struct *p)
518{ 517{
519 int cpu; 518 int cpu;
520 519
521 assert_raw_spin_locked(&task_rq(p)->lock); 520 lockdep_assert_held(&task_rq(p)->lock);
522 521
523 if (test_tsk_need_resched(p)) 522 if (test_tsk_need_resched(p))
524 return; 523 return;
@@ -546,6 +545,7 @@ void resched_cpu(int cpu)
546 raw_spin_unlock_irqrestore(&rq->lock, flags); 545 raw_spin_unlock_irqrestore(&rq->lock, flags);
547} 546}
548 547
548#ifdef CONFIG_SMP
549#ifdef CONFIG_NO_HZ_COMMON 549#ifdef CONFIG_NO_HZ_COMMON
550/* 550/*
551 * In the semi idle case, use the nearest busy cpu for migrating timers 551 * In the semi idle case, use the nearest busy cpu for migrating timers
@@ -693,12 +693,6 @@ void sched_avg_update(struct rq *rq)
693 } 693 }
694} 694}
695 695
696#else /* !CONFIG_SMP */
697void resched_task(struct task_struct *p)
698{
699 assert_raw_spin_locked(&task_rq(p)->lock);
700 set_tsk_need_resched(p);
701}
702#endif /* CONFIG_SMP */ 696#endif /* CONFIG_SMP */
703 697
704#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 698#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \