path: root/kernel/sched.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:26:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:26:53 -0400
commit    19b4a8d520a6e0176dd52aaa429261ad4fcaa545 (patch)
tree      6dcf5a780718fc50b9cd79cc803daa7c7e080a02 /kernel/sched.c
parent    3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (diff)
parent    048b718029033af117870d3da47da12995be14a3 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  rcu: Move propagation of ->completed from rcu_start_gp() to rcu_report_qs_rsp()
  rcu: Remove rcu_needs_cpu_flush() to avoid false quiescent states
  rcu: Wire up RCU_BOOST_PRIO for rcutree
  rcu: Make rcu_torture_boost() exit loops at end of test
  rcu: Make rcu_torture_fqs() exit loops at end of test
  rcu: Permit rt_mutex_unlock() with irqs disabled
  rcu: Avoid having just-onlined CPU resched itself when RCU is idle
  rcu: Suppress NMI backtraces when stall ends before dump
  rcu: Prohibit grace periods during early boot
  rcu: Simplify unboosting checks
  rcu: Prevent early boot set_need_resched() from __rcu_pending()
  rcu: Dump local stack if cannot dump all CPUs' stacks
  rcu: Move __rcu_read_unlock()'s barrier() within if-statement
  rcu: Improve rcu_assign_pointer() and RCU_INIT_POINTER() documentation
  rcu: Make rcu_assign_pointer() unconditionally insert a memory barrier
  rcu: Make rcu_implicit_dynticks_qs() locals be correct size
  rcu: Eliminate in_irq() checks in rcu_enter_nohz()
  nohz: Remove nohz_cpu_mask
  rcu: Document interpretation of RCU-lockdep splats
  rcu: Allow rcutorture's stat_interval parameter to be changed at runtime
  ...
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  13
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8aa00803c1ec..03ad0113801a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4213,6 +4213,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	 */
 	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
 		__schedule_bug(prev);
+	rcu_sleep_check();
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
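The new rcu_sleep_check() call fires on every pass through schedule_debug(), so an illegal voluntary context switch inside an RCU read-side critical section is flagged at the point of the switch. A sketch of the lockdep-based shape of this check under CONFIG_PROVE_RCU follows; the real macro lives in include/linux/rcupdate.h, and the details here are approximate rather than verbatim:

/* Sketch, not the verbatim kernel macro: under CONFIG_PROVE_RCU,
 * rcu_sleep_check() consults lockdep's per-flavor lock maps and
 * complains if we are about to block inside an RCU read-side
 * critical section.  Without CONFIG_PROVE_RCU it compiles away.
 */
#ifdef CONFIG_PROVE_RCU
#define rcu_sleep_check()						\
	do {								\
		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
			"Illegal context switch in RCU-bh read-side critical section"); \
		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
			"Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)
#else
#define rcu_sleep_check()	do { } while (0)
#endif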
@@ -5955,15 +5956,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 }
 
 /*
- * In a system that switches off the HZ timer nohz_cpu_mask
- * indicates which cpus entered this state. This is used
- * in the rcu update to wait only for active cpus. For system
- * which do not switch off the HZ timer nohz_cpu_mask should
- * always be CPU_BITS_NONE.
- */
-cpumask_var_t nohz_cpu_mask;
-
-/*
  * Increase the granularity value when there are more CPUs,
  * because with more CPUs the 'effective latency' as visible
  * to users decreases. But the relationship is not linear,
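This deletion comes from the merged 'nohz: Remove nohz_cpu_mask' commit: RCU was the mask's only consumer, and it now samples each CPU's dyntick-idle counter directly instead of reading a globally updated cpumask, so the shared cacheline the mask used to bounce between CPUs is gone. A rough sketch of the replacement idea; the struct and function names below are illustrative, not the kernel's (the real logic is in kernel/rcutree.c, e.g. dyntick_save_progress_counter()):

/* Illustrative only: the counter-parity convention follows
 * kernel/rcutree.c of this era, but names and layout are simplified.
 */
struct rcu_dynticks_sketch {
	atomic_t dynticks;	/* even => CPU is in dyntick-idle mode */
};

static int cpu_is_dyntick_idle(struct rcu_dynticks_sketch *rdtp)
{
	/* An even value means the CPU is idle from RCU's point of view,
	 * i.e. in an extended quiescent state, so the grace-period
	 * machinery need not wait on it.
	 */
	return (atomic_read(&rdtp->dynticks) & 0x1) == 0;
}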
@@ -8175,8 +8167,6 @@ void __init sched_init(void)
 	 */
 	current->sched_class = &fair_sched_class;
 
-	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_NO_HZ
@@ -8206,6 +8196,7 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 {
 	static unsigned long prev_jiffy; /* ratelimiting */
 
+	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
 	    system_state != SYSTEM_RUNNING || oops_in_progress)
 		return;
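Hooking rcu_sleep_check() into __might_sleep() as well means the might_sleep() debug machinery now flags blocking primitives called under an RCU read lock, in addition to the schedule_debug() check above. A hypothetical reader that would now draw a splat (an illustrative function, not code from the tree); msleep() eventually reaches schedule(), whose schedule_debug() path runs the new check:

#include <linux/rcupdate.h>
#include <linux/delay.h>

/* Hypothetical example of the bug class these hooks catch: blocking
 * inside an RCU read-side critical section.  With RCU lockdep enabled,
 * the sleep in msleep() triggers rcu_sleep_check() and warns.
 */
static void buggy_reader(void)
{
	rcu_read_lock();
	msleep(10);		/* BUG: voluntary sleep under rcu_read_lock() */
	rcu_read_unlock();
}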