path: root/kernel/sched
author	Frederic Weisbecker <fweisbec@gmail.com>	2011-08-10 17:21:01 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2013-04-03 07:56:03 -0400
commit	3451d0243c3cdfd729b36f9684a14659d4895ca3 (patch)
tree	5307f4492708ae089dd0a6d81b54f9e606707ca8 /kernel/sched
parent	ab71d36ddb9e60d4ddb28a187718815d38c3c666 (diff)
nohz: Rename CONFIG_NO_HZ to CONFIG_NO_HZ_COMMON
We are planning to convert the dynticks Kconfig options layout into a choice menu. The user must be able to easily pick any of the following implementations: constant periodic tick, idle dynticks, full dynticks.

As this implies a mutual exclusion, the two dynticks implementations need to converge on the selection of a common Kconfig option in order to ease the sharing of a common infrastructure.

It would thus seem pretty natural to reuse CONFIG_NO_HZ to that end. It already implements all the idle dynticks code and the full dynticks depends on all that code for now.

So ideally the choice menu would propose CONFIG_NO_HZ_IDLE and CONFIG_NO_HZ_EXTENDED, then both would select CONFIG_NO_HZ.

On the other hand we want to stay backward compatible: if CONFIG_NO_HZ is set in an older config file, we want to enable CONFIG_NO_HZ_IDLE by default.

But we can't afford both at the same time or we run into a circular dependency:

1) CONFIG_NO_HZ_IDLE and CONFIG_NO_HZ_EXTENDED both select CONFIG_NO_HZ
2) If CONFIG_NO_HZ is set, we default to CONFIG_NO_HZ_IDLE

We might be able to support that from Kconfig/Kbuild but it may not be wise to introduce such a confusing behaviour.

So to solve this, create a new CONFIG_NO_HZ_COMMON option which gathers the common code between idle and full dynticks (that common code for now is simply the idle dynticks code) and select it from their referring Kconfig.

Then we'll later create CONFIG_NO_HZ_IDLE and map CONFIG_NO_HZ to it for backward compatibility.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
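[Editor's illustration, not part of this patch] A minimal sketch of the Kconfig layout the changelog describes, in Kconfig syntax: NO_HZ_COMMON as a hidden option gathering the shared dynticks code, a choice menu whose entries select it, and the legacy NO_HZ symbol only steering the choice default to avoid the circular dependency. The HZ_PERIODIC name and the exact prompts are assumptions; the entries that later landed in kernel/time/Kconfig may differ.

config NO_HZ_COMMON
	bool
	# Gathers the code shared by idle and full dynticks (for now the
	# idle dynticks code). Not user-visible; only selected from below.

choice
	prompt "Timer tick handling"
	default NO_HZ_IDLE if NO_HZ

config HZ_PERIODIC
	bool "Periodic timer ticks (constant rate, no dynticks)"
	# Hypothetical name for the constant periodic tick option.

config NO_HZ_IDLE
	bool "Idle dynticks system (tickless idle)"
	select NO_HZ_COMMON

config NO_HZ_EXTENDED
	bool "Full dynticks system (tickless)"
	select NO_HZ_COMMON
	# Depends on the same common infrastructure as idle dynticks.

endchoice

config NO_HZ
	bool "Old Idle dynticks config"
	# Backward compatibility: an old .config with CONFIG_NO_HZ=y makes
	# the choice above default to NO_HZ_IDLE instead of selecting it,
	# avoiding the circular dependency described in the changelog.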
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	18
-rw-r--r--	kernel/sched/fair.c	10
-rw-r--r--	kernel/sched/sched.h	4
3 files changed, 16 insertions, 16 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e91ee589f793..9bb397da63d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -549,7 +549,7 @@ void resched_cpu(int cpu)
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * In the semi idle case, use the nearest busy cpu for migrating timers
  * from an idle cpu. This is good for power-savings.
@@ -641,14 +641,14 @@ static inline bool got_nohz_idle_kick(void)
 	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 }
 
-#else /* CONFIG_NO_HZ */
+#else /* CONFIG_NO_HZ_COMMON */
 
 static inline bool got_nohz_idle_kick(void)
 {
 	return false;
 }
 
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 void sched_avg_update(struct rq *rq)
 {
@@ -2139,7 +2139,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 	return load >> FSHIFT;
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * Handle NO_HZ for the global load-average.
  *
@@ -2365,12 +2365,12 @@ static void calc_global_nohz(void)
 	smp_wmb();
 	calc_load_idx++;
 }
-#else /* !CONFIG_NO_HZ */
+#else /* !CONFIG_NO_HZ_COMMON */
 
 static inline long calc_load_fold_idle(void) { return 0; }
 static inline void calc_global_nohz(void) { }
 
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2530,7 +2530,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 	sched_avg_update(this_rq);
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * There is no sane way to deal with nohz on smp when using jiffies because the
  * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
@@ -2590,7 +2590,7 @@ void update_cpu_load_nohz(void)
 	}
 	raw_spin_unlock(&this_rq->lock);
 }
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from scheduler_tick()
@@ -7023,7 +7023,7 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->cfs_tasks);
 
 		rq_attach_root(rq, &def_root_domain);
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 		rq->nohz_flags = 0;
 #endif
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 539760ef00c4..5c97fca091a7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5331,7 +5331,7 @@ out_unlock:
 	return 0;
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * idle load balancing details
  * - When one of the busy CPUs notice that there may be an idle rebalancing
@@ -5541,9 +5541,9 @@ out:
 	rq->next_balance = next_balance;
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
- * In CONFIG_NO_HZ case, the idle balance kickee will do the
+ * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
  * rebalancing for all the cpus for whom scheduler ticks are stopped.
  */
 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
@@ -5686,7 +5686,7 @@ void trigger_load_balance(struct rq *rq, int cpu)
 	if (time_after_eq(jiffies, rq->next_balance) &&
 	    likely(!on_null_domain(cpu)))
 		raise_softirq(SCHED_SOFTIRQ);
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
 		nohz_balancer_kick(cpu);
 #endif
@@ -6156,7 +6156,7 @@ __init void init_sched_fair_class(void)
 #ifdef CONFIG_SMP
 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	nohz.next_balance = jiffies;
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
 	cpu_notifier(sched_ilb_notifier, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3bd15a43eebc..889904dd6d77 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -404,7 +404,7 @@ struct rq {
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned long last_load_update_tick;
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	u64 nohz_stamp;
 	unsigned long nohz_flags;
 #endif
@@ -1333,7 +1333,7 @@ extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {
 	NOHZ_TICK_STOPPED,
 	NOHZ_BALANCE_KICK,