aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2011-08-10 17:21:01 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2013-04-03 07:56:03 -0400
commit3451d0243c3cdfd729b36f9684a14659d4895ca3 (patch)
tree5307f4492708ae089dd0a6d81b54f9e606707ca8 /kernel
parentab71d36ddb9e60d4ddb28a187718815d38c3c666 (diff)
nohz: Rename CONFIG_NO_HZ to CONFIG_NO_HZ_COMMON
We are planning to convert the dynticks Kconfig options layout into a choice menu. The user must be able to easily pick any of the following implementations: constant periodic tick, idle dynticks, full dynticks. As this implies a mutual exclusion, the two dynticks implementations need to converge on the selection of a common Kconfig option in order to ease the sharing of a common infrastructure. It would thus seem pretty natural to reuse CONFIG_NO_HZ to that end. It already implements all the idle dynticks code and the full dynticks depends on all that code for now. So ideally the choice menu would propose CONFIG_NO_HZ_IDLE and CONFIG_NO_HZ_EXTENDED then both would select CONFIG_NO_HZ. On the other hand we want to stay backward compatible: if CONFIG_NO_HZ is set in an older config file, we want to enable CONFIG_NO_HZ_IDLE by default. But we can't afford both at the same time or we run into a circular dependency: 1) CONFIG_NO_HZ_IDLE and CONFIG_NO_HZ_EXTENDED both select CONFIG_NO_HZ 2) If CONFIG_NO_HZ is set, we default to CONFIG_NO_HZ_IDLE We might be able to support that from Kconfig/Kbuild but it may not be wise to introduce such a confusing behaviour. So to solve this, create a new CONFIG_NO_HZ_COMMON option which gathers the common code between idle and full dynticks (that common code for now is simply the idle dynticks code) and select it from their referring Kconfig. Then we'll later create CONFIG_NO_HZ_IDLE and map CONFIG_NO_HZ to it for backward compatibility. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Christoph Lameter <cl@linux.com> Cc: Geoff Levand <geoff@infradead.org> Cc: Gilad Ben Yossef <gilad@benyossef.com> Cc: Hakan Akkan <hakanakkan@gmail.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Kevin Hilman <khilman@linaro.org> Cc: Li Zhong <zhong@linux.vnet.ibm.com> Cc: Namhyung Kim <namhyung.kim@lge.com> Cc: Paul E. 
McKenney <paulmck@linux.vnet.ibm.com> Cc: Paul Gortmaker <paul.gortmaker@windriver.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/hrtimer.c4
-rw-r--r--kernel/sched/core.c18
-rw-r--r--kernel/sched/fair.c10
-rw-r--r--kernel/sched/sched.h4
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/time/Kconfig13
-rw-r--r--kernel/time/tick-sched.c12
-rw-r--r--kernel/timer.c4
8 files changed, 36 insertions, 31 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cc47812d3feb..ec60482d8b03 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -160,7 +160,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
160 */ 160 */
161static int hrtimer_get_target(int this_cpu, int pinned) 161static int hrtimer_get_target(int this_cpu, int pinned)
162{ 162{
163#ifdef CONFIG_NO_HZ 163#ifdef CONFIG_NO_HZ_COMMON
164 if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) 164 if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
165 return get_nohz_timer_target(); 165 return get_nohz_timer_target();
166#endif 166#endif
@@ -1106,7 +1106,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1106} 1106}
1107EXPORT_SYMBOL_GPL(hrtimer_get_remaining); 1107EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
1108 1108
1109#ifdef CONFIG_NO_HZ 1109#ifdef CONFIG_NO_HZ_COMMON
1110/** 1110/**
1111 * hrtimer_get_next_event - get the time until next expiry event 1111 * hrtimer_get_next_event - get the time until next expiry event
1112 * 1112 *
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e91ee589f793..9bb397da63d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -549,7 +549,7 @@ void resched_cpu(int cpu)
549 raw_spin_unlock_irqrestore(&rq->lock, flags); 549 raw_spin_unlock_irqrestore(&rq->lock, flags);
550} 550}
551 551
552#ifdef CONFIG_NO_HZ 552#ifdef CONFIG_NO_HZ_COMMON
553/* 553/*
554 * In the semi idle case, use the nearest busy cpu for migrating timers 554 * In the semi idle case, use the nearest busy cpu for migrating timers
555 * from an idle cpu. This is good for power-savings. 555 * from an idle cpu. This is good for power-savings.
@@ -641,14 +641,14 @@ static inline bool got_nohz_idle_kick(void)
641 return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); 641 return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
642} 642}
643 643
644#else /* CONFIG_NO_HZ */ 644#else /* CONFIG_NO_HZ_COMMON */
645 645
646static inline bool got_nohz_idle_kick(void) 646static inline bool got_nohz_idle_kick(void)
647{ 647{
648 return false; 648 return false;
649} 649}
650 650
651#endif /* CONFIG_NO_HZ */ 651#endif /* CONFIG_NO_HZ_COMMON */
652 652
653void sched_avg_update(struct rq *rq) 653void sched_avg_update(struct rq *rq)
654{ 654{
@@ -2139,7 +2139,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
2139 return load >> FSHIFT; 2139 return load >> FSHIFT;
2140} 2140}
2141 2141
2142#ifdef CONFIG_NO_HZ 2142#ifdef CONFIG_NO_HZ_COMMON
2143/* 2143/*
2144 * Handle NO_HZ for the global load-average. 2144 * Handle NO_HZ for the global load-average.
2145 * 2145 *
@@ -2365,12 +2365,12 @@ static void calc_global_nohz(void)
2365 smp_wmb(); 2365 smp_wmb();
2366 calc_load_idx++; 2366 calc_load_idx++;
2367} 2367}
2368#else /* !CONFIG_NO_HZ */ 2368#else /* !CONFIG_NO_HZ_COMMON */
2369 2369
2370static inline long calc_load_fold_idle(void) { return 0; } 2370static inline long calc_load_fold_idle(void) { return 0; }
2371static inline void calc_global_nohz(void) { } 2371static inline void calc_global_nohz(void) { }
2372 2372
2373#endif /* CONFIG_NO_HZ */ 2373#endif /* CONFIG_NO_HZ_COMMON */
2374 2374
2375/* 2375/*
2376 * calc_load - update the avenrun load estimates 10 ticks after the 2376 * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2530,7 +2530,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
2530 sched_avg_update(this_rq); 2530 sched_avg_update(this_rq);
2531} 2531}
2532 2532
2533#ifdef CONFIG_NO_HZ 2533#ifdef CONFIG_NO_HZ_COMMON
2534/* 2534/*
2535 * There is no sane way to deal with nohz on smp when using jiffies because the 2535 * There is no sane way to deal with nohz on smp when using jiffies because the
2536 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading 2536 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
@@ -2590,7 +2590,7 @@ void update_cpu_load_nohz(void)
2590 } 2590 }
2591 raw_spin_unlock(&this_rq->lock); 2591 raw_spin_unlock(&this_rq->lock);
2592} 2592}
2593#endif /* CONFIG_NO_HZ */ 2593#endif /* CONFIG_NO_HZ_COMMON */
2594 2594
2595/* 2595/*
2596 * Called from scheduler_tick() 2596 * Called from scheduler_tick()
@@ -7023,7 +7023,7 @@ void __init sched_init(void)
7023 INIT_LIST_HEAD(&rq->cfs_tasks); 7023 INIT_LIST_HEAD(&rq->cfs_tasks);
7024 7024
7025 rq_attach_root(rq, &def_root_domain); 7025 rq_attach_root(rq, &def_root_domain);
7026#ifdef CONFIG_NO_HZ 7026#ifdef CONFIG_NO_HZ_COMMON
7027 rq->nohz_flags = 0; 7027 rq->nohz_flags = 0;
7028#endif 7028#endif
7029#endif 7029#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 539760ef00c4..5c97fca091a7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5331,7 +5331,7 @@ out_unlock:
5331 return 0; 5331 return 0;
5332} 5332}
5333 5333
5334#ifdef CONFIG_NO_HZ 5334#ifdef CONFIG_NO_HZ_COMMON
5335/* 5335/*
5336 * idle load balancing details 5336 * idle load balancing details
5337 * - When one of the busy CPUs notice that there may be an idle rebalancing 5337 * - When one of the busy CPUs notice that there may be an idle rebalancing
@@ -5541,9 +5541,9 @@ out:
5541 rq->next_balance = next_balance; 5541 rq->next_balance = next_balance;
5542} 5542}
5543 5543
5544#ifdef CONFIG_NO_HZ 5544#ifdef CONFIG_NO_HZ_COMMON
5545/* 5545/*
5546 * In CONFIG_NO_HZ case, the idle balance kickee will do the 5546 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
5547 * rebalancing for all the cpus for whom scheduler ticks are stopped. 5547 * rebalancing for all the cpus for whom scheduler ticks are stopped.
5548 */ 5548 */
5549static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) 5549static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
@@ -5686,7 +5686,7 @@ void trigger_load_balance(struct rq *rq, int cpu)
5686 if (time_after_eq(jiffies, rq->next_balance) && 5686 if (time_after_eq(jiffies, rq->next_balance) &&
5687 likely(!on_null_domain(cpu))) 5687 likely(!on_null_domain(cpu)))
5688 raise_softirq(SCHED_SOFTIRQ); 5688 raise_softirq(SCHED_SOFTIRQ);
5689#ifdef CONFIG_NO_HZ 5689#ifdef CONFIG_NO_HZ_COMMON
5690 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) 5690 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
5691 nohz_balancer_kick(cpu); 5691 nohz_balancer_kick(cpu);
5692#endif 5692#endif
@@ -6156,7 +6156,7 @@ __init void init_sched_fair_class(void)
6156#ifdef CONFIG_SMP 6156#ifdef CONFIG_SMP
6157 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); 6157 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6158 6158
6159#ifdef CONFIG_NO_HZ 6159#ifdef CONFIG_NO_HZ_COMMON
6160 nohz.next_balance = jiffies; 6160 nohz.next_balance = jiffies;
6161 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 6161 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
6162 cpu_notifier(sched_ilb_notifier, 0); 6162 cpu_notifier(sched_ilb_notifier, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3bd15a43eebc..889904dd6d77 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -404,7 +404,7 @@ struct rq {
404 #define CPU_LOAD_IDX_MAX 5 404 #define CPU_LOAD_IDX_MAX 5
405 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 405 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
406 unsigned long last_load_update_tick; 406 unsigned long last_load_update_tick;
407#ifdef CONFIG_NO_HZ 407#ifdef CONFIG_NO_HZ_COMMON
408 u64 nohz_stamp; 408 u64 nohz_stamp;
409 unsigned long nohz_flags; 409 unsigned long nohz_flags;
410#endif 410#endif
@@ -1333,7 +1333,7 @@ extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1333 1333
1334extern void account_cfs_bandwidth_used(int enabled, int was_enabled); 1334extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
1335 1335
1336#ifdef CONFIG_NO_HZ 1336#ifdef CONFIG_NO_HZ_COMMON
1337enum rq_nohz_flag_bits { 1337enum rq_nohz_flag_bits {
1338 NOHZ_TICK_STOPPED, 1338 NOHZ_TICK_STOPPED,
1339 NOHZ_BALANCE_KICK, 1339 NOHZ_BALANCE_KICK,
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b4d252fd195b..de15813f2a66 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -348,7 +348,7 @@ void irq_exit(void)
348 if (!in_interrupt() && local_softirq_pending()) 348 if (!in_interrupt() && local_softirq_pending())
349 invoke_softirq(); 349 invoke_softirq();
350 350
351#ifdef CONFIG_NO_HZ 351#ifdef CONFIG_NO_HZ_COMMON
352 /* Make sure that timer wheel updates are propagated */ 352 /* Make sure that timer wheel updates are propagated */
353 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) 353 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
354 tick_nohz_irq_exit(); 354 tick_nohz_irq_exit();
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 726c33e00da2..c88fc43494c9 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -64,16 +64,21 @@ config GENERIC_CMOS_UPDATE
64if GENERIC_CLOCKEVENTS 64if GENERIC_CLOCKEVENTS
65menu "Timers subsystem" 65menu "Timers subsystem"
66 66
67# Core internal switch. Selected by NO_HZ / HIGH_RES_TIMERS. This is 67# Core internal switch. Selected by NO_HZ_COMMON / HIGH_RES_TIMERS. This is
68# only related to the tick functionality. Oneshot clockevent devices 68# only related to the tick functionality. Oneshot clockevent devices
69# are supported independ of this. 69# are supported independ of this.
70config TICK_ONESHOT 70config TICK_ONESHOT
71 bool 71 bool
72 72
73config NO_HZ_COMMON
74 bool
75 depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
76 select TICK_ONESHOT
77
73config NO_HZ 78config NO_HZ
74 bool "Tickless System (Dynamic Ticks)" 79 bool "Tickless System (Dynamic Ticks)"
75 depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS 80 depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
76 select TICK_ONESHOT 81 select NO_HZ_COMMON
77 help 82 help
78 This option enables a tickless system: timer interrupts will 83 This option enables a tickless system: timer interrupts will
79 only trigger on an as-needed basis both when the system is 84 only trigger on an as-needed basis both when the system is
@@ -81,14 +86,14 @@ config NO_HZ
81 86
82config NO_HZ_EXTENDED 87config NO_HZ_EXTENDED
83 bool "Full dynticks system" 88 bool "Full dynticks system"
84 # NO_HZ dependency 89 # NO_HZ_COMMON dependency
85 depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS 90 depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
86 # RCU_USER_QS 91 # RCU_USER_QS
87 depends on HAVE_CONTEXT_TRACKING && SMP 92 depends on HAVE_CONTEXT_TRACKING && SMP
88 # RCU_NOCB_CPU dependency 93 # RCU_NOCB_CPU dependency
89 depends on TREE_RCU || TREE_PREEMPT_RCU 94 depends on TREE_RCU || TREE_PREEMPT_RCU
90 depends on VIRT_CPU_ACCOUNTING_GEN 95 depends on VIRT_CPU_ACCOUNTING_GEN
91 select NO_HZ 96 select NO_HZ_COMMON
92 select RCU_USER_QS 97 select RCU_USER_QS
93 select RCU_NOCB_CPU 98 select RCU_NOCB_CPU
94 select CONTEXT_TRACKING_FORCE 99 select CONTEXT_TRACKING_FORCE
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 57bb3fe5aaa3..ccfc2086cd4b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -104,7 +104,7 @@ static void tick_sched_do_timer(ktime_t now)
104{ 104{
105 int cpu = smp_processor_id(); 105 int cpu = smp_processor_id();
106 106
107#ifdef CONFIG_NO_HZ 107#ifdef CONFIG_NO_HZ_COMMON
108 /* 108 /*
109 * Check if the do_timer duty was dropped. We don't care about 109 * Check if the do_timer duty was dropped. We don't care about
110 * concurrency: This happens only when the cpu in charge went 110 * concurrency: This happens only when the cpu in charge went
@@ -124,7 +124,7 @@ static void tick_sched_do_timer(ktime_t now)
124 124
125static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) 125static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
126{ 126{
127#ifdef CONFIG_NO_HZ 127#ifdef CONFIG_NO_HZ_COMMON
128 /* 128 /*
129 * When we are idle and the tick is stopped, we have to touch 129 * When we are idle and the tick is stopped, we have to touch
130 * the watchdog as we might not schedule for a really long 130 * the watchdog as we might not schedule for a really long
@@ -235,7 +235,7 @@ core_initcall(init_tick_nohz_extended);
235/* 235/*
236 * NOHZ - aka dynamic tick functionality 236 * NOHZ - aka dynamic tick functionality
237 */ 237 */
238#ifdef CONFIG_NO_HZ 238#ifdef CONFIG_NO_HZ_COMMON
239/* 239/*
240 * NO HZ enabled ? 240 * NO HZ enabled ?
241 */ 241 */
@@ -907,7 +907,7 @@ static inline void tick_check_nohz(int cpu)
907static inline void tick_nohz_switch_to_nohz(void) { } 907static inline void tick_nohz_switch_to_nohz(void) { }
908static inline void tick_check_nohz(int cpu) { } 908static inline void tick_check_nohz(int cpu) { }
909 909
910#endif /* NO_HZ */ 910#endif /* CONFIG_NO_HZ_COMMON */
911 911
912/* 912/*
913 * Called from irq_enter to notify about the possible interruption of idle() 913 * Called from irq_enter to notify about the possible interruption of idle()
@@ -992,14 +992,14 @@ void tick_setup_sched_timer(void)
992 now = ktime_get(); 992 now = ktime_get();
993 } 993 }
994 994
995#ifdef CONFIG_NO_HZ 995#ifdef CONFIG_NO_HZ_COMMON
996 if (tick_nohz_enabled) 996 if (tick_nohz_enabled)
997 ts->nohz_mode = NOHZ_MODE_HIGHRES; 997 ts->nohz_mode = NOHZ_MODE_HIGHRES;
998#endif 998#endif
999} 999}
1000#endif /* HIGH_RES_TIMERS */ 1000#endif /* HIGH_RES_TIMERS */
1001 1001
1002#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS 1002#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
1003void tick_cancel_sched_timer(int cpu) 1003void tick_cancel_sched_timer(int cpu)
1004{ 1004{
1005 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 1005 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
diff --git a/kernel/timer.c b/kernel/timer.c
index 4e3040b40d16..1b7489fdea41 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -738,7 +738,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
738 738
739 cpu = smp_processor_id(); 739 cpu = smp_processor_id();
740 740
741#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) 741#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
742 if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) 742 if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
743 cpu = get_nohz_timer_target(); 743 cpu = get_nohz_timer_target();
744#endif 744#endif
@@ -1188,7 +1188,7 @@ static inline void __run_timers(struct tvec_base *base)
1188 spin_unlock_irq(&base->lock); 1188 spin_unlock_irq(&base->lock);
1189} 1189}
1190 1190
1191#ifdef CONFIG_NO_HZ 1191#ifdef CONFIG_NO_HZ_COMMON
1192/* 1192/*
1193 * Find out when the next timer event is due to happen. This 1193 * Find out when the next timer event is due to happen. This
1194 * is used on S/390 to stop all activity when a CPU is idle. 1194 * is used on S/390 to stop all activity when a CPU is idle.