about summary refs log tree commit diff stats
path: root/kernel/time
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2013-08-14 11:58:56 -0400
committerIngo Molnar <mingo@kernel.org>2013-08-14 11:58:56 -0400
commit6f1d657668ac3041b65265d3653d7e9172a0d603 (patch)
tree6e837c683783708637cc4caf9de759166c7469b7 /kernel/time
parentd4e4ab86bcba5a72779c43dc1459f71fea3d89c8 (diff)
parentd13508f9440e46dccac6a2dd48d51a73b2207482 (diff)
Merge branch 'timers/nohz-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/nohz
Pull nohz improvements from Frederic Weisbecker: " It mostly contains fixes and full dynticks off-case optimizations. I believe that distros want to enable this feature so it seems important to optimize the case where the "nohz_full=" parameter is empty. ie: I'm trying to remove any performance regression that comes with NO_HZ_FULL=y when the feature is not used. This patchset improves the current situation a lot (off-case appears to be around 11% faster with hackbench, although I guess it may vary depending on the configuration but it should be significantly faster in any case) now there is still some work to do: I can still observe a remaining loss of 1.6% throughput seen with hackbench compared to CONFIG_NO_HZ_FULL=n. " Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')
-rw-r--r-- kernel/time/Kconfig       |  1
-rw-r--r-- kernel/time/sched_clock.c |  2
-rw-r--r-- kernel/time/tick-sched.c  | 59
3 files changed, 28 insertions(+), 34 deletions(-)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 70f27e89012b..747bbc70f53b 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -105,7 +105,6 @@ config NO_HZ_FULL
105 select RCU_USER_QS 105 select RCU_USER_QS
106 select RCU_NOCB_CPU 106 select RCU_NOCB_CPU
107 select VIRT_CPU_ACCOUNTING_GEN 107 select VIRT_CPU_ACCOUNTING_GEN
108 select CONTEXT_TRACKING_FORCE
109 select IRQ_WORK 108 select IRQ_WORK
110 help 109 help
111 Adaptively try to shutdown the tick whenever possible, even when 110 Adaptively try to shutdown the tick whenever possible, even when
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..0b479a6a22bb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
121 BUG_ON(bits > 32); 121 BUG_ON(bits > 32);
122 WARN_ON(!irqs_disabled()); 122 WARN_ON(!irqs_disabled());
123 read_sched_clock = read; 123 read_sched_clock = read;
124 sched_clock_mask = (1 << bits) - 1; 124 sched_clock_mask = (1ULL << bits) - 1;
125 cd.rate = rate; 125 cd.rate = rate;
126 126
127 /* calculate the mult/shift to convert counter ticks to ns. */ 127 /* calculate the mult/shift to convert counter ticks to ns. */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e77edc97e036..adea6fc3ba2a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -23,6 +23,7 @@
23#include <linux/irq_work.h> 23#include <linux/irq_work.h>
24#include <linux/posix-timers.h> 24#include <linux/posix-timers.h>
25#include <linux/perf_event.h> 25#include <linux/perf_event.h>
26#include <linux/context_tracking.h>
26 27
27#include <asm/irq_regs.h> 28#include <asm/irq_regs.h>
28 29
@@ -148,8 +149,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
148} 149}
149 150
150#ifdef CONFIG_NO_HZ_FULL 151#ifdef CONFIG_NO_HZ_FULL
151static cpumask_var_t nohz_full_mask; 152cpumask_var_t tick_nohz_full_mask;
152bool have_nohz_full_mask; 153bool tick_nohz_full_running;
153 154
154static bool can_stop_full_tick(void) 155static bool can_stop_full_tick(void)
155{ 156{
@@ -182,7 +183,8 @@ static bool can_stop_full_tick(void)
182 * Don't allow the user to think they can get 183 * Don't allow the user to think they can get
183 * full NO_HZ with this machine. 184 * full NO_HZ with this machine.
184 */ 185 */
185 WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); 186 WARN_ONCE(tick_nohz_full_running,
187 "NO_HZ FULL will not work with unstable sched clock");
186 return false; 188 return false;
187 } 189 }
188#endif 190#endif
@@ -196,7 +198,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
196 * Re-evaluate the need for the tick on the current CPU 198 * Re-evaluate the need for the tick on the current CPU
197 * and restart it if necessary. 199 * and restart it if necessary.
198 */ 200 */
199void tick_nohz_full_check(void) 201void __tick_nohz_full_check(void)
200{ 202{
201 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 203 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
202 204
@@ -210,7 +212,7 @@ void tick_nohz_full_check(void)
210 212
211static void nohz_full_kick_work_func(struct irq_work *work) 213static void nohz_full_kick_work_func(struct irq_work *work)
212{ 214{
213 tick_nohz_full_check(); 215 __tick_nohz_full_check();
214} 216}
215 217
216static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { 218static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -229,7 +231,7 @@ void tick_nohz_full_kick(void)
229 231
230static void nohz_full_kick_ipi(void *info) 232static void nohz_full_kick_ipi(void *info)
231{ 233{
232 tick_nohz_full_check(); 234 __tick_nohz_full_check();
233} 235}
234 236
235/* 237/*
@@ -238,11 +240,11 @@ static void nohz_full_kick_ipi(void *info)
238 */ 240 */
239void tick_nohz_full_kick_all(void) 241void tick_nohz_full_kick_all(void)
240{ 242{
241 if (!have_nohz_full_mask) 243 if (!tick_nohz_full_running)
242 return; 244 return;
243 245
244 preempt_disable(); 246 preempt_disable();
245 smp_call_function_many(nohz_full_mask, 247 smp_call_function_many(tick_nohz_full_mask,
246 nohz_full_kick_ipi, NULL, false); 248 nohz_full_kick_ipi, NULL, false);
247 preempt_enable(); 249 preempt_enable();
248} 250}
@@ -252,7 +254,7 @@ void tick_nohz_full_kick_all(void)
252 * It might need the tick due to per task/process properties: 254 * It might need the tick due to per task/process properties:
253 * perf events, posix cpu timers, ... 255 * perf events, posix cpu timers, ...
254 */ 256 */
255void tick_nohz_task_switch(struct task_struct *tsk) 257void __tick_nohz_task_switch(struct task_struct *tsk)
256{ 258{
257 unsigned long flags; 259 unsigned long flags;
258 260
@@ -268,31 +270,23 @@ out:
268 local_irq_restore(flags); 270 local_irq_restore(flags);
269} 271}
270 272
271int tick_nohz_full_cpu(int cpu)
272{
273 if (!have_nohz_full_mask)
274 return 0;
275
276 return cpumask_test_cpu(cpu, nohz_full_mask);
277}
278
279/* Parse the boot-time nohz CPU list from the kernel parameters. */ 273/* Parse the boot-time nohz CPU list from the kernel parameters. */
280static int __init tick_nohz_full_setup(char *str) 274static int __init tick_nohz_full_setup(char *str)
281{ 275{
282 int cpu; 276 int cpu;
283 277
284 alloc_bootmem_cpumask_var(&nohz_full_mask); 278 alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
285 if (cpulist_parse(str, nohz_full_mask) < 0) { 279 if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
286 pr_warning("NOHZ: Incorrect nohz_full cpumask\n"); 280 pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
287 return 1; 281 return 1;
288 } 282 }
289 283
290 cpu = smp_processor_id(); 284 cpu = smp_processor_id();
291 if (cpumask_test_cpu(cpu, nohz_full_mask)) { 285 if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
292 pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); 286 pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
293 cpumask_clear_cpu(cpu, nohz_full_mask); 287 cpumask_clear_cpu(cpu, tick_nohz_full_mask);
294 } 288 }
295 have_nohz_full_mask = true; 289 tick_nohz_full_running = true;
296 290
297 return 1; 291 return 1;
298} 292}
@@ -310,7 +304,7 @@ static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
310 * If we handle the timekeeping duty for full dynticks CPUs, 304 * If we handle the timekeeping duty for full dynticks CPUs,
311 * we can't safely shutdown that CPU. 305 * we can't safely shutdown that CPU.
312 */ 306 */
313 if (have_nohz_full_mask && tick_do_timer_cpu == cpu) 307 if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
314 return NOTIFY_BAD; 308 return NOTIFY_BAD;
315 break; 309 break;
316 } 310 }
@@ -329,14 +323,14 @@ static int tick_nohz_init_all(void)
329 int err = -1; 323 int err = -1;
330 324
331#ifdef CONFIG_NO_HZ_FULL_ALL 325#ifdef CONFIG_NO_HZ_FULL_ALL
332 if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) { 326 if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
333 pr_err("NO_HZ: Can't allocate full dynticks cpumask\n"); 327 pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
334 return err; 328 return err;
335 } 329 }
336 err = 0; 330 err = 0;
337 cpumask_setall(nohz_full_mask); 331 cpumask_setall(tick_nohz_full_mask);
338 cpumask_clear_cpu(smp_processor_id(), nohz_full_mask); 332 cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
339 have_nohz_full_mask = true; 333 tick_nohz_full_running = true;
340#endif 334#endif
341 return err; 335 return err;
342} 336}
@@ -345,17 +339,18 @@ void __init tick_nohz_init(void)
345{ 339{
346 int cpu; 340 int cpu;
347 341
348 if (!have_nohz_full_mask) { 342 if (!tick_nohz_full_running) {
349 if (tick_nohz_init_all() < 0) 343 if (tick_nohz_init_all() < 0)
350 return; 344 return;
351 } 345 }
352 346
347 for_each_cpu(cpu, tick_nohz_full_mask)
348 context_tracking_cpu_set(cpu);
349
353 cpu_notifier(tick_nohz_cpu_down_callback, 0); 350 cpu_notifier(tick_nohz_cpu_down_callback, 0);
354 cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask); 351 cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask);
355 pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf); 352 pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
356} 353}
357#else
358#define have_nohz_full_mask (0)
359#endif 354#endif
360 355
361/* 356/*
@@ -733,7 +728,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
733 return false; 728 return false;
734 } 729 }
735 730
736 if (have_nohz_full_mask) { 731 if (tick_nohz_full_enabled()) {
737 /* 732 /*
738 * Keep the tick alive to guarantee timekeeping progression 733 * Keep the tick alive to guarantee timekeeping progression
739 * if there are full dynticks CPUs around 734 * if there are full dynticks CPUs around