Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--	kernel/time/tick-sched.c	59
1 file changed, 27 insertions(+), 32 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e77edc97e036..adea6fc3ba2a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -23,6 +23,7 @@
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
 #include <linux/perf_event.h>
+#include <linux/context_tracking.h>
 
 #include <asm/irq_regs.h>
 
@@ -148,8 +149,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_NO_HZ_FULL
-static cpumask_var_t nohz_full_mask;
-bool have_nohz_full_mask;
+cpumask_var_t tick_nohz_full_mask;
+bool tick_nohz_full_running;
 
 static bool can_stop_full_tick(void)
 {
@@ -182,7 +183,8 @@ static bool can_stop_full_tick(void)
 		 * Don't allow the user to think they can get
 		 * full NO_HZ with this machine.
 		 */
-		WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+		WARN_ONCE(tick_nohz_full_running,
+			  "NO_HZ FULL will not work with unstable sched clock");
 		return false;
 	}
 #endif
@@ -196,7 +198,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
  * Re-evaluate the need for the tick on the current CPU
  * and restart it if necessary.
  */
-void tick_nohz_full_check(void)
+void __tick_nohz_full_check(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 
@@ -210,7 +212,7 @@ void tick_nohz_full_check(void)
 
 static void nohz_full_kick_work_func(struct irq_work *work)
 {
-	tick_nohz_full_check();
+	__tick_nohz_full_check();
 }
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -229,7 +231,7 @@ void tick_nohz_full_kick(void)
 
 static void nohz_full_kick_ipi(void *info)
 {
-	tick_nohz_full_check();
+	__tick_nohz_full_check();
 }
 
 /*
@@ -238,11 +240,11 @@ static void nohz_full_kick_ipi(void *info)
  */
 void tick_nohz_full_kick_all(void)
 {
-	if (!have_nohz_full_mask)
+	if (!tick_nohz_full_running)
 		return;
 
 	preempt_disable();
-	smp_call_function_many(nohz_full_mask,
+	smp_call_function_many(tick_nohz_full_mask,
 			       nohz_full_kick_ipi, NULL, false);
 	preempt_enable();
 }
@@ -252,7 +254,7 @@ void tick_nohz_full_kick_all(void)
  * It might need the tick due to per task/process properties:
  * perf events, posix cpu timers, ...
  */
-void tick_nohz_task_switch(struct task_struct *tsk)
+void __tick_nohz_task_switch(struct task_struct *tsk)
 {
 	unsigned long flags;
 
@@ -268,31 +270,23 @@ out:
 	local_irq_restore(flags);
 }
 
-int tick_nohz_full_cpu(int cpu)
-{
-	if (!have_nohz_full_mask)
-		return 0;
-
-	return cpumask_test_cpu(cpu, nohz_full_mask);
-}
-
 /* Parse the boot-time nohz CPU list from the kernel parameters. */
 static int __init tick_nohz_full_setup(char *str)
 {
 	int cpu;
 
-	alloc_bootmem_cpumask_var(&nohz_full_mask);
-	if (cpulist_parse(str, nohz_full_mask) < 0) {
+	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+	if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
 		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
 		return 1;
 	}
 
 	cpu = smp_processor_id();
-	if (cpumask_test_cpu(cpu, nohz_full_mask)) {
+	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
 		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
-		cpumask_clear_cpu(cpu, nohz_full_mask);
+		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
 	}
-	have_nohz_full_mask = true;
+	tick_nohz_full_running = true;
 
 	return 1;
 }
@@ -310,7 +304,7 @@ static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
 		 * If we handle the timekeeping duty for full dynticks CPUs,
 		 * we can't safely shutdown that CPU.
 		 */
-		if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
+		if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
 			return NOTIFY_BAD;
 		break;
 	}
@@ -329,14 +323,14 @@ static int tick_nohz_init_all(void)
 	int err = -1;
 
 #ifdef CONFIG_NO_HZ_FULL_ALL
-	if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) {
+	if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
 		pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
 		return err;
 	}
 	err = 0;
-	cpumask_setall(nohz_full_mask);
-	cpumask_clear_cpu(smp_processor_id(), nohz_full_mask);
-	have_nohz_full_mask = true;
+	cpumask_setall(tick_nohz_full_mask);
+	cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
+	tick_nohz_full_running = true;
 #endif
 	return err;
 }
@@ -345,17 +339,18 @@ void __init tick_nohz_init(void)
 {
 	int cpu;
 
-	if (!have_nohz_full_mask) {
+	if (!tick_nohz_full_running) {
 		if (tick_nohz_init_all() < 0)
 			return;
 	}
 
+	for_each_cpu(cpu, tick_nohz_full_mask)
+		context_tracking_cpu_set(cpu);
+
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
-	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
+	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask);
 	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
 }
-#else
-#define have_nohz_full_mask (0)
 #endif
 
 /*
@@ -733,7 +728,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 		return false;
 	}
 
-	if (have_nohz_full_mask) {
+	if (tick_nohz_full_enabled()) {
 		/*
 		 * Keep the tick alive to guarantee timekeeping progression
 		 * if there are full dynticks CPUs around
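
Note: the hunks above drop the out-of-line tick_nohz_full_cpu() helper and the !CONFIG_NO_HZ_FULL "#define have_nohz_full_mask (0)" fallback, while can_stop_idle_tick() now tests tick_nohz_full_enabled(), which is not defined anywhere in this file. A minimal sketch of what the header-side replacements plausibly look like (e.g. in include/linux/tick.h), assuming they simply wrap the renamed tick_nohz_full_running flag and tick_nohz_full_mask cpumask the way the removed code did:

/*
 * Hypothetical sketch only -- these declarations are not part of this diff.
 * Assumes the helpers moved to a shared header (e.g. include/linux/tick.h)
 * and wrap the renamed state exactly as the removed tick_nohz_full_cpu() did.
 */
#ifdef CONFIG_NO_HZ_FULL
extern cpumask_var_t tick_nohz_full_mask;
extern bool tick_nohz_full_running;

static inline bool tick_nohz_full_enabled(void)
{
	/* True once nohz_full= (or CONFIG_NO_HZ_FULL_ALL) has set up the mask */
	return tick_nohz_full_running;
}

static inline bool tick_nohz_full_cpu(int cpu)
{
	if (!tick_nohz_full_enabled())
		return false;

	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
#endif

With wrappers along these lines, the single tick_nohz_full_running bool can serve both the in-file checks and the !CONFIG_NO_HZ_FULL stubs, which is why the old have_nohz_full_mask flag and its (0) fallback macro disappear from this file.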