aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2016-07-01 06:42:35 -0400
committerIngo Molnar <mingo@kernel.org>2016-07-01 06:45:34 -0400
commit0de7611a1031f25b713fda7d36de44f17c2ed790 (patch)
tree1b956111fe48f444a648868d1cbd813abe437df7
parent6168f8ed01dc46a277908938294f1132d723f58d (diff)
timers/nohz: Capitalize 'CPU' consistently
While reviewing another patch I noticed that kernel/time/tick-sched.c had a charmingly (confusingly, annoyingly) rich set of variants for spelling 'CPU': cpu cpus CPU CPUs per CPU per-CPU per cpu ... sometimes these were mixed even within the same comment block! Compress these variants down to a single consistent set of: CPU CPUs per-CPU Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/time/tick-sched.c42
1 file changed, 21 insertions, 21 deletions
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6d83e9c4a302..db57d1ba73eb 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
31#include <trace/events/timer.h> 31#include <trace/events/timer.h>
32 32
33/* 33/*
34 * Per cpu nohz control structure 34 * Per-CPU nohz control structure
35 */ 35 */
36static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); 36static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
37 37
@@ -116,8 +116,8 @@ static void tick_sched_do_timer(ktime_t now)
116#ifdef CONFIG_NO_HZ_COMMON 116#ifdef CONFIG_NO_HZ_COMMON
117 /* 117 /*
118 * Check if the do_timer duty was dropped. We don't care about 118 * Check if the do_timer duty was dropped. We don't care about
119 * concurrency: This happens only when the cpu in charge went 119 * concurrency: This happens only when the CPU in charge went
120 * into a long sleep. If two cpus happen to assign themselves to 120 * into a long sleep. If two CPUs happen to assign themselves to
121 * this duty, then the jiffies update is still serialized by 121 * this duty, then the jiffies update is still serialized by
122 * jiffies_lock. 122 * jiffies_lock.
123 */ 123 */
@@ -349,7 +349,7 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi
349/* 349/*
350 * Re-evaluate the need for the tick as we switch the current task. 350 * Re-evaluate the need for the tick as we switch the current task.
351 * It might need the tick due to per task/process properties: 351 * It might need the tick due to per task/process properties:
352 * perf events, posix cpu timers, ... 352 * perf events, posix CPU timers, ...
353 */ 353 */
354void __tick_nohz_task_switch(void) 354void __tick_nohz_task_switch(void)
355{ 355{
@@ -509,8 +509,8 @@ int tick_nohz_tick_stopped(void)
509 * 509 *
510 * In case the sched_tick was stopped on this CPU, we have to check if jiffies 510 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
511 * must be updated. Otherwise an interrupt handler could use a stale jiffy 511 * must be updated. Otherwise an interrupt handler could use a stale jiffy
512 * value. We do this unconditionally on any cpu, as we don't know whether the 512 * value. We do this unconditionally on any CPU, as we don't know whether the
513 * cpu, which has the update task assigned is in a long sleep. 513 * CPU, which has the update task assigned is in a long sleep.
514 */ 514 */
515static void tick_nohz_update_jiffies(ktime_t now) 515static void tick_nohz_update_jiffies(ktime_t now)
516{ 516{
@@ -526,7 +526,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
526} 526}
527 527
528/* 528/*
529 * Updates the per cpu time idle statistics counters 529 * Updates the per-CPU time idle statistics counters
530 */ 530 */
531static void 531static void
532update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) 532update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
@@ -566,7 +566,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
566} 566}
567 567
568/** 568/**
569 * get_cpu_idle_time_us - get the total idle time of a cpu 569 * get_cpu_idle_time_us - get the total idle time of a CPU
570 * @cpu: CPU number to query 570 * @cpu: CPU number to query
571 * @last_update_time: variable to store update time in. Do not update 571 * @last_update_time: variable to store update time in. Do not update
572 * counters if NULL. 572 * counters if NULL.
@@ -607,7 +607,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
607EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); 607EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
608 608
609/** 609/**
610 * get_cpu_iowait_time_us - get the total iowait time of a cpu 610 * get_cpu_iowait_time_us - get the total iowait time of a CPU
611 * @cpu: CPU number to query 611 * @cpu: CPU number to query
612 * @last_update_time: variable to store update time in. Do not update 612 * @last_update_time: variable to store update time in. Do not update
613 * counters if NULL. 613 * counters if NULL.
@@ -726,12 +726,12 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
726 } 726 }
727 727
728 /* 728 /*
729 * If this cpu is the one which updates jiffies, then give up 729 * If this CPU is the one which updates jiffies, then give up
730 * the assignment and let it be taken by the cpu which runs 730 * the assignment and let it be taken by the CPU which runs
731 * the tick timer next, which might be this cpu as well. If we 731 * the tick timer next, which might be this CPU as well. If we
732 * don't drop this here the jiffies might be stale and 732 * don't drop this here the jiffies might be stale and
733 * do_timer() never invoked. Keep track of the fact that it 733 * do_timer() never invoked. Keep track of the fact that it
734 * was the one which had the do_timer() duty last. If this cpu 734 * was the one which had the do_timer() duty last. If this CPU
735 * is the one which had the do_timer() duty last, we limit the 735 * is the one which had the do_timer() duty last, we limit the
736 * sleep time to the timekeeping max_deferment value. 736 * sleep time to the timekeeping max_deferment value.
737 * Otherwise we can sleep as long as we want. 737 * Otherwise we can sleep as long as we want.
@@ -841,9 +841,9 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
841static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) 841static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
842{ 842{
843 /* 843 /*
844 * If this cpu is offline and it is the one which updates 844 * If this CPU is offline and it is the one which updates
845 * jiffies, then give up the assignment and let it be taken by 845 * jiffies, then give up the assignment and let it be taken by
846 * the cpu which runs the tick timer next. If we don't drop 846 * the CPU which runs the tick timer next. If we don't drop
847 * this here the jiffies might be stale and do_timer() never 847 * this here the jiffies might be stale and do_timer() never
848 * invoked. 848 * invoked.
849 */ 849 */
@@ -933,11 +933,11 @@ void tick_nohz_idle_enter(void)
933 WARN_ON_ONCE(irqs_disabled()); 933 WARN_ON_ONCE(irqs_disabled());
934 934
935 /* 935 /*
936 * Update the idle state in the scheduler domain hierarchy 936 * Update the idle state in the scheduler domain hierarchy
937 * when tick_nohz_stop_sched_tick() is called from the idle loop. 937 * when tick_nohz_stop_sched_tick() is called from the idle loop.
938 * State will be updated to busy during the first busy tick after 938 * State will be updated to busy during the first busy tick after
939 * exiting idle. 939 * exiting idle.
940 */ 940 */
941 set_cpu_sd_state_idle(); 941 set_cpu_sd_state_idle();
942 942
943 local_irq_disable(); 943 local_irq_disable();
@@ -1211,7 +1211,7 @@ void tick_setup_sched_timer(void)
1211 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 1211 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1212 ts->sched_timer.function = tick_sched_timer; 1212 ts->sched_timer.function = tick_sched_timer;
1213 1213
1214 /* Get the next period (per cpu) */ 1214 /* Get the next period (per-CPU) */
1215 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); 1215 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1216 1216
1217 /* Offset the tick to avert jiffies_lock contention. */ 1217 /* Offset the tick to avert jiffies_lock contention. */