Diffstat (limited to 'kernel/time')
 -rw-r--r--  kernel/time/clockevents.c    |  13
 -rw-r--r--  kernel/time/clocksource.c    |  33
 -rw-r--r--  kernel/time/tick-broadcast.c |   7
 -rw-r--r--  kernel/time/tick-internal.h  |   2
 -rw-r--r--  kernel/time/tick-sched.c     |  89
 -rw-r--r--  kernel/time/timekeeping.c    |  30
 -rw-r--r--  kernel/time/timer_stats.c    |   2
 7 files changed, 109 insertions(+), 67 deletions(-)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 5fb139fef9fa..3e59fce6dd43 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -41,6 +41,11 @@ unsigned long clockevent_delta2ns(unsigned long latch,
 {
 	u64 clc = ((u64) latch << evt->shift);
 
+	if (unlikely(!evt->mult)) {
+		evt->mult = 1;
+		WARN_ON(1);
+	}
+
 	do_div(clc, evt->mult);
 	if (clc < 1000)
 		clc = 1000;
@@ -151,6 +156,14 @@ static void clockevents_notify_released(void)
 void clockevents_register_device(struct clock_event_device *dev)
 {
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+	/*
+	 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
+	 * on it, so fix it up and emit a warning:
+	 */
+	if (unlikely(!dev->mult)) {
+		dev->mult = 1;
+		WARN_ON(1);
+	}
 
 	spin_lock(&clockevents_lock);
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c8a9d13874df..6e9259a5d501 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -142,8 +142,13 @@ static void clocksource_watchdog(unsigned long data)
 	}
 
 	if (!list_empty(&watchdog_list)) {
-		__mod_timer(&watchdog_timer,
-			    watchdog_timer.expires + WATCHDOG_INTERVAL);
+		/* Cycle through CPUs to check if the CPUs stay synchronized to
+		 * each other. */
+		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+		if (next_cpu >= NR_CPUS)
+			next_cpu = first_cpu(cpu_online_map);
+		watchdog_timer.expires += WATCHDOG_INTERVAL;
+		add_timer_on(&watchdog_timer, next_cpu);
 	}
 	spin_unlock(&watchdog_lock);
 }
@@ -165,7 +170,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 		if (!started && watchdog) {
 			watchdog_last = watchdog->read();
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
-			add_timer(&watchdog_timer);
+			add_timer_on(&watchdog_timer, first_cpu(cpu_online_map));
 		}
 	} else {
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -175,7 +180,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 			if (watchdog)
 				del_timer(&watchdog_timer);
 			watchdog = cs;
-			init_timer(&watchdog_timer);
+			init_timer_deferrable(&watchdog_timer);
 			watchdog_timer.function = clocksource_watchdog;
 
 			/* Reset watchdog cycles */
@@ -186,7 +191,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 				watchdog_last = watchdog->read();
 				watchdog_timer.expires =
 					jiffies + WATCHDOG_INTERVAL;
-				add_timer(&watchdog_timer);
+				add_timer_on(&watchdog_timer,
+					     first_cpu(cpu_online_map));
 			}
 		}
 	}
@@ -331,6 +337,21 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 	spin_unlock_irqrestore(&clocksource_lock, flags);
 }
 
+/**
+ * clocksource_unregister - remove a registered clocksource
+ */
+void clocksource_unregister(struct clocksource *cs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&clocksource_lock, flags);
+	list_del(&cs->list);
+	if (clocksource_override == cs)
+		clocksource_override = NULL;
+	next_clocksource = select_clocksource();
+	spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+
 #ifdef CONFIG_SYSFS
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
@@ -441,7 +462,7 @@ static SYSDEV_ATTR(available_clocksource, 0600,
 		   sysfs_show_available_clocksources, NULL);
 
 static struct sysdev_class clocksource_sysclass = {
-	set_kset_name("clocksource"),
+	.name = "clocksource",
 };
 
 static struct sys_device device_clocksource = {
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 5b86698faa0b..e1bd50cbbf5d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -126,9 +126,9 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 /*
  * Broadcast the event to the cpus, which are set in the mask
  */
-int tick_do_broadcast(cpumask_t mask)
+static void tick_do_broadcast(cpumask_t mask)
 {
-	int ret = 0, cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 	struct tick_device *td;
 
 	/*
@@ -138,7 +138,6 @@ int tick_do_broadcast(cpumask_t mask)
 		cpu_clear(cpu, mask);
 		td = &per_cpu(tick_cpu_device, cpu);
 		td->evtdev->event_handler(td->evtdev);
-		ret = 1;
 	}
 
 	if (!cpus_empty(mask)) {
@@ -151,9 +150,7 @@ int tick_do_broadcast(cpumask_t mask)
 		cpu = first_cpu(mask);
 		td = &per_cpu(tick_cpu_device, cpu);
 		td->evtdev->broadcast(mask);
-		ret = 1;
 	}
-	return ret;
 }
 
 /*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index bb13f2724905..f13f2b7f4fd4 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -70,8 +70,6 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Broadcasting support
  */
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-extern int tick_do_broadcast(cpumask_t mask);
-
 extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
 extern int tick_check_broadcast_device(struct clock_event_device *dev);
 extern int tick_is_broadcast_device(struct clock_event_device *dev);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb89fa8db110..63f24b550695 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -9,7 +9,7 @@
  *
  * Started by: Thomas Gleixner and Ingo Molnar
  *
- * For licencing details see kernel-base/COPYING
+ * Distribute under GPLv2.
  */
 #include <linux/cpu.h>
 #include <linux/err.h>
@@ -143,6 +143,44 @@ void tick_nohz_update_jiffies(void)
 	local_irq_restore(flags);
 }
 
+void tick_nohz_stop_idle(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	if (ts->idle_active) {
+		ktime_t now, delta;
+		now = ktime_get();
+		delta = ktime_sub(now, ts->idle_entrytime);
+		ts->idle_lastupdate = now;
+		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+		ts->idle_active = 0;
+	}
+}
+
+static ktime_t tick_nohz_start_idle(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t now, delta;
+
+	now = ktime_get();
+	if (ts->idle_active) {
+		delta = ktime_sub(now, ts->idle_entrytime);
+		ts->idle_lastupdate = now;
+		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+	}
+	ts->idle_entrytime = now;
+	ts->idle_active = 1;
+	return now;
+}
+
+u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	return ktime_to_us(ts->idle_sleeptime);
+}
+
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
  *
@@ -153,14 +191,16 @@ void tick_nohz_update_jiffies(void)
 void tick_nohz_stop_sched_tick(void)
 {
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
+	unsigned long rt_jiffies;
 	struct tick_sched *ts;
-	ktime_t last_update, expires, now, delta;
+	ktime_t last_update, expires, now;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	int cpu;
 
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
+	now = tick_nohz_start_idle(cpu);
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
 	/*
@@ -192,19 +232,7 @@ void tick_nohz_stop_sched_tick(void)
 		}
 	}
 
-	now = ktime_get();
-	/*
-	 * When called from irq_exit we need to account the idle sleep time
-	 * correctly.
-	 */
-	if (ts->tick_stopped) {
-		delta = ktime_sub(now, ts->idle_entrytime);
-		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-	}
-
-	ts->idle_entrytime = now;
 	ts->idle_calls++;
-
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&xtime_lock);
@@ -216,6 +244,10 @@ void tick_nohz_stop_sched_tick(void)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
+	rt_jiffies = rt_needs_cpu(cpu);
+	if (rt_jiffies && rt_jiffies < delta_jiffies)
+		delta_jiffies = rt_jiffies;
+
 	if (rcu_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
@@ -291,7 +323,7 @@ void tick_nohz_stop_sched_tick(void)
 			/* Check, if the timer was already in the past */
 			if (hrtimer_active(&ts->sched_timer))
 				goto out;
-		} else if(!tick_program_event(expires, 0))
+		} else if (!tick_program_event(expires, 0))
 				goto out;
 		/*
 		 * We are past the event already. So we crossed a
@@ -332,23 +364,22 @@ void tick_nohz_restart_sched_tick(void)
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	unsigned long ticks;
-	ktime_t now, delta;
+	ktime_t now;
 
-	if (!ts->tick_stopped)
+	local_irq_disable();
+	tick_nohz_stop_idle(cpu);
+
+	if (!ts->tick_stopped) {
+		local_irq_enable();
 		return;
+	}
 
 	/* Update jiffies first */
-	now = ktime_get();
-
-	local_irq_disable();
 	select_nohz_load_balancer(0);
+	now = ktime_get();
 	tick_do_update_jiffies64(now);
 	cpu_clear(cpu, nohz_cpu_mask);
 
-	/* Account the idle time */
-	delta = ktime_sub(now, ts->idle_entrytime);
-	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
 	 * time we slept as update_process_times does only a 1 tick
@@ -502,14 +533,13 @@ static inline void tick_nohz_switch_to_nohz(void) { }
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
 /*
- * We rearm the timer until we get disabled by the idle code
+ * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled and timer->base->cpu_base->lock held.
  */
 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
 	struct tick_sched *ts =
 		container_of(timer, struct tick_sched, sched_timer);
-	struct hrtimer_cpu_base *base = timer->base->cpu_base;
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 	int cpu = smp_processor_id();
@@ -547,15 +577,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 			touch_softlockup_watchdog();
 			ts->idle_jiffies++;
 		}
-		/*
-		 * update_process_times() might take tasklist_lock, hence
-		 * drop the base lock. sched-tick hrtimers are per-CPU and
-		 * never accessible by userspace APIs, so this is safe to do.
-		 */
-		spin_unlock(&base->lock);
 		update_process_times(user_mode(regs));
 		profile_tick(CPU_PROFILING);
-		spin_lock(&base->lock);
 	}
 
 	/* Do not restart, when we are in the idle loop */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e5e466b27598..092a2366b5a9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -82,13 +82,12 @@ static inline s64 __get_nsec_offset(void)
 }
 
 /**
- * __get_realtime_clock_ts - Returns the time of day in a timespec
+ * getnstimeofday - Returns the time of day in a timespec
  * @ts: pointer to the timespec to be set
  *
- * Returns the time of day in a timespec. Used by
- * do_gettimeofday() and get_realtime_clock_ts().
+ * Returns the time of day in a timespec.
  */
-static inline void __get_realtime_clock_ts(struct timespec *ts)
+void getnstimeofday(struct timespec *ts)
 {
 	unsigned long seq;
 	s64 nsecs;
@@ -104,30 +103,19 @@ static inline void __get_realtime_clock_ts(struct timespec *ts)
 	timespec_add_ns(ts, nsecs);
 }
 
-/**
- * getnstimeofday - Returns the time of day in a timespec
- * @ts: pointer to the timespec to be set
- *
- * Returns the time of day in a timespec.
- */
-void getnstimeofday(struct timespec *ts)
-{
-	__get_realtime_clock_ts(ts);
-}
-
 EXPORT_SYMBOL(getnstimeofday);
 
 /**
  * do_gettimeofday - Returns the time of day in a timeval
  * @tv: pointer to the timeval to be set
  *
- * NOTE: Users should be converted to using get_realtime_clock_ts()
+ * NOTE: Users should be converted to using getnstimeofday()
  */
 void do_gettimeofday(struct timeval *tv)
 {
 	struct timespec now;
 
-	__get_realtime_clock_ts(&now);
+	getnstimeofday(&now);
 	tv->tv_sec = now.tv_sec;
 	tv->tv_usec = now.tv_nsec/1000;
 }
@@ -198,7 +186,8 @@ static void change_clocksource(void)
 
 	clock->error = 0;
 	clock->xtime_nsec = 0;
-	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
+	clocksource_calculate_interval(clock,
+		(unsigned long)(current_tick_length()>>TICK_LENGTH_SHIFT));
 
 	tick_clock_notify();
 
@@ -255,7 +244,8 @@ void __init timekeeping_init(void)
 	ntp_clear();
 
 	clock = clocksource_get_next();
-	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
+	clocksource_calculate_interval(clock,
+		(unsigned long)(current_tick_length()>>TICK_LENGTH_SHIFT));
 	clock->cycle_last = clocksource_read(clock);
 
 	xtime.tv_sec = sec;
@@ -335,9 +325,9 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 
 /* sysfs resume/suspend bits for timekeeping */
 static struct sysdev_class timekeeping_sysclass = {
+	.name		= "timekeeping",
 	.resume		= timekeeping_resume,
 	.suspend	= timekeeping_suspend,
-	set_kset_name("timekeeping"),
 };
 
 static struct sys_device device_timer = {
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index c36bb7ed0301..417da8c5bc72 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -26,7 +26,7 @@
  * the pid and cmdline from the owner process if applicable.
  *
  * Start/stop data collection:
- * # echo 1[0] >/proc/timer_stats
+ * # echo [1|0] >/proc/timer_stats
  *
  * Display the information collected so far:
  * # cat /proc/timer_stats