Diffstat (limited to 'kernel/time'):

 kernel/time/Kconfig          |   1
 kernel/time/clocksource.c    |   3
 kernel/time/jiffies.c        |   1
 kernel/time/ntp.c            |  96
 kernel/time/tick-broadcast.c |  27
 kernel/time/tick-common.c    |  10
 kernel/time/tick-internal.h  |   9
 kernel/time/tick-sched.c     | 140
 kernel/time/timekeeping.c    | 122
 kernel/time/timer_list.c     |  28
 10 files changed, 290 insertions(+), 147 deletions(-)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8d53106a0a92..95ed42951e0a 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -3,7 +3,6 @@
 #
 config TICK_ONESHOT
 	bool
-	default n
 
 config NO_HZ
 	bool "Tickless System (Dynamic Ticks)"
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 093d4acf993b..9ed2eec97526 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c)
 	unsigned long flags;
 	int ret;
 
+	/* save mult_orig on registration */
+	c->mult_orig = c->mult;
+
 	spin_lock_irqsave(&clocksource_lock, flags);
 	ret = clocksource_enqueue(c);
 	if (!ret)
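
Note: this hunk caches the boot-time multiplier. NTP continually skews ->mult to steer the clock, so a second, untouched field is needed for conversions that must stay free of that steering (the raw monotonic clock added in the timekeeping.c hunks below). A minimal sketch of the two conversions, assuming only the clocksource fields used in this diff:

/* Illustrative only: NTP-adjusted vs. raw conversion of a cycle delta. */
static inline s64 delta_to_ns_adjusted(struct clocksource *cs, cycle_t delta)
{
	return ((s64)delta * cs->mult) >> cs->shift;		/* steered by NTP */
}

static inline s64 delta_to_ns_raw(struct clocksource *cs, cycle_t delta)
{
	return ((s64)delta * cs->mult_orig) >> cs->shift;	/* never adjusted */
}
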
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4c256fdb8875..1ca99557e929 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = {
 	.read		= jiffies_read,
 	.mask		= 0xffffffff, /*32bits*/
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+	.mult_orig	= NSEC_PER_JIFFY << JIFFIES_SHIFT,
 	.shift		= JIFFIES_SHIFT,
 };
 
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1ad46f3df6e7..8ff15e5d486b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,13 +10,13 @@
 
 #include <linux/mm.h>
 #include <linux/time.h>
-#include <linux/timer.h>
 #include <linux/timex.h>
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
 #include <linux/math64.h>
 #include <linux/clocksource.h>
+#include <linux/workqueue.h>
 #include <asm/timex.h>
 
 /*
@@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		time_state = TIME_OOP;
 		printk(KERN_NOTICE "Clock: "
 		       "inserting leap second 23:59:60 UTC\n");
-		leap_timer.expires = ktime_add_ns(leap_timer.expires,
-						  NSEC_PER_SEC);
+		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
 		res = HRTIMER_RESTART;
 		break;
 	case TIME_DEL:
@@ -218,11 +217,11 @@ void second_overflow(void)
 /* Disable the cmos update - used by virtualization and embedded */
 int no_sync_cmos_clock __read_mostly;
 
-static void sync_cmos_clock(unsigned long dummy);
+static void sync_cmos_clock(struct work_struct *work);
 
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 
-static void sync_cmos_clock(unsigned long dummy)
+static void sync_cmos_clock(struct work_struct *work)
 {
 	struct timespec now, next;
 	int fail = 1;
@@ -258,13 +257,13 @@ static void sync_cmos_clock(unsigned long dummy)
 		next.tv_sec++;
 		next.tv_nsec -= NSEC_PER_SEC;
 	}
-	mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
 static void notify_cmos_timer(void)
 {
 	if (!no_sync_cmos_clock)
-		mod_timer(&sync_cmos_timer, jiffies + 1);
+		schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
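
Note: converting sync_cmos_timer into delayed work moves the RTC write out of timer (softirq) context into process context, presumably because update_persistent_clock() may sleep in some RTC drivers. The general timer-to-workqueue pattern, as a self-contained sketch with hypothetical names:

#include <linux/workqueue.h>

static void periodic_sync(struct work_struct *work);
static DECLARE_DELAYED_WORK(periodic_sync_work, periodic_sync);

static void periodic_sync(struct work_struct *work)
{
	/* Runs in process context and may sleep. */
	schedule_delayed_work(&periodic_sync_work, HZ);	/* re-arm ~1s later */
}

/* Kick it off immediately: schedule_delayed_work(&periodic_sync_work, 0); */
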
@@ -277,38 +276,50 @@ static inline void notify_cmos_timer(void) { }
 int do_adjtimex(struct timex *txc)
 {
 	struct timespec ts;
-	long save_adjust, sec;
 	int result;
 
-	/* In order to modify anything, you gotta be super-user! */
-	if (txc->modes && !capable(CAP_SYS_TIME))
-		return -EPERM;
-
-	/* Now we validate the data before disabling interrupts */
-
-	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
+	/* Validate the data before disabling interrupts */
+	if (txc->modes & ADJ_ADJTIME) {
 		/* singleshot must not be used with any other mode bits */
-		if (txc->modes & ~ADJ_OFFSET_SS_READ)
+		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
 			return -EINVAL;
+		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+		    !capable(CAP_SYS_TIME))
+			return -EPERM;
+	} else {
+		/* In order to modify anything, you gotta be super-user! */
+		if (txc->modes && !capable(CAP_SYS_TIME))
+			return -EPERM;
+
+		/* if the quartz is off by more than 10% something is VERY wrong! */
+		if (txc->modes & ADJ_TICK &&
+		    (txc->tick < 900000/USER_HZ ||
+		     txc->tick > 1100000/USER_HZ))
+			return -EINVAL;
+
+		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+			hrtimer_cancel(&leap_timer);
 	}
 
-	/* if the quartz is off by more than 10% something is VERY wrong ! */
-	if (txc->modes & ADJ_TICK)
-		if (txc->tick < 900000/USER_HZ ||
-		    txc->tick > 1100000/USER_HZ)
-			return -EINVAL;
-
-	if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
-		hrtimer_cancel(&leap_timer);
 	getnstimeofday(&ts);
 
 	write_seqlock_irq(&xtime_lock);
 
-	/* Save for later - semantics of adjtime is to return old value */
-	save_adjust = time_adjust;
-
 	/* If there are input parameters, then process them */
+	if (txc->modes & ADJ_ADJTIME) {
+		long save_adjust = time_adjust;
+
+		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+			/* adjtime() is independent from ntp_adjtime() */
+			time_adjust = txc->offset;
+			ntp_update_frequency();
+		}
+		txc->offset = save_adjust;
+		goto adj_done;
+	}
 	if (txc->modes) {
+		long sec;
+
 		if (txc->modes & ADJ_STATUS) {
 			if ((time_status & STA_PLL) &&
 			    !(txc->status & STA_PLL)) {
@@ -375,13 +386,8 @@ int do_adjtimex(struct timex *txc)
 	if (txc->modes & ADJ_TAI && txc->constant > 0)
 		time_tai = txc->constant;
 
-	if (txc->modes & ADJ_OFFSET) {
-		if (txc->modes == ADJ_OFFSET_SINGLESHOT)
-			/* adjtime() is independent from ntp_adjtime() */
-			time_adjust = txc->offset;
-		else
-			ntp_update_offset(txc->offset);
-	}
+	if (txc->modes & ADJ_OFFSET)
+		ntp_update_offset(txc->offset);
 	if (txc->modes & ADJ_TICK)
 		tick_usec = txc->tick;
 
@@ -389,22 +395,18 @@ int do_adjtimex(struct timex *txc)
 		ntp_update_frequency();
 	}
 
+	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+				  NTP_SCALE_SHIFT);
+	if (!(time_status & STA_NANO))
+		txc->offset /= NSEC_PER_USEC;
+
+adj_done:
 	result = time_state;	/* mostly `TIME_OK' */
 	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;
 
-	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
-	    (txc->modes == ADJ_OFFSET_SS_READ))
-		txc->offset = save_adjust;
-	else {
-		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
-					  NTP_SCALE_SHIFT);
-		if (!(time_status & STA_NANO))
-			txc->offset /= NSEC_PER_USEC;
-	}
-	txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
-				(s64)PPM_SCALE_INV,
-				NTP_SCALE_SHIFT);
+	txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
+				(s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
 	txc->maxerror = time_maxerror;
 	txc->esterror = time_esterror;
 	txc->status = time_status;
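
Note: this rework separates the BSD adjtime() emulation (the ADJ_OFFSET_SINGLESHOT path, plus the read-only ADJ_OFFSET_SS_READ variant, matched together via the ADJ_ADJTIME bit) from the ntp_adjtime() paths, and a read-only query no longer needs CAP_SYS_TIME. An illustrative userspace check of the outstanding adjtime() slew, using the glibc adjtimex() wrapper:

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_OFFSET_SS_READ;	/* read remaining offset, change nothing */
	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}
	printf("remaining singleshot offset: %ld us\n", tx.offset);
	return 0;
}
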
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f1f3eee28113..f98a1b7b16e9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -235,7 +235,8 @@ static void tick_do_broadcast_on_off(void *why)
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
 		if (!cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_set(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (tick_broadcast_device.mode ==
+			    TICKDEV_MODE_PERIODIC)
 				clockevents_shutdown(dev);
 		}
 		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
@@ -245,7 +246,8 @@ static void tick_do_broadcast_on_off(void *why)
 		if (!tick_broadcast_force &&
 		    cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_clear(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (tick_broadcast_device.mode ==
+			    TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
 		}
 		break;
@@ -382,6 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 }
 
 /*
+ * Called from irq_enter() when idle was interrupted to reenable the
+ * per cpu device.
+ */
+void tick_check_oneshot_broadcast(int cpu)
+{
+	if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+
+		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+	}
+}
+
+/*
  * Handle oneshot mode broadcasting
  */
 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
@@ -575,4 +590,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
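
Note: tick_check_oneshot_broadcast() restores the per-cpu event device when an interrupt arrives while the CPU was idle with the broadcast device acting on its behalf. It is wired up through the new tick_check_idle() hook added in tick-sched.c below; the companion caller in kernel/softirq.c is not part of this diff, but looks roughly like:

/* Rough sketch of the irq_enter() side (not in this diff). */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	if (idle_cpu(cpu) && !in_interrupt())
		tick_check_idle(cpu);

	__irq_enter();
}
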
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 019315ebf9de..df12434b43ca 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 	if (!tick_device_is_functional(dev))
 		return;
 
-	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+	    !tick_broadcast_oneshot_active()) {
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
 	} else {
 		unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
 	 * If no cpu took the do_timer update, assign it to
 	 * this cpu:
 	 */
-	if (tick_do_timer_cpu == -1) {
+	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
 		tick_do_timer_cpu = cpu;
 		tick_next_period = ktime_get();
 		tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
 	if (*cpup == tick_do_timer_cpu) {
 		int cpu = first_cpu(cpu_online_map);
 
-		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+			TICK_DO_TIMER_NONE;
 	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 6e9db9734aa6..b1c05bf75ee0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+
+#define TICK_DO_TIMER_NONE	-1
+#define TICK_DO_TIMER_BOOT	-2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -31,6 +35,8 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
+extern void tick_check_oneshot_broadcast(int cpu);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -39,6 +45,8 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline void tick_check_oneshot_broadcast(int cpu) { }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -68,6 +76,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
 	return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 #endif /* !TICK_ONESHOT */
 
 /*
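
Note: splitting the old -1 sentinel in two lets the code distinguish "not yet assigned at boot" (only the first CPU to set up its tick device claims the jiffies-update duty) from "deliberately dropped" (nohz idle or CPU hotplug, where any tick handler may take it over). The two checks, mirroring the tick-common.c and tick-sched.c hunks:

/* Boot: claimed once, by the first CPU that sets up its tick device. */
if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT)
	tick_do_timer_cpu = cpu;

/* Runtime: the duty was dropped; any CPU's tick handler picks it up. */
if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
	tick_do_timer_cpu = cpu;
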
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a87b0468568b..5bbb1044f847 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -75,6 +76,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 							   incr * ticks);
 		}
 		do_timer(++ticks);
+
+		/* Keep the tick_next_period variable up to date */
+		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&xtime_lock);
 }
@@ -151,7 +155,7 @@ void tick_nohz_update_jiffies(void)
 	touch_softlockup_watchdog();
 }
 
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
@@ -187,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	if (!tick_nohz_enabled)
+		return -1;
+
+	if (ts->idle_active)
+		*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	else
+		*last_update_time = ktime_to_us(ktime_get());
+
 	return ktime_to_us(ts->idle_sleeptime);
 }
+EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
@@ -221,7 +233,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	if (unlikely(!cpu_online(cpu))) {
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -258,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
-	if (rcu_needs_cpu(cpu))
+	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
 	 * Do not stop the tick, if we are only one off
@@ -288,7 +300,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			goto out;
 		}
 
-		ts->idle_tick = ts->sched_timer.expires;
+		ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
 		ts->tick_stopped = 1;
 		ts->idle_jiffies = last_jiffies;
 		rcu_enter_nohz();
@@ -303,7 +315,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 		 * invoked.
 		 */
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
 		ts->idle_sleeps++;
 
@@ -365,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+	hrtimer_cancel(&ts->sched_timer);
+	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
+
+	while (1) {
+		/* Forward the time to expire in the future */
+		hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+			hrtimer_start_expires(&ts->sched_timer,
+					      HRTIMER_MODE_ABS);
+			/* Check, if the timer was already in the past */
+			if (hrtimer_active(&ts->sched_timer))
+				break;
+		} else {
+			if (!tick_program_event(
+				hrtimer_get_expires(&ts->sched_timer), 0))
+				break;
+		}
+		/* Update jiffies and reread time */
+		tick_do_update_jiffies64(now);
+		now = ktime_get();
+	}
+}
+
 /**
  * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
  *
@@ -418,35 +456,16 @@ void tick_nohz_restart_sched_tick(void)
 	 */
 	ts->tick_stopped  = 0;
 	ts->idle_exittime = now;
-	hrtimer_cancel(&ts->sched_timer);
-	ts->sched_timer.expires = ts->idle_tick;
 
-	while (1) {
-		/* Forward the time to expire in the future */
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
+	tick_nohz_restart(ts, now);
 
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer,
-				      ts->sched_timer.expires,
-				      HRTIMER_MODE_ABS);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				break;
-		} else {
-			if (!tick_program_event(ts->sched_timer.expires, 0))
-				break;
-		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
-		now = ktime_get();
-	}
 	local_irq_enable();
 }
 
 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 {
 	hrtimer_forward(&ts->sched_timer, now, tick_period);
-	return tick_program_event(ts->sched_timer.expires, 0);
+	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
 }
 
 /*
@@ -468,7 +487,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 
 	/* Check, if the jiffies need an update */
@@ -491,10 +510,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	update_process_times(user_mode(regs));
 	profile_tick(CPU_PROFILING);
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return;
-
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
 		tick_do_update_jiffies64(now);
@@ -529,7 +544,7 @@ static void tick_nohz_switch_to_nohz(void)
 	next = tick_init_jiffy_update();
 
 	for (;;) {
-		ts->sched_timer.expires = next;
+		hrtimer_set_expires(&ts->sched_timer, next);
 		if (!tick_program_event(next, 0))
 			break;
 		next = ktime_add(next, tick_period);
@@ -540,6 +555,37 @@ static void tick_nohz_switch_to_nohz(void)
 		       smp_processor_id());
 }
 
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note, this is different to tick_nohz_restart. We just kick the
+ * timer and do not touch the other magic bits which need to be done
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t delta, now;
+
+	if (!ts->tick_stopped)
+		return;
+
+	/*
+	 * Do not touch the tick device, when the next expiry is either
+	 * already reached or less/equal than the tick period.
+	 */
+	now = ktime_get();
+	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
+	if (delta.tv64 <= tick_period.tv64)
+		return;
+
+	tick_nohz_restart(ts, now);
+}
+
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
@@ -547,6 +593,19 @@ static inline void tick_nohz_switch_to_nohz(void) { }
 #endif /* NO_HZ */
 
 /*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+	tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+	tick_nohz_stop_idle(cpu);
+	tick_nohz_update_jiffies();
+	tick_nohz_kick_tick(cpu);
+#endif
+}
+
+/*
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -570,7 +629,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 #endif
 
@@ -599,10 +658,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		profile_tick(CPU_PROFILING);
 	}
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return HRTIMER_NORESTART;
-
 	hrtimer_forward(timer, now, tick_period);
 
 	return HRTIMER_RESTART;
@@ -622,19 +677,18 @@ void tick_setup_sched_timer(void)
 	 */
 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ts->sched_timer.function = tick_sched_timer;
-	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
 	/* Get the next period (per cpu) */
-	ts->sched_timer.expires = tick_init_jiffy_update();
+	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 	offset = ktime_to_ns(tick_period) >> 1;
 	do_div(offset, num_possible_cpus());
 	offset *= smp_processor_id();
-	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
+	hrtimer_add_expires_ns(&ts->sched_timer, offset);
 
 	for (;;) {
 		hrtimer_forward(&ts->sched_timer, now, tick_period);
-		hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
-			      HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
 		/* Check, if the timer was already in the past */
 		if (hrtimer_active(&ts->sched_timer))
 			break;
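
Note: throughout this file, direct reads and writes of sched_timer.expires are replaced by the hrtimer_get_expires()/hrtimer_set_expires() accessor family. This prepares for range hrtimers, where a timer carries separate soft and hard expiry times (see the timer_list.c hunks below) and the field can no longer be touched directly. The idiom, as a sketch:

hrtimer_set_expires(&t, next);			/* was: t.expires = next; */
hrtimer_add_expires_ns(&t, offset);		/* was: t.expires = ktime_add_ns(t.expires, offset); */
expires = hrtimer_get_expires(&t);		/* was: expires = t.expires; */
hrtimer_start_expires(&t, HRTIMER_MODE_ABS);	/* was: hrtimer_start(&t, t.expires, HRTIMER_MODE_ABS); */
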
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f961c9..e7acfb482a68 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -58,27 +58,26 @@ struct clocksource *clock;
 
 #ifdef CONFIG_GENERIC_TIME
 /**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
  *
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
  */
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
 {
 	cycle_t cycle_now, cycle_delta;
-	s64 ns_offset;
+	s64 nsec;
 
-	/* read clocksource: */
 	cycle_now = clocksource_read(clock);
-
-	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	clock->cycle_last = cycle_now;
 
-	/* convert to nanoseconds: */
-	ns_offset = cyc2ns(clock, cycle_delta);
+	nsec = cyc2ns(clock, cycle_delta);
+	timespec_add_ns(&xtime, nsec);
 
-	return ns_offset;
+	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+	clock->raw_time.tv_nsec += nsec;
 }
 
 /**
@@ -89,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
  */
 void getnstimeofday(struct timespec *ts)
 {
+	cycle_t cycle_now, cycle_delta;
 	unsigned long seq;
 	s64 nsecs;
 
@@ -96,7 +96,15 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&xtime_lock);
 
 		*ts = xtime;
-		nsecs = __get_nsec_offset();
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = cyc2ns(clock, cycle_delta);
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -129,22 +137,22 @@ EXPORT_SYMBOL(do_gettimeofday);
  */
 int do_settimeofday(struct timespec *tv)
 {
+	struct timespec ts_delta;
 	unsigned long flags;
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	nsec -= __get_nsec_offset();
+	clocksource_forward_now();
+
+	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
 
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+	xtime = *tv;
 
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 	update_xtime_cache(0);
 
 	clock->error = 0;
@@ -170,22 +178,19 @@ EXPORT_SYMBOL(do_settimeofday);
 static void change_clocksource(void)
 {
 	struct clocksource *new;
-	cycle_t now;
-	u64 nsec;
 
 	new = clocksource_get_next();
 
 	if (clock == new)
 		return;
 
-	new->cycle_last = 0;
-	now = clocksource_read(new);
-	nsec = __get_nsec_offset();
-	timespec_add_ns(&xtime, nsec);
+	clocksource_forward_now();
 
-	clock = new;
-	clock->cycle_last = now;
+	new->raw_time = clock->raw_time;
 
+	clock = new;
+	clock->cycle_last = 0;
+	clock->cycle_last = clocksource_read(new);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,11 +205,44 @@ static void change_clocksource(void)
 	 */
 }
 #else
+static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
 /**
+ * getrawmonotonic - Returns the raw monotonic time in a timespec
+ * @ts:		pointer to the timespec to be set
+ *
+ * Returns the raw monotonic time (completely un-modified by ntp)
+ */
+void getrawmonotonic(struct timespec *ts)
+{
+	unsigned long seq;
+	s64 nsecs;
+	cycle_t cycle_now, cycle_delta;
+
+	do {
+		seq = read_seqbegin(&xtime_lock);
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+
+		*ts = clock->raw_time;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts, nsecs);
+}
+EXPORT_SYMBOL(getrawmonotonic);
+
+
+/**
  * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
  */
 int timekeeping_valid_for_hres(void)
@@ -265,8 +303,6 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
-	/* Make sure that we have the correct xtime reference */
-	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	clock->cycle_last = 0;
@@ -319,8 +353,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 	timekeeping_suspend_time = read_persistent_clock();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
-	/* Get the current xtime offset */
-	timekeeping_suspend_nsecs = __get_nsec_offset();
+	clocksource_forward_now();
 	timekeeping_suspended = 1;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -454,23 +487,29 @@ void update_wall_time(void)
 #else
 	offset = clock->cycle_interval;
 #endif
-	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
 
 	/* normally this loop will run just once, however in the
 	 * case of lost or late ticks, it will accumulate correctly.
 	 */
 	while (offset >= clock->cycle_interval) {
 		/* accumulate one interval */
-		clock->xtime_nsec += clock->xtime_interval;
-		clock->cycle_last += clock->cycle_interval;
 		offset -= clock->cycle_interval;
+		clock->cycle_last += clock->cycle_interval;
 
+		clock->xtime_nsec += clock->xtime_interval;
 		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
 			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 			xtime.tv_sec++;
 			second_overflow();
 		}
 
+		clock->raw_time.tv_nsec += clock->raw_interval;
+		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
+			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
+			clock->raw_time.tv_sec++;
+		}
+
 		/* accumulate error between NTP and clock interval */
 		clock->error += tick_length;
 		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@ void update_wall_time(void)
 	/* correct the clock when NTP error is too big */
 	clocksource_adjust(offset);
 
-	/* store full nanoseconds into xtime */
-	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+	/* store full nanoseconds into xtime after rounding it up and
+	 * add the remainder to the error difference.
+	 */
+	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
 
 	update_xtime_cache(cyc2ns(clock, offset));
 
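
Note: getrawmonotonic() is the in-kernel reader for the new NTP-free clock accumulated in update_wall_time() above; the same clock was exposed to user space as CLOCK_MONOTONIC_RAW in the same kernel series. An illustrative userspace comparison of the two monotonic clocks (link with -lrt on older glibc):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec mono, raw;

	clock_gettime(CLOCK_MONOTONIC, &mono);		/* NTP-disciplined */
	clock_gettime(CLOCK_MONOTONIC_RAW, &raw);	/* pure hardware rate */
	printf("mono %ld.%09ld  raw %ld.%09ld\n",
	       (long)mono.tv_sec, mono.tv_nsec,
	       (long)raw.tv_sec, raw.tv_nsec);
	return 0;
}
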
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a40e20fd0001..a999b92a1277 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -47,13 +47,14 @@ static void print_name_offset(struct seq_file *m, void *sym)
 }
 
 static void
-print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
+print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+	    int idx, u64 now)
 {
 #ifdef CONFIG_TIMER_STATS
 	char tmp[TASK_COMM_LEN + 1];
 #endif
 	SEQ_printf(m, " #%d: ", idx);
-	print_name_offset(m, timer);
+	print_name_offset(m, taddr);
 	SEQ_printf(m, ", ");
 	print_name_offset(m, timer->function);
 	SEQ_printf(m, ", S:%02lx", timer->state);
@@ -65,9 +66,11 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
 	SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
 #endif
 	SEQ_printf(m, "\n");
-	SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n",
-		(unsigned long long)ktime_to_ns(timer->expires),
-		(long long)(ktime_to_ns(timer->expires) - now));
+	SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
+		(unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
+		(unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)),
+		(long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),
+		(long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));
 }
 
 static void
@@ -99,7 +102,7 @@ next_one:
 		tmp = *timer;
 		spin_unlock_irqrestore(&base->cpu_base->lock, flags);
 
-		print_timer(m, &tmp, i, now);
+		print_timer(m, timer, &tmp, i, now);
 		next++;
 		goto next_one;
 	}
@@ -109,6 +112,7 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
+	SEQ_printf(m, "  .base:       %p\n", base);
 	SEQ_printf(m, "  .index:      %d\n",
 			base->index);
 	SEQ_printf(m, "  .resolution: %Lu nsecs\n",
@@ -183,12 +187,16 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 static void
-print_tickdevice(struct seq_file *m, struct tick_device *td)
+print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 {
 	struct clock_event_device *dev = td->evtdev;
 
 	SEQ_printf(m, "\n");
 	SEQ_printf(m, "Tick Device: mode:     %d\n", td->mode);
+	if (cpu < 0)
+		SEQ_printf(m, "Broadcast device\n");
+	else
+		SEQ_printf(m, "Per CPU device: %d\n", cpu);
 
 	SEQ_printf(m, "Clock Event Device: ");
 	if (!dev) {
@@ -222,7 +230,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 	int cpu;
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-	print_tickdevice(m, tick_get_broadcast_device());
+	print_tickdevice(m, tick_get_broadcast_device(), -1);
 	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
 		   tick_get_broadcast_mask()->bits[0]);
 #ifdef CONFIG_TICK_ONESHOT
@@ -232,7 +240,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 	SEQ_printf(m, "\n");
 #endif
 	for_each_online_cpu(cpu)
-		print_tickdevice(m, tick_get_device(cpu));
+		print_tickdevice(m, tick_get_device(cpu), cpu);
 	SEQ_printf(m, "\n");
 }
 #else
@@ -244,7 +252,7 @@ static int timer_list_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Timer List Version: v0.3\n");
+	SEQ_printf(m, "Timer List Version: v0.4\n");
 	SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
 	SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
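
Note: with the v0.4 format, /proc/timer_list labels each tick device section and prints hrtimer expiry as a soft-hard range. An illustrative excerpt assembled only from the format strings in the hunks above (all values made up):

Timer List Version: v0.4
...
 # expires at 425269000000-425269000000 nsecs [in 348269 to 348269 nsecs]
...
Tick Device: mode:     1
Broadcast device
...
Tick Device: mode:     1
Per CPU device: 0
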