Diffstat (limited to 'kernel/time')

 -rw-r--r--  kernel/time/Kconfig           |   1
 -rw-r--r--  kernel/time/clocksource.c     |   3
 -rw-r--r--  kernel/time/jiffies.c         |   1
 -rw-r--r--  kernel/time/ntp.c             |  96
 -rw-r--r--  kernel/time/tick-broadcast.c  |  13
 -rw-r--r--  kernel/time/tick-internal.h   |   2
 -rw-r--r--  kernel/time/tick-sched.c      | 131
 -rw-r--r--  kernel/time/timekeeping.c     | 122
 -rw-r--r--  kernel/time/timer_list.c      |  28

9 files changed, 261 insertions, 136 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8d53106a0a9..95ed42951e0 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -3,7 +3,6 @@
 #
 config TICK_ONESHOT
 	bool
-	default n
 
 config NO_HZ
 	bool "Tickless System (Dynamic Ticks)"
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 093d4acf993..9ed2eec9752 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c)
 	unsigned long flags;
 	int ret;
 
+	/* save mult_orig on registration */
+	c->mult_orig = c->mult;
+
 	spin_lock_irqsave(&clocksource_lock, flags);
 	ret = clocksource_enqueue(c);
 	if (!ret)
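The two lines added above capture the clocksource multiplier before NTP ever steers it. This is groundwork for the raw monotonic clock introduced in timekeeping.c below: wall time is converted with the NTP-adjusted mult, raw time with the preserved mult_orig. A minimal sketch of the two conversions (the helper names are illustrative, not from this patch):

	/* Wall-clock nanoseconds: uses the NTP-steered multiplier. */
	static inline s64 cyc2ns_wall(struct clocksource *cs, cycle_t cycles)
	{
		return ((u64)cycles * cs->mult) >> cs->shift;
	}

	/* Raw nanoseconds: uses the multiplier saved at registration,
	 * so NTP frequency corrections never show up in raw time. */
	static inline s64 cyc2ns_raw(struct clocksource *cs, cycle_t cycles)
	{
		return ((u64)cycles * cs->mult_orig) >> cs->shift;
	}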
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4c256fdb887..1ca99557e92 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = {
 	.read		= jiffies_read,
 	.mask		= 0xffffffff, /*32bits*/
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+	.mult_orig	= NSEC_PER_JIFFY << JIFFIES_SHIFT,
 	.shift		= JIFFIES_SHIFT,
 };
 
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1ad46f3df6e..8ff15e5d486 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,13 +10,13 @@
 
 #include <linux/mm.h>
 #include <linux/time.h>
-#include <linux/timer.h>
 #include <linux/timex.h>
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
 #include <linux/math64.h>
 #include <linux/clocksource.h>
+#include <linux/workqueue.h>
 #include <asm/timex.h>
 
 /*
@@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		time_state = TIME_OOP;
 		printk(KERN_NOTICE "Clock: "
 		       "inserting leap second 23:59:60 UTC\n");
-		leap_timer.expires = ktime_add_ns(leap_timer.expires,
-						  NSEC_PER_SEC);
+		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
 		res = HRTIMER_RESTART;
 		break;
 	case TIME_DEL:
@@ -218,11 +217,11 @@ void second_overflow(void)
 /* Disable the cmos update - used by virtualization and embedded */
 int no_sync_cmos_clock __read_mostly;
 
-static void sync_cmos_clock(unsigned long dummy);
+static void sync_cmos_clock(struct work_struct *work);
 
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 
-static void sync_cmos_clock(unsigned long dummy)
+static void sync_cmos_clock(struct work_struct *work)
 {
 	struct timespec now, next;
 	int fail = 1;
@@ -258,13 +257,13 @@ static void sync_cmos_clock(unsigned long dummy)
 		next.tv_sec++;
 		next.tv_nsec -= NSEC_PER_SEC;
 	}
-	mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
 static void notify_cmos_timer(void)
 {
 	if (!no_sync_cmos_clock)
-		mod_timer(&sync_cmos_timer, jiffies + 1);
+		schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
@@ -277,38 +276,50 @@ static inline void notify_cmos_timer(void) { }
 int do_adjtimex(struct timex *txc)
 {
 	struct timespec ts;
-	long save_adjust, sec;
 	int result;
 
-	/* In order to modify anything, you gotta be super-user! */
-	if (txc->modes && !capable(CAP_SYS_TIME))
-		return -EPERM;
-
-	/* Now we validate the data before disabling interrupts */
-
-	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
+	/* Validate the data before disabling interrupts */
+	if (txc->modes & ADJ_ADJTIME) {
 		/* singleshot must not be used with any other mode bits */
-		if (txc->modes & ~ADJ_OFFSET_SS_READ)
+		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
 			return -EINVAL;
+		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+		    !capable(CAP_SYS_TIME))
+			return -EPERM;
+	} else {
+		/* In order to modify anything, you gotta be super-user! */
+		if (txc->modes && !capable(CAP_SYS_TIME))
+			return -EPERM;
+
+		/* if the quartz is off by more than 10% something is VERY wrong! */
+		if (txc->modes & ADJ_TICK &&
+		    (txc->tick < 900000/USER_HZ ||
+		     txc->tick > 1100000/USER_HZ))
+			return -EINVAL;
+
+		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+			hrtimer_cancel(&leap_timer);
 	}
 
-	/* if the quartz is off by more than 10% something is VERY wrong ! */
-	if (txc->modes & ADJ_TICK)
-		if (txc->tick < 900000/USER_HZ ||
-		    txc->tick > 1100000/USER_HZ)
-			return -EINVAL;
-
-	if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
-		hrtimer_cancel(&leap_timer);
 	getnstimeofday(&ts);
 
 	write_seqlock_irq(&xtime_lock);
 
-	/* Save for later - semantics of adjtime is to return old value */
-	save_adjust = time_adjust;
-
 	/* If there are input parameters, then process them */
+	if (txc->modes & ADJ_ADJTIME) {
+		long save_adjust = time_adjust;
+
+		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+			/* adjtime() is independent from ntp_adjtime() */
+			time_adjust = txc->offset;
+			ntp_update_frequency();
+		}
+		txc->offset = save_adjust;
+		goto adj_done;
+	}
 	if (txc->modes) {
+		long sec;
+
 		if (txc->modes & ADJ_STATUS) {
 			if ((time_status & STA_PLL) &&
 			    !(txc->status & STA_PLL)) {
@@ -375,13 +386,8 @@
 	if (txc->modes & ADJ_TAI && txc->constant > 0)
 		time_tai = txc->constant;
 
-	if (txc->modes & ADJ_OFFSET) {
-		if (txc->modes == ADJ_OFFSET_SINGLESHOT)
-			/* adjtime() is independent from ntp_adjtime() */
-			time_adjust = txc->offset;
-		else
-			ntp_update_offset(txc->offset);
-	}
+	if (txc->modes & ADJ_OFFSET)
+		ntp_update_offset(txc->offset);
 	if (txc->modes & ADJ_TICK)
 		tick_usec = txc->tick;
 
@@ -389,22 +395,18 @@
 			ntp_update_frequency();
 	}
 
+	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+				  NTP_SCALE_SHIFT);
+	if (!(time_status & STA_NANO))
+		txc->offset /= NSEC_PER_USEC;
+
+adj_done:
 	result = time_state;	/* mostly `TIME_OK' */
 	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;
 
-	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
-	    (txc->modes == ADJ_OFFSET_SS_READ))
-		txc->offset = save_adjust;
-	else {
-		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
-					  NTP_SCALE_SHIFT);
-		if (!(time_status & STA_NANO))
-			txc->offset /= NSEC_PER_USEC;
-	}
-	txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
-				(s64)PPM_SCALE_INV,
-				NTP_SCALE_SHIFT);
+	txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
+				(s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
 	txc->maxerror = time_maxerror;
 	txc->esterror = time_esterror;
 	txc->status = time_status;
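Besides moving the CMOS sync from a kernel timer to a delayed work item, the restructured do_adjtimex() splits the old adjtime() path (ADJ_ADJTIME) from the ntp_adjtime() path and adds ADJ_OFFSET_READONLY, so the remaining slew can be queried without CAP_SYS_TIME. A userspace sketch of the read-only query (assuming the matching ADJ_* definitions from linux/timex.h are visible through sys/timex.h):

	#include <stdio.h>
	#include <sys/timex.h>

	int main(void)
	{
		/* ADJ_OFFSET_SS_READ = ADJ_OFFSET_SINGLESHOT | ADJ_OFFSET_READONLY:
		 * report the remaining adjtime() offset without modifying it,
		 * which with this patch no longer requires CAP_SYS_TIME. */
		struct timex tx = { .modes = ADJ_OFFSET_SS_READ };

		if (adjtimex(&tx) == -1) {
			perror("adjtimex");
			return 1;
		}
		printf("remaining adjtime() slew: %ld us\n", tx.offset);
		return 0;
	}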
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index cb01cd8f919..f98a1b7b16e 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,6 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 }
 
 /*
+ * Called from irq_enter() when idle was interrupted to reenable the
+ * per cpu device.
+ */
+void tick_check_oneshot_broadcast(int cpu)
+{
+	if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+
+		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+	}
+}
+
+/*
  * Handle oneshot mode broadcasting
  */
 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 469248782c2..b1c05bf75ee 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
+extern void tick_check_oneshot_broadcast(int cpu);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -45,6 +46,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline void tick_check_oneshot_broadcast(int cpu) { }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb02324bdb8..342fc9ccab4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -154,7 +155,7 @@ void tick_nohz_update_jiffies(void)
 	touch_softlockup_watchdog();
 }
 
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
@@ -190,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	if (!tick_nohz_enabled)
+		return -1;
+
+	if (ts->idle_active)
+		*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	else
+		*last_update_time = ktime_to_us(ktime_get());
+
 	return ktime_to_us(ts->idle_sleeptime);
 }
+EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
@@ -261,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 		next_jiffies = get_next_timer_interrupt(last_jiffies);
 		delta_jiffies = next_jiffies - last_jiffies;
 
-		if (rcu_needs_cpu(cpu))
+		if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
 			delta_jiffies = 1;
 		/*
		 * Do not stop the tick, if we are only one off
@@ -291,7 +300,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			goto out;
 		}
 
-		ts->idle_tick = ts->sched_timer.expires;
+		ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
 		ts->tick_stopped = 1;
 		ts->idle_jiffies = last_jiffies;
 		rcu_enter_nohz();
@@ -368,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+	hrtimer_cancel(&ts->sched_timer);
+	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
+
+	while (1) {
+		/* Forward the time to expire in the future */
+		hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+			hrtimer_start_expires(&ts->sched_timer,
+					      HRTIMER_MODE_ABS);
+			/* Check, if the timer was already in the past */
+			if (hrtimer_active(&ts->sched_timer))
+				break;
+		} else {
+			if (!tick_program_event(
+				    hrtimer_get_expires(&ts->sched_timer), 0))
+				break;
+		}
+		/* Update jiffies and reread time */
+		tick_do_update_jiffies64(now);
+		now = ktime_get();
+	}
+}
+
 /**
  * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
  *
@@ -421,35 +456,16 @@ void tick_nohz_restart_sched_tick(void)
 	 */
 	ts->tick_stopped  = 0;
 	ts->idle_exittime = now;
-	hrtimer_cancel(&ts->sched_timer);
-	ts->sched_timer.expires = ts->idle_tick;
 
-	while (1) {
-		/* Forward the time to expire in the future */
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
+	tick_nohz_restart(ts, now);
 
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer,
-				      ts->sched_timer.expires,
-				      HRTIMER_MODE_ABS);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				break;
-		} else {
-			if (!tick_program_event(ts->sched_timer.expires, 0))
-				break;
-		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
-		now = ktime_get();
-	}
 	local_irq_enable();
 }
 
 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 {
 	hrtimer_forward(&ts->sched_timer, now, tick_period);
-	return tick_program_event(ts->sched_timer.expires, 0);
+	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
 }
 
 /*
@@ -494,10 +510,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	update_process_times(user_mode(regs));
 	profile_tick(CPU_PROFILING);
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return;
-
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
 		tick_do_update_jiffies64(now);
@@ -532,7 +544,7 @@ static void tick_nohz_switch_to_nohz(void)
 	next = tick_init_jiffy_update();
 
 	for (;;) {
-		ts->sched_timer.expires = next;
+		hrtimer_set_expires(&ts->sched_timer, next);
 		if (!tick_program_event(next, 0))
 			break;
 		next = ktime_add(next, tick_period);
@@ -543,6 +555,41 @@
 	       smp_processor_id());
 }
 
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note, this is different to tick_nohz_restart. We just kick the
+ * timer and do not touch the other magic bits which need to be done
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+#if 0
+	/* Switch back to 2.6.27 behaviour */
+
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t delta, now;
+
+	if (!ts->tick_stopped)
+		return;
+
+	/*
+	 * Do not touch the tick device, when the next expiry is either
+	 * already reached or less/equal than the tick period.
+	 */
+	now = ktime_get();
+	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
+	if (delta.tv64 <= tick_period.tv64)
+		return;
+
+	tick_nohz_restart(ts, now);
+#endif
+}
+
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
@@ -550,6 +597,19 @@ static inline void tick_nohz_switch_to_nohz(void) { }
 #endif /* NO_HZ */
 
 /*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+	tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+	tick_nohz_stop_idle(cpu);
+	tick_nohz_update_jiffies();
+	tick_nohz_kick_tick(cpu);
+#endif
+}
+
+/*
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -602,10 +662,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		profile_tick(CPU_PROFILING);
 	}
 
-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return HRTIMER_NORESTART;
-
 	hrtimer_forward(timer, now, tick_period);
 
 	return HRTIMER_RESTART;
@@ -628,16 +684,15 @@ void tick_setup_sched_timer(void)
 	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
 	/* Get the next period (per cpu) */
-	ts->sched_timer.expires = tick_init_jiffy_update();
+	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 	offset = ktime_to_ns(tick_period) >> 1;
 	do_div(offset, num_possible_cpus());
 	offset *= smp_processor_id();
-	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
+	hrtimer_add_expires_ns(&ts->sched_timer, offset);
 
 	for (;;) {
 		hrtimer_forward(&ts->sched_timer, now, tick_period);
-		hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
-			      HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
 		/* Check, if the timer was already in the past */
 		if (hrtimer_active(&ts->sched_timer))
 			break;
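The new tick_check_idle() above is the single entry point that irq_enter() is expected to call when an interrupt hits an idle CPU: it re-arms the broadcast device, folds the elapsed idle time into the statistics and keeps jiffies current during long softirqs. A sketch of the assumed call site (the real hook lives in kernel/softirq.c, outside this diff, so the exact ordering here is illustrative):

	void irq_enter(void)
	{
		int cpu = smp_processor_id();

		rcu_irq_enter();
		/* Let the tick code catch up before irq handlers run. */
		if (idle_cpu(cpu) && !in_interrupt())
			tick_check_idle(cpu);

		__irq_enter();
	}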
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f961c..e7acfb482a6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -58,27 +58,26 @@ struct clocksource *clock;
 
 #ifdef CONFIG_GENERIC_TIME
 /**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
  *
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
  */
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
 {
 	cycle_t cycle_now, cycle_delta;
-	s64 ns_offset;
+	s64 nsec;
 
-	/* read clocksource: */
 	cycle_now = clocksource_read(clock);
-
-	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	clock->cycle_last = cycle_now;
 
-	/* convert to nanoseconds: */
-	ns_offset = cyc2ns(clock, cycle_delta);
+	nsec = cyc2ns(clock, cycle_delta);
+	timespec_add_ns(&xtime, nsec);
 
-	return ns_offset;
+	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+	clock->raw_time.tv_nsec += nsec;
 }
 
 /**
@@ -89,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
  */
 void getnstimeofday(struct timespec *ts)
 {
+	cycle_t cycle_now, cycle_delta;
 	unsigned long seq;
 	s64 nsecs;
 
@@ -96,7 +96,15 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&xtime_lock);
 
 		*ts = xtime;
-		nsecs = __get_nsec_offset();
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = cyc2ns(clock, cycle_delta);
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -129,22 +137,22 @@ EXPORT_SYMBOL(do_gettimeofday);
 */
 int do_settimeofday(struct timespec *tv)
 {
+	struct timespec ts_delta;
 	unsigned long flags;
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	nsec -= __get_nsec_offset();
+	clocksource_forward_now();
+
+	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
 
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+	xtime = *tv;
 
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 	update_xtime_cache(0);
 
 	clock->error = 0;
@@ -170,22 +178,19 @@ EXPORT_SYMBOL(do_settimeofday);
 static void change_clocksource(void)
 {
 	struct clocksource *new;
-	cycle_t now;
-	u64 nsec;
 
 	new = clocksource_get_next();
 
 	if (clock == new)
 		return;
 
-	new->cycle_last = 0;
-	now = clocksource_read(new);
-	nsec = __get_nsec_offset();
-	timespec_add_ns(&xtime, nsec);
+	clocksource_forward_now();
 
-	clock = new;
-	clock->cycle_last = now;
+	new->raw_time = clock->raw_time;
 
+	clock = new;
+	clock->cycle_last = 0;
+	clock->cycle_last = clocksource_read(new);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,11 +205,44 @@
 	 */
 }
 #else
+static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
 /**
+ * getrawmonotonic - Returns the raw monotonic time in a timespec
+ * @ts:		pointer to the timespec to be set
+ *
+ * Returns the raw monotonic time (completely un-modified by ntp)
+ */
+void getrawmonotonic(struct timespec *ts)
+{
+	unsigned long seq;
+	s64 nsecs;
+	cycle_t cycle_now, cycle_delta;
+
+	do {
+		seq = read_seqbegin(&xtime_lock);
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+
+		*ts = clock->raw_time;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts, nsecs);
+}
+EXPORT_SYMBOL(getrawmonotonic);
+
+
+/**
  * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
  */
 int timekeeping_valid_for_hres(void)
@@ -265,8 +303,6 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
-	/* Make sure that we have the correct xtime reference */
-	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	clock->cycle_last = 0;
@@ -319,8 +353,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 	timekeeping_suspend_time = read_persistent_clock();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
-	/* Get the current xtime offset */
-	timekeeping_suspend_nsecs = __get_nsec_offset();
+	clocksource_forward_now();
 	timekeeping_suspended = 1;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -454,23 +487,29 @@ void update_wall_time(void)
 #else
 	offset = clock->cycle_interval;
 #endif
-	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
 
 	/* normally this loop will run just once, however in the
 	 * case of lost or late ticks, it will accumulate correctly.
 	 */
 	while (offset >= clock->cycle_interval) {
 		/* accumulate one interval */
-		clock->xtime_nsec += clock->xtime_interval;
-		clock->cycle_last += clock->cycle_interval;
 		offset -= clock->cycle_interval;
+		clock->cycle_last += clock->cycle_interval;
 
+		clock->xtime_nsec += clock->xtime_interval;
 		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
 			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 			xtime.tv_sec++;
 			second_overflow();
 		}
 
+		clock->raw_time.tv_nsec += clock->raw_interval;
+		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
+			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
+			clock->raw_time.tv_sec++;
+		}
+
 		/* accumulate error between NTP and clock interval */
 		clock->error += tick_length;
 		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@
 	/* correct the clock when NTP error is too big */
 	clocksource_adjust(offset);
 
-	/* store full nanoseconds into xtime */
-	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+	/* store full nanoseconds into xtime after rounding it up and
+	 * add the remainder to the error difference.
+	 */
+	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
 
 	update_xtime_cache(cyc2ns(clock, offset));
 
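The new getrawmonotonic() and the mult_orig/raw_time bookkeeping above are what back the CLOCK_MONOTONIC_RAW posix clock. A userspace sketch to observe the effect of NTP steering (assumes a kernel and libc where CLOCK_MONOTONIC_RAW is wired up; link with -lrt on older glibc):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec mono, raw;

		/* Steered by NTP frequency corrections */
		clock_gettime(CLOCK_MONOTONIC, &mono);
		/* Un-steered: pure hardware rate via mult_orig */
		clock_gettime(CLOCK_MONOTONIC_RAW, &raw);

		printf("monotonic:     %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
		printf("monotonic_raw: %ld.%09ld\n", (long)raw.tv_sec, raw.tv_nsec);
		return 0;
	}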
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a40e20fd000..a999b92a127 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -47,13 +47,14 @@ static void print_name_offset(struct seq_file *m, void *sym)
 }
 
 static void
-print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
+print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+	    int idx, u64 now)
 {
 #ifdef CONFIG_TIMER_STATS
 	char tmp[TASK_COMM_LEN + 1];
 #endif
 	SEQ_printf(m, " #%d: ", idx);
-	print_name_offset(m, timer);
+	print_name_offset(m, taddr);
 	SEQ_printf(m, ", ");
 	print_name_offset(m, timer->function);
 	SEQ_printf(m, ", S:%02lx", timer->state);
@@ -65,9 +66,11 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
 	SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
 #endif
 	SEQ_printf(m, "\n");
-	SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n",
-		(unsigned long long)ktime_to_ns(timer->expires),
-		(long long)(ktime_to_ns(timer->expires) - now));
+	SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
+		(unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
+		(unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)),
+		(long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),
+		(long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));
 }
 
 static void
@@ -99,7 +102,7 @@ next_one:
 	tmp = *timer;
 	spin_unlock_irqrestore(&base->cpu_base->lock, flags);
 
-	print_timer(m, &tmp, i, now);
+	print_timer(m, timer, &tmp, i, now);
 	next++;
 	goto next_one;
 }
@@ -109,6 +112,7 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
+	SEQ_printf(m, "  .base:       %p\n", base);
 	SEQ_printf(m, "  .index:      %d\n",
 			base->index);
 	SEQ_printf(m, "  .resolution: %Lu nsecs\n",
@@ -183,12 +187,16 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 static void
-print_tickdevice(struct seq_file *m, struct tick_device *td)
+print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 {
 	struct clock_event_device *dev = td->evtdev;
 
 	SEQ_printf(m, "\n");
 	SEQ_printf(m, "Tick Device: mode:     %d\n", td->mode);
+	if (cpu < 0)
+		SEQ_printf(m, "Broadcast device\n");
+	else
+		SEQ_printf(m, "Per CPU device: %d\n", cpu);
 
 	SEQ_printf(m, "Clock Event Device: ");
 	if (!dev) {
@@ -222,7 +230,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 	int cpu;
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-	print_tickdevice(m, tick_get_broadcast_device());
+	print_tickdevice(m, tick_get_broadcast_device(), -1);
 	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
 		   tick_get_broadcast_mask()->bits[0]);
 #ifdef CONFIG_TICK_ONESHOT
@@ -232,7 +240,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 	SEQ_printf(m, "\n");
 #endif
 	for_each_online_cpu(cpu)
-		print_tickdevice(m, tick_get_device(cpu));
+		print_tickdevice(m, tick_get_device(cpu), cpu);
 	SEQ_printf(m, "\n");
 }
 #else
@@ -244,7 +252,7 @@ static int timer_list_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Timer List Version: v0.3\n");
+	SEQ_printf(m, "Timer List Version: v0.4\n");
 	SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
 	SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
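With the extra taddr parameter, /proc/timer_list again prints the timer's real address instead of the address of the on-stack copy, and each entry now reports both the soft and the hard expiry from the hrtimer expiry-range work. Purely illustrative (values made up), a per-timer entry in the new v0.4 format would look like:

	 #0: <c27d46b0>, tick_sched_timer, S:01
	 # expires at 22656000000-22656000000 nsecs [in 379418 to 379418 nsecs]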
