author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:15:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:15:03 -0400
commit		39adff5f69d6849ca22353a88058c9f8630528c0 (patch)
tree		b0c2d2de77ebc5c97fd19c29b81eeb03549553f8 /kernel/time
parent		8a4a8918ed6e4a361f4df19f199bbc2d0a89a46c (diff)
parent		e35f95b36e43f67a6f806172555a152c11ea0a78 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  time, s390: Get rid of compile warning
  dw_apb_timer: constify clocksource name
  time: Cleanup old CONFIG_GENERIC_TIME references that snuck in
  time: Change jiffies_to_clock_t() argument type to unsigned long
  alarmtimers: Fix error handling
  clocksource: Make watchdog reset lockless
  posix-cpu-timers: Cure SMP accounting oddities
  s390: Use direct ktime path for s390 clockevent device
  clockevents: Add direct ktime programming function
  clockevents: Make minimum delay adjustments configurable
  nohz: Remove "Switched to NOHz mode" debugging messages
  proc: Consider NO_HZ when printing idle and iowait times
  nohz: Make idle/iowait counter update conditional
  nohz: Fix update_ts_time_stat idle accounting
  cputime: Clean up cputime_to_usecs and usecs_to_cputime macros
  alarmtimers: Rework RTC device selection using class interface
  alarmtimers: Add try_to_cancel functionality
  alarmtimers: Add more refined alarm state tracking
  alarmtimers: Remove period from alarm structure
  alarmtimers: Remove interval cap limit hack
  ...
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/Kconfig		|   2
-rw-r--r--	kernel/time/alarmtimer.c	| 266
-rw-r--r--	kernel/time/clockevents.c	| 129
-rw-r--r--	kernel/time/clocksource.c	|  38
-rw-r--r--	kernel/time/tick-broadcast.c	|   4
-rw-r--r--	kernel/time/tick-common.c	|   4
-rw-r--r--	kernel/time/tick-internal.h	|   2
-rw-r--r--	kernel/time/tick-oneshot.c	|  77
-rw-r--r--	kernel/time/tick-sched.c	|  55
9 files changed, 364 insertions, 213 deletions
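
The alarmtimer rework in this merge changes the alarm API: callbacks now return enum alarmtimer_restart and receive the expiry time, alarm_start() loses its period argument, and periodic re-arming becomes the callback's job via alarm_forward(). A minimal usage sketch of the merged API; the names my_interval, my_alarm_fn and my_setup are illustrative, not from this tree:

/*
 * Sketch only: a client of the reworked alarmtimer API re-arms a
 * periodic alarm itself, since 'period' is gone from struct alarm.
 */
static ktime_t my_interval;	/* hypothetical example state */

static enum alarmtimer_restart my_alarm_fn(struct alarm *a, ktime_t now)
{
	/* Push the expiry past 'now', then ask to be re-queued. */
	alarm_forward(a, now, my_interval);
	return ALARMTIMER_RESTART;	/* ALARMTIMER_NORESTART for one-shot */
}

static void my_setup(struct alarm *a, ktime_t first, ktime_t interval)
{
	my_interval = interval;
	alarm_init(a, ALARM_REALTIME, my_alarm_fn);
	alarm_start(a, first);		/* no period argument any more */
}

This mirrors what alarm_handle_timer() does for posix alarm timers in the diff below.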
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f06a8a365648..b26c2228fe92 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -27,3 +27,5 @@ config GENERIC_CLOCKEVENTS_BUILD
 	default y
 	depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR
 
+config GENERIC_CLOCKEVENTS_MIN_ADJUST
+	bool
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index ea5e1a928d5b..c436e790b21b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -53,27 +53,6 @@ static struct rtc_device *rtcdev;
 static DEFINE_SPINLOCK(rtcdev_lock);
 
 /**
- * has_wakealarm - check rtc device has wakealarm ability
- * @dev: current device
- * @name_ptr: name to be returned
- *
- * This helper function checks to see if the rtc device can wake
- * from suspend.
- */
-static int has_wakealarm(struct device *dev, void *name_ptr)
-{
-	struct rtc_device *candidate = to_rtc_device(dev);
-
-	if (!candidate->ops->set_alarm)
-		return 0;
-	if (!device_may_wakeup(candidate->dev.parent))
-		return 0;
-
-	*(const char **)name_ptr = dev_name(dev);
-	return 1;
-}
-
-/**
  * alarmtimer_get_rtcdev - Return selected rtcdevice
  *
  * This function returns the rtc device to use for wakealarms.
@@ -82,37 +61,64 @@ static int has_wakealarm(struct device *dev, void *name_ptr)
  */
 static struct rtc_device *alarmtimer_get_rtcdev(void)
 {
-	struct device *dev;
-	char *str;
 	unsigned long flags;
 	struct rtc_device *ret;
 
 	spin_lock_irqsave(&rtcdev_lock, flags);
-	if (!rtcdev) {
-		/* Find an rtc device and init the rtc_timer */
-		dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
-		/* If we have a device then str is valid. See has_wakealarm() */
-		if (dev) {
-			rtcdev = rtc_class_open(str);
-			/*
-			 * Drop the reference we got in class_find_device,
-			 * rtc_open takes its own.
-			 */
-			put_device(dev);
-			rtc_timer_init(&rtctimer, NULL, NULL);
-		}
-	}
 	ret = rtcdev;
 	spin_unlock_irqrestore(&rtcdev_lock, flags);
 
 	return ret;
 }
+
+
+static int alarmtimer_rtc_add_device(struct device *dev,
+				struct class_interface *class_intf)
+{
+	unsigned long flags;
+	struct rtc_device *rtc = to_rtc_device(dev);
+
+	if (rtcdev)
+		return -EBUSY;
+
+	if (!rtc->ops->set_alarm)
+		return -1;
+	if (!device_may_wakeup(rtc->dev.parent))
+		return -1;
+
+	spin_lock_irqsave(&rtcdev_lock, flags);
+	if (!rtcdev) {
+		rtcdev = rtc;
+		/* hold a reference so it doesn't go away */
+		get_device(dev);
+	}
+	spin_unlock_irqrestore(&rtcdev_lock, flags);
+	return 0;
+}
+
+static struct class_interface alarmtimer_rtc_interface = {
+	.add_dev = &alarmtimer_rtc_add_device,
+};
+
+static int alarmtimer_rtc_interface_setup(void)
+{
+	alarmtimer_rtc_interface.class = rtc_class;
+	return class_interface_register(&alarmtimer_rtc_interface);
+}
+static void alarmtimer_rtc_interface_remove(void)
+{
+	class_interface_unregister(&alarmtimer_rtc_interface);
+}
 #else
-#define alarmtimer_get_rtcdev() (0)
-#define rtcdev (0)
+static inline struct rtc_device *alarmtimer_get_rtcdev(void)
+{
+	return NULL;
+}
+#define rtcdev (NULL)
+static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
+static inline void alarmtimer_rtc_interface_remove(void) { }
 #endif
 
-
 /**
  * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue
  * @base: pointer to the base where the timer is being run
@@ -126,6 +132,8 @@ static struct rtc_device *alarmtimer_get_rtcdev(void)
 static void alarmtimer_enqueue(struct alarm_base *base, struct alarm *alarm)
 {
 	timerqueue_add(&base->timerqueue, &alarm->node);
+	alarm->state |= ALARMTIMER_STATE_ENQUEUED;
+
 	if (&alarm->node == timerqueue_getnext(&base->timerqueue)) {
 		hrtimer_try_to_cancel(&base->timer);
 		hrtimer_start(&base->timer, alarm->node.expires,
@@ -147,7 +155,12 @@ static void alarmtimer_remove(struct alarm_base *base, struct alarm *alarm)
 {
 	struct timerqueue_node *next = timerqueue_getnext(&base->timerqueue);
 
+	if (!(alarm->state & ALARMTIMER_STATE_ENQUEUED))
+		return;
+
 	timerqueue_del(&base->timerqueue, &alarm->node);
+	alarm->state &= ~ALARMTIMER_STATE_ENQUEUED;
+
 	if (next == &alarm->node) {
 		hrtimer_try_to_cancel(&base->timer);
 		next = timerqueue_getnext(&base->timerqueue);
@@ -174,6 +187,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 	unsigned long flags;
 	ktime_t now;
 	int ret = HRTIMER_NORESTART;
+	int restart = ALARMTIMER_NORESTART;
 
 	spin_lock_irqsave(&base->lock, flags);
 	now = base->gettime();
@@ -187,17 +201,19 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 		alarm = container_of(next, struct alarm, node);
 
 		timerqueue_del(&base->timerqueue, &alarm->node);
-		alarm->enabled = 0;
-		/* Re-add periodic timers */
-		if (alarm->period.tv64) {
-			alarm->node.expires = ktime_add(expired, alarm->period);
-			timerqueue_add(&base->timerqueue, &alarm->node);
-			alarm->enabled = 1;
-		}
+		alarm->state &= ~ALARMTIMER_STATE_ENQUEUED;
+
+		alarm->state |= ALARMTIMER_STATE_CALLBACK;
 		spin_unlock_irqrestore(&base->lock, flags);
 		if (alarm->function)
-			alarm->function(alarm);
+			restart = alarm->function(alarm, now);
 		spin_lock_irqsave(&base->lock, flags);
+		alarm->state &= ~ALARMTIMER_STATE_CALLBACK;
+
+		if (restart != ALARMTIMER_NORESTART) {
+			timerqueue_add(&base->timerqueue, &alarm->node);
+			alarm->state |= ALARMTIMER_STATE_ENQUEUED;
+		}
 	}
 
 	if (next) {
@@ -234,7 +250,7 @@ static int alarmtimer_suspend(struct device *dev)
 	freezer_delta = ktime_set(0, 0);
 	spin_unlock_irqrestore(&freezer_delta_lock, flags);
 
-	rtc = rtcdev;
+	rtc = alarmtimer_get_rtcdev();
 	/* If we have no rtcdev, just return */
 	if (!rtc)
 		return 0;
@@ -299,53 +315,111 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
  * @function: callback that is run when the alarm fires
  */
 void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
-		void (*function)(struct alarm *))
+		enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
 {
 	timerqueue_init(&alarm->node);
-	alarm->period = ktime_set(0, 0);
 	alarm->function = function;
 	alarm->type = type;
-	alarm->enabled = 0;
+	alarm->state = ALARMTIMER_STATE_INACTIVE;
 }
 
 /**
  * alarm_start - Sets an alarm to fire
  * @alarm: ptr to alarm to set
  * @start: time to run the alarm
- * @period: period at which the alarm will recur
  */
-void alarm_start(struct alarm *alarm, ktime_t start, ktime_t period)
+void alarm_start(struct alarm *alarm, ktime_t start)
 {
 	struct alarm_base *base = &alarm_bases[alarm->type];
 	unsigned long flags;
 
 	spin_lock_irqsave(&base->lock, flags);
-	if (alarm->enabled)
+	if (alarmtimer_active(alarm))
 		alarmtimer_remove(base, alarm);
 	alarm->node.expires = start;
-	alarm->period = period;
 	alarmtimer_enqueue(base, alarm);
-	alarm->enabled = 1;
 	spin_unlock_irqrestore(&base->lock, flags);
 }
 
 /**
- * alarm_cancel - Tries to cancel an alarm timer
+ * alarm_try_to_cancel - Tries to cancel an alarm timer
  * @alarm: ptr to alarm to be canceled
+ *
+ * Returns 1 if the timer was canceled, 0 if it was not running,
+ * and -1 if the callback was running
  */
-void alarm_cancel(struct alarm *alarm)
+int alarm_try_to_cancel(struct alarm *alarm)
 {
 	struct alarm_base *base = &alarm_bases[alarm->type];
 	unsigned long flags;
-
+	int ret = -1;
 	spin_lock_irqsave(&base->lock, flags);
-	if (alarm->enabled)
+
+	if (alarmtimer_callback_running(alarm))
+		goto out;
+
+	if (alarmtimer_is_queued(alarm)) {
 		alarmtimer_remove(base, alarm);
-	alarm->enabled = 0;
+		ret = 1;
+	} else
+		ret = 0;
+out:
 	spin_unlock_irqrestore(&base->lock, flags);
+	return ret;
+}
+
+
+/**
+ * alarm_cancel - Spins trying to cancel an alarm timer until it is done
+ * @alarm: ptr to alarm to be canceled
+ *
+ * Returns 1 if the timer was canceled, 0 if it was not active.
+ */
+int alarm_cancel(struct alarm *alarm)
+{
+	for (;;) {
+		int ret = alarm_try_to_cancel(alarm);
+		if (ret >= 0)
+			return ret;
+		cpu_relax();
+	}
+}
+
+
+u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
+{
+	u64 overrun = 1;
+	ktime_t delta;
+
+	delta = ktime_sub(now, alarm->node.expires);
+
+	if (delta.tv64 < 0)
+		return 0;
+
+	if (unlikely(delta.tv64 >= interval.tv64)) {
+		s64 incr = ktime_to_ns(interval);
+
+		overrun = ktime_divns(delta, incr);
+
+		alarm->node.expires = ktime_add_ns(alarm->node.expires,
+						   incr*overrun);
+
+		if (alarm->node.expires.tv64 > now.tv64)
+			return overrun;
+		/*
+		 * This (and the ktime_add() below) is the
+		 * correction for exact:
+		 */
+		overrun++;
+	}
+
+	alarm->node.expires = ktime_add(alarm->node.expires, interval);
+	return overrun;
 }
 
 
+
+
 /**
  * clock2alarm - helper that converts from clockid to alarmtypes
  * @clockid: clockid.
@@ -365,12 +439,21 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
  *
  * Posix timer callback for expired alarm timers.
  */
-static void alarm_handle_timer(struct alarm *alarm)
+static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
+							ktime_t now)
 {
 	struct k_itimer *ptr = container_of(alarm, struct k_itimer,
-						it.alarmtimer);
+						it.alarm.alarmtimer);
 	if (posix_timer_event(ptr, 0) != 0)
 		ptr->it_overrun++;
+
+	/* Re-add periodic timers */
+	if (ptr->it.alarm.interval.tv64) {
+		ptr->it_overrun += alarm_forward(alarm, now,
+						ptr->it.alarm.interval);
+		return ALARMTIMER_RESTART;
+	}
+	return ALARMTIMER_NORESTART;
 }
 
 /**
@@ -427,7 +510,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 
 	type = clock2alarm(new_timer->it_clock);
 	base = &alarm_bases[type];
-	alarm_init(&new_timer->it.alarmtimer, type, alarm_handle_timer);
+	alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
 	return 0;
 }
 
@@ -444,9 +527,9 @@ static void alarm_timer_get(struct k_itimer *timr,
 	memset(cur_setting, 0, sizeof(struct itimerspec));
 
 	cur_setting->it_interval =
-			ktime_to_timespec(timr->it.alarmtimer.period);
+			ktime_to_timespec(timr->it.alarm.interval);
 	cur_setting->it_value =
-			ktime_to_timespec(timr->it.alarmtimer.node.expires);
+			ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
 	return;
 }
 
@@ -461,7 +544,9 @@ static int alarm_timer_del(struct k_itimer *timr)
 	if (!rtcdev)
 		return -ENOTSUPP;
 
-	alarm_cancel(&timr->it.alarmtimer);
+	if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0)
+		return TIMER_RETRY;
+
 	return 0;
 }
 
@@ -481,25 +566,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 	if (!rtcdev)
 		return -ENOTSUPP;
 
-	/*
-	 * XXX HACK! Currently we can DOS a system if the interval
-	 * period on alarmtimers is too small. Cap the interval here
-	 * to 100us and solve this properly in a future patch! -jstultz
-	 */
-	if ((new_setting->it_interval.tv_sec == 0) &&
-			(new_setting->it_interval.tv_nsec < 100000))
-		new_setting->it_interval.tv_nsec = 100000;
-
 	if (old_setting)
 		alarm_timer_get(timr, old_setting);
 
 	/* If the timer was already set, cancel it */
-	alarm_cancel(&timr->it.alarmtimer);
+	if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0)
+		return TIMER_RETRY;
 
 	/* start the timer */
-	alarm_start(&timr->it.alarmtimer,
-			timespec_to_ktime(new_setting->it_value),
-			timespec_to_ktime(new_setting->it_interval));
+	timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+	alarm_start(&timr->it.alarm.alarmtimer,
+			timespec_to_ktime(new_setting->it_value));
 	return 0;
 }
 
@@ -509,13 +586,15 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
  *
  * Wakes up the task that set the alarmtimer
  */
-static void alarmtimer_nsleep_wakeup(struct alarm *alarm)
+static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm,
+								ktime_t now)
 {
 	struct task_struct *task = (struct task_struct *)alarm->data;
 
 	alarm->data = NULL;
 	if (task)
 		wake_up_process(task);
+	return ALARMTIMER_NORESTART;
 }
 
 /**
@@ -530,7 +609,7 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp)
 	alarm->data = (void *)current;
 	do {
 		set_current_state(TASK_INTERRUPTIBLE);
-		alarm_start(alarm, absexp, ktime_set(0, 0));
+		alarm_start(alarm, absexp);
 		if (likely(alarm->data))
 			schedule();
 
@@ -691,6 +770,7 @@ static struct platform_driver alarmtimer_driver = {
  */
 static int __init alarmtimer_init(void)
 {
+	struct platform_device *pdev;
 	int error = 0;
 	int i;
 	struct k_clock alarm_clock = {
@@ -719,10 +799,26 @@ static int __init alarmtimer_init(void)
 					HRTIMER_MODE_ABS);
 		alarm_bases[i].timer.function = alarmtimer_fired;
 	}
+
+	error = alarmtimer_rtc_interface_setup();
+	if (error)
+		return error;
+
 	error = platform_driver_register(&alarmtimer_driver);
-	platform_device_register_simple("alarmtimer", -1, NULL, 0);
+	if (error)
+		goto out_if;
 
+	pdev = platform_device_register_simple("alarmtimer", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		error = PTR_ERR(pdev);
+		goto out_drv;
+	}
+	return 0;
+
+out_drv:
+	platform_driver_unregister(&alarmtimer_driver);
+out_if:
+	alarmtimer_rtc_interface_remove();
 	return error;
 }
 device_initcall(alarmtimer_init);
-
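
alarm_forward() above mirrors hrtimer_forward(): it counts how many whole intervals fit between the stale expiry and now, bumps the expiry past now, and returns the overrun, which alarm_handle_timer() accumulates into it_overrun. A standalone model of the arithmetic in plain C with signed nanoseconds instead of ktime_t (userspace sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Standalone model of alarm_forward()'s overrun arithmetic. */
static uint64_t forward_ns(int64_t *expires, int64_t now, int64_t interval)
{
	uint64_t overrun = 1;
	int64_t delta = now - *expires;

	if (delta < 0)
		return 0;			/* not expired yet */

	if (delta >= interval) {
		overrun = delta / interval;	/* whole missed intervals */
		*expires += (int64_t)overrun * interval;
		if (*expires > now)
			return overrun;
		overrun++;			/* correction for exact multiples */
	}
	*expires += interval;
	return overrun;
}

int main(void)
{
	int64_t expires = 1000;
	/* timer was due at t=1000, it is now t=4500, interval is 1000 */
	uint64_t n = forward_ns(&expires, 4500, 1000);
	printf("overrun=%llu new expiry=%lld\n",	/* overrun=4, expiry=5000 */
	       (unsigned long long)n, (long long)expires);
	return 0;
}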
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index e4c699dfa4e8..1ecd6ba36d6c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -94,42 +94,143 @@ void clockevents_shutdown(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 }
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+
+/* Limit min_delta to a jiffie */
+#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
+
+/**
+ * clockevents_increase_min_delta - raise minimum delta of a clock event device
+ * @dev:	device to increase the minimum delta
+ *
+ * Returns 0 on success, -ETIME when the minimum delta reached the limit.
+ */
+static int clockevents_increase_min_delta(struct clock_event_device *dev)
+{
+	/* Nothing to do if we already reached the limit */
+	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
+		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
+		dev->next_event.tv64 = KTIME_MAX;
+		return -ETIME;
+	}
+
+	if (dev->min_delta_ns < 5000)
+		dev->min_delta_ns = 5000;
+	else
+		dev->min_delta_ns += dev->min_delta_ns >> 1;
+
+	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
+		dev->min_delta_ns = MIN_DELTA_LIMIT;
+
+	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
+	       dev->name ? dev->name : "?",
+	       (unsigned long long) dev->min_delta_ns);
+	return 0;
+}
+
+/**
+ * clockevents_program_min_delta - Set clock event device to the minimum delay.
+ * @dev:	device to program
+ *
+ * Returns 0 on success, -ETIME when the retry loop failed.
+ */
+static int clockevents_program_min_delta(struct clock_event_device *dev)
+{
+	unsigned long long clc;
+	int64_t delta;
+	int i;
+
+	for (i = 0;;) {
+		delta = dev->min_delta_ns;
+		dev->next_event = ktime_add_ns(ktime_get(), delta);
+
+		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+			return 0;
+
+		dev->retries++;
+		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
+		if (dev->set_next_event((unsigned long) clc, dev) == 0)
+			return 0;
+
+		if (++i > 2) {
+			/*
+			 * We tried 3 times to program the device with the
+			 * given min_delta_ns. Try to increase the minimum
+			 * delta, if that fails as well get out of here.
+			 */
+			if (clockevents_increase_min_delta(dev))
+				return -ETIME;
+			i = 0;
+		}
+	}
+}
+
+#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
+
+/**
+ * clockevents_program_min_delta - Set clock event device to the minimum delay.
+ * @dev:	device to program
+ *
+ * Returns 0 on success, -ETIME when the retry loop failed.
+ */
+static int clockevents_program_min_delta(struct clock_event_device *dev)
+{
+	unsigned long long clc;
+	int64_t delta;
+
+	delta = dev->min_delta_ns;
+	dev->next_event = ktime_add_ns(ktime_get(), delta);
+
+	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+		return 0;
+
+	dev->retries++;
+	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
+	return dev->set_next_event((unsigned long) clc, dev);
+}
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
+
 /**
  * clockevents_program_event - Reprogram the clock event device.
+ * @dev:	device to program
  * @expires:	absolute expiry time (monotonic clock)
+ * @force:	program minimum delay if expires can not be set
  *
  * Returns 0 on success, -ETIME when the event is in the past.
  */
 int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
-			      ktime_t now)
+			      bool force)
 {
 	unsigned long long clc;
 	int64_t delta;
+	int rc;
 
 	if (unlikely(expires.tv64 < 0)) {
 		WARN_ON_ONCE(1);
 		return -ETIME;
 	}
 
-	delta = ktime_to_ns(ktime_sub(expires, now));
-
-	if (delta <= 0)
-		return -ETIME;
-
 	dev->next_event = expires;
 
 	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
 		return 0;
 
-	if (delta > dev->max_delta_ns)
-		delta = dev->max_delta_ns;
-	if (delta < dev->min_delta_ns)
-		delta = dev->min_delta_ns;
+	/* Shortcut for clockevent devices that can deal with ktime. */
+	if (dev->features & CLOCK_EVT_FEAT_KTIME)
+		return dev->set_next_ktime(expires, dev);
+
+	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
+	if (delta <= 0)
+		return force ? clockevents_program_min_delta(dev) : -ETIME;
 
-	clc = delta * dev->mult;
-	clc >>= dev->shift;
+	delta = min(delta, (int64_t) dev->max_delta_ns);
+	delta = max(delta, (int64_t) dev->min_delta_ns);
 
-	return dev->set_next_event((unsigned long) clc, dev);
+	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
+	rc = dev->set_next_event((unsigned long) clc, dev);
+
+	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
 }
 
 /**
@@ -258,7 +359,7 @@ int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
 		return 0;
 
-	return clockevents_program_event(dev, dev->next_event, ktime_get());
+	return clockevents_program_event(dev, dev->next_event, false);
 }
 
 /*
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index e0980f0d9a0a..cf52fda2e096 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
 static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
+static atomic_t watchdog_reset_pending;
 
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
@@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigned long data)
 	struct clocksource *cs;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
-	int next_cpu;
+	int next_cpu, reset_pending;
 
 	spin_lock(&watchdog_lock);
 	if (!watchdog_running)
 		goto out;
 
+	reset_pending = atomic_read(&watchdog_reset_pending);
+
 	list_for_each_entry(cs, &watchdog_list, wd_list) {
 
 		/* Clocksource already marked unstable? */
@@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigned long data)
 		local_irq_enable();
 
 		/* Clocksource initialized ? */
-		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
+		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
+		    atomic_read(&watchdog_reset_pending)) {
 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
 			cs->wd_last = wdnow;
 			cs->cs_last = csnow;
@@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigned long data)
 		cs->cs_last = csnow;
 		cs->wd_last = wdnow;
 
+		if (atomic_read(&watchdog_reset_pending))
+			continue;
+
 		/* Check the deviation from the watchdog clocksource. */
-		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
+		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
 			clocksource_unstable(cs, cs_nsec - wd_nsec);
 			continue;
 		}
@@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigned long data)
 	}
 
 	/*
+	 * We only clear the watchdog_reset_pending, when we did a
+	 * full cycle through all clocksources.
+	 */
+	if (reset_pending)
+		atomic_dec(&watchdog_reset_pending);
+
+	/*
 	 * Cycle through CPUs to check if the CPUs stay synchronized
 	 * to each other.
 	 */
@@ -344,23 +358,7 @@ static inline void clocksource_reset_watchdog(void)
 
 static void clocksource_resume_watchdog(void)
 {
-	unsigned long flags;
-
-	/*
-	 * We use trylock here to avoid a potential dead lock when
-	 * kgdb calls this code after the kernel has been stopped with
-	 * watchdog_lock held. When watchdog_lock is held we just
-	 * return and accept, that the watchdog might trigger and mark
-	 * the monitored clock source (usually TSC) unstable.
-	 *
-	 * This does not affect the other caller clocksource_resume()
-	 * because at this point the kernel is UP, interrupts are
-	 * disabled and nothing can hold watchdog_lock.
-	 */
-	if (!spin_trylock_irqsave(&watchdog_lock, flags))
-		return;
-	clocksource_reset_watchdog();
-	spin_unlock_irqrestore(&watchdog_lock, flags);
+	atomic_inc(&watchdog_reset_pending);
 }
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
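
The watchdog reset is now lockless: resume paths (including kgdb) just bump watchdog_reset_pending, and the watchdog timer re-baselines every clocksource it visits while the counter is non-zero, decrementing it only after a full pass so no stale delta is ever judged. A rough userspace model of the handshake with C11 atomics; the struct and function names here are illustrative, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int watchdog_reset_pending;

/* Resume path: no lock taken, just record that a reset is wanted. */
static void resume_watchdog(void)
{
	atomic_fetch_add(&watchdog_reset_pending, 1);
}

struct cs {
	bool initialized;
	long long wd_last, cs_last;
};

/* One watchdog pass over the monitored clocksources. */
static void watchdog_pass(struct cs *list, int n,
			  long long wdnow, long long csnow)
{
	int reset_pending = atomic_load(&watchdog_reset_pending);
	int i;

	for (i = 0; i < n; i++) {
		if (!list[i].initialized ||
		    atomic_load(&watchdog_reset_pending)) {
			/* re-baseline instead of judging a stale delta */
			list[i].initialized = true;
			list[i].wd_last = wdnow;
			list[i].cs_last = csnow;
			continue;
		}
		/* ... deviation check against wd_last/cs_last goes here ... */
	}

	/* Clear the request only after a full cycle, as the patch does. */
	if (reset_pending)
		atomic_fetch_sub(&watchdog_reset_pending, 1);
}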
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index c7218d132738..f954282d9a82 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -194,7 +194,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 	for (next = dev->next_event; ;) {
 		next = ktime_add(next, tick_period);
 
-		if (!clockevents_program_event(dev, next, ktime_get()))
+		if (!clockevents_program_event(dev, next, false))
 			return;
 		tick_do_periodic_broadcast();
 	}
@@ -373,7 +373,7 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
-	return tick_dev_program_event(bc, expires, force);
+	return clockevents_program_event(bc, expires, force);
 }
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 119528de8235..da6c9ecad4e4 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -94,7 +94,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
 	 */
 	next = ktime_add(dev->next_event, tick_period);
 	for (;;) {
-		if (!clockevents_program_event(dev, next, ktime_get()))
+		if (!clockevents_program_event(dev, next, false))
 			return;
 		/*
 		 * Have to be careful here. If we're in oneshot mode,
@@ -137,7 +137,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
 
 	for (;;) {
-		if (!clockevents_program_event(dev, next, ktime_get()))
+		if (!clockevents_program_event(dev, next, false))
 			return;
 		next = ktime_add(next, tick_period);
 	}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 1009b06d6f89..4e265b901fed 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -26,8 +26,6 @@ extern void clockevents_shutdown(struct clock_event_device *dev);
 extern void tick_setup_oneshot(struct clock_event_device *newdev,
 			       void (*handler)(struct clock_event_device *),
 			       ktime_t nextevt);
-extern int tick_dev_program_event(struct clock_event_device *dev,
-				  ktime_t expires, int force);
 extern int tick_program_event(ktime_t expires, int force);
 extern void tick_oneshot_notify(void);
 extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 2d04411a5f05..824109060a33 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -21,74 +21,6 @@
 
 #include "tick-internal.h"
 
-/* Limit min_delta to a jiffie */
-#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
-
-static int tick_increase_min_delta(struct clock_event_device *dev)
-{
-	/* Nothing to do if we already reached the limit */
-	if (dev->min_delta_ns >= MIN_DELTA_LIMIT)
-		return -ETIME;
-
-	if (dev->min_delta_ns < 5000)
-		dev->min_delta_ns = 5000;
-	else
-		dev->min_delta_ns += dev->min_delta_ns >> 1;
-
-	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
-		dev->min_delta_ns = MIN_DELTA_LIMIT;
-
-	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
-	       dev->name ? dev->name : "?",
-	       (unsigned long long) dev->min_delta_ns);
-	return 0;
-}
-
-/**
- * tick_program_event internal worker function
- */
-int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
-			   int force)
-{
-	ktime_t now = ktime_get();
-	int i;
-
-	for (i = 0;;) {
-		int ret = clockevents_program_event(dev, expires, now);
-
-		if (!ret || !force)
-			return ret;
-
-		dev->retries++;
-		/*
-		 * We tried 3 times to program the device with the given
-		 * min_delta_ns. If that's not working then we increase it
-		 * and emit a warning.
-		 */
-		if (++i > 2) {
-			/* Increase the min. delta and try again */
-			if (tick_increase_min_delta(dev)) {
-				/*
-				 * Get out of the loop if min_delta_ns
-				 * hit the limit already. That's
-				 * better than staying here forever.
-				 *
-				 * We clear next_event so we have a
-				 * chance that the box survives.
-				 */
-				printk(KERN_WARNING
-				       "CE: Reprogramming failure. Giving up\n");
-				dev->next_event.tv64 = KTIME_MAX;
-				return -ETIME;
-			}
-			i = 0;
-		}
-
-		now = ktime_get();
-		expires = ktime_add_ns(now, dev->min_delta_ns);
-	}
-}
-
 /**
  * tick_program_event
  */
@@ -96,7 +28,7 @@ int tick_program_event(ktime_t expires, int force)
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-	return tick_dev_program_event(dev, expires, force);
+	return clockevents_program_event(dev, expires, force);
 }
 
 /**
@@ -104,11 +36,10 @@ int tick_program_event(ktime_t expires, int force)
  */
 void tick_resume_oneshot(void)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-	struct clock_event_device *dev = td->evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
-	tick_program_event(ktime_get(), 1);
+	clockevents_program_event(dev, ktime_get(), true);
 }
 
 /**
@@ -120,7 +51,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
 {
 	newdev->event_handler = handler;
 	clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
-	tick_dev_program_event(newdev, next_event, 1);
+	clockevents_program_event(newdev, next_event, true);
 }
 
 /**
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index eb98e55196b9..40420644d0ba 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -158,9 +158,10 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
 	if (ts->idle_active) {
 		delta = ktime_sub(now, ts->idle_entrytime);
-		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 		if (nr_iowait_cpu(cpu) > 0)
 			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
+		else
+			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
 
@@ -196,11 +197,11 @@ static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 /**
  * get_cpu_idle_time_us - get the total idle time of a cpu
  * @cpu: CPU number to query
- * @last_update_time: variable to store update time in
+ * @last_update_time: variable to store update time in. Do not update
+ * counters if NULL.
  *
  * Return the cummulative idle time (since boot) for a given
- * CPU, in microseconds. The idle time returned includes
- * the iowait time (unlike what "top" and co report).
+ * CPU, in microseconds.
  *
  * This time is measured via accounting rather than sampling,
  * and is as accurate as ktime_get() is.
@@ -210,20 +211,35 @@ static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t now, idle;
 
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
+	now = ktime_get();
+	if (last_update_time) {
+		update_ts_time_stats(cpu, ts, now, last_update_time);
+		idle = ts->idle_sleeptime;
+	} else {
+		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
+			ktime_t delta = ktime_sub(now, ts->idle_entrytime);
+
+			idle = ktime_add(ts->idle_sleeptime, delta);
+		} else {
+			idle = ts->idle_sleeptime;
+		}
+	}
+
+	return ktime_to_us(idle);
 
-	return ktime_to_us(ts->idle_sleeptime);
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
-/*
+/**
  * get_cpu_iowait_time_us - get the total iowait time of a cpu
  * @cpu: CPU number to query
- * @last_update_time: variable to store update time in
+ * @last_update_time: variable to store update time in. Do not update
+ * counters if NULL.
  *
  * Return the cummulative iowait time (since boot) for a given
  * CPU, in microseconds.
@@ -236,13 +252,26 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	ktime_t now, iowait;
 
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
+	now = ktime_get();
+	if (last_update_time) {
+		update_ts_time_stats(cpu, ts, now, last_update_time);
+		iowait = ts->iowait_sleeptime;
+	} else {
+		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
+			ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 
-	return ktime_to_us(ts->iowait_sleeptime);
+			iowait = ktime_add(ts->iowait_sleeptime, delta);
+		} else {
+			iowait = ts->iowait_sleeptime;
+		}
+	}
+
+	return ktime_to_us(iowait);
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
@@ -634,8 +663,6 @@ static void tick_nohz_switch_to_nohz(void)
 		next = ktime_add(next, tick_period);
 	}
 	local_irq_enable();
-
-	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
 }
 
 /*
@@ -787,10 +814,8 @@ void tick_setup_sched_timer(void)
 	}
 
 #ifdef CONFIG_NO_HZ
-	if (tick_nohz_enabled) {
+	if (tick_nohz_enabled)
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
-		printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
-	}
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
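
The tick-sched changes make idle and iowait time mutually exclusive (an idle CPU with iowait pending is charged to iowait_sleeptime only) and let readers get a current value without updating the counters when last_update_time is NULL. A standalone model of the split accounting, simplified to one CPU (userspace C, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ts {
	bool	idle_active;
	int64_t	idle_entrytime;		/* ns */
	int64_t	idle_sleeptime;		/* idle with no iowait pending */
	int64_t	iowait_sleeptime;	/* idle with iowait pending */
};

/* Writer: charge the elapsed idle interval to exactly one bucket. */
static void update_stats(struct ts *ts, int64_t now, int nr_iowait)
{
	if (ts->idle_active) {
		int64_t delta = now - ts->idle_entrytime;

		if (nr_iowait > 0)
			ts->iowait_sleeptime += delta;
		else
			ts->idle_sleeptime += delta;
		ts->idle_entrytime = now;
	}
}

/* Reader: extrapolate to 'now' without touching the counters. */
static int64_t idle_time(const struct ts *ts, int64_t now, int nr_iowait)
{
	int64_t idle = ts->idle_sleeptime;

	if (ts->idle_active && nr_iowait == 0)
		idle += now - ts->idle_entrytime;
	return idle;
}

int main(void)
{
	struct ts ts = { .idle_active = true, .idle_entrytime = 0 };

	update_stats(&ts, 1000, 2);	/* 1000ns idle, iowait pending */
	update_stats(&ts, 3000, 0);	/* 2000ns plain idle */
	printf("idle=%lld iowait=%lld read=%lld\n",
	       (long long)ts.idle_sleeptime,
	       (long long)ts.iowait_sleeptime,
	       (long long)idle_time(&ts, 3500, 0));
	/* prints: idle=2000 iowait=1000 read=2500 */
	return 0;
}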