diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-23 14:30:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-23 14:30:28 -0400 |
commit | 30cb6d5f2eb24d15d20139d5ceefaccc68734bd7 (patch) | |
tree | 773c5a98645e4b945343caddcfe5af365566ccc5 /kernel | |
parent | 4867faab1e3eb8cc3f74e390357615d9b8e8cda6 (diff) | |
parent | 68fa61c026057a39d6ccb850aa8785043afbee02 (diff) |
Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
hrtimers: Reorder clock bases
hrtimers: Avoid touching inactive timer bases
hrtimers: Make struct hrtimer_cpu_base layout less stupid
timerfd: Manage cancelable timers in timerfd
clockevents: Move C3 stop test outside lock
alarmtimer: Drop device refcount after rtc_open()
alarmtimer: Check return value of class_find_device()
timerfd: Allow timers to be cancelled when clock was set
hrtimers: Prepare for cancel on clock was set timers
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/hrtimer.c | 162 | ||||
-rw-r--r-- | kernel/posix-cpu-timers.c | 4 | ||||
-rw-r--r-- | kernel/posix-timers.c | 2 | ||||
-rw-r--r-- | kernel/time/alarmtimer.c | 16 | ||||
-rw-r--r-- | kernel/time/tick-broadcast.c | 16 | ||||
-rw-r--r-- | kernel/time/timekeeping.c | 17 |
6 files changed, 125 insertions, 92 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index dbbbf7d43080..c541ee527ecb 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -64,17 +64,20 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | |||
64 | .clock_base = | 64 | .clock_base = |
65 | { | 65 | { |
66 | { | 66 | { |
67 | .index = CLOCK_REALTIME, | 67 | .index = HRTIMER_BASE_MONOTONIC, |
68 | .get_time = &ktime_get_real, | 68 | .clockid = CLOCK_MONOTONIC, |
69 | .get_time = &ktime_get, | ||
69 | .resolution = KTIME_LOW_RES, | 70 | .resolution = KTIME_LOW_RES, |
70 | }, | 71 | }, |
71 | { | 72 | { |
72 | .index = CLOCK_MONOTONIC, | 73 | .index = HRTIMER_BASE_REALTIME, |
73 | .get_time = &ktime_get, | 74 | .clockid = CLOCK_REALTIME, |
75 | .get_time = &ktime_get_real, | ||
74 | .resolution = KTIME_LOW_RES, | 76 | .resolution = KTIME_LOW_RES, |
75 | }, | 77 | }, |
76 | { | 78 | { |
77 | .index = CLOCK_BOOTTIME, | 79 | .index = HRTIMER_BASE_BOOTTIME, |
80 | .clockid = CLOCK_BOOTTIME, | ||
78 | .get_time = &ktime_get_boottime, | 81 | .get_time = &ktime_get_boottime, |
79 | .resolution = KTIME_LOW_RES, | 82 | .resolution = KTIME_LOW_RES, |
80 | }, | 83 | }, |
@@ -196,7 +199,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, | |||
196 | struct hrtimer_cpu_base *new_cpu_base; | 199 | struct hrtimer_cpu_base *new_cpu_base; |
197 | int this_cpu = smp_processor_id(); | 200 | int this_cpu = smp_processor_id(); |
198 | int cpu = hrtimer_get_target(this_cpu, pinned); | 201 | int cpu = hrtimer_get_target(this_cpu, pinned); |
199 | int basenum = hrtimer_clockid_to_base(base->index); | 202 | int basenum = base->index; |
200 | 203 | ||
201 | again: | 204 | again: |
202 | new_cpu_base = &per_cpu(hrtimer_bases, cpu); | 205 | new_cpu_base = &per_cpu(hrtimer_bases, cpu); |
@@ -621,66 +624,6 @@ static int hrtimer_reprogram(struct hrtimer *timer, | |||
621 | return res; | 624 | return res; |
622 | } | 625 | } |
623 | 626 | ||
624 | |||
625 | /* | ||
626 | * Retrigger next event is called after clock was set | ||
627 | * | ||
628 | * Called with interrupts disabled via on_each_cpu() | ||
629 | */ | ||
630 | static void retrigger_next_event(void *arg) | ||
631 | { | ||
632 | struct hrtimer_cpu_base *base; | ||
633 | struct timespec realtime_offset, wtm, sleep; | ||
634 | |||
635 | if (!hrtimer_hres_active()) | ||
636 | return; | ||
637 | |||
638 | get_xtime_and_monotonic_and_sleep_offset(&realtime_offset, &wtm, | ||
639 | &sleep); | ||
640 | set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); | ||
641 | |||
642 | base = &__get_cpu_var(hrtimer_bases); | ||
643 | |||
644 | /* Adjust CLOCK_REALTIME offset */ | ||
645 | raw_spin_lock(&base->lock); | ||
646 | base->clock_base[HRTIMER_BASE_REALTIME].offset = | ||
647 | timespec_to_ktime(realtime_offset); | ||
648 | base->clock_base[HRTIMER_BASE_BOOTTIME].offset = | ||
649 | timespec_to_ktime(sleep); | ||
650 | |||
651 | hrtimer_force_reprogram(base, 0); | ||
652 | raw_spin_unlock(&base->lock); | ||
653 | } | ||
654 | |||
655 | /* | ||
656 | * Clock realtime was set | ||
657 | * | ||
658 | * Change the offset of the realtime clock vs. the monotonic | ||
659 | * clock. | ||
660 | * | ||
661 | * We might have to reprogram the high resolution timer interrupt. On | ||
662 | * SMP we call the architecture specific code to retrigger _all_ high | ||
663 | * resolution timer interrupts. On UP we just disable interrupts and | ||
664 | * call the high resolution interrupt code. | ||
665 | */ | ||
666 | void clock_was_set(void) | ||
667 | { | ||
668 | /* Retrigger the CPU local events everywhere */ | ||
669 | on_each_cpu(retrigger_next_event, NULL, 1); | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * During resume we might have to reprogram the high resolution timer | ||
674 | * interrupt (on the local CPU): | ||
675 | */ | ||
676 | void hres_timers_resume(void) | ||
677 | { | ||
678 | WARN_ONCE(!irqs_disabled(), | ||
679 | KERN_INFO "hres_timers_resume() called with IRQs enabled!"); | ||
680 | |||
681 | retrigger_next_event(NULL); | ||
682 | } | ||
683 | |||
684 | /* | 627 | /* |
685 | * Initialize the high resolution related parts of cpu_base | 628 | * Initialize the high resolution related parts of cpu_base |
686 | */ | 629 | */ |
@@ -715,11 +658,39 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
715 | } | 658 | } |
716 | 659 | ||
717 | /* | 660 | /* |
661 | * Retrigger next event is called after clock was set | ||
662 | * | ||
663 | * Called with interrupts disabled via on_each_cpu() | ||
664 | */ | ||
665 | static void retrigger_next_event(void *arg) | ||
666 | { | ||
667 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | ||
668 | struct timespec realtime_offset, xtim, wtm, sleep; | ||
669 | |||
670 | if (!hrtimer_hres_active()) | ||
671 | return; | ||
672 | |||
673 | /* Optimized out for !HIGH_RES */ | ||
674 | get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep); | ||
675 | set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); | ||
676 | |||
677 | /* Adjust CLOCK_REALTIME offset */ | ||
678 | raw_spin_lock(&base->lock); | ||
679 | base->clock_base[HRTIMER_BASE_REALTIME].offset = | ||
680 | timespec_to_ktime(realtime_offset); | ||
681 | base->clock_base[HRTIMER_BASE_BOOTTIME].offset = | ||
682 | timespec_to_ktime(sleep); | ||
683 | |||
684 | hrtimer_force_reprogram(base, 0); | ||
685 | raw_spin_unlock(&base->lock); | ||
686 | } | ||
687 | |||
688 | /* | ||
718 | * Switch to high resolution mode | 689 | * Switch to high resolution mode |
719 | */ | 690 | */ |
720 | static int hrtimer_switch_to_hres(void) | 691 | static int hrtimer_switch_to_hres(void) |
721 | { | 692 | { |
722 | int cpu = smp_processor_id(); | 693 | int i, cpu = smp_processor_id(); |
723 | struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu); | 694 | struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu); |
724 | unsigned long flags; | 695 | unsigned long flags; |
725 | 696 | ||
@@ -735,9 +706,8 @@ static int hrtimer_switch_to_hres(void) | |||
735 | return 0; | 706 | return 0; |
736 | } | 707 | } |
737 | base->hres_active = 1; | 708 | base->hres_active = 1; |
738 | base->clock_base[HRTIMER_BASE_REALTIME].resolution = KTIME_HIGH_RES; | 709 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) |
739 | base->clock_base[HRTIMER_BASE_MONOTONIC].resolution = KTIME_HIGH_RES; | 710 | base->clock_base[i].resolution = KTIME_HIGH_RES; |
740 | base->clock_base[HRTIMER_BASE_BOOTTIME].resolution = KTIME_HIGH_RES; | ||
741 | 711 | ||
742 | tick_setup_sched_timer(); | 712 | tick_setup_sched_timer(); |
743 | 713 | ||
@@ -761,9 +731,43 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
761 | return 0; | 731 | return 0; |
762 | } | 732 | } |
763 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } | 733 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } |
734 | static inline void retrigger_next_event(void *arg) { } | ||
764 | 735 | ||
765 | #endif /* CONFIG_HIGH_RES_TIMERS */ | 736 | #endif /* CONFIG_HIGH_RES_TIMERS */ |
766 | 737 | ||
738 | /* | ||
739 | * Clock realtime was set | ||
740 | * | ||
741 | * Change the offset of the realtime clock vs. the monotonic | ||
742 | * clock. | ||
743 | * | ||
744 | * We might have to reprogram the high resolution timer interrupt. On | ||
745 | * SMP we call the architecture specific code to retrigger _all_ high | ||
746 | * resolution timer interrupts. On UP we just disable interrupts and | ||
747 | * call the high resolution interrupt code. | ||
748 | */ | ||
749 | void clock_was_set(void) | ||
750 | { | ||
751 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
752 | /* Retrigger the CPU local events everywhere */ | ||
753 | on_each_cpu(retrigger_next_event, NULL, 1); | ||
754 | #endif | ||
755 | timerfd_clock_was_set(); | ||
756 | } | ||
757 | |||
758 | /* | ||
759 | * During resume we might have to reprogram the high resolution timer | ||
760 | * interrupt (on the local CPU): | ||
761 | */ | ||
762 | void hrtimers_resume(void) | ||
763 | { | ||
764 | WARN_ONCE(!irqs_disabled(), | ||
765 | KERN_INFO "hrtimers_resume() called with IRQs enabled!"); | ||
766 | |||
767 | retrigger_next_event(NULL); | ||
768 | timerfd_clock_was_set(); | ||
769 | } | ||
770 | |||
767 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) | 771 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) |
768 | { | 772 | { |
769 | #ifdef CONFIG_TIMER_STATS | 773 | #ifdef CONFIG_TIMER_STATS |
@@ -856,6 +860,7 @@ static int enqueue_hrtimer(struct hrtimer *timer, | |||
856 | debug_activate(timer); | 860 | debug_activate(timer); |
857 | 861 | ||
858 | timerqueue_add(&base->active, &timer->node); | 862 | timerqueue_add(&base->active, &timer->node); |
863 | base->cpu_base->active_bases |= 1 << base->index; | ||
859 | 864 | ||
860 | /* | 865 | /* |
861 | * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the | 866 | * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the |
@@ -897,6 +902,8 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
897 | #endif | 902 | #endif |
898 | } | 903 | } |
899 | timerqueue_del(&base->active, &timer->node); | 904 | timerqueue_del(&base->active, &timer->node); |
905 | if (!timerqueue_getnext(&base->active)) | ||
906 | base->cpu_base->active_bases &= ~(1 << base->index); | ||
900 | out: | 907 | out: |
901 | timer->state = newstate; | 908 | timer->state = newstate; |
902 | } | 909 | } |
@@ -1234,7 +1241,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | |||
1234 | void hrtimer_interrupt(struct clock_event_device *dev) | 1241 | void hrtimer_interrupt(struct clock_event_device *dev) |
1235 | { | 1242 | { |
1236 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1243 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1237 | struct hrtimer_clock_base *base; | ||
1238 | ktime_t expires_next, now, entry_time, delta; | 1244 | ktime_t expires_next, now, entry_time, delta; |
1239 | int i, retries = 0; | 1245 | int i, retries = 0; |
1240 | 1246 | ||
@@ -1256,12 +1262,15 @@ retry: | |||
1256 | */ | 1262 | */ |
1257 | cpu_base->expires_next.tv64 = KTIME_MAX; | 1263 | cpu_base->expires_next.tv64 = KTIME_MAX; |
1258 | 1264 | ||
1259 | base = cpu_base->clock_base; | ||
1260 | |||
1261 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1265 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1262 | ktime_t basenow; | 1266 | struct hrtimer_clock_base *base; |
1263 | struct timerqueue_node *node; | 1267 | struct timerqueue_node *node; |
1268 | ktime_t basenow; | ||
1269 | |||
1270 | if (!(cpu_base->active_bases & (1 << i))) | ||
1271 | continue; | ||
1264 | 1272 | ||
1273 | base = cpu_base->clock_base + i; | ||
1265 | basenow = ktime_add(now, base->offset); | 1274 | basenow = ktime_add(now, base->offset); |
1266 | 1275 | ||
1267 | while ((node = timerqueue_getnext(&base->active))) { | 1276 | while ((node = timerqueue_getnext(&base->active))) { |
@@ -1294,7 +1303,6 @@ retry: | |||
1294 | 1303 | ||
1295 | __run_hrtimer(timer, &basenow); | 1304 | __run_hrtimer(timer, &basenow); |
1296 | } | 1305 | } |
1297 | base++; | ||
1298 | } | 1306 | } |
1299 | 1307 | ||
1300 | /* | 1308 | /* |
@@ -1525,7 +1533,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) | |||
1525 | struct timespec __user *rmtp; | 1533 | struct timespec __user *rmtp; |
1526 | int ret = 0; | 1534 | int ret = 0; |
1527 | 1535 | ||
1528 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, | 1536 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid, |
1529 | HRTIMER_MODE_ABS); | 1537 | HRTIMER_MODE_ABS); |
1530 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); | 1538 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); |
1531 | 1539 | ||
@@ -1577,7 +1585,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | |||
1577 | 1585 | ||
1578 | restart = ¤t_thread_info()->restart_block; | 1586 | restart = ¤t_thread_info()->restart_block; |
1579 | restart->fn = hrtimer_nanosleep_restart; | 1587 | restart->fn = hrtimer_nanosleep_restart; |
1580 | restart->nanosleep.index = t.timer.base->index; | 1588 | restart->nanosleep.clockid = t.timer.base->clockid; |
1581 | restart->nanosleep.rmtp = rmtp; | 1589 | restart->nanosleep.rmtp = rmtp; |
1582 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); | 1590 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); |
1583 | 1591 | ||
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 0791b13df7bf..58f405b581e7 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -1514,7 +1514,7 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags, | |||
1514 | return -EFAULT; | 1514 | return -EFAULT; |
1515 | 1515 | ||
1516 | restart_block->fn = posix_cpu_nsleep_restart; | 1516 | restart_block->fn = posix_cpu_nsleep_restart; |
1517 | restart_block->nanosleep.index = which_clock; | 1517 | restart_block->nanosleep.clockid = which_clock; |
1518 | restart_block->nanosleep.rmtp = rmtp; | 1518 | restart_block->nanosleep.rmtp = rmtp; |
1519 | restart_block->nanosleep.expires = timespec_to_ns(rqtp); | 1519 | restart_block->nanosleep.expires = timespec_to_ns(rqtp); |
1520 | } | 1520 | } |
@@ -1523,7 +1523,7 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags, | |||
1523 | 1523 | ||
1524 | static long posix_cpu_nsleep_restart(struct restart_block *restart_block) | 1524 | static long posix_cpu_nsleep_restart(struct restart_block *restart_block) |
1525 | { | 1525 | { |
1526 | clockid_t which_clock = restart_block->nanosleep.index; | 1526 | clockid_t which_clock = restart_block->nanosleep.clockid; |
1527 | struct timespec t; | 1527 | struct timespec t; |
1528 | struct itimerspec it; | 1528 | struct itimerspec it; |
1529 | int error; | 1529 | int error; |
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index e5498d7405c3..a1b5edf1bf92 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -1056,7 +1056,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, | |||
1056 | */ | 1056 | */ |
1057 | long clock_nanosleep_restart(struct restart_block *restart_block) | 1057 | long clock_nanosleep_restart(struct restart_block *restart_block) |
1058 | { | 1058 | { |
1059 | clockid_t which_clock = restart_block->nanosleep.index; | 1059 | clockid_t which_clock = restart_block->nanosleep.clockid; |
1060 | struct k_clock *kc = clockid_to_kclock(which_clock); | 1060 | struct k_clock *kc = clockid_to_kclock(which_clock); |
1061 | 1061 | ||
1062 | if (WARN_ON_ONCE(!kc || !kc->nsleep_restart)) | 1062 | if (WARN_ON_ONCE(!kc || !kc->nsleep_restart)) |
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 9265014cb4db..2d966244ea60 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
@@ -494,7 +494,7 @@ static int update_rmtp(ktime_t exp, enum alarmtimer_type type, | |||
494 | */ | 494 | */ |
495 | static long __sched alarm_timer_nsleep_restart(struct restart_block *restart) | 495 | static long __sched alarm_timer_nsleep_restart(struct restart_block *restart) |
496 | { | 496 | { |
497 | enum alarmtimer_type type = restart->nanosleep.index; | 497 | enum alarmtimer_type type = restart->nanosleep.clockid; |
498 | ktime_t exp; | 498 | ktime_t exp; |
499 | struct timespec __user *rmtp; | 499 | struct timespec __user *rmtp; |
500 | struct alarm alarm; | 500 | struct alarm alarm; |
@@ -573,7 +573,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, | |||
573 | 573 | ||
574 | restart = ¤t_thread_info()->restart_block; | 574 | restart = ¤t_thread_info()->restart_block; |
575 | restart->fn = alarm_timer_nsleep_restart; | 575 | restart->fn = alarm_timer_nsleep_restart; |
576 | restart->nanosleep.index = type; | 576 | restart->nanosleep.clockid = type; |
577 | restart->nanosleep.expires = exp.tv64; | 577 | restart->nanosleep.expires = exp.tv64; |
578 | restart->nanosleep.rmtp = rmtp; | 578 | restart->nanosleep.rmtp = rmtp; |
579 | ret = -ERESTART_RESTARTBLOCK; | 579 | ret = -ERESTART_RESTARTBLOCK; |
@@ -669,12 +669,20 @@ static int __init has_wakealarm(struct device *dev, void *name_ptr) | |||
669 | */ | 669 | */ |
670 | static int __init alarmtimer_init_late(void) | 670 | static int __init alarmtimer_init_late(void) |
671 | { | 671 | { |
672 | struct device *dev; | ||
672 | char *str; | 673 | char *str; |
673 | 674 | ||
674 | /* Find an rtc device and init the rtc_timer */ | 675 | /* Find an rtc device and init the rtc_timer */ |
675 | class_find_device(rtc_class, NULL, &str, has_wakealarm); | 676 | dev = class_find_device(rtc_class, NULL, &str, has_wakealarm); |
676 | if (str) | 677 | /* If we have a device then str is valid. See has_wakealarm() */ |
678 | if (dev) { | ||
677 | rtcdev = rtc_class_open(str); | 679 | rtcdev = rtc_class_open(str); |
680 | /* | ||
681 | * Drop the reference we got in class_find_device, | ||
682 | * rtc_open takes its own. | ||
683 | */ | ||
684 | put_device(dev); | ||
685 | } | ||
678 | if (!rtcdev) { | 686 | if (!rtcdev) { |
679 | printk(KERN_WARNING "No RTC device found, ALARM timers will" | 687 | printk(KERN_WARNING "No RTC device found, ALARM timers will" |
680 | " not wake from suspend"); | 688 | " not wake from suspend"); |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 723c7637e55a..c7218d132738 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -456,23 +456,27 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
456 | unsigned long flags; | 456 | unsigned long flags; |
457 | int cpu; | 457 | int cpu; |
458 | 458 | ||
459 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||
460 | |||
461 | /* | 459 | /* |
462 | * Periodic mode does not care about the enter/exit of power | 460 | * Periodic mode does not care about the enter/exit of power |
463 | * states | 461 | * states |
464 | */ | 462 | */ |
465 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 463 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
466 | goto out; | 464 | return; |
467 | 465 | ||
468 | bc = tick_broadcast_device.evtdev; | 466 | /* |
467 | * We are called with preemption disabled from the depth of the | ||
468 | * idle code, so we can't be moved away. | ||
469 | */ | ||
469 | cpu = smp_processor_id(); | 470 | cpu = smp_processor_id(); |
470 | td = &per_cpu(tick_cpu_device, cpu); | 471 | td = &per_cpu(tick_cpu_device, cpu); |
471 | dev = td->evtdev; | 472 | dev = td->evtdev; |
472 | 473 | ||
473 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) | 474 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) |
474 | goto out; | 475 | return; |
476 | |||
477 | bc = tick_broadcast_device.evtdev; | ||
475 | 478 | ||
479 | raw_spin_lock_irqsave(&tick_broadcast_lock, flags); | ||
476 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { | 480 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { |
477 | if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { | 481 | if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { |
478 | cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask()); | 482 | cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask()); |
@@ -489,8 +493,6 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
489 | tick_program_event(dev->next_event, 1); | 493 | tick_program_event(dev->next_event, 1); |
490 | } | 494 | } |
491 | } | 495 | } |
492 | |||
493 | out: | ||
494 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 496 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
495 | } | 497 | } |
496 | 498 | ||
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 8e6a05a5915a..342408cf68dd 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -680,7 +680,7 @@ static void timekeeping_resume(void) | |||
680 | clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); | 680 | clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); |
681 | 681 | ||
682 | /* Resume hrtimers */ | 682 | /* Resume hrtimers */ |
683 | hres_timers_resume(); | 683 | hrtimers_resume(); |
684 | } | 684 | } |
685 | 685 | ||
686 | static int timekeeping_suspend(void) | 686 | static int timekeeping_suspend(void) |
@@ -1099,6 +1099,21 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, | |||
1099 | } | 1099 | } |
1100 | 1100 | ||
1101 | /** | 1101 | /** |
1102 | * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format | ||
1103 | */ | ||
1104 | ktime_t ktime_get_monotonic_offset(void) | ||
1105 | { | ||
1106 | unsigned long seq; | ||
1107 | struct timespec wtom; | ||
1108 | |||
1109 | do { | ||
1110 | seq = read_seqbegin(&xtime_lock); | ||
1111 | wtom = wall_to_monotonic; | ||
1112 | } while (read_seqretry(&xtime_lock, seq)); | ||
1113 | return timespec_to_ktime(wtom); | ||
1114 | } | ||
1115 | |||
1116 | /** | ||
1102 | * xtime_update() - advances the timekeeping infrastructure | 1117 | * xtime_update() - advances the timekeeping infrastructure |
1103 | * @ticks: number of ticks, that have elapsed since the last call. | 1118 | * @ticks: number of ticks, that have elapsed since the last call. |
1104 | * | 1119 | * |