author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 20:11:10 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 20:11:10 -0400
commit    164d44fd92e79d5bce54d0d62df9f856f7b23925
tree      9f21607849b7e684b255578ffdf41951bc31787e  /kernel/time
parent    5bfec46baa3a752393433b8d89d3b2c70820f61d
parent    d7e81c269db899b800e0963dc4aceece1f82a680
Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  clocksource: Add clocksource_register_hz/khz interface (see the usage sketch after this list)
posix-cpu-timers: Optimize run_posix_cpu_timers()
time: Remove xtime_cache
mqueue: Convert message queue timeout to use hrtimers
hrtimers: Provide schedule_hrtimeout for CLOCK_REALTIME
timers: Introduce the concept of timer slack for legacy timers
ntp: Remove tickadj
ntp: Make time_adjust static
time: Add xtime, wall_to_monotonic to feature-removal-schedule
timer: Try to survive timer callback preempt_count leak
timer: Split out timer function call
timer: Print function name for timer callbacks modifying preemption count
time: Clean up warp_clock()
cpu-timers: Avoid iterating over all threads in fastpath_timer_check()
cpu-timers: Change SIGEV_NONE timer implementation
cpu-timers: Return correct previous timer reload value
cpu-timers: Cleanup arm_timer()
cpu-timers: Simplify RLIMIT_CPU handling
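The clocksource_register_hz/khz item is the driver-facing half of the clocksource.c change below: rather than hand-computing a mult/shift pair, a driver reports its counter frequency and the core derives the conversion. A minimal sketch of driver-side adoption, assuming hypothetical "foo" hardware; the names, register offset, and 32 MHz rate are illustrative, not part of this merge:

/* Hypothetical driver adopting the new interface; the "foo" names,
 * the register offset, and the 32 MHz rate are assumptions. */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

#define FOO_COUNTER_REG 0x10            /* assumed counter register offset */
static void __iomem *foo_timer_base;    /* assumed mapped during probe */

static cycle_t foo_clocksource_read(struct clocksource *cs)
{
        return (cycle_t)readl(foo_timer_base + FOO_COUNTER_REG);
}

static struct clocksource foo_clocksource = {
        .name   = "foo-timer",
        .rating = 200,
        .read   = foo_clocksource_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        /* no .mult/.shift: the core now derives them from the frequency */
};

static int __init foo_timer_init(void)
{
        /* hardware counter ticks at 32 MHz */
        return clocksource_register_hz(&foo_clocksource, 32000000);
}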
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/clocksource.c  48
-rw-r--r--  kernel/time/ntp.c           2
-rw-r--r--  kernel/time/timekeeping.c  35
3 files changed, 65 insertions, 20 deletions
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1f5dde637457..f08e99c1d561 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -625,6 +625,54 @@ static void clocksource_enqueue(struct clocksource *cs)
         list_add(&cs->list, entry);
 }
 
+
+/*
+ * Maximum time we expect to go between ticks. This includes idle
+ * tickless time. It provides the trade off between selecting a
+ * mult/shift pair that is very precise but can only handle a short
+ * period of time, vs. a mult/shift pair that can handle long periods
+ * of time but isn't as precise.
+ *
+ * This is a subsystem constant, and actual hardware limitations
+ * may override it (ie: clocksources that wrap every 3 seconds).
+ */
+#define MAX_UPDATE_LENGTH 5 /* Seconds */
+
+/**
+ * __clocksource_register_scale - Used to install new clocksources
+ * @t:          clocksource to be registered
+ * @scale:      Scale factor multiplied against freq to get clocksource hz
+ * @freq:       clocksource frequency (cycles per second) divided by scale
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ *
+ * This *SHOULD NOT* be called directly! Please use the
+ * clocksource_register_hz() or clocksource_register_khz helper functions.
+ */
+int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
+{
+
+        /*
+         * Ideally we want to use some of the limits used in
+         * clocksource_max_deferment, to provide a more informed
+         * MAX_UPDATE_LENGTH. But for now this just gets the
+         * register interface working properly.
+         */
+        clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
+                               NSEC_PER_SEC/scale,
+                               MAX_UPDATE_LENGTH*scale);
+        cs->max_idle_ns = clocksource_max_deferment(cs);
+
+        mutex_lock(&clocksource_mutex);
+        clocksource_enqueue(cs);
+        clocksource_select();
+        clocksource_enqueue_watchdog(cs);
+        mutex_unlock(&clocksource_mutex);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(__clocksource_register_scale);
+
+
 /**
  * clocksource_register - Used to install new clocksources
  * @t:          clocksource to be registered
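The kernel-doc above directs callers to clocksource_register_hz() and clocksource_register_khz(), which sit outside the kernel/time diffstat shown here (on the header side of the same patch). Given that @freq is the frequency in cycles per second divided by @scale, the wrappers presumably reduce to:

/* Sketch of the helper wrappers implied by the kernel-doc above; the
 * real definitions live in include/linux/clocksource.h, outside this
 * diffstat. scale=1 means freq is in Hz, scale=1000 means kHz. */
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
        return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
        return __clocksource_register_scale(cs, 1000, khz);
}

Either way, clocks_calc_mult_shift() then picks cs->mult and cs->shift so that ns = (cycles * mult) >> shift stays accurate over at least MAX_UPDATE_LENGTH seconds between updates.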
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 7c0f180d6e9d..c63116863a80 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -69,7 +69,7 @@ static s64 time_freq;
 /* time at last adjustment (secs): */
 static long time_reftime;
 
-long time_adjust;
+static long time_adjust;
 
 /* constant (boot-param configurable) NTP tick adjustment (upscaled) */
 static s64 ntp_tick_adj;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 39f6177fafac..caf8d4d4f5c8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,13 +165,6 @@ struct timespec raw_time;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
-        xtime_cache = xtime;
-        timespec_add_ns(&xtime_cache, nsec);
-}
-
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
@@ -332,8 +325,6 @@ int do_settimeofday(struct timespec *tv)
 
         xtime = *tv;
 
-        update_xtime_cache(0);
-
         timekeeper.ntp_error = 0;
         ntp_clear();
 
@@ -559,7 +550,6 @@ void __init timekeeping_init(void)
         }
         set_normalized_timespec(&wall_to_monotonic,
                                 -boot.tv_sec, -boot.tv_nsec);
-        update_xtime_cache(0);
         total_sleep_time.tv_sec = 0;
         total_sleep_time.tv_nsec = 0;
         write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -593,7 +583,6 @@ static int timekeeping_resume(struct sys_device *dev)
                 wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
                 total_sleep_time = timespec_add_safe(total_sleep_time, ts);
         }
-        update_xtime_cache(0);
         /* re-base the last cycle value */
         timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
         timekeeper.ntp_error = 0;
@@ -788,7 +777,6 @@ void update_wall_time(void)
 {
         struct clocksource *clock;
         cycle_t offset;
-        u64 nsecs;
         int shift = 0, maxshift;
 
         /* Make sure we're fully resumed: */
@@ -847,7 +835,9 @@ void update_wall_time(void)
                 timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
         }
 
-        /* store full nanoseconds into xtime after rounding it up and
+
+        /*
+         * Store full nanoseconds into xtime after rounding it up and
          * add the remainder to the error difference.
          */
         xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
@@ -855,8 +845,15 @@ void update_wall_time(void)
         timekeeper.ntp_error += timekeeper.xtime_nsec <<
                                 timekeeper.ntp_error_shift;
 
-        nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
-        update_xtime_cache(nsecs);
+        /*
+         * Finally, make sure that after the rounding
+         * xtime.tv_nsec isn't larger than NSEC_PER_SEC
+         */
+        if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
+                xtime.tv_nsec -= NSEC_PER_SEC;
+                xtime.tv_sec++;
+                second_overflow();
+        }
 
         /* check to see if there is a new clocksource to use */
         update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
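This hunk exists because the +1 round-up two hunks earlier can leave xtime.tv_nsec exactly at NSEC_PER_SEC, a value readers of xtime (no longer shielded by xtime_cache) must never see. A standalone userspace sketch of the same round-up and carry, with the shift and accumulator chosen to force the overflow case:

/* Userspace demonstration of the round-up + carry normalization above;
 * not kernel code, and second_overflow() is only mimicked by a print. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
        int shift = 10;                 /* stands in for timekeeper.shift */
        /* shifted-nanosecond accumulator holding exactly one second */
        uint64_t xtime_nsec = (uint64_t)NSEC_PER_SEC << shift;
        long tv_sec = 100;

        /* round up: integer nanoseconds plus one, as in the hunk above */
        long tv_nsec = (long)(xtime_nsec >> shift) + 1;

        /* the +1 pushed tv_nsec past one second; carry and normalize */
        if (tv_nsec >= NSEC_PER_SEC) {
                tv_nsec -= NSEC_PER_SEC;
                tv_sec++;
                printf("second_overflow() would run here\n");
        }
        printf("normalized: %ld.%09ld\n", tv_sec, tv_nsec);
        return 0;
}

Compiled and run, this prints "normalized: 101.000000001": the rounded value carries into tv_sec instead of leaving tv_nsec out of range.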
@@ -896,13 +893,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-        return xtime_cache.tv_sec;
+        return xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-        return xtime_cache;
+        return xtime;
 }
 
 struct timespec current_kernel_time(void)
@@ -913,7 +910,7 @@ struct timespec current_kernel_time(void)
         do {
                 seq = read_seqbegin(&xtime_lock);
 
-                now = xtime_cache;
+                now = xtime;
         } while (read_seqretry(&xtime_lock, seq));
 
         return now;
@@ -928,7 +925,7 @@ struct timespec get_monotonic_coarse(void)
         do {
                 seq = read_seqbegin(&xtime_lock);
 
-                now = xtime_cache;
+                now = xtime;
                 mono = wall_to_monotonic;
         } while (read_seqretry(&xtime_lock, seq));
 
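With xtime_cache gone, all three readers in the hunks above snapshot xtime itself under the same seqlock retry loop. A simplified userspace analogue of that read side, assuming a writer that keeps the sequence odd while updating; the memory barriers of the real read_seqbegin()/read_seqretry() are glossed over here:

/* Simplified seqlock read pattern (userspace analogue, C11 atomics).
 * The kernel primitives add memory barriers this sketch elides. */
#include <stdatomic.h>
#include <time.h>

static _Atomic unsigned xtime_seq;      /* writer: odd while updating */
static struct timespec xtime_shadow;    /* stands in for xtime */

struct timespec snapshot_time(void)
{
        struct timespec now;
        unsigned seq;

        do {
                /* wait until no write is in progress (even sequence) */
                while ((seq = atomic_load(&xtime_seq)) & 1)
                        ;
                now = xtime_shadow;     /* copy may tear; retry detects it */
        } while (atomic_load(&xtime_seq) != seq);

        return now;
}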