diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 17:16:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 17:16:48 -0400 |
commit | bcd550745fc54f789c14e7526e0633222c505faa (patch) | |
tree | c3fe11a6503b7ffdd4406a9fece5c40b3e2a3f6d /arch/x86/kernel | |
parent | 93f378883cecb9dcb2cf5b51d9d24175906659da (diff) | |
parent | 646783a389828e76e813f50791f7999429c821bc (diff) |
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core updates from Thomas Gleixner.
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
ia64: vsyscall: Add missing paranthesis
alarmtimer: Don't call rtc_timer_init() when CONFIG_RTC_CLASS=n
x86: vdso: Put declaration before code
x86-64: Inline vdso clock_gettime helpers
x86-64: Simplify and optimize vdso clock_gettime monotonic variants
kernel-time: fix s/then/than/ spelling errors
time: remove no_sync_cmos_clock
time: Avoid scary backtraces when warning of > 11% adj
alarmtimer: Make sure we initialize the rtctimer
ntp: Fix leap-second hrtimer livelock
x86, tsc: Skip refined tsc calibration on systems with reliable TSC
rtc: Provide flag for rtc devices that don't support UIE
ia64: vsyscall: Use seqcount instead of seqlock
x86: vdso: Use seqcount instead of seqlock
x86: vdso: Remove bogus locking in update_vsyscall_tz()
time: Remove bogus comments
time: Fix change_clocksource locking
time: x86: Fix race switching from vsyscall to non-vsyscall clock
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/tsc.c | 10 | ||||
-rw-r--r-- | arch/x86/kernel/vsyscall_64.c | 25 |
2 files changed, 22 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 899a03f2d18..fc0a147e372 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void) | |||
933 | clocksource_tsc.rating = 0; | 933 | clocksource_tsc.rating = 0; |
934 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; | 934 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; |
935 | } | 935 | } |
936 | |||
937 | /* | ||
938 | * Trust the results of the earlier calibration on systems | ||
939 | * exporting a reliable TSC. | ||
940 | */ | ||
941 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { | ||
942 | clocksource_register_khz(&clocksource_tsc, tsc_khz); | ||
943 | return 0; | ||
944 | } | ||
945 | |||
936 | schedule_delayed_work(&tsc_irqwork, 0); | 946 | schedule_delayed_work(&tsc_irqwork, 0); |
937 | return 0; | 947 | return 0; |
938 | } | 948 | } |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index b07ba939356..d5c69860b52 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -52,10 +52,7 @@ | |||
52 | #include "vsyscall_trace.h" | 52 | #include "vsyscall_trace.h" |
53 | 53 | ||
54 | DEFINE_VVAR(int, vgetcpu_mode); | 54 | DEFINE_VVAR(int, vgetcpu_mode); |
55 | DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = | 55 | DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data); |
56 | { | ||
57 | .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), | ||
58 | }; | ||
59 | 56 | ||
60 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; | 57 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; |
61 | 58 | ||
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup); | |||
80 | 77 | ||
81 | void update_vsyscall_tz(void) | 78 | void update_vsyscall_tz(void) |
82 | { | 79 | { |
83 | unsigned long flags; | ||
84 | |||
85 | write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); | ||
86 | /* sys_tz has changed */ | ||
87 | vsyscall_gtod_data.sys_tz = sys_tz; | 80 | vsyscall_gtod_data.sys_tz = sys_tz; |
88 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | ||
89 | } | 81 | } |
90 | 82 | ||
91 | void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, | 83 | void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, |
92 | struct clocksource *clock, u32 mult) | 84 | struct clocksource *clock, u32 mult) |
93 | { | 85 | { |
94 | unsigned long flags; | 86 | struct timespec monotonic; |
95 | 87 | ||
96 | write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); | 88 | write_seqcount_begin(&vsyscall_gtod_data.seq); |
97 | 89 | ||
98 | /* copy vsyscall data */ | 90 | /* copy vsyscall data */ |
99 | vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; | 91 | vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; |
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, | |||
101 | vsyscall_gtod_data.clock.mask = clock->mask; | 93 | vsyscall_gtod_data.clock.mask = clock->mask; |
102 | vsyscall_gtod_data.clock.mult = mult; | 94 | vsyscall_gtod_data.clock.mult = mult; |
103 | vsyscall_gtod_data.clock.shift = clock->shift; | 95 | vsyscall_gtod_data.clock.shift = clock->shift; |
96 | |||
104 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; | 97 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; |
105 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | 98 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; |
106 | vsyscall_gtod_data.wall_to_monotonic = *wtm; | 99 | |
100 | monotonic = timespec_add(*wall_time, *wtm); | ||
101 | vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec; | ||
102 | vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec; | ||
103 | |||
107 | vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); | 104 | vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); |
105 | vsyscall_gtod_data.monotonic_time_coarse = | ||
106 | timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm); | ||
108 | 107 | ||
109 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | 108 | write_seqcount_end(&vsyscall_gtod_data.seq); |
110 | } | 109 | } |
111 | 110 | ||
112 | static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, | 111 | static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, |