diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 17:16:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 17:16:48 -0400 |
commit | bcd550745fc54f789c14e7526e0633222c505faa (patch) | |
tree | c3fe11a6503b7ffdd4406a9fece5c40b3e2a3f6d /arch | |
parent | 93f378883cecb9dcb2cf5b51d9d24175906659da (diff) | |
parent | 646783a389828e76e813f50791f7999429c821bc (diff) |
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core updates from Thomas Gleixner.
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
ia64: vsyscall: Add missing paranthesis
alarmtimer: Don't call rtc_timer_init() when CONFIG_RTC_CLASS=n
x86: vdso: Put declaration before code
x86-64: Inline vdso clock_gettime helpers
x86-64: Simplify and optimize vdso clock_gettime monotonic variants
kernel-time: fix s/then/than/ spelling errors
time: remove no_sync_cmos_clock
time: Avoid scary backtraces when warning of > 11% adj
alarmtimer: Make sure we initialize the rtctimer
ntp: Fix leap-second hrtimer livelock
x86, tsc: Skip refined tsc calibration on systems with reliable TSC
rtc: Provide flag for rtc devices that don't support UIE
ia64: vsyscall: Use seqcount instead of seqlock
x86: vdso: Use seqcount instead of seqlock
x86: vdso: Remove bogus locking in update_vsyscall_tz()
time: Remove bogus comments
time: Fix change_clocksource locking
time: x86: Fix race switching from vsyscall to non-vsyscall clock
Diffstat (limited to 'arch')
-rw-r--r-- | arch/ia64/kernel/asm-offsets.c | 4 | ||||
-rw-r--r-- | arch/ia64/kernel/fsys.S | 2 | ||||
-rw-r--r-- | arch/ia64/kernel/fsyscall_gtod_data.h | 2 | ||||
-rw-r--r-- | arch/ia64/kernel/time.c | 10 | ||||
-rw-r--r-- | arch/x86/include/asm/vgtod.h | 17 | ||||
-rw-r--r-- | arch/x86/kernel/tsc.c | 10 | ||||
-rw-r--r-- | arch/x86/kernel/vsyscall_64.c | 25 | ||||
-rw-r--r-- | arch/x86/vdso/vclock_gettime.c | 135 |
8 files changed, 106 insertions(+), 99 deletions(-)
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index af5650169043..a48bd9a9927b 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
@@ -269,8 +269,8 @@ void foo(void) | |||
269 | BLANK(); | 269 | BLANK(); |
270 | 270 | ||
271 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ | 271 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ |
272 | DEFINE(IA64_GTOD_LOCK_OFFSET, | 272 | DEFINE(IA64_GTOD_SEQ_OFFSET, |
273 | offsetof (struct fsyscall_gtod_data_t, lock)); | 273 | offsetof (struct fsyscall_gtod_data_t, seq)); |
274 | DEFINE(IA64_GTOD_WALL_TIME_OFFSET, | 274 | DEFINE(IA64_GTOD_WALL_TIME_OFFSET, |
275 | offsetof (struct fsyscall_gtod_data_t, wall_time)); | 275 | offsetof (struct fsyscall_gtod_data_t, wall_time)); |
276 | DEFINE(IA64_GTOD_MONO_TIME_OFFSET, | 276 | DEFINE(IA64_GTOD_MONO_TIME_OFFSET, |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index f15d8601827f..cc26edac0ec6 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
@@ -173,7 +173,7 @@ ENTRY(fsys_set_tid_address) | |||
173 | FSYS_RETURN | 173 | FSYS_RETURN |
174 | END(fsys_set_tid_address) | 174 | END(fsys_set_tid_address) |
175 | 175 | ||
176 | #if IA64_GTOD_LOCK_OFFSET !=0 | 176 | #if IA64_GTOD_SEQ_OFFSET !=0 |
177 | #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t | 177 | #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t |
178 | #endif | 178 | #endif |
179 | #if IA64_ITC_JITTER_OFFSET !=0 | 179 | #if IA64_ITC_JITTER_OFFSET !=0 |
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h index 57d2ee6c83e1..146b15b5fec3 100644 --- a/arch/ia64/kernel/fsyscall_gtod_data.h +++ b/arch/ia64/kernel/fsyscall_gtod_data.h | |||
@@ -6,7 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | struct fsyscall_gtod_data_t { | 8 | struct fsyscall_gtod_data_t { |
9 | seqlock_t lock; | 9 | seqcount_t seq; |
10 | struct timespec wall_time; | 10 | struct timespec wall_time; |
11 | struct timespec monotonic_time; | 11 | struct timespec monotonic_time; |
12 | cycle_t clk_mask; | 12 | cycle_t clk_mask; |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index aa94bdda9de8..ecc904b33c5f 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -34,9 +34,7 @@ | |||
34 | 34 | ||
35 | static cycle_t itc_get_cycles(struct clocksource *cs); | 35 | static cycle_t itc_get_cycles(struct clocksource *cs); |
36 | 36 | ||
37 | struct fsyscall_gtod_data_t fsyscall_gtod_data = { | 37 | struct fsyscall_gtod_data_t fsyscall_gtod_data; |
38 | .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock), | ||
39 | }; | ||
40 | 38 | ||
41 | struct itc_jitter_data_t itc_jitter_data; | 39 | struct itc_jitter_data_t itc_jitter_data; |
42 | 40 | ||
@@ -459,9 +457,7 @@ void update_vsyscall_tz(void) | |||
459 | void update_vsyscall(struct timespec *wall, struct timespec *wtm, | 457 | void update_vsyscall(struct timespec *wall, struct timespec *wtm, |
460 | struct clocksource *c, u32 mult) | 458 | struct clocksource *c, u32 mult) |
461 | { | 459 | { |
462 | unsigned long flags; | 460 | write_seqcount_begin(&fsyscall_gtod_data.seq); |
463 | |||
464 | write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags); | ||
465 | 461 | ||
466 | /* copy fsyscall clock data */ | 462 | /* copy fsyscall clock data */ |
467 | fsyscall_gtod_data.clk_mask = c->mask; | 463 | fsyscall_gtod_data.clk_mask = c->mask; |
@@ -484,6 +480,6 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm, | |||
484 | fsyscall_gtod_data.monotonic_time.tv_sec++; | 480 | fsyscall_gtod_data.monotonic_time.tv_sec++; |
485 | } | 481 | } |
486 | 482 | ||
487 | write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); | 483 | write_seqcount_end(&fsyscall_gtod_data.seq); |
488 | } | 484 | } |
489 | 485 | ||
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 815285bcaceb..8b38be2de9e1 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -5,13 +5,8 @@ | |||
5 | #include <linux/clocksource.h> | 5 | #include <linux/clocksource.h> |
6 | 6 | ||
7 | struct vsyscall_gtod_data { | 7 | struct vsyscall_gtod_data { |
8 | seqlock_t lock; | 8 | seqcount_t seq; |
9 | 9 | ||
10 | /* open coded 'struct timespec' */ | ||
11 | time_t wall_time_sec; | ||
12 | u32 wall_time_nsec; | ||
13 | |||
14 | struct timezone sys_tz; | ||
15 | struct { /* extract of a clocksource struct */ | 10 | struct { /* extract of a clocksource struct */ |
16 | int vclock_mode; | 11 | int vclock_mode; |
17 | cycle_t cycle_last; | 12 | cycle_t cycle_last; |
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data { | |||
19 | u32 mult; | 14 | u32 mult; |
20 | u32 shift; | 15 | u32 shift; |
21 | } clock; | 16 | } clock; |
22 | struct timespec wall_to_monotonic; | 17 | |
18 | /* open coded 'struct timespec' */ | ||
19 | time_t wall_time_sec; | ||
20 | u32 wall_time_nsec; | ||
21 | u32 monotonic_time_nsec; | ||
22 | time_t monotonic_time_sec; | ||
23 | |||
24 | struct timezone sys_tz; | ||
23 | struct timespec wall_time_coarse; | 25 | struct timespec wall_time_coarse; |
26 | struct timespec monotonic_time_coarse; | ||
24 | }; | 27 | }; |
25 | extern struct vsyscall_gtod_data vsyscall_gtod_data; | 28 | extern struct vsyscall_gtod_data vsyscall_gtod_data; |
26 | 29 | ||
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 899a03f2d181..fc0a147e3727 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void) | |||
933 | clocksource_tsc.rating = 0; | 933 | clocksource_tsc.rating = 0; |
934 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; | 934 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; |
935 | } | 935 | } |
936 | |||
937 | /* | ||
938 | * Trust the results of the earlier calibration on systems | ||
939 | * exporting a reliable TSC. | ||
940 | */ | ||
941 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { | ||
942 | clocksource_register_khz(&clocksource_tsc, tsc_khz); | ||
943 | return 0; | ||
944 | } | ||
945 | |||
936 | schedule_delayed_work(&tsc_irqwork, 0); | 946 | schedule_delayed_work(&tsc_irqwork, 0); |
937 | return 0; | 947 | return 0; |
938 | } | 948 | } |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index b07ba9393564..d5c69860b524 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -52,10 +52,7 @@ | |||
52 | #include "vsyscall_trace.h" | 52 | #include "vsyscall_trace.h" |
53 | 53 | ||
54 | DEFINE_VVAR(int, vgetcpu_mode); | 54 | DEFINE_VVAR(int, vgetcpu_mode); |
55 | DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = | 55 | DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data); |
56 | { | ||
57 | .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), | ||
58 | }; | ||
59 | 56 | ||
60 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; | 57 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; |
61 | 58 | ||
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup); | |||
80 | 77 | ||
81 | void update_vsyscall_tz(void) | 78 | void update_vsyscall_tz(void) |
82 | { | 79 | { |
83 | unsigned long flags; | ||
84 | |||
85 | write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); | ||
86 | /* sys_tz has changed */ | ||
87 | vsyscall_gtod_data.sys_tz = sys_tz; | 80 | vsyscall_gtod_data.sys_tz = sys_tz; |
88 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | ||
89 | } | 81 | } |
90 | 82 | ||
91 | void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, | 83 | void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, |
92 | struct clocksource *clock, u32 mult) | 84 | struct clocksource *clock, u32 mult) |
93 | { | 85 | { |
94 | unsigned long flags; | 86 | struct timespec monotonic; |
95 | 87 | ||
96 | write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); | 88 | write_seqcount_begin(&vsyscall_gtod_data.seq); |
97 | 89 | ||
98 | /* copy vsyscall data */ | 90 | /* copy vsyscall data */ |
99 | vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; | 91 | vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; |
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, | |||
101 | vsyscall_gtod_data.clock.mask = clock->mask; | 93 | vsyscall_gtod_data.clock.mask = clock->mask; |
102 | vsyscall_gtod_data.clock.mult = mult; | 94 | vsyscall_gtod_data.clock.mult = mult; |
103 | vsyscall_gtod_data.clock.shift = clock->shift; | 95 | vsyscall_gtod_data.clock.shift = clock->shift; |
96 | |||
104 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; | 97 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; |
105 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | 98 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; |
106 | vsyscall_gtod_data.wall_to_monotonic = *wtm; | 99 | |
100 | monotonic = timespec_add(*wall_time, *wtm); | ||
101 | vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec; | ||
102 | vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec; | ||
103 | |||
107 | vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); | 104 | vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); |
105 | vsyscall_gtod_data.monotonic_time_coarse = | ||
106 | timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm); | ||
108 | 107 | ||
109 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | 108 | write_seqcount_end(&vsyscall_gtod_data.seq); |
110 | } | 109 | } |
111 | 110 | ||
112 | static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, | 111 | static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, |
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 6bc0e723b6e8..885eff49d6ab 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -70,100 +70,98 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) | |||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz) | ||
74 | { | ||
75 | long ret; | ||
76 | |||
77 | asm("syscall" : "=a" (ret) : | ||
78 | "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | |||
73 | notrace static inline long vgetns(void) | 83 | notrace static inline long vgetns(void) |
74 | { | 84 | { |
75 | long v; | 85 | long v; |
76 | cycles_t cycles; | 86 | cycles_t cycles; |
77 | if (gtod->clock.vclock_mode == VCLOCK_TSC) | 87 | if (gtod->clock.vclock_mode == VCLOCK_TSC) |
78 | cycles = vread_tsc(); | 88 | cycles = vread_tsc(); |
79 | else | 89 | else if (gtod->clock.vclock_mode == VCLOCK_HPET) |
80 | cycles = vread_hpet(); | 90 | cycles = vread_hpet(); |
91 | else | ||
92 | return 0; | ||
81 | v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask; | 93 | v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask; |
82 | return (v * gtod->clock.mult) >> gtod->clock.shift; | 94 | return (v * gtod->clock.mult) >> gtod->clock.shift; |
83 | } | 95 | } |
84 | 96 | ||
85 | notrace static noinline int do_realtime(struct timespec *ts) | 97 | /* Code size doesn't matter (vdso is 4k anyway) and this is faster. */ |
98 | notrace static int __always_inline do_realtime(struct timespec *ts) | ||
86 | { | 99 | { |
87 | unsigned long seq, ns; | 100 | unsigned long seq, ns; |
101 | int mode; | ||
102 | |||
88 | do { | 103 | do { |
89 | seq = read_seqbegin(>od->lock); | 104 | seq = read_seqcount_begin(>od->seq); |
105 | mode = gtod->clock.vclock_mode; | ||
90 | ts->tv_sec = gtod->wall_time_sec; | 106 | ts->tv_sec = gtod->wall_time_sec; |
91 | ts->tv_nsec = gtod->wall_time_nsec; | 107 | ts->tv_nsec = gtod->wall_time_nsec; |
92 | ns = vgetns(); | 108 | ns = vgetns(); |
93 | } while (unlikely(read_seqretry(>od->lock, seq))); | 109 | } while (unlikely(read_seqcount_retry(>od->seq, seq))); |
110 | |||
94 | timespec_add_ns(ts, ns); | 111 | timespec_add_ns(ts, ns); |
95 | return 0; | 112 | return mode; |
96 | } | 113 | } |
97 | 114 | ||
98 | notrace static noinline int do_monotonic(struct timespec *ts) | 115 | notrace static int do_monotonic(struct timespec *ts) |
99 | { | 116 | { |
100 | unsigned long seq, ns, secs; | 117 | unsigned long seq, ns; |
118 | int mode; | ||
119 | |||
101 | do { | 120 | do { |
102 | seq = read_seqbegin(>od->lock); | 121 | seq = read_seqcount_begin(>od->seq); |
103 | secs = gtod->wall_time_sec; | 122 | mode = gtod->clock.vclock_mode; |
104 | ns = gtod->wall_time_nsec + vgetns(); | 123 | ts->tv_sec = gtod->monotonic_time_sec; |
105 | secs += gtod->wall_to_monotonic.tv_sec; | 124 | ts->tv_nsec = gtod->monotonic_time_nsec; |
106 | ns += gtod->wall_to_monotonic.tv_nsec; | 125 | ns = vgetns(); |
107 | } while (unlikely(read_seqretry(>od->lock, seq))); | 126 | } while (unlikely(read_seqcount_retry(>od->seq, seq))); |
108 | 127 | timespec_add_ns(ts, ns); | |
109 | /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec | ||
110 | * are all guaranteed to be nonnegative. | ||
111 | */ | ||
112 | while (ns >= NSEC_PER_SEC) { | ||
113 | ns -= NSEC_PER_SEC; | ||
114 | ++secs; | ||
115 | } | ||
116 | ts->tv_sec = secs; | ||
117 | ts->tv_nsec = ns; | ||
118 | 128 | ||
119 | return 0; | 129 | return mode; |
120 | } | 130 | } |
121 | 131 | ||
122 | notrace static noinline int do_realtime_coarse(struct timespec *ts) | 132 | notrace static int do_realtime_coarse(struct timespec *ts) |
123 | { | 133 | { |
124 | unsigned long seq; | 134 | unsigned long seq; |
125 | do { | 135 | do { |
126 | seq = read_seqbegin(>od->lock); | 136 | seq = read_seqcount_begin(>od->seq); |
127 | ts->tv_sec = gtod->wall_time_coarse.tv_sec; | 137 | ts->tv_sec = gtod->wall_time_coarse.tv_sec; |
128 | ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; | 138 | ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; |
129 | } while (unlikely(read_seqretry(>od->lock, seq))); | 139 | } while (unlikely(read_seqcount_retry(>od->seq, seq))); |
130 | return 0; | 140 | return 0; |
131 | } | 141 | } |
132 | 142 | ||
133 | notrace static noinline int do_monotonic_coarse(struct timespec *ts) | 143 | notrace static int do_monotonic_coarse(struct timespec *ts) |
134 | { | 144 | { |
135 | unsigned long seq, ns, secs; | 145 | unsigned long seq; |
136 | do { | 146 | do { |
137 | seq = read_seqbegin(>od->lock); | 147 | seq = read_seqcount_begin(>od->seq); |
138 | secs = gtod->wall_time_coarse.tv_sec; | 148 | ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; |
139 | ns = gtod->wall_time_coarse.tv_nsec; | 149 | ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; |
140 | secs += gtod->wall_to_monotonic.tv_sec; | 150 | } while (unlikely(read_seqcount_retry(>od->seq, seq))); |
141 | ns += gtod->wall_to_monotonic.tv_nsec; | ||
142 | } while (unlikely(read_seqretry(>od->lock, seq))); | ||
143 | |||
144 | /* wall_time_nsec and wall_to_monotonic.tv_nsec are | ||
145 | * guaranteed to be between 0 and NSEC_PER_SEC. | ||
146 | */ | ||
147 | if (ns >= NSEC_PER_SEC) { | ||
148 | ns -= NSEC_PER_SEC; | ||
149 | ++secs; | ||
150 | } | ||
151 | ts->tv_sec = secs; | ||
152 | ts->tv_nsec = ns; | ||
153 | 151 | ||
154 | return 0; | 152 | return 0; |
155 | } | 153 | } |
156 | 154 | ||
157 | notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) | 155 | notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) |
158 | { | 156 | { |
157 | int ret = VCLOCK_NONE; | ||
158 | |||
159 | switch (clock) { | 159 | switch (clock) { |
160 | case CLOCK_REALTIME: | 160 | case CLOCK_REALTIME: |
161 | if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) | 161 | ret = do_realtime(ts); |
162 | return do_realtime(ts); | ||
163 | break; | 162 | break; |
164 | case CLOCK_MONOTONIC: | 163 | case CLOCK_MONOTONIC: |
165 | if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) | 164 | ret = do_monotonic(ts); |
166 | return do_monotonic(ts); | ||
167 | break; | 165 | break; |
168 | case CLOCK_REALTIME_COARSE: | 166 | case CLOCK_REALTIME_COARSE: |
169 | return do_realtime_coarse(ts); | 167 | return do_realtime_coarse(ts); |
@@ -171,32 +169,33 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) | |||
171 | return do_monotonic_coarse(ts); | 169 | return do_monotonic_coarse(ts); |
172 | } | 170 | } |
173 | 171 | ||
174 | return vdso_fallback_gettime(clock, ts); | 172 | if (ret == VCLOCK_NONE) |
173 | return vdso_fallback_gettime(clock, ts); | ||
174 | return 0; | ||
175 | } | 175 | } |
176 | int clock_gettime(clockid_t, struct timespec *) | 176 | int clock_gettime(clockid_t, struct timespec *) |
177 | __attribute__((weak, alias("__vdso_clock_gettime"))); | 177 | __attribute__((weak, alias("__vdso_clock_gettime"))); |
178 | 178 | ||
179 | notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | 179 | notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) |
180 | { | 180 | { |
181 | long ret; | 181 | long ret = VCLOCK_NONE; |
182 | if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) { | 182 | |
183 | if (likely(tv != NULL)) { | 183 | if (likely(tv != NULL)) { |
184 | BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != | 184 | BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != |
185 | offsetof(struct timespec, tv_nsec) || | 185 | offsetof(struct timespec, tv_nsec) || |
186 | sizeof(*tv) != sizeof(struct timespec)); | 186 | sizeof(*tv) != sizeof(struct timespec)); |
187 | do_realtime((struct timespec *)tv); | 187 | ret = do_realtime((struct timespec *)tv); |
188 | tv->tv_usec /= 1000; | 188 | tv->tv_usec /= 1000; |
189 | } | ||
190 | if (unlikely(tz != NULL)) { | ||
191 | /* Avoid memcpy. Some old compilers fail to inline it */ | ||
192 | tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest; | ||
193 | tz->tz_dsttime = gtod->sys_tz.tz_dsttime; | ||
194 | } | ||
195 | return 0; | ||
196 | } | 189 | } |
197 | asm("syscall" : "=a" (ret) : | 190 | if (unlikely(tz != NULL)) { |
198 | "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); | 191 | /* Avoid memcpy. Some old compilers fail to inline it */ |
199 | return ret; | 192 | tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest; |
193 | tz->tz_dsttime = gtod->sys_tz.tz_dsttime; | ||
194 | } | ||
195 | |||
196 | if (ret == VCLOCK_NONE) | ||
197 | return vdso_fallback_gtod(tv, tz); | ||
198 | return 0; | ||
200 | } | 199 | } |
201 | int gettimeofday(struct timeval *, struct timezone *) | 200 | int gettimeofday(struct timeval *, struct timezone *) |
202 | __attribute__((weak, alias("__vdso_gettimeofday"))); | 201 | __attribute__((weak, alias("__vdso_gettimeofday"))); |