author     Andy Lutomirski <luto@mit.edu>          2012-03-23 00:15:51 -0400
committer  John Stultz <john.stultz@linaro.org>    2012-03-23 19:49:33 -0400
commit     91ec87d57fc38c529034e853687dfb7756de5406 (patch)
tree       4f88d6a7e0221bdd791a1de6d4c36b17da8337a2
parent     88b28adf6fcdd6d10a1cfc7765bb200d7366a265 (diff)
x86-64: Simplify and optimize vdso clock_gettime monotonic variants
We used to store the wall-to-monotonic offset and the realtime base.
It's faster to precompute the monotonic base.

This is about a 3% speedup on Sandy Bridge for CLOCK_MONOTONIC.
It's much more impressive for CLOCK_MONOTONIC_COARSE.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: John Stultz <john.stultz@linaro.org>
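The idea, as a minimal standalone sketch: the timekeeper folds the wall time and the wall-to-monotonic offset into one precomputed monotonic base at update time, so the reader only adds the nanoseconds accumulated since the last update. This is illustration only, not the kernel code; the struct and function names below are invented for the example, and the seqcount retry loop the real vDSO reader uses is omitted.

/*
 * Simplified illustration only -- not the kernel code.  The names
 * gtod_example, update_example() and read_monotonic_example() are
 * made up for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct gtod_example {
	/* precomputed CLOCK_MONOTONIC base, written by the updater */
	int64_t monotonic_time_sec;
	int64_t monotonic_time_nsec;
};

/* updater: fold wall time + wall_to_monotonic into one base */
static void update_example(struct gtod_example *g,
			   int64_t wall_sec, int64_t wall_nsec,
			   int64_t wtm_sec, int64_t wtm_nsec)
{
	g->monotonic_time_sec  = wall_sec + wtm_sec;
	g->monotonic_time_nsec = wall_nsec + wtm_nsec;
	if (g->monotonic_time_nsec >= NSEC_PER_SEC) {
		g->monotonic_time_nsec -= NSEC_PER_SEC;
		g->monotonic_time_sec++;
	}
}

/* reader: add only the nanoseconds elapsed since the last update */
static void read_monotonic_example(const struct gtod_example *g,
				   int64_t delta_ns,
				   int64_t *sec, int64_t *nsec)
{
	*sec  = g->monotonic_time_sec;
	*nsec = g->monotonic_time_nsec + delta_ns;
	while (*nsec >= NSEC_PER_SEC) {
		*nsec -= NSEC_PER_SEC;
		(*sec)++;
	}
}

int main(void)
{
	struct gtod_example g;
	int64_t sec, nsec;

	/* arbitrary example values: wall time plus a negative offset */
	update_example(&g, 1332475000, 900000000, -1332400000, 250000000);
	read_monotonic_example(&g, 120000000, &sec, &nsec);
	printf("monotonic: %lld.%09lld\n", (long long)sec, (long long)nsec);
	return 0;
}

With the offset folded in at update time, the reader's per-call work drops to loading the base and adding the clocksource delta, which is where the speedup quoted above comes from.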
-rw-r--r--  arch/x86/include/asm/vgtod.h    15
-rw-r--r--  arch/x86/kernel/vsyscall_64.c   10
-rw-r--r--  arch/x86/vdso/vclock_gettime.c  38
3 files changed, 26 insertions(+), 37 deletions(-)
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 1f007178c813..8b38be2de9e1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -7,11 +7,6 @@
 struct vsyscall_gtod_data {
 	seqcount_t	seq;
 
-	/* open coded 'struct timespec' */
-	time_t		wall_time_sec;
-	u32		wall_time_nsec;
-
-	struct timezone sys_tz;
 	struct { /* extract of a clocksource struct */
 		int vclock_mode;
 		cycle_t	cycle_last;
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
 		u32	mult;
 		u32	shift;
 	} clock;
-	struct timespec	wall_to_monotonic;
+
+	/* open coded 'struct timespec' */
+	time_t		wall_time_sec;
+	u32		wall_time_nsec;
+	u32		monotonic_time_nsec;
+	time_t		monotonic_time_sec;
+
+	struct timezone sys_tz;
 	struct timespec wall_time_coarse;
+	struct timespec monotonic_time_coarse;
 };
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
 
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index cdc95a707cd1..4285f1f404c2 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -84,6 +84,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 			struct clocksource *clock, u32 mult)
 {
 	write_seqcount_begin(&vsyscall_gtod_data.seq);
+	struct timespec monotonic;
 
 	/* copy vsyscall data */
 	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
@@ -91,10 +92,17 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	vsyscall_gtod_data.clock.mask		= clock->mask;
 	vsyscall_gtod_data.clock.mult		= mult;
 	vsyscall_gtod_data.clock.shift		= clock->shift;
+
 	vsyscall_gtod_data.wall_time_sec	= wall_time->tv_sec;
 	vsyscall_gtod_data.wall_time_nsec	= wall_time->tv_nsec;
-	vsyscall_gtod_data.wall_to_monotonic	= *wtm;
+
+	monotonic = timespec_add(*wall_time, *wtm);
+	vsyscall_gtod_data.monotonic_time_sec	= monotonic.tv_sec;
+	vsyscall_gtod_data.monotonic_time_nsec	= monotonic.tv_nsec;
+
 	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();
+	vsyscall_gtod_data.monotonic_time_coarse =
+		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
 
 	write_seqcount_end(&vsyscall_gtod_data.seq);
 }
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 944c5e5d6b6a..6eea70b8f384 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -113,27 +113,17 @@ notrace static noinline int do_realtime(struct timespec *ts)
 
 notrace static noinline int do_monotonic(struct timespec *ts)
 {
-	unsigned long seq, ns, secs;
+	unsigned long seq, ns;
 	int mode;
 
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
-		secs = gtod->wall_time_sec;
-		ns = gtod->wall_time_nsec + vgetns();
-		secs += gtod->wall_to_monotonic.tv_sec;
-		ns += gtod->wall_to_monotonic.tv_nsec;
+		ts->tv_sec = gtod->monotonic_time_sec;
+		ts->tv_nsec = gtod->monotonic_time_nsec;
+		ns = vgetns();
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
-
-	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
-	 * are all guaranteed to be nonnegative.
-	 */
-	while (ns >= NSEC_PER_SEC) {
-		ns -= NSEC_PER_SEC;
-		++secs;
-	}
-	ts->tv_sec = secs;
-	ts->tv_nsec = ns;
+	timespec_add_ns(ts, ns);
 
 	return mode;
 }
@@ -151,25 +141,13 @@ notrace static noinline int do_realtime_coarse(struct timespec *ts)
 
 notrace static noinline int do_monotonic_coarse(struct timespec *ts)
 {
-	unsigned long seq, ns, secs;
+	unsigned long seq;
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
-		secs = gtod->wall_time_coarse.tv_sec;
-		ns = gtod->wall_time_coarse.tv_nsec;
-		secs += gtod->wall_to_monotonic.tv_sec;
-		ns += gtod->wall_to_monotonic.tv_nsec;
+		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
+		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
-	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
-	 * guaranteed to be between 0 and NSEC_PER_SEC.
-	 */
-	if (ns >= NSEC_PER_SEC) {
-		ns -= NSEC_PER_SEC;
-		++secs;
-	}
-	ts->tv_sec = secs;
-	ts->tv_nsec = ns;
-
 	return 0;
 }
 
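For context, a small user-space check of the path this patch touches: on x86-64, glibc resolves clock_gettime(CLOCK_MONOTONIC, ...) and clock_gettime(CLOCK_MONOTONIC_COARSE, ...) through the vDSO, so the calls below hit do_monotonic()/do_monotonic_coarse() without entering the kernel. The iteration count and timing harness are illustrative only, not a rigorous benchmark.

#include <stdio.h>
#include <time.h>

/* rough per-call cost of one clockid, in nanoseconds */
static double ns_per_call(clockid_t id, long iters)
{
	struct timespec begin, end, ts;
	long i;

	clock_gettime(CLOCK_MONOTONIC, &begin);
	for (i = 0; i < iters; i++)
		clock_gettime(id, &ts);
	clock_gettime(CLOCK_MONOTONIC, &end);

	return ((end.tv_sec - begin.tv_sec) * 1e9 +
		(end.tv_nsec - begin.tv_nsec)) / iters;
}

int main(void)
{
	const long iters = 10 * 1000 * 1000;

	printf("CLOCK_MONOTONIC:        %.1f ns/call\n",
	       ns_per_call(CLOCK_MONOTONIC, iters));
	printf("CLOCK_MONOTONIC_COARSE: %.1f ns/call\n",
	       ns_per_call(CLOCK_MONOTONIC_COARSE, iters));
	return 0;
}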