aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2018-09-17 08:45:41 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-10-04 17:00:26 -0400
commitf3e839384164cf86faedd185b8f6024f73050f5e (patch)
tree30b32d2ddbbb98e9fdb4ba01f9b87d3827dc3775
parent6deec5bdef4518bd6524a47be9d621ff650d3ba4 (diff)
x86/vdso: Replace the clockid switch case
Now that the time getter functions use the clockid as index into the
storage array for the base time access, the switch case can be replaced.

 - Check for clockid >= MAX_CLOCKS and for negative clockid (CPU/FD)
   first and call the fallback function right away.

 - After establishing that clockid is < MAX_CLOCKS, convert the clockid
   to a bitmask.

 - Check for the supported high resolution and coarse functions by
   anding the bitmask of supported clocks and check whether a bit is set.

This completely avoids jump tables, reduces the number of conditionals
and makes the VDSO extensible for other clock ids.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Matt Rickard <matt@softrans.com.au>
Cc: Stephen Boyd <sboyd@kernel.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: devel@linuxdriverproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Juergen Gross <jgross@suse.com>
Link: https://lkml.kernel.org/r/20180917130707.574315796@linutronix.de
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c38
1 file changed, 18 insertions, 20 deletions
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index b27dea0e23af..672e50e35d6c 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -241,29 +241,27 @@ notrace static void do_coarse(clockid_t clk, struct timespec *ts)
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-	switch (clock) {
-	case CLOCK_REALTIME:
-		if (do_hres(CLOCK_REALTIME, ts) == VCLOCK_NONE)
-			goto fallback;
-		break;
-	case CLOCK_MONOTONIC:
-		if (do_hres(CLOCK_MONOTONIC, ts) == VCLOCK_NONE)
-			goto fallback;
-		break;
-	case CLOCK_REALTIME_COARSE:
-		do_coarse(CLOCK_REALTIME_COARSE, ts);
-		break;
-	case CLOCK_MONOTONIC_COARSE:
-		do_coarse(CLOCK_MONOTONIC_COARSE, ts);
-		break;
-	default:
-		goto fallback;
-	}
+	unsigned int msk;
 
-	return 0;
-fallback:
+	/* Sort out negative (CPU/FD) and invalid clocks */
+	if (unlikely((unsigned int) clock >= MAX_CLOCKS))
+		return vdso_fallback_gettime(clock, ts);
+
+	/*
+	 * Convert the clockid to a bitmask and use it to check which
+	 * clocks are handled in the VDSO directly.
+	 */
+	msk = 1U << clock;
+	if (likely(msk & VGTOD_HRES)) {
+		if (do_hres(clock, ts) != VCLOCK_NONE)
+			return 0;
+	} else if (msk & VGTOD_COARSE) {
+		do_coarse(clock, ts);
+		return 0;
+	}
 	return vdso_fallback_gettime(clock, ts);
 }
+
 int clock_gettime(clockid_t, struct timespec *)
 	__attribute__((weak, alias("__vdso_clock_gettime")));
 