author		Dave Hansen <dave.hansen@linux.intel.com>	2016-06-01 13:42:20 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-06-08 07:33:33 -0400
commit		d1898b733619bd46194bd25aa6452d238ff2dc4e (patch)
tree		4cc10df3594b68becae9a7dac03e35025aa5c7d1 /arch/x86/kernel
parent		c8ae067f2635be0f8c7e5db1bb74b757d623e05b (diff)
x86/fpu: Add tracepoints to dump FPU state at key points
I've been carrying this patch around for a bit and it's helped me
solve at least a couple of FPU-related bugs. In addition to using
it for debugging, I also dragged it out because using AVX (and
AVX2/AVX-512) can have serious power consequences for a modern
core. It's very important to be able to figure out who is using
it.
It's also insanely useful for seeing who is using a given
feature, like MPX or Memory Protection Keys. If, for instance,
you want to find all processes using protection keys, you can do:

	echo 'xfeatures & 0x200' > filter

since 0x200 (bit 9) is the protection keys feature bit.
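Spelling out the context around that one-liner -- a sketch assuming
the conventional tracefs mount point and the x86_fpu event group this
patch's trace header creates (the group name follows TRACE_SYSTEM,
which is an assumption here since the header itself is outside this
diff):

	# Conventional tracefs location; adjust if mounted elsewhere.
	cd /sys/kernel/debug/tracing

	# Match only FPU states with the protection-keys bit (0x200) set,
	# then enable the whole x86_fpu event group and watch the stream.
	echo 'xfeatures & 0x200' > events/x86_fpu/filter
	echo 1 > events/x86_fpu/enable
	cat trace_pipe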
Note that this touches the KVM code. KVM did a CREATE_TRACE_POINTS
and then included a bunch of random headers. If any one of those
headers had included other tracepoint headers, KVM would have
defined the *OTHER* tracepoints too. That's bogus, so move
CREATE_TRACE_POINTS to the right place, as sketched below.
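For readers unfamiliar with the tracepoint machinery, here is a
minimal illustration of the rule being applied (the file names are
real, but the snippet is illustrative, not a quote of the patched
code):

	/* Any consumer of the events just pulls in declarations: */
	#include <asm/trace/fpu.h>

	/*
	 * Exactly one .c file per trace header -- here it becomes
	 * arch/x86/kernel/fpu/core.c -- defines CREATE_TRACE_POINTS
	 * first, which turns that same include into the *definitions*
	 * of the events.
	 */
	#define CREATE_TRACE_POINTS
	#include <asm/trace/fpu.h>

	/*
	 * CREATE_TRACE_POINTS stays defined for the rest of the file,
	 * so any header included afterwards that itself pulls in a
	 * trace header would get its events defined here too.  That is
	 * the KVM mistake described above: define the macro, include
	 * the one trace header it is meant for, and only then include
	 * everything else.
	 */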
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20160601174220.3CDFB90E@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/fpu/core.c	| 18 ++++++++++++++++++
-rw-r--r--	arch/x86/kernel/fpu/signal.c	|  3 +++
2 files changed, 21 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 97027545a72d..7d564742e499 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -12,6 +12,9 @@
 
 #include <linux/hardirq.h>
 
+#define CREATE_TRACE_POINTS
+#include <asm/trace/fpu.h>
+
 /*
  * Represents the initial FPU state. It's mostly (but not completely) zeroes,
  * depending on the FPU hardware format:
@@ -192,6 +195,7 @@ void fpu__save(struct fpu *fpu)
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
 	preempt_disable();
+	trace_x86_fpu_before_save(fpu);
 	if (fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
 			if (use_eager_fpu())
@@ -200,6 +204,7 @@ void fpu__save(struct fpu *fpu)
 				fpregs_deactivate(fpu);
 		}
 	}
+	trace_x86_fpu_after_save(fpu);
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__save);
@@ -275,6 +280,9 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	}
 	preempt_enable();
 
+	trace_x86_fpu_copy_src(src_fpu);
+	trace_x86_fpu_copy_dst(dst_fpu);
+
 	return 0;
 }
 
@@ -288,7 +296,9 @@ void fpu__activate_curr(struct fpu *fpu)
 
 	if (!fpu->fpstate_active) {
 		fpstate_init(&fpu->state);
+		trace_x86_fpu_init_state(fpu);
 
+		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for the current task: */
 		fpu->fpstate_active = 1;
 	}
@@ -314,7 +324,9 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
 	} else {
 		if (!fpu->fpstate_active) {
 			fpstate_init(&fpu->state);
+			trace_x86_fpu_init_state(fpu);
 
+			trace_x86_fpu_activate_state(fpu);
 			/* Safe to do for current and for stopped child tasks: */
 			fpu->fpstate_active = 1;
 		}
@@ -347,7 +359,9 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
 		fpu->last_cpu = -1;
 	} else {
 		fpstate_init(&fpu->state);
+		trace_x86_fpu_init_state(fpu);
 
+		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for stopped child tasks: */
 		fpu->fpstate_active = 1;
 	}
@@ -432,9 +446,11 @@ void fpu__restore(struct fpu *fpu)
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
+	trace_x86_fpu_before_restore(fpu);
 	fpregs_activate(fpu);
 	copy_kernel_to_fpregs(&fpu->state);
 	fpu->counter++;
+	trace_x86_fpu_after_restore(fpu);
 	kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__restore);
@@ -463,6 +479,8 @@ void fpu__drop(struct fpu *fpu)
 
 	fpu->fpstate_active = 0;
 
+	trace_x86_fpu_dropped(fpu);
+
 	preempt_enable();
 }
 
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 31c6a60505e6..c6f2a3cee2c2 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -10,6 +10,7 @@
 #include <asm/fpu/regset.h>
 
 #include <asm/sigframe.h>
+#include <asm/trace/fpu.h>
 
 static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
 
@@ -282,6 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 			 */
 			state_size = sizeof(struct fxregs_state);
 			fx_only = 1;
+			trace_x86_fpu_xstate_check_failed(fpu);
 		} else {
 			state_size = fx_sw_user.xstate_size;
 			xfeatures = fx_sw_user.xfeatures;
@@ -311,6 +313,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
 	    __copy_from_user(&env, buf, sizeof(env))) {
 		fpstate_init(&fpu->state);
+		trace_x86_fpu_init_state(fpu);
 		err = -1;
 	} else {
 		sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
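The trace_x86_fpu_*() calls above are generated from
<asm/trace/fpu.h>, which lives outside this page's arch/x86/kernel
diffstat filter and so is not shown. As a rough sketch of what such a
header looks like, using the standard DECLARE_EVENT_CLASS /
DEFINE_EVENT pattern -- the field list below is an illustrative
assumption, not a quote of the real header:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM x86_fpu

	#if !defined(_TRACE_FPU_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_FPU_H

	#include <linux/tracepoint.h>

	/*
	 * One shared event class: every x86_fpu event records a snapshot
	 * of the struct fpu it was handed at the callsite.
	 */
	DECLARE_EVENT_CLASS(x86_fpu,
		TP_PROTO(struct fpu *fpu),
		TP_ARGS(fpu),

		TP_STRUCT__entry(
			__field(struct fpu *, fpu)
			__field(u64, xfeatures)
		),

		TP_fast_assign(
			__entry->fpu	   = fpu;
			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
		),

		TP_printk("fpu: %p xfeatures: %llx",
			  __entry->fpu, __entry->xfeatures)
	);

	/* Each tracepoint called in the diff is a one-liner on the class: */
	DEFINE_EVENT(x86_fpu, x86_fpu_before_save,
		TP_PROTO(struct fpu *fpu),
		TP_ARGS(fpu)
	);

	DEFINE_EVENT(x86_fpu, x86_fpu_after_save,
		TP_PROTO(struct fpu *fpu),
		TP_ARGS(fpu)
	);

	/*
	 * ...and so on for copy_src/copy_dst, init_state, activate_state,
	 * before/after_restore, dropped and xstate_check_failed.
	 */

	#endif /* _TRACE_FPU_H */

	/* This part must be outside the include guard. */
	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH asm/trace
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE fpu
	#include <trace/define_trace.h>

A recorded xfeatures field of this sort is what makes the
'xfeatures & 0x200' filter from the changelog work: the expression is
evaluated against the event's fields each time a tracepoint fires.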