author     Rik van Riel <riel@redhat.com>    2016-10-04 20:34:34 -0400
committer  Ingo Molnar <mingo@kernel.org>    2016-10-07 05:14:40 -0400
commit     3913cc3507575273beb165a5e027a081913ed507
tree       639a7c648ea033aae514ea414f35eca8502f8ecc
parent     c592b57347069abfc0dcad3b3a302cf882602597
x86/fpu: Remove struct fpu::counter
With the lazy FPU code gone, we no longer use the counter field
in struct fpu for anything. Get rid of it.
Signed-off-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: pbonzini@redhat.com
Link: http://lkml.kernel.org/r/1475627678-20788-6-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
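For context on what the removed field did, here is a minimal userspace model of the heuristic it implemented: count consecutive context switches during which a task keeps using the FPU, and switch from lazy to eager restore once a threshold is crossed. The threshold of 5 follows my reading of the pre-removal switch_fpu_prepare() code; the struct and function names below are illustrative only, not kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Hypothetical model of the removed fpu::counter heuristic.
     * Names and threshold are illustrative, not kernel API.
     */
    #define EAGER_THRESHOLD 5

    struct fpu_model {
            unsigned char counter;  /* same width as the removed field */
            bool fpstate_active;    /* task has FPU state to restore */
    };

    /* Preload (eagerly restore) once the task has used the FPU
     * for more than EAGER_THRESHOLD consecutive context switches. */
    static bool want_eager_preload(const struct fpu_model *f)
    {
            return f->fpstate_active && f->counter > EAGER_THRESHOLD;
    }

    int main(void)
    {
            struct fpu_model f = { .counter = 0, .fpstate_active = true };
            int sw;

            for (sw = 1; sw <= 8; sw++) {
                    f.counter++;    /* one more FPU-using context switch */
                    printf("switch %d: counter=%d eager=%d\n",
                           sw, f.counter, want_eager_preload(&f));
            }
            return 0;
    }

Compiled with any C compiler, this prints eager=0 for the first five switches and eager=1 from the sixth on, which is the lazy-to-eager transition the diff below removes.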
 arch/x86/include/asm/fpu/internal.h |  3 ---
 arch/x86/include/asm/fpu/types.h    | 11 -----------
 arch/x86/include/asm/trace/fpu.h    |  5 +----
 arch/x86/kernel/fpu/core.c          |  3 ---
 4 files changed, 1 insertion(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 7801d32347a2..499d6ed0e376 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -581,16 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 
                 /* Don't change CR0.TS if we just switch! */
                 if (fpu.preload) {
-                        new_fpu->counter++;
                         __fpregs_activate(new_fpu);
                         trace_x86_fpu_regs_activated(new_fpu);
                         prefetch(&new_fpu->state);
                 }
         } else {
-                old_fpu->counter = 0;
                 old_fpu->last_cpu = -1;
                 if (fpu.preload) {
-                        new_fpu->counter++;
                         if (fpu_want_lazy_restore(new_fpu, cpu))
                                 fpu.preload = 0;
                         else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486b02f9..e31332d6f0e8 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@ struct fpu {
         unsigned char fpregs_active;
 
         /*
-         * @counter:
-         *
-         * This counter contains the number of consecutive context switches
-         * during which the FPU stays used. If this is over a threshold, the
-         * lazy FPU restore logic becomes eager, to save the trap overhead.
-         * This is an unsigned char so that after 256 iterations the counter
-         * wraps and the context switch behavior turns lazy again; this is to
-         * deal with bursty apps that only use the FPU for a short time:
-         */
-        unsigned char counter;
-        /*
          * @state:
          *
          * In-memory copy of all FPU registers that we save/restore
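The comment deleted above documents why the field was an unsigned char: after 256 consecutive FPU-using context switches the counter was meant to wrap back to zero, dropping bursty FPU users below the eager threshold and back to lazy behavior. A standalone demonstration of that wrap, for reference (plain C modular arithmetic, nothing kernel-specific):

    #include <stdio.h>

    int main(void)
    {
            /* unsigned char arithmetic is modulo 256, so the 256th
             * increment wraps the counter back to 0 -- below any
             * eager threshold -- restoring lazy behavior. */
            unsigned char counter = 0;
            int i;

            for (i = 0; i < 256; i++)
                    counter++;
            printf("counter after 256 increments: %d\n", counter); /* 0 */
            return 0;
    }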
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1f5bf6..342e59789fcd 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
                 __field(struct fpu *, fpu)
                 __field(bool, fpregs_active)
                 __field(bool, fpstate_active)
-                __field(int, counter)
                 __field(u64, xfeatures)
                 __field(u64, xcomp_bv)
         ),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
                 __entry->fpu = fpu;
                 __entry->fpregs_active = fpu->fpregs_active;
                 __entry->fpstate_active = fpu->fpstate_active;
-                __entry->counter = fpu->counter;
                 if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                         __entry->xfeatures = fpu->state.xsave.header.xfeatures;
                         __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
                 }
         ),
-        TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+        TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
                 __entry->fpu,
                 __entry->fpregs_active,
                 __entry->fpstate_active,
-                __entry->counter,
                 __entry->xfeatures,
                 __entry->xcomp_bv
         )
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 036e14fe3b77..6a37d525bdbe 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -222,7 +222,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-        dst_fpu->counter = 0;
         dst_fpu->fpregs_active = 0;
         dst_fpu->last_cpu = -1;
 
@@ -430,7 +429,6 @@ void fpu__restore(struct fpu *fpu)
         trace_x86_fpu_before_restore(fpu);
         fpregs_activate(fpu);
         copy_kernel_to_fpregs(&fpu->state);
-        fpu->counter++;
         trace_x86_fpu_after_restore(fpu);
         kernel_fpu_enable();
 }
@@ -448,7 +446,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__drop(struct fpu *fpu)
 {
         preempt_disable();
-        fpu->counter = 0;
 
         if (fpu->fpregs_active) {
                 /* Ignore delayed exceptions from user space */