about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Oleg Nesterov <oleg@redhat.com>        2015-01-15 14:20:05 -0500
committer Thomas Gleixner <tglx@linutronix.de>   2015-01-20 07:53:07 -0500
commit    33a3ebdc077fd85f1bf4d4586eea579b297461ae (patch)
tree      a3a6045efe0606f0dd0ae89df9f3db9cf7d84c06
parent    14e153ef75eecae8fd0738ffb42120f4962a00cd (diff)
x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()
Now that we have in_kernel_fpu we can remove __thread_clear_has_fpu() in __kernel_fpu_begin(). And this allows to replace the asymmetrical and nontrivial use_eager_fpu + tsk_used_math check in kernel_fpu_end() with the same __thread_has_fpu() check. The logic becomes really simple; if _begin() does save() then _end() needs restore(), this is controlled by __thread_has_fpu(). Otherwise they do clts/stts unless use_eager_fpu(). Not only this makes begin/end symmetrical and imo more understandable, potentially this allows to change irq_fpu_usable() to avoid all other checks except "in_kernel_fpu". Also, with this patch __kernel_fpu_end() does restore_fpu_checking() and WARNs if it fails instead of math_state_restore(). I think this looks better because we no longer need __thread_fpu_begin(), and it would be better to report the failure in this case. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: matt.fleming@intel.com Cc: bp@suse.de Cc: pbonzini@redhat.com Cc: luto@amacapital.net Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Suresh Siddha <sbsiddha@gmail.com> Link: http://lkml.kernel.org/r/20150115192005.GC27332@redhat.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/i387.c | 19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a81572338243..12088a3f459f 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -81,9 +81,7 @@ void __kernel_fpu_begin(void)
 	this_cpu_write(in_kernel_fpu, true);
 
 	if (__thread_has_fpu(me)) {
-		__thread_clear_has_fpu(me);
 		__save_init_fpu(me);
-		/* We do 'stts()' in __kernel_fpu_end() */
 	} else if (!use_eager_fpu()) {
 		this_cpu_write(fpu_owner_task, NULL);
 		clts();
@@ -93,17 +91,12 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 
 void __kernel_fpu_end(void)
 {
-	if (use_eager_fpu()) {
-		/*
-		 * For eager fpu, most the time, tsk_used_math() is true.
-		 * Restore the user math as we are done with the kernel usage.
-		 * At few instances during thread exit, signal handling etc,
-		 * tsk_used_math() is false. Those few places will take proper
-		 * actions, so we don't need to restore the math here.
-		 */
-		if (likely(tsk_used_math(current)))
-			math_state_restore();
-	} else {
+	struct task_struct *me = current;
+
+	if (__thread_has_fpu(me)) {
+		if (WARN_ON(restore_fpu_checking(me)))
+			drop_init_fpu(me);
+	} else if (!use_eager_fpu()) {
 		stts();
 	}
 