author    Ingo Molnar <mingo@kernel.org>    2015-04-22 07:16:47 -0400
committer Ingo Molnar <mingo@kernel.org>    2015-05-19 09:47:18 -0400
commit    3a0aee4801d475b64a408539c01ec0d17d52192b (patch)
tree      d73a8d2e97069b135cc3a79508d5d8de37fde4c2
parent    93b90712c64ca2db4b39fcb2e7dffcf0d478468d (diff)
x86/fpu: Rename math_state_restore() to fpu__restore()
Move to the new fpu__*() namespace.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  Documentation/preempt-locking.txt  2
-rw-r--r--  arch/x86/include/asm/i387.h        2
-rw-r--r--  arch/x86/kernel/fpu/core.c         6
-rw-r--r--  arch/x86/kernel/fpu/xsave.c        2
-rw-r--r--  arch/x86/kernel/process_32.c       2
-rw-r--r--  arch/x86/kernel/process_64.c       2
-rw-r--r--  arch/x86/kernel/traps.c            2
-rw-r--r--  drivers/lguest/x86/core.c          4

8 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/Documentation/preempt-locking.txt b/Documentation/preempt-locking.txt
index 57883ca2498b..e89ce6624af2 100644
--- a/Documentation/preempt-locking.txt
+++ b/Documentation/preempt-locking.txt
@@ -48,7 +48,7 @@ preemption must be disabled around such regions.
 
 Note, some FPU functions are already explicitly preempt safe. For example,
 kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
-However, math_state_restore must be called with preemption disabled.
+However, fpu__restore() must be called with preemption disabled.
 
 
 RULE #3: Lock acquire and release must be performed by same task
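For context, the rule documented above is the same pattern the xsave.c hunk
further down in this patch follows: wrap the call in preempt_disable()/
preempt_enable(). A minimal caller sketch, assuming kernel context after this
patch is applied; example_restore_fpu() is a hypothetical helper, not part of
this commit:

	#include <linux/preempt.h>	/* preempt_disable() / preempt_enable() */
	#include <asm/i387.h>		/* declares fpu__restore() after this patch */

	/*
	 * Hypothetical helper: fpu__restore() reloads the current task's FPU
	 * registers, so the task must not be preempted (and possibly migrated
	 * to another CPU) while it runs.
	 */
	static void example_restore_fpu(void)
	{
		preempt_disable();
		fpu__restore();
		preempt_enable();
	}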
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index d6fc84440b73..c8ee395dd6c6 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -23,7 +23,7 @@ extern void fpstate_init(struct fpu *fpu);
 extern void fpu__flush_thread(struct task_struct *tsk);
 
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
-extern void math_state_restore(void);
+extern void fpu__restore(void);
 
 extern bool irq_fpu_usable(void);
 
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 7add2fb7369e..15c3cf7bd160 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -228,7 +228,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 }
 
 /*
- * 'math_state_restore()' saves the current math information in the
+ * 'fpu__restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
@@ -237,7 +237,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
  * Must be called with kernel preemption disabled (eg with local
  * local interrupts as in the case of do_device_not_available).
  */
-void math_state_restore(void)
+void fpu__restore(void)
 {
 	struct task_struct *tsk = current;
 
@@ -267,7 +267,7 @@ void math_state_restore(void)
 	}
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL_GPL(math_state_restore);
+EXPORT_SYMBOL_GPL(fpu__restore);
 
 void fpu__flush_thread(struct task_struct *tsk)
 {
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 163b5cc582ef..d913d5024901 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -404,7 +404,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	set_used_math();
 	if (use_eager_fpu()) {
 		preempt_disable();
-		math_state_restore();
+		fpu__restore();
 		preempt_enable();
 	}
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 84d647d4b14d..1a0edce626b2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -295,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
 	 * the GDT and LDT are properly updated, and must be
-	 * done before math_state_restore, so the TS bit is up
+	 * done before fpu__restore(), so the TS bit is up
 	 * to date.
 	 */
 	arch_end_context_switch(next_p);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ae6efeccb46e..99cc4b8589ad 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Leave lazy mode, flushing any hypercalls made here. This
 	 * must be done after loading TLS entries in the GDT but before
 	 * loading segments that might reference them, and and it must
-	 * be done before math_state_restore, so the TS bit is up to
+	 * be done before fpu__restore(), so the TS bit is up to
 	 * date.
 	 */
 	arch_end_context_switch(next_p);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 63c7fc3677b4..22ad90a40dbf 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -846,7 +846,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 		return;
 	}
 #endif
-	math_state_restore(); /* interrupts still off */
+	fpu__restore(); /* interrupts still off */
 #ifdef CONFIG_X86_32
 	conditional_sti(regs);
 #endif
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 30f2aef69d78..bcb534a5512d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -297,12 +297,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	/*
 	 * Similarly, if we took a trap because the Guest used the FPU,
 	 * we have to restore the FPU it expects to see.
-	 * math_state_restore() may sleep and we may even move off to
+	 * fpu__restore() may sleep and we may even move off to
 	 * a different CPU. So all the critical stuff should be done
 	 * before this.
 	 */
 	else if (cpu->regs->trapnum == 7 && !user_has_fpu())
-		math_state_restore();
+		fpu__restore();
 }
 
 /*H:130