aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include/asm/i387.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-02-16 16:33:12 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-02-16 16:33:12 -0500
commit6d59d7a9f5b723a7ac1925c136e93ec83c0c3043 (patch)
tree9478d5c1cc3a3f85cc937d8d6ddaad926af7e830 /arch/x86/include/asm/i387.h
parentb6c66418dcad0fcf83cd1d0a39482db37bf4fc41 (diff)
i387: don't ever touch TS_USEDFPU directly, use helper functions
This creates three helper functions that do the TS_USEDFPU accesses, and makes everybody that used to do it by hand use those helpers instead. In addition, there are a couple of helper functions for the "change both CR0.TS and TS_USEDFPU at the same time" case, and the places that do that together have been changed to use those. That means that we have fewer random places that open-code this situation. The intent is partly to clarify the code without actually changing any semantics yet (since we clearly still have some hard-to-reproduce bug in this area), but also to make it much easier to use another approach entirely to caching the CR0.TS bit for software accesses. Right now we use a bit in the thread-info 'status' variable (this patch does not change that), but we might want to make it a full field of its own or even make it a per-cpu variable. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/include/asm/i387.h')
-rw-r--r--arch/x86/include/asm/i387.h75
1 file changed, 55 insertions, 20 deletions
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 1e12c2d087e4..548b2c07ac9a 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -280,6 +280,47 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
280} 280}
281 281
282/* 282/*
283 * Software FPU state helpers. Careful: these need to
284 * be preemption protection *and* they need to be
285 * properly paired with the CR0.TS changes!
286 */
287static inline int __thread_has_fpu(struct thread_info *ti)
288{
289 return ti->status & TS_USEDFPU;
290}
291
292/* Must be paired with an 'stts' after! */
293static inline void __thread_clear_has_fpu(struct thread_info *ti)
294{
295 ti->status &= ~TS_USEDFPU;
296}
297
298/* Must be paired with a 'clts' before! */
299static inline void __thread_set_has_fpu(struct thread_info *ti)
300{
301 ti->status |= TS_USEDFPU;
302}
303
304/*
305 * Encapsulate the CR0.TS handling together with the
306 * software flag.
307 *
308 * These generally need preemption protection to work,
309 * do try to avoid using these on their own.
310 */
311static inline void __thread_fpu_end(struct thread_info *ti)
312{
313 __thread_clear_has_fpu(ti);
314 stts();
315}
316
317static inline void __thread_fpu_begin(struct thread_info *ti)
318{
319 clts();
320 __thread_set_has_fpu(ti);
321}
322
323/*
283 * Signal frame handlers... 324 * Signal frame handlers...
284 */ 325 */
285extern int save_i387_xstate(void __user *buf); 326extern int save_i387_xstate(void __user *buf);
@@ -287,23 +328,21 @@ extern int restore_i387_xstate(void __user *buf);
287 328
288static inline void __unlazy_fpu(struct task_struct *tsk) 329static inline void __unlazy_fpu(struct task_struct *tsk)
289{ 330{
290 if (task_thread_info(tsk)->status & TS_USEDFPU) { 331 if (__thread_has_fpu(task_thread_info(tsk))) {
291 __save_init_fpu(tsk); 332 __save_init_fpu(tsk);
292 task_thread_info(tsk)->status &= ~TS_USEDFPU; 333 __thread_fpu_end(task_thread_info(tsk));
293 stts();
294 } else 334 } else
295 tsk->fpu_counter = 0; 335 tsk->fpu_counter = 0;
296} 336}
297 337
298static inline void __clear_fpu(struct task_struct *tsk) 338static inline void __clear_fpu(struct task_struct *tsk)
299{ 339{
300 if (task_thread_info(tsk)->status & TS_USEDFPU) { 340 if (__thread_has_fpu(task_thread_info(tsk))) {
301 /* Ignore delayed exceptions from user space */ 341 /* Ignore delayed exceptions from user space */
302 asm volatile("1: fwait\n" 342 asm volatile("1: fwait\n"
303 "2:\n" 343 "2:\n"
304 _ASM_EXTABLE(1b, 2b)); 344 _ASM_EXTABLE(1b, 2b));
305 task_thread_info(tsk)->status &= ~TS_USEDFPU; 345 __thread_fpu_end(task_thread_info(tsk));
306 stts();
307 } 346 }
308} 347}
309 348
@@ -311,14 +350,14 @@ static inline void __clear_fpu(struct task_struct *tsk)
311 * Were we in an interrupt that interrupted kernel mode? 350 * Were we in an interrupt that interrupted kernel mode?
312 * 351 *
313 * We can do a kernel_fpu_begin/end() pair *ONLY* if that 352 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
314 * pair does nothing at all: TS_USEDFPU must be clear (so 353 * pair does nothing at all: the thread must not have fpu (so
315 * that we don't try to save the FPU state), and TS must 354 * that we don't try to save the FPU state), and TS must
316 * be set (so that the clts/stts pair does nothing that is 355 * be set (so that the clts/stts pair does nothing that is
317 * visible in the interrupted kernel thread). 356 * visible in the interrupted kernel thread).
318 */ 357 */
319static inline bool interrupted_kernel_fpu_idle(void) 358static inline bool interrupted_kernel_fpu_idle(void)
320{ 359{
321 return !(current_thread_info()->status & TS_USEDFPU) && 360 return !__thread_has_fpu(current_thread_info()) &&
322 (read_cr0() & X86_CR0_TS); 361 (read_cr0() & X86_CR0_TS);
323} 362}
324 363
@@ -356,9 +395,9 @@ static inline void kernel_fpu_begin(void)
356 395
357 WARN_ON_ONCE(!irq_fpu_usable()); 396 WARN_ON_ONCE(!irq_fpu_usable());
358 preempt_disable(); 397 preempt_disable();
359 if (me->status & TS_USEDFPU) { 398 if (__thread_has_fpu(me)) {
360 __save_init_fpu(me->task); 399 __save_init_fpu(me->task);
361 me->status &= ~TS_USEDFPU; 400 __thread_clear_has_fpu(me);
362 /* We do 'stts()' in kernel_fpu_end() */ 401 /* We do 'stts()' in kernel_fpu_end() */
363 } else 402 } else
364 clts(); 403 clts();
@@ -422,24 +461,21 @@ static inline void irq_ts_restore(int TS_state)
422 */ 461 */
423static inline int user_has_fpu(void) 462static inline int user_has_fpu(void)
424{ 463{
425 return current_thread_info()->status & TS_USEDFPU; 464 return __thread_has_fpu(current_thread_info());
426} 465}
427 466
428static inline void user_fpu_end(void) 467static inline void user_fpu_end(void)
429{ 468{
430 preempt_disable(); 469 preempt_disable();
431 current_thread_info()->status &= ~TS_USEDFPU; 470 __thread_fpu_end(current_thread_info());
432 stts();
433 preempt_enable(); 471 preempt_enable();
434} 472}
435 473
436static inline void user_fpu_begin(void) 474static inline void user_fpu_begin(void)
437{ 475{
438 preempt_disable(); 476 preempt_disable();
439 if (!user_has_fpu()) { 477 if (!user_has_fpu())
440 clts(); 478 __thread_fpu_begin(current_thread_info());
441 current_thread_info()->status |= TS_USEDFPU;
442 }
443 preempt_enable(); 479 preempt_enable();
444} 480}
445 481
@@ -448,11 +484,10 @@ static inline void user_fpu_begin(void)
448 */ 484 */
449static inline void save_init_fpu(struct task_struct *tsk) 485static inline void save_init_fpu(struct task_struct *tsk)
450{ 486{
451 WARN_ON_ONCE(!(task_thread_info(tsk)->status & TS_USEDFPU)); 487 WARN_ON_ONCE(!__thread_has_fpu(task_thread_info(tsk)));
452 preempt_disable(); 488 preempt_disable();
453 __save_init_fpu(tsk); 489 __save_init_fpu(tsk);
454 task_thread_info(tsk)->status &= ~TS_USEDFPU; 490 __thread_fpu_end(task_thread_info(tsk));
455 stts();
456 preempt_enable(); 491 preempt_enable();
457} 492}
458 493