author      Cyril Bur <cyrilbur@gmail.com>            2016-02-29 01:53:48 -0500
committer   Michael Ellerman <mpe@ellerman.id.au>     2016-03-02 07:34:48 -0500
commit      de2a20aa7237b45d3c14a2505804a8daa95a8f53 (patch)
tree        13593cbcaa3c487fa1a8a9ce76c1bdc5e50b602f /arch/powerpc/kernel/process.c
parent      70fe3d980f5f14d8125869125ba9a0ea95e09c6b (diff)
powerpc: Prepare for splitting giveup_{fpu, altivec, vsx} in two
This prepares for the decoupling of saving {fpu,altivec,vsx} registers and
marking {fpu,altivec,vsx} as being unused by a thread.
Currently giveup_{fpu,altivec,vsx}() does both; however, optimisations to
task switching can be made if these two operations are decoupled.
save_all() will permit saving the registers to the thread structs while
leaving the thread's MSR bits enabled.
This patch introduces no functional change.
Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
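To make the intended split concrete, here is a minimal standalone C sketch (not kernel code) of the behavioural difference the series is building towards: a "giveup" helper saves the facility state and clears the corresponding MSR bit in the thread's saved state, while a "save" helper only copies the state out and leaves the MSR bit enabled. All names (toy_thread, TOY_MSR_FP, toy_giveup_fpu, toy_save_fpu) are illustrative stand-ins, not real kernel symbols.

/* Toy model only: "giveup" = save state + clear MSR bit,
 * "save" = save state, MSR bit stays set. Not kernel code. */
#include <stdio.h>

#define TOY_MSR_FP (1UL << 13)      /* stand-in for the real MSR_FP bit */

struct toy_thread {
	unsigned long msr;          /* facility bits the thread has enabled */
	double fp_state;            /* stand-in for saved FP register state */
};

static double live_fp_reg = 3.14;   /* pretend "live" FP register contents */

/* "giveup": copy the live state out AND mark FP as unused by the thread */
static void toy_giveup_fpu(struct toy_thread *t)
{
	t->fp_state = live_fp_reg;
	t->msr &= ~TOY_MSR_FP;
}

/* "save": copy the live state out but leave the MSR bit enabled */
static void toy_save_fpu(struct toy_thread *t)
{
	t->fp_state = live_fp_reg;
}

int main(void)
{
	struct toy_thread a = { .msr = TOY_MSR_FP, .fp_state = 0.0 };
	struct toy_thread b = { .msr = TOY_MSR_FP, .fp_state = 0.0 };

	toy_giveup_fpu(&a);
	toy_save_fpu(&b);

	printf("giveup: fp_state=%.2f, MSR_FP set=%d\n",
	       a.fp_state, (a.msr & TOY_MSR_FP) != 0);
	printf("save:   fp_state=%.2f, MSR_FP set=%d\n",
	       b.fp_state, (b.msr & TOY_MSR_FP) != 0);
	return 0;
}

In the kernel itself the MSR-bit clearing lives inside the __giveup_*() helpers, which is why this patch can introduce save_all() as a wrapper around them with no functional change; the pure "save" variants are split out in the follow-up patches.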
Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r--   arch/powerpc/kernel/process.c   31
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 55c1eb0465af..29da07fb3b4a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -444,12 +444,41 @@ void restore_math(struct pt_regs *regs)
 	regs->msr = msr;
 }
 
+void save_all(struct task_struct *tsk)
+{
+	unsigned long usermsr;
+
+	if (!tsk->thread.regs)
+		return;
+
+	usermsr = tsk->thread.regs->msr;
+
+	if ((usermsr & msr_all_available) == 0)
+		return;
+
+	msr_check_and_set(msr_all_available);
+
+	if (usermsr & MSR_FP)
+		__giveup_fpu(tsk);
+
+	if (usermsr & MSR_VEC)
+		__giveup_altivec(tsk);
+
+	if (usermsr & MSR_VSX)
+		__giveup_vsx(tsk);
+
+	if (usermsr & MSR_SPE)
+		__giveup_spe(tsk);
+
+	msr_check_and_clear(msr_all_available);
+}
+
 void flush_all_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
 		BUG_ON(tsk != current);
-		giveup_all(tsk);
+		save_all(tsk);
 
 #ifdef CONFIG_SPE
 		if (tsk->thread.regs->msr & MSR_SPE)