author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-28 00:11:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-28 00:11:26 -0500
commit	1b17366d695c8ab03f98d0155357e97a427e1dce (patch)
tree	d223c79cc33ca1d890d264a202a1dd9c29655039 /arch/powerpc/kernel/process.c
parent	d12de1ef5eba3adb88f8e9dd81b6a60349466378 (diff)
parent	7179ba52889bef7e5e23f72908270e1ab2b7fc6f (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
 "So here's my next branch for powerpc.  A bit late as I was on vacation
  last week.  It's mostly the same stuff that was in next already, I
  just added two patches today which are the wiring up of lockref for
  powerpc, which for some reason fell through the cracks last time and
  is trivial.

  The highlights are, in addition to a bunch of bug fixes:

   - Reworked Machine Check handling on kernels running without a
     hypervisor (or acting as a hypervisor).  Provides hooks to handle
     some errors in real mode such as TLB errors, handle SLB errors,
     etc...

   - Support for retrieving memory error information from the service
     processor on IBM servers running without a hypervisor and routing
     them to the memory poison infrastructure.

   - _PAGE_NUMA support on server processors

   - 32-bit BookE relocatable kernel support

   - FSL e6500 hardware tablewalk support

   - A bunch of new/revived board support

   - FSL e6500 deeper idle states and altivec powerdown support

  You'll notice a generic mm change here, it has been acked by the
  relevant authorities and is a pre-req for our _PAGE_NUMA support"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits)
  powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked()
  powerpc: Add support for the optimised lockref implementation
  powerpc/powernv: Call OPAL sync before kexec'ing
  powerpc/eeh: Escalate error on non-existing PE
  powerpc/eeh: Handle multiple EEH errors
  powerpc: Fix transactional FP/VMX/VSX unavailable handlers
  powerpc: Don't corrupt transactional state when using FP/VMX in kernel
  powerpc: Reclaim two unused thread_info flag bits
  powerpc: Fix races with irq_work
  Move precessing of MCE queued event out from syscall exit path.
  pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines
  powerpc: Make add_system_ram_resources() __init
  powerpc: add SATA_MV to ppc64_defconfig
  powerpc/powernv: Increase candidate fw image size
  powerpc: Add debug checks to catch invalid cpu-to-node mappings
  powerpc: Fix the setup of CPU-to-Node mappings during CPU online
  powerpc/iommu: Don't detach device without IOMMU group
  powerpc/eeh: Hotplug improvement
  powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space
  powerpc/eeh: Add restore_config operation
  ...
Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r--	arch/powerpc/kernel/process.c	177
1 file changed, 162 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4a96556fd2d4..64b7a6e61dd1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/user.h>
 #include <linux/elf.h>
-#include <linux/init.h>
 #include <linux/prctl.h>
 #include <linux/init_task.h>
 #include <linux/export.h>
@@ -74,6 +73,48 @@ struct task_struct *last_task_used_vsx = NULL;
 struct task_struct *last_task_used_spe = NULL;
 #endif
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+{
+	/*
+	 * If we are saving the current thread's registers, and the
+	 * thread is in a transactional state, set the TIF_RESTORE_TM
+	 * bit so that we know to restore the registers before
+	 * returning to userspace.
+	 */
+	if (tsk == current && tsk->thread.regs &&
+	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+	    !test_thread_flag(TIF_RESTORE_TM)) {
+		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		set_thread_flag(TIF_RESTORE_TM);
+	}
+
+	giveup_fpu(tsk);
+}
+
+void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+{
+	/*
+	 * If we are saving the current thread's registers, and the
+	 * thread is in a transactional state, set the TIF_RESTORE_TM
+	 * bit so that we know to restore the registers before
+	 * returning to userspace.
+	 */
+	if (tsk == current && tsk->thread.regs &&
+	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+	    !test_thread_flag(TIF_RESTORE_TM)) {
+		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		set_thread_flag(TIF_RESTORE_TM);
+	}
+
+	giveup_altivec(tsk);
+}
+
+#else
+#define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
+#define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
 #ifdef CONFIG_PPC_FPU
 /*
  * Make sure the floating-point register state in the
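
Both wrappers above key off MSR_TM_ACTIVE(), which is true while the thread's
MSR transaction-state (TS) bits indicate either a fully transactional or a
suspended transaction. A paraphrased sketch of those macros, modeled on
arch/powerpc/include/asm/reg.h (bit positions elided here; the header is
authoritative):

    /* Paraphrased sketch of arch/powerpc/include/asm/reg.h: the two MSR
     * "TS" bits encode the thread's transaction state.
     */
    #define MSR_TS_T	__MASK(MSR_TS_T_LG)	/* transactional */
    #define MSR_TS_S	__MASK(MSR_TS_S_LG)	/* suspended */
    #define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)	/* either state */

    #define MSR_TM_ACTIVE(x)	(((x) & MSR_TS_MASK) != 0)
    #define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
    #define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
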
@@ -102,13 +143,13 @@ void flush_fp_to_thread(struct task_struct *tsk)
 			 */
 			BUG_ON(tsk != current);
 #endif
-			giveup_fpu(tsk);
+			giveup_fpu_maybe_transactional(tsk);
 		}
 		preempt_enable();
 	}
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif
+#endif /* CONFIG_PPC_FPU */
 
 void enable_kernel_fp(void)
 {
@@ -116,11 +157,11 @@ void enable_kernel_fp(void)
 
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu(current);
+		giveup_fpu_maybe_transactional(current);
 	else
 		giveup_fpu(NULL);	/* just enables FP for kernel */
 #else
-	giveup_fpu(last_task_used_math);
+	giveup_fpu_maybe_transactional(last_task_used_math);
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_fp);
@@ -132,11 +173,11 @@ void enable_kernel_altivec(void)
 
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-		giveup_altivec(current);
+		giveup_altivec_maybe_transactional(current);
 	else
 		giveup_altivec_notask();
 #else
-	giveup_altivec(last_task_used_altivec);
+	giveup_altivec_maybe_transactional(last_task_used_altivec);
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
@@ -153,7 +194,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 #ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
 #endif
-			giveup_altivec(tsk);
+			giveup_altivec_maybe_transactional(tsk);
 		}
 		preempt_enable();
 	}
@@ -182,8 +223,8 @@ EXPORT_SYMBOL(enable_kernel_vsx);
 
 void giveup_vsx(struct task_struct *tsk)
 {
-	giveup_fpu(tsk);
-	giveup_altivec(tsk);
+	giveup_fpu_maybe_transactional(tsk);
+	giveup_altivec_maybe_transactional(tsk);
 	__giveup_vsx(tsk);
 }
 
@@ -479,7 +520,48 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
 		return false;
 	return true;
 }
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void tm_reclaim_thread(struct thread_struct *thr,
+			      struct thread_info *ti, uint8_t cause)
+{
+	unsigned long msr_diff = 0;
+
+	/*
+	 * If FP/VSX registers have been already saved to the
+	 * thread_struct, move them to the transact_fp array.
+	 * We clear the TIF_RESTORE_TM bit since after the reclaim
+	 * the thread will no longer be transactional.
+	 */
+	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
+		msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
+		if (msr_diff & MSR_FP)
+			memcpy(&thr->transact_fp, &thr->fp_state,
+			       sizeof(struct thread_fp_state));
+		if (msr_diff & MSR_VEC)
+			memcpy(&thr->transact_vr, &thr->vr_state,
+			       sizeof(struct thread_vr_state));
+		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
+		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
+	}
+
+	tm_reclaim(thr, thr->regs->msr, cause);
+
+	/* Having done the reclaim, we now have the checkpointed
+	 * FP/VSX values in the registers.  These might be valid
+	 * even if we have previously called enable_kernel_fp() or
+	 * flush_fp_to_thread(), so update thr->regs->msr to
+	 * indicate their current validity.
+	 */
+	thr->regs->msr |= msr_diff;
+}
+
+void tm_reclaim_current(uint8_t cause)
+{
+	tm_enable();
+	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
+}
+
 static inline void tm_reclaim_task(struct task_struct *tsk)
 {
 	/* We have to work out if we're switching from/to a task that's in the
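
tm_reclaim_current() gives paths other than the context switch, notably the
transactional FP/VMX/VSX unavailable handlers fixed elsewhere in this pull, a
single entry point for reclaiming the current thread's checkpointed state. A
simplified sketch of such a caller, loosely modeled on fp_unavailable_tm() in
arch/powerpc/kernel/traps.c; the function name below is illustrative only, and
the real handler additionally deals with debug output and transactional VMX
reload:

    /* Sketch only, loosely modeled on fp_unavailable_tm(). Taking an FP
     * unavailable trap inside a transaction means the task first touched
     * FP after the transaction began.
     */
    void fp_unavailable_in_transaction(struct pt_regs *regs)
    {
    	/* Pull the checkpointed register state back into the
    	 * thread_struct and record the failure cause in TEXASR.
    	 */
    	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

    	/* Enable FP for the task, then recheckpoint so the hardware
    	 * checkpoint carries valid FP state when the transaction is
    	 * re-executed.
    	 */
    	regs->msr |= (MSR_FP | current->thread.fpexc_mode);
    	tm_recheckpoint(&current->thread, MSR_FP);
    }
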
@@ -502,9 +584,11 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
 
 	/* Stash the original thread MSR, as giveup_fpu et al will
 	 * modify it.  We hold onto it to see whether the task used
-	 * FP & vector regs.
+	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
+	 * tm_orig_msr is already set.
 	 */
-	thr->tm_orig_msr = thr->regs->msr;
+	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
+		thr->tm_orig_msr = thr->regs->msr;
 
 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -512,7 +596,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
 		 thr->regs->ccr, thr->regs->msr,
 		 thr->regs->trap);
 
-	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);
+	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
 
 	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
 		 tsk->pid);
@@ -588,6 +672,43 @@ static inline void __switch_to_tm(struct task_struct *prev)
 		tm_reclaim_task(prev);
 	}
 }
+
+/*
+ * This is called if we are on the way out to userspace and the
+ * TIF_RESTORE_TM flag is set.  It checks if we need to reload
+ * FP and/or vector state and does so if necessary.
+ * If userspace is inside a transaction (whether active or
+ * suspended) and FP/VMX/VSX instructions have ever been enabled
+ * inside that transaction, then we have to keep them enabled
+ * and keep the FP/VMX/VSX state loaded while ever the transaction
+ * continues.  The reason is that if we didn't, and subsequently
+ * got a FP/VMX/VSX unavailable interrupt inside a transaction,
+ * we don't know whether it's the same transaction, and thus we
+ * don't know which of the checkpointed state and the transactional
+ * state to use.
+ */
+void restore_tm_state(struct pt_regs *regs)
+{
+	unsigned long msr_diff;
+
+	clear_thread_flag(TIF_RESTORE_TM);
+	if (!MSR_TM_ACTIVE(regs->msr))
+		return;
+
+	msr_diff = current->thread.tm_orig_msr & ~regs->msr;
+	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+	if (msr_diff & MSR_FP) {
+		fp_enable();
+		load_fp_state(&current->thread.fp_state);
+		regs->msr |= current->thread.fpexc_mode;
+	}
+	if (msr_diff & MSR_VEC) {
+		vec_enable();
+		load_vr_state(&current->thread.vr_state);
+	}
+	regs->msr |= msr_diff;
+}
+
 #else
 #define tm_recheckpoint_new_task(new)
 #define __switch_to_tm(prev)
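
restore_tm_state() is the consumer of TIF_RESTORE_TM on the way back out to
userspace; in this series the actual test sits in the assembly return path
(arch/powerpc/kernel/entry_64.S). A C-flavoured sketch of that wiring, with an
illustrative function name rather than code from the patch:

    /* Sketch of the exit-to-user check; the real test is performed in
     * assembly in arch/powerpc/kernel/entry_64.S.
     */
    static void sketch_exit_to_user(struct pt_regs *regs,
    				unsigned long thread_flags)
    {
    	/* If giveup_*_maybe_transactional() flushed FP/VMX state while
    	 * a transaction was active, reload it before returning so a
    	 * later unavailable trap cannot confuse checkpointed state
    	 * with transactional state.
    	 */
    	if (thread_flags & _TIF_RESTORE_TM)
    		restore_tm_state(regs);
    }
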
@@ -1175,6 +1296,19 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 	if (val & PR_FP_EXC_SW_ENABLE) {
 #ifdef CONFIG_SPE
 		if (cpu_has_feature(CPU_FTR_SPE)) {
+			/*
+			 * When the sticky exception bits are set
+			 * directly by userspace, it must call prctl
+			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+			 * in the existing prctl settings) or
+			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+			 * the bits being set).  <fenv.h> functions
+			 * saving and restoring the whole
+			 * floating-point environment need to do so
+			 * anyway to restore the prctl settings from
+			 * the saved environment.
+			 */
+			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
 			tsk->thread.fpexc_mode = val &
 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
 			return 0;
@@ -1206,9 +1340,22 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 
 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
 #ifdef CONFIG_SPE
-		if (cpu_has_feature(CPU_FTR_SPE))
+		if (cpu_has_feature(CPU_FTR_SPE)) {
+			/*
+			 * When the sticky exception bits are set
+			 * directly by userspace, it must call prctl
+			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+			 * in the existing prctl settings) or
+			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+			 * the bits being set).  <fenv.h> functions
+			 * saving and restoring the whole
+			 * floating-point environment need to do so
+			 * anyway to restore the prctl settings from
+			 * the saved environment.
+			 */
+			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
 			val = tsk->thread.fpexc_mode;
-		else
+		} else
 			return -EINVAL;
 #else
 		return -EINVAL;
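
Both fpexc hunks encode a userspace contract: a program that sets SPEFSCR's
sticky exception bits directly must round-trip through prctl so the kernel
resamples the register into thread.spefscr_last. A minimal userspace sketch of
that round-trip, assuming SPE hardware (on other CPUs the kernel rejects
PR_FP_EXC_SW_ENABLE with EINVAL); error handling kept deliberately small:

    /* Minimal sketch: re-assert software FP exception handling so the
     * kernel snapshots SPEFSCR, as the comments above require.  Build
     * for a CONFIG_SPE target.
     */
    #include <stdio.h>
    #include <sys/prctl.h>	/* prctl(), PR_GET_FPEXC, PR_SET_FPEXC */

    int main(void)
    {
    	unsigned int mode;

    	/* PR_GET_FPEXC writes the current mode through the pointer. */
    	if (prctl(PR_GET_FPEXC, &mode) != 0) {
    		perror("PR_GET_FPEXC");
    		return 1;
    	}

    	/* Re-setting a mode that includes PR_FP_EXC_SW_ENABLE makes the
    	 * kernel resample SPEFSCR, so sticky bits written directly by
    	 * userspace are not lost.
    	 */
    	if (prctl(PR_SET_FPEXC, mode | PR_FP_EXC_SW_ENABLE) != 0) {
    		perror("PR_SET_FPEXC");
    		return 1;
    	}

    	printf("fpexc mode: %#x\n", mode);
    	return 0;
    }
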