Diffstat (limited to 'arch/powerpc/kernel/process.c')

 -rw-r--r--  arch/powerpc/kernel/process.c | 188
 1 file changed, 172 insertions, 16 deletions

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4a96556fd2d4..af064d28b365 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/user.h>
 #include <linux/elf.h>
-#include <linux/init.h>
 #include <linux/prctl.h>
 #include <linux/init_task.h>
 #include <linux/export.h>
@@ -74,6 +73,48 @@ struct task_struct *last_task_used_vsx = NULL;
 struct task_struct *last_task_used_spe = NULL;
 #endif
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+{
+        /*
+         * If we are saving the current thread's registers, and the
+         * thread is in a transactional state, set the TIF_RESTORE_TM
+         * bit so that we know to restore the registers before
+         * returning to userspace.
+         */
+        if (tsk == current && tsk->thread.regs &&
+            MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+            !test_thread_flag(TIF_RESTORE_TM)) {
+                tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+                set_thread_flag(TIF_RESTORE_TM);
+        }
+
+        giveup_fpu(tsk);
+}
+
+void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+{
+        /*
+         * If we are saving the current thread's registers, and the
+         * thread is in a transactional state, set the TIF_RESTORE_TM
+         * bit so that we know to restore the registers before
+         * returning to userspace.
+         */
+        if (tsk == current && tsk->thread.regs &&
+            MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+            !test_thread_flag(TIF_RESTORE_TM)) {
+                tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+                set_thread_flag(TIF_RESTORE_TM);
+        }
+
+        giveup_altivec(tsk);
+}
+
+#else
+#define giveup_fpu_maybe_transactional(tsk)      giveup_fpu(tsk)
+#define giveup_altivec_maybe_transactional(tsk)  giveup_altivec(tsk)
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
 #ifdef CONFIG_PPC_FPU
 /*
  * Make sure the floating-point register state in the
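
Note on the MSR_TM_ACTIVE() test used above: it is true while the thread is in transactional *or* suspended transactional state. A minimal sketch of the macro, assuming the MSR[TS] field encoding from arch/powerpc/include/asm/reg.h:

        /* Sketch: any nonzero MSR[TS] value means a transaction is in
         * progress or suspended; exact mask name assumed from reg.h. */
        #define MSR_TM_ACTIVE(x)        (((x) & MSR_TS_MASK) != 0)
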
@@ -102,13 +143,13 @@ void flush_fp_to_thread(struct task_struct *tsk)
                          */
                        BUG_ON(tsk != current);
 #endif
-                       giveup_fpu(tsk);
+                       giveup_fpu_maybe_transactional(tsk);
                }
                preempt_enable();
        }
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif
+#endif /* CONFIG_PPC_FPU */
 
 void enable_kernel_fp(void)
 {
@@ -116,11 +157,11 @@ void enable_kernel_fp(void)
 
 #ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-               giveup_fpu(current);
+               giveup_fpu_maybe_transactional(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
 #else
-       giveup_fpu(last_task_used_math);
+       giveup_fpu_maybe_transactional(last_task_used_math);
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_fp);
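
For context, the usual calling pattern for enable_kernel_fp() looks roughly like the sketch below (not part of this patch; do_fp_work() is a hypothetical FP-using routine):

        preempt_disable();              /* FP state is per-CPU; no migration */
        enable_kernel_fp();             /* flush user FP state, set MSR_FP */
        do_fp_work();                   /* hypothetical FP-using routine */
        preempt_enable();

The point of the change is that the flush in the second step must now record tm_orig_msr and set TIF_RESTORE_TM when it steals state from an active transaction.
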
@@ -132,11 +173,11 @@ void enable_kernel_altivec(void)
 
 #ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-               giveup_altivec(current);
+               giveup_altivec_maybe_transactional(current);
        else
                giveup_altivec_notask();
 #else
-       giveup_altivec(last_task_used_altivec);
+       giveup_altivec_maybe_transactional(last_task_used_altivec);
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
@@ -153,7 +194,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 #ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
 #endif
-                       giveup_altivec(tsk);
+                       giveup_altivec_maybe_transactional(tsk);
                }
                preempt_enable();
        }
@@ -182,8 +223,8 @@ EXPORT_SYMBOL(enable_kernel_vsx);
 
 void giveup_vsx(struct task_struct *tsk)
 {
-       giveup_fpu(tsk);
-       giveup_altivec(tsk);
+       giveup_fpu_maybe_transactional(tsk);
+       giveup_altivec_maybe_transactional(tsk);
        __giveup_vsx(tsk);
 }
 
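
giveup_vsx() has to go through the transactional-aware FP and Altivec wrappers because the VSX register file overlays both of the others, so VSX state cannot be flushed independently. Background sketch (not from this patch):

        /* VSR[0..31] share their high 64 bits with FPR[0..31];
         * VSR[32..63] are the Altivec VR[0..31].  Giving up VSX
         * therefore implies giving up FP and Altivec as well. */
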
@@ -479,7 +520,48 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
                return false;
        return true;
 }
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void tm_reclaim_thread(struct thread_struct *thr,
+                              struct thread_info *ti, uint8_t cause)
+{
+       unsigned long msr_diff = 0;
+
+       /*
+        * If FP/VSX registers have been already saved to the
+        * thread_struct, move them to the transact_fp array.
+        * We clear the TIF_RESTORE_TM bit since after the reclaim
+        * the thread will no longer be transactional.
+        */
+       if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
+               msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
+               if (msr_diff & MSR_FP)
+                       memcpy(&thr->transact_fp, &thr->fp_state,
+                              sizeof(struct thread_fp_state));
+               if (msr_diff & MSR_VEC)
+                       memcpy(&thr->transact_vr, &thr->vr_state,
+                              sizeof(struct thread_vr_state));
+               clear_ti_thread_flag(ti, TIF_RESTORE_TM);
+               msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
+       }
+
+       tm_reclaim(thr, thr->regs->msr, cause);
+
+       /* Having done the reclaim, we now have the checkpointed
+        * FP/VSX values in the registers.  These might be valid
+        * even if we have previously called enable_kernel_fp() or
+        * flush_fp_to_thread(), so update thr->regs->msr to
+        * indicate their current validity.
+        */
+       thr->regs->msr |= msr_diff;
+}
+
+void tm_reclaim_current(uint8_t cause)
+{
+       tm_enable();
+       tm_reclaim_thread(&current->thread, current_thread_info(), cause);
+}
+
 static inline void tm_reclaim_task(struct task_struct *tsk)
 {
        /* We have to work out if we're switching from/to a task that's in the
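
A worked example of the msr_diff logic in tm_reclaim_thread(), with values assumed for illustration:

        /* Suppose the thread entered the kernel transactional with
         * tm_orig_msr containing MSR_FP | MSR_VEC, and an earlier
         * enable_kernel_fp() ran giveup_fpu_maybe_transactional(),
         * clearing MSR_FP in regs->msr and setting TIF_RESTORE_TM.
         * At reclaim time:
         *
         *      msr_diff = tm_orig_msr & ~regs->msr  ==  MSR_FP
         *
         * so only the FP image already saved in fp_state is copied into
         * transact_fp, and once tm_reclaim() has refilled the live FPRs
         * with checkpointed values, regs->msr |= MSR_FP marks them as
         * valid again. */
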
@@ -502,9 +584,11 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
 
        /* Stash the original thread MSR, as giveup_fpu et al will
         * modify it.  We hold onto it to see whether the task used
-        * FP & vector regs.
+        * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
+        * tm_orig_msr is already set.
         */
-       thr->tm_orig_msr = thr->regs->msr;
+       if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
+               thr->tm_orig_msr = thr->regs->msr;
 
        TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
                 "ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -512,7 +596,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
                 thr->regs->ccr, thr->regs->msr,
                 thr->regs->trap);
 
-       tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);
+       tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
 
        TM_DEBUG("--- tm_reclaim on pid %d complete\n",
                 tsk->pid);
@@ -588,6 +672,43 @@ static inline void __switch_to_tm(struct task_struct *prev)
                tm_reclaim_task(prev);
        }
 }
+
+/*
+ * This is called if we are on the way out to userspace and the
+ * TIF_RESTORE_TM flag is set.  It checks if we need to reload
+ * FP and/or vector state and does so if necessary.
+ * If userspace is inside a transaction (whether active or
+ * suspended) and FP/VMX/VSX instructions have ever been enabled
+ * inside that transaction, then we have to keep them enabled
+ * and keep the FP/VMX/VSX state loaded while ever the transaction
+ * continues.  The reason is that if we didn't, and subsequently
+ * got a FP/VMX/VSX unavailable interrupt inside a transaction,
+ * we don't know whether it's the same transaction, and thus we
+ * don't know which of the checkpointed state and the transactional
+ * state to use.
+ */
+void restore_tm_state(struct pt_regs *regs)
+{
+       unsigned long msr_diff;
+
+       clear_thread_flag(TIF_RESTORE_TM);
+       if (!MSR_TM_ACTIVE(regs->msr))
+               return;
+
+       msr_diff = current->thread.tm_orig_msr & ~regs->msr;
+       msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+       if (msr_diff & MSR_FP) {
+               fp_enable();
+               load_fp_state(&current->thread.fp_state);
+               regs->msr |= current->thread.fpexc_mode;
+       }
+       if (msr_diff & MSR_VEC) {
+               vec_enable();
+               load_vr_state(&current->thread.vr_state);
+       }
+       regs->msr |= msr_diff;
+}
+
 #else
 #define tm_recheckpoint_new_task(new)
 #define __switch_to_tm(prev)
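
restore_tm_state() is reached from the exception-return path once TIF_RESTORE_TM is seen. Expressed as C rather than the actual entry_64.S assembly, the hook amounts to the following (a sketch under that assumption):

        /* Sketch of the return-to-userspace check (really done in asm): */
        if (test_thread_flag(TIF_RESTORE_TM))
                restore_tm_state(regs);
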
@@ -690,7 +811,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
         * schedule DABR
         */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
-       if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+       if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
                set_breakpoint(&new->thread.hw_brk);
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
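
The added `!` fixes inverted logic: previously set_breakpoint() ran only when the new thread's breakpoint already matched the one installed on the CPU, i.e. exactly when reprogramming was unnecessary, and was skipped when it was needed. hw_brk_match() itself (its tail is visible in an earlier hunk) is a field-by-field comparison, roughly as follows (field names assumed):

        /* Sketch consistent with the tail seen above: */
        static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
                                        struct arch_hw_breakpoint *b)
        {
                if (a->address != b->address || a->type != b->type ||
                    a->len != b->len)
                        return false;
                return true;
        }
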
@@ -927,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);
+       /*
+        * Flush TM state out so we can copy it.  __switch_to_tm() does this
+        * flush but it removes the checkpointed state from the current CPU and
+        * transitions the CPU out of TM mode.  Hence we need to call
+        * tm_recheckpoint_new_task() (on the same task) to restore the
+        * checkpointed state back and the TM mode.
+        */
+       __switch_to_tm(src);
+       tm_recheckpoint_new_task(src);
 
        *dst = *src;
 
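
This hunk matters for fork() inside a transaction: duplicating the task must not strip the parent's checkpointed state. A hedged userspace sketch of the scenario, using GCC's HTM builtins (compile with -mhtm on a TM-capable CPU; the transaction will typically fail across the syscall, but the kernel must still keep the parent's checkpointed registers intact):

        #include <unistd.h>

        int main(void)
        {
                if (__builtin_tbegin(0)) {      /* enter transactional state */
                        fork();                 /* syscall while transactional */
                        __builtin_tend(0);
                }
                return 0;
        }
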
@@ -1175,6 +1305,19 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
        if (val & PR_FP_EXC_SW_ENABLE) {
 #ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
+                       /*
+                        * When the sticky exception bits are set
+                        * directly by userspace, it must call prctl
+                        * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+                        * in the existing prctl settings) or
+                        * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+                        * the bits being set).  <fenv.h> functions
+                        * saving and restoring the whole
+                        * floating-point environment need to do so
+                        * anyway to restore the prctl settings from
+                        * the saved environment.
+                        */
+                       tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
@@ -1206,9 +1349,22 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 
        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
 #ifdef CONFIG_SPE
-               if (cpu_has_feature(CPU_FTR_SPE))
+               if (cpu_has_feature(CPU_FTR_SPE)) {
+                       /*
+                        * When the sticky exception bits are set
+                        * directly by userspace, it must call prctl
+                        * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+                        * in the existing prctl settings) or
+                        * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+                        * the bits being set).  <fenv.h> functions
+                        * saving and restoring the whole
+                        * floating-point environment need to do so
+                        * anyway to restore the prctl settings from
+                        * the saved environment.
+                        */
+                       tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
                        val = tsk->thread.fpexc_mode;
-               else
+               } else
                        return -EINVAL;
 #else
                return -EINVAL;
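
The comment added in both set_fpexc_mode() and get_fpexc_mode() describes a userspace contract. A minimal sketch of what an <fenv.h>-style environment restore would do to honor it (helper name and error handling are illustrative only):

        #include <sys/prctl.h>

        /* After writing SPEFSCR sticky bits directly, re-issue the prctl
         * so the kernel resamples SPEFSCR into thread.spefscr_last. */
        int resync_fp_exc_mode(void)
        {
                unsigned int mode;

                if (prctl(PR_GET_FPEXC, &mode) != 0)
                        return -1;
                if (mode & PR_FP_EXC_SW_ENABLE)
                        return prctl(PR_SET_FPEXC, mode);
                return 0;
        }
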