Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/ppc_ksyms.c |  2 --
 arch/powerpc/kernel/process.c   | 17 ++++++++++++-----
 2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 7bfa0f0121ff..e73b0699b5f0 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -182,11 +182,9 @@ EXPORT_SYMBOL(flush_tlb_kernel_range);
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(_tlbie);
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(last_task_used_altivec);
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
-EXPORT_SYMBOL(last_task_used_spe);
 EXPORT_SYMBOL(giveup_spe);
 #endif /* CONFIG_SPE */
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e3946769dd8e..ae316e9ed581 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -272,11 +272,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
 		giveup_altivec(prev);
-	/* Avoid the trap.  On smp this this never happens since
-	 * we don't set last_task_used_altivec -- Cort
-	 */
-	if (new->thread.regs && last_task_used_altivec == new)
-		new->thread.regs->msr |= MSR_VEC;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	/*
@@ -288,12 +283,24 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
 		giveup_spe(prev);
+#endif /* CONFIG_SPE */
+
+#else  /* CONFIG_SMP */
+#ifdef CONFIG_ALTIVEC
+	/* Avoid the trap.  On smp this this never happens since
+	 * we don't set last_task_used_altivec -- Cort
+	 */
+	if (new->thread.regs && last_task_used_altivec == new)
+		new->thread.regs->msr |= MSR_VEC;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
 	/* Avoid the trap.  On smp this this never happens since
 	 * we don't set last_task_used_spe
 	 */
 	if (new->thread.regs && last_task_used_spe == new)
 		new->thread.regs->msr |= MSR_SPE;
 #endif /* CONFIG_SPE */
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC64	/* for now */
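
For reference, a minimal sketch (reconstructed from the hunks above, with unrelated code elided; not a complete function) of how the AltiVec/SPE handoff in __switch_to() is arranged once this patch is applied: on CONFIG_SMP the outgoing task's live register state is flushed with giveup_altivec()/giveup_spe(), while the last_task_used_altivec/last_task_used_spe "avoid the trap" shortcut now exists only in the non-SMP branch, since those variables are not maintained on SMP.

/* Sketch of the post-patch layout inside __switch_to() (excerpt only,
 * reconstructed from the diff above). */
#ifdef CONFIG_SMP
#ifdef CONFIG_ALTIVEC
	/* Outgoing task used AltiVec: save its vector state now. */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/* Outgoing task used SPE: save its SPE state now. */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Incoming task still owns the AltiVec unit: keep MSR_VEC set so it
	 * does not take an AltiVec-unavailable trap on its first vector op. */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/* Same shortcut for SPE ownership. */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */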