author     Paul Mackerras <paulus@samba.org>   2005-09-30 23:49:08 -0400
committer  Paul Mackerras <paulus@samba.org>   2005-09-30 23:49:08 -0400
commit     c0c0d996d08e450164adedc249c1bbbca63524ce
tree       15f297796a93568fd45756c72ca07e77756c8653  /arch/powerpc/kernel/process.c
parent     ab11d1ea281e85895369ef57c5259ad8a432fabb
powerpc: Get merged kernel to compile and run on 32-bit SMP powermac.
This updates the powermac SMP code to use the mpic driver instead of
the openpic driver and fixes the SMP-dependent context switch code.
We had a subtle bug where we were using interrupt numbers 256-259 for
IPIs, but ppc32 had NR_IRQS = 256. Moved the IPIs down to use interrupt
numbers 252-255 instead.
Signed-off-by: Paul Mackerras <paulus@samba.org>
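The overrun described above is easy to see in miniature. Below is a small illustrative C sketch (not kernel code; irq_desc here is a simplified stand-in for the kernel's per-interrupt table): with NR_IRQS = 256 the valid interrupt numbers are 0..255, so IPIs mapped to 256..259 index past the end of the table, while 252..255 stay in bounds.

#include <assert.h>
#include <stdio.h>

#define NR_IRQS        256            /* ppc32 value at the time */
#define OLD_IPI_BASE   256            /* buggy: 256..259 are out of range */
#define NEW_IPI_BASE   (NR_IRQS - 4)  /* fixed: 252..255 fit in the table */

static int irq_desc[NR_IRQS];         /* stand-in for the per-IRQ array */

int main(void)
{
	for (int ipi = 0; ipi < 4; ipi++) {
		assert(NEW_IPI_BASE + ipi < NR_IRQS);   /* in bounds */
		irq_desc[NEW_IPI_BASE + ipi] = 1;       /* safe */
		/* irq_desc[OLD_IPI_BASE + ipi] = 1;       would write past the array */
		printf("IPI %d -> irq %d (old, invalid: %d)\n",
		       ipi, NEW_IPI_BASE + ipi, OLD_IPI_BASE + ipi);
	}
	return 0;
}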
Diffstat (limited to 'arch/powerpc/kernel/process.c')
 arch/powerpc/kernel/process.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e3946769dd8e..ae316e9ed581 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -272,11 +272,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
 		giveup_altivec(prev);
-	/* Avoid the trap. On smp this this never happens since
-	 * we don't set last_task_used_altivec -- Cort
-	 */
-	if (new->thread.regs && last_task_used_altivec == new)
-		new->thread.regs->msr |= MSR_VEC;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	/*
@@ -288,12 +283,24 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
 		giveup_spe(prev);
+#endif /* CONFIG_SPE */
+
+#else  /* CONFIG_SMP */
+#ifdef CONFIG_ALTIVEC
+	/* Avoid the trap. On smp this this never happens since
+	 * we don't set last_task_used_altivec -- Cort
+	 */
+	if (new->thread.regs && last_task_used_altivec == new)
+		new->thread.regs->msr |= MSR_VEC;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
 	/* Avoid the trap. On smp this this never happens since
 	 * we don't set last_task_used_spe
 	 */
 	if (new->thread.regs && last_task_used_spe == new)
 		new->thread.regs->msr |= MSR_SPE;
 #endif /* CONFIG_SPE */
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC64	/* for now */
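For context on what the hunk reorganizes: on SMP, __switch_to() always flushes AltiVec/SPE state via giveup_altivec()/giveup_spe(), so the last_task_used_* shortcut never fires, and the patch moves it into the uniprocessor-only (#else /* CONFIG_SMP */) branch. Here is a minimal compilable sketch of that UP-only "avoid the trap" pattern, using simplified stand-in types (the kernel's real task_struct and MSR handling differ):

#include <stdio.h>

#define MSR_VEC 0x02000000UL	/* AltiVec-available bit in the MSR */

struct regs   { unsigned long msr; };
struct thread { struct regs *regs; };
struct task   { struct thread thread; };

/* UP-only cache: the task whose AltiVec registers are still live */
static struct task *last_task_used_altivec;

static void switch_in(struct task *new)
{
#ifndef CONFIG_SMP
	/*
	 * If 'new' still owns the vector registers, re-enable MSR_VEC now
	 * so its first AltiVec instruction does not trap.  On SMP the
	 * registers were already flushed by giveup_altivec(), so this
	 * shortcut is compiled out.
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif
}

int main(void)
{
	struct regs r = { 0 };
	struct task t = { { &r } };

	last_task_used_altivec = &t;	/* pretend t used AltiVec last */
	switch_in(&t);
	printf("MSR_VEC is %s\n", (r.msr & MSR_VEC) ? "set" : "clear");
	return 0;
}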