author     Paul Mackerras <paulus@samba.org>          2005-05-01 11:58:40 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-05-01 11:58:40 -0400
commit     443a848cd30eb5bb5c1038e6371d83404775dcfc
tree       f61d62eebf19498395257a7cddee828c4edfa876   /arch
parent     f1c55dea0bb2df94aa2b01b0871cb02f2e206676
[PATCH] ppc32: refactor FPU exception handling
Moved common FPU exception handling code out of head.S so it can be used by several of the sub-architectures that might have a full PowerPC FPU. Also uses the new CONFIG_PPC_FPU define so that alignment exception handling for floating-point load/store instructions is only performed if we have a hardware FPU.

Signed-off-by: Jason McMullan <jason.mcmullan@timesys.com>
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
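The alignment-handler part of the change follows a simple build-time pattern: attempt the single/double conversion only when CONFIG_PPC_FPU is selected, and otherwise return 0 from fix_alignment() so the faulting task still gets the exception. A minimal stand-alone C sketch of that guard pattern (illustrative only, hypothetical names, not kernel code):

#include <stdio.h>

#define CONFIG_PPC_FPU 1        /* assumption: stands in for a Kconfig "select PPC_FPU" */

/* Sketch of the LD+F+S path: widen a single-precision value to double,
 * which is the job cvt_fd() does with the hardware FPU in the real handler. */
static int fix_fp_single_load(double *out, const float *in)
{
#ifdef CONFIG_PPC_FPU
        *out = (double)*in;     /* conversion done, access fixed up */
        return 1;
#else
        (void)out; (void)in;
        return 0;               /* no FPU built in: caller delivers the exception */
#endif
}

int main(void)
{
        float f = 3.25f;
        double d = 0.0;
        printf("handled=%d value=%f\n", fix_fp_single_load(&d, &f), d);
        return 0;
}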
Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc/Kconfig                  |   4
-rw-r--r--  arch/ppc/Makefile                 |   1
-rw-r--r--  arch/ppc/kernel/Makefile          |   1
-rw-r--r--  arch/ppc/kernel/align.c           |   8
-rw-r--r--  arch/ppc/kernel/entry.S           |  59
-rw-r--r--  arch/ppc/kernel/fpu.S             | 133
-rw-r--r--  arch/ppc/kernel/head.S            | 163
-rw-r--r--  arch/ppc/kernel/head_44x.S        |   6
-rw-r--r--  arch/ppc/kernel/head_booke.h      |   7
-rw-r--r--  arch/ppc/kernel/head_fsl_booke.S  |   8
-rw-r--r--  arch/ppc/kernel/misc.S            |  12
-rw-r--r--  arch/ppc/kernel/traps.c           |   2
12 files changed, 228 insertions, 176 deletions
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 74aa1e92a395..c3d941345e3d 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -53,6 +53,7 @@ choice
 
 config 6xx
 	bool "6xx/7xx/74xx/52xx/82xx/83xx"
+	select PPC_FPU
 	help
 	  There are four types of PowerPC chips supported. The more common
 	  types (601, 603, 604, 740, 750, 7400), the Motorola embedded
@@ -86,6 +87,9 @@ config E500
 
 endchoice
 
+config PPC_FPU
+	bool
+
 config BOOKE
 	bool
 	depends on E500
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 73cbdda5b597..0432a25b4735 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -53,6 +53,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o
 
 head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
 head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
+head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o
 
 core-y += arch/ppc/kernel/ arch/ppc/platforms/ \
 	arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 86bc878cb3ee..b284451802c9 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -9,6 +9,7 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
 extra-$(CONFIG_8xx) := head_8xx.o
 extra-$(CONFIG_6xx) += idle_6xx.o
 extra-$(CONFIG_POWER4) += idle_power4.o
+extra-$(CONFIG_PPC_FPU) += fpu.o
 extra-y += vmlinux.lds
 
 obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index 79c929475037..40d356c66c41 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -368,16 +368,24 @@ fix_alignment(struct pt_regs *regs)
 
 	/* Single-precision FP load and store require conversions... */
 	case LD+F+S:
+#ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_fd(&data.f, &data.d, &current->thread.fpscr);
 		preempt_enable();
+#else
+		return 0;
+#endif
 		break;
 	case ST+F+S:
+#ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_df(&data.d, &data.f, &current->thread.fpscr);
 		preempt_enable();
+#else
+		return 0;
+#endif
 		break;
 	}
 
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 035217d6c0f1..5f075dbc4ee7 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -563,6 +563,65 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	addi r1,r1,INT_FRAME_SIZE
 	blr
 
+	.globl fast_exception_return
+fast_exception_return:
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+	andi. r10,r9,MSR_RI	/* check for recoverable interrupt */
+	beq 1f			/* if not, we've got problems */
+#endif
+
+2:	REST_4GPRS(3, r11)
+	lwz r10,_CCR(r11)
+	REST_GPR(1, r11)
+	mtcr r10
+	lwz r10,_LINK(r11)
+	mtlr r10
+	REST_GPR(10, r11)
+	mtspr SPRN_SRR1,r9
+	mtspr SPRN_SRR0,r12
+	REST_GPR(9, r11)
+	REST_GPR(12, r11)
+	lwz r11,GPR11(r11)
+	SYNC
+	RFI
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+/* check if the exception happened in a restartable section */
+1:	lis r3,exc_exit_restart_end@ha
+	addi r3,r3,exc_exit_restart_end@l
+	cmplw r12,r3
+	bge 3f
+	lis r4,exc_exit_restart@ha
+	addi r4,r4,exc_exit_restart@l
+	cmplw r12,r4
+	blt 3f
+	lis r3,fee_restarts@ha
+	tophys(r3,r3)
+	lwz r5,fee_restarts@l(r3)
+	addi r5,r5,1
+	stw r5,fee_restarts@l(r3)
+	mr r12,r4		/* restart at exc_exit_restart */
+	b 2b
+
+	.comm fee_restarts,4
+
+/* aargh, a nonrecoverable interrupt, panic */
+/* aargh, we don't know which trap this is */
+/* but the 601 doesn't implement the RI bit, so assume it's OK */
+3:
+BEGIN_FTR_SECTION
+	b 2b
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+	li r10,-1
+	stw r10,TRAP(r11)
+	addi r3,r1,STACK_FRAME_OVERHEAD
+	lis r10,MSR_KERNEL@h
+	ori r10,r10,MSR_KERNEL@l
+	bl transfer_to_handler_full
+	.long nonrecoverable_exception
+	.long ret_from_except
+#endif
+
 	.globl sigreturn_exit
 sigreturn_exit:
 	subi r1,r3,STACK_FRAME_OVERHEAD
diff --git a/arch/ppc/kernel/fpu.S b/arch/ppc/kernel/fpu.S
new file mode 100644
index 000000000000..6189b26f640f
--- /dev/null
+++ b/arch/ppc/kernel/fpu.S
@@ -0,0 +1,133 @@
+/*
+ * FPU support code, moved here from head.S so that it can be used
+ * by chips which use other head-whatever.S files.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/offsets.h>
+
+/*
+ * This task wants to use the FPU now.
+ * On UP, disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Load up this task's FP registers from its thread_struct,
+ * enable the FPU for the current task and return to the task.
+ */
+	.globl load_up_fpu
+load_up_fpu:
+	mfmsr r5
+	ori r5,r5,MSR_FP
+#ifdef CONFIG_PPC64BRIDGE
+	clrldi r5,r5,1		/* turn off 64-bit mode */
+#endif /* CONFIG_PPC64BRIDGE */
+	SYNC
+	MTMSRD(r5)		/* enable use of fpu now */
+	isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_fpu in switch_to.
+ */
+#ifndef CONFIG_SMP
+	tophys(r6,0)		/* get __pa constant */
+	addis r3,r6,last_task_used_math@ha
+	lwz r4,last_task_used_math@l(r3)
+	cmpwi 0,r4,0
+	beq 1f
+	add r4,r4,r6
+	addi r4,r4,THREAD	/* want last_task_used_math->thread */
+	SAVE_32FPRS(0, r4)
+	mffs fr0
+	stfd fr0,THREAD_FPSCR-4(r4)
+	lwz r5,PT_REGS(r4)
+	add r5,r5,r6
+	lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li r10,MSR_FP|MSR_FE0|MSR_FE1
+	andc r4,r4,r10		/* disable FP for previous task */
+	stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of FP after return */
+	mfspr r5,SPRN_SPRG3	/* current task's THREAD (phys) */
+	lwz r4,THREAD_FPEXC_MODE(r5)
+	ori r9,r9,MSR_FP	/* enable FP for current */
+	or r9,r9,r4
+	lfd fr0,THREAD_FPSCR-4(r5)
+	mtfsf 0xff,fr0
+	REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+	subi r4,r5,THREAD
+	sub r4,r4,r6
+	stw r4,last_task_used_math@l(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	/* we haven't used ctr or xer or lr */
+	b fast_exception_return
+
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+	.globl KernelFP
+KernelFP:
+	lwz r3,_MSR(r1)
+	ori r3,r3,MSR_FP
+	stw r3,_MSR(r1)		/* enable use of FP after return */
+	lis r3,86f@h
+	ori r3,r3,86f@l
+	mr r4,r2		/* current */
+	lwz r5,_NIP(r1)
+	bl printk
+	b ret_from_except
+86:	.string "floating point used in kernel (task=%p, pc=%x)\n"
+	.align 4,0
+
+/*
+ * giveup_fpu(tsk)
+ * Disable FP for the task given as the argument,
+ * and save the floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ */
+	.globl giveup_fpu
+giveup_fpu:
+	mfmsr r5
+	ori r5,r5,MSR_FP
+	SYNC_601
+	ISYNC_601
+	MTMSRD(r5)		/* enable use of fpu now */
+	SYNC_601
+	isync
+	cmpwi 0,r3,0
+	beqlr-			/* if no previous owner, done */
+	addi r3,r3,THREAD	/* want THREAD of task */
+	lwz r5,PT_REGS(r3)
+	cmpwi 0,r5,0
+	SAVE_32FPRS(0, r3)
+	mffs fr0
+	stfd fr0,THREAD_FPSCR-4(r3)
+	beq 1f
+	lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li r3,MSR_FP|MSR_FE0|MSR_FE1
+	andc r4,r4,r3		/* disable FP for previous task */
+	stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li r5,0
+	lis r4,last_task_used_math@ha
+	stw r5,last_task_used_math@l(r4)
+#endif /* CONFIG_SMP */
+	blr
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1a89a71e0acc..a931d773715f 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -775,133 +775,6 @@ InstructionSegment:
 	EXC_XFER_STD(0x480, UnknownException)
 #endif /* CONFIG_PPC64BRIDGE */
 
-/*
- * This task wants to use the FPU now.
- * On UP, disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Load up this task's FP registers from its thread_struct,
- * enable the FPU for the current task and return to the task.
- */
-load_up_fpu:
-	mfmsr r5
-	ori r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
-	clrldi r5,r5,1		/* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
-	SYNC
-	MTMSRD(r5)		/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
-	tophys(r6,0)		/* get __pa constant */
-	addis r3,r6,last_task_used_math@ha
-	lwz r4,last_task_used_math@l(r3)
-	cmpwi 0,r4,0
-	beq 1f
-	add r4,r4,r6
-	addi r4,r4,THREAD	/* want last_task_used_math->thread */
-	SAVE_32FPRS(0, r4)
-	mffs fr0
-	stfd fr0,THREAD_FPSCR-4(r4)
-	lwz r5,PT_REGS(r4)
-	add r5,r5,r6
-	lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li r10,MSR_FP|MSR_FE0|MSR_FE1
-	andc r4,r4,r10		/* disable FP for previous task */
-	stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	mfspr r5,SPRN_SPRG3	/* current task's THREAD (phys) */
-	lwz r4,THREAD_FPEXC_MODE(r5)
-	ori r9,r9,MSR_FP	/* enable FP for current */
-	or r9,r9,r4
-	lfd fr0,THREAD_FPSCR-4(r5)
-	mtfsf 0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	subi r4,r5,THREAD
-	sub r4,r4,r6
-	stw r4,last_task_used_math@l(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	/* we haven't used ctr or xer or lr */
-	/* fall through to fast_exception_return */
-
-	.globl fast_exception_return
-fast_exception_return:
-	andi. r10,r9,MSR_RI	/* check for recoverable interrupt */
-	beq 1f			/* if not, we've got problems */
-2:	REST_4GPRS(3, r11)
-	lwz r10,_CCR(r11)
-	REST_GPR(1, r11)
-	mtcr r10
-	lwz r10,_LINK(r11)
-	mtlr r10
-	REST_GPR(10, r11)
-	mtspr SPRN_SRR1,r9
-	mtspr SPRN_SRR0,r12
-	REST_GPR(9, r11)
-	REST_GPR(12, r11)
-	lwz r11,GPR11(r11)
-	SYNC
-	RFI
-
-/* check if the exception happened in a restartable section */
-1:	lis r3,exc_exit_restart_end@ha
-	addi r3,r3,exc_exit_restart_end@l
-	cmplw r12,r3
-	bge 3f
-	lis r4,exc_exit_restart@ha
-	addi r4,r4,exc_exit_restart@l
-	cmplw r12,r4
-	blt 3f
-	lis r3,fee_restarts@ha
-	tophys(r3,r3)
-	lwz r5,fee_restarts@l(r3)
-	addi r5,r5,1
-	stw r5,fee_restarts@l(r3)
-	mr r12,r4		/* restart at exc_exit_restart */
-	b 2b
-
-	.comm fee_restarts,4
-
-/* aargh, a nonrecoverable interrupt, panic */
-/* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
-3:
-BEGIN_FTR_SECTION
-	b 2b
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-	li r10,-1
-	stw r10,TRAP(r11)
-	addi r3,r1,STACK_FRAME_OVERHEAD
-	li r10,MSR_KERNEL
-	bl transfer_to_handler_full
-	.long nonrecoverable_exception
-	.long ret_from_except
-
-/*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-KernelFP:
-	lwz r3,_MSR(r1)
-	ori r3,r3,MSR_FP
-	stw r3,_MSR(r1)		/* enable use of FP after return */
-	lis r3,86f@h
-	ori r3,r3,86f@l
-	mr r4,r2		/* current */
-	lwz r5,_NIP(r1)
-	bl printk
-	b ret_from_except
-86:	.string "floating point used in kernel (task=%p, pc=%x)\n"
-	.align 4,0
-
 #ifdef CONFIG_ALTIVEC
 /* Note that the AltiVec support is closely modeled after the FP
  * support. Changes to one are likely to be applicable to the
@@ -1016,42 +889,6 @@ giveup_altivec:
 #endif /* CONFIG_ALTIVEC */
 
 /*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-	.globl giveup_fpu
-giveup_fpu:
-	mfmsr r5
-	ori r5,r5,MSR_FP
-	SYNC_601
-	ISYNC_601
-	MTMSRD(r5)		/* enable use of fpu now */
-	SYNC_601
-	isync
-	cmpwi 0,r3,0
-	beqlr-			/* if no previous owner, done */
-	addi r3,r3,THREAD	/* want THREAD of task */
-	lwz r5,PT_REGS(r3)
-	cmpwi 0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs fr0
-	stfd fr0,THREAD_FPSCR-4(r3)
-	beq 1f
-	lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc r4,r4,r3		/* disable FP for previous task */
-	stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li r5,0
-	lis r4,last_task_used_math@ha
-	stw r5,last_task_used_math@l(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-/*
  * This code is jumped to from the startup code to copy
  * the kernel image to physical address 0.
  */
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 9ed8165a3d6c..9b6a8e513657 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -426,7 +426,11 @@ interrupt_base:
 	PROGRAM_EXCEPTION
 
 	/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+	FP_UNAVAILABLE_EXCEPTION
+#else
 	EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
 
 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
@@ -686,8 +690,10 @@ _GLOBAL(giveup_altivec)
  *
  * The 44x core does not have an FPU.
  */
+#ifndef CONFIG_PPC_FPU
 _GLOBAL(giveup_fpu)
 	blr
+#endif
 
 /*
  * extern void abort(void)
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index 884dac916bce..f213d12eec08 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -337,4 +337,11 @@ label:
 	addi r3,r1,STACK_FRAME_OVERHEAD; \
 	EXC_XFER_LITE(0x0900, timer_interrupt)
 
+#define FP_UNAVAILABLE_EXCEPTION \
+	START_EXCEPTION(FloatingPointUnavailable) \
+	NORMAL_EXCEPTION_PROLOG; \
+	bne load_up_fpu;	/* if from user, just load it up */ \
+	addi r3,r1,STACK_FRAME_OVERHEAD; \
+	EXC_XFER_EE_LITE(0x800, KernelFP)
+
 #endif /* __HEAD_BOOKE_H__ */
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index d64bf61d2b1f..f22ddce36135 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -504,7 +504,11 @@ interrupt_base:
 	PROGRAM_EXCEPTION
 
 	/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+	FP_UNAVAILABLE_EXCEPTION
+#else
 	EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
 
 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
@@ -916,10 +920,12 @@ _GLOBAL(giveup_spe)
 /*
  * extern void giveup_fpu(struct task_struct *prev)
  *
- * The e500 core does not have an FPU.
+ * Not all FSL Book-E cores have an FPU
  */
+#ifndef CONFIG_PPC_FPU
 _GLOBAL(giveup_fpu)
 	blr
+#endif
 
 /*
  * extern void abort(void)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 73f7c23b0dd4..e4f1615ec13f 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -1096,17 +1096,7 @@ _GLOBAL(_get_SP)
  * and exceptions as if the cpu had performed the load or store.
  */
 
-#if defined(CONFIG_4xx) || defined(CONFIG_E500)
-_GLOBAL(cvt_fd)
-	lfs 0,0(r3)
-	stfd 0,0(r4)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd 0,0(r3)
-	stfs 0,0(r4)
-	blr
-#else
+#ifdef CONFIG_PPC_FPU
 _GLOBAL(cvt_fd)
 	lfd 0,-4(r5)		/* load up fpscr value */
 	mtfsf 0xff,0
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 361865c4bc84..f8e7e324a173 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -176,7 +176,7 @@ static inline int check_io_access(struct pt_regs *regs)
 #else
 #define get_mc_reason(regs) (mfspr(SPRN_MCSR))
 #endif
-#define REASON_FP 0
+#define REASON_FP ESR_FP
 #define REASON_ILLEGAL ESR_PIL
 #define REASON_PRIVILEGED ESR_PPR
 #define REASON_TRAP ESR_PTR