Diffstat (limited to 'arch/ppc/kernel/head.S')
-rw-r--r--	arch/ppc/kernel/head.S	163
1 file changed, 0 insertions, 163 deletions
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1a89a71e0acc..a931d773715f 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -775,133 +775,6 @@ InstructionSegment:
 	EXC_XFER_STD(0x480, UnknownException)
 #endif /* CONFIG_PPC64BRIDGE */
 
-/*
- * This task wants to use the FPU now.
- * On UP, disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Load up this task's FP registers from its thread_struct,
- * enable the FPU for the current task and return to the task.
- */
-load_up_fpu:
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
-	clrldi	r5,r5,1		/* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
-	SYNC
-	MTMSRD(r5)		/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
-	tophys(r6,0)		/* get __pa constant */
-	addis	r3,r6,last_task_used_math@ha
-	lwz	r4,last_task_used_math@l(r3)
-	cmpwi	0,r4,0
-	beq	1f
-	add	r4,r4,r6
-	addi	r4,r4,THREAD	/* want last_task_used_math->thread */
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r4)
-	lwz	r5,PT_REGS(r4)
-	add	r5,r5,r6
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r10,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r10	/* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	mfspr	r5,SPRN_SPRG3	/* current task's THREAD (phys) */
-	lwz	r4,THREAD_FPEXC_MODE(r5)
-	ori	r9,r9,MSR_FP	/* enable FP for current */
-	or	r9,r9,r4
-	lfd	fr0,THREAD_FPSCR-4(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	sub	r4,r4,r6
-	stw	r4,last_task_used_math@l(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	/* we haven't used ctr or xer or lr */
-	/* fall through to fast_exception_return */
-
-	.globl	fast_exception_return
-fast_exception_return:
-	andi.	r10,r9,MSR_RI	/* check for recoverable interrupt */
-	beq	1f		/* if not, we've got problems */
-2:	REST_4GPRS(3, r11)
-	lwz	r10,_CCR(r11)
-	REST_GPR(1, r11)
-	mtcr	r10
-	lwz	r10,_LINK(r11)
-	mtlr	r10
-	REST_GPR(10, r11)
-	mtspr	SPRN_SRR1,r9
-	mtspr	SPRN_SRR0,r12
-	REST_GPR(9, r11)
-	REST_GPR(12, r11)
-	lwz	r11,GPR11(r11)
-	SYNC
-	RFI
-
-/* check if the exception happened in a restartable section */
-1:	lis	r3,exc_exit_restart_end@ha
-	addi	r3,r3,exc_exit_restart_end@l
-	cmplw	r12,r3
-	bge	3f
-	lis	r4,exc_exit_restart@ha
-	addi	r4,r4,exc_exit_restart@l
-	cmplw	r12,r4
-	blt	3f
-	lis	r3,fee_restarts@ha
-	tophys(r3,r3)
-	lwz	r5,fee_restarts@l(r3)
-	addi	r5,r5,1
-	stw	r5,fee_restarts@l(r3)
-	mr	r12,r4		/* restart at exc_exit_restart */
-	b	2b
-
-	.comm	fee_restarts,4
-
-/* aargh, a nonrecoverable interrupt, panic */
-/* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
-3:
-BEGIN_FTR_SECTION
-	b	2b
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-	li	r10,-1
-	stw	r10,TRAP(r11)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	li	r10,MSR_KERNEL
-	bl	transfer_to_handler_full
-	.long	nonrecoverable_exception
-	.long	ret_from_except
-
-/*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-KernelFP:
-	lwz	r3,_MSR(r1)
-	ori	r3,r3,MSR_FP
-	stw	r3,_MSR(r1)	/* enable use of FP after return */
-	lis	r3,86f@h
-	ori	r3,r3,86f@l
-	mr	r4,r2		/* current */
-	lwz	r5,_NIP(r1)
-	bl	printk
-	b	ret_from_except
-86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
-	.align	4,0
-
 #ifdef CONFIG_ALTIVEC
 /* Note that the AltiVec support is closely modeled after the FP
  * support. Changes to one are likely to be applicable to the
@@ -1016,42 +889,6 @@ giveup_altivec:
 #endif /* CONFIG_ALTIVEC */
 
 /*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-	.globl	giveup_fpu
-giveup_fpu:
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-	SYNC_601
-	ISYNC_601
-	MTMSRD(r5)		/* enable use of fpu now */
-	SYNC_601
-	isync
-	cmpwi	0,r3,0
-	beqlr-			/* if no previous owner, done */
-	addi	r3,r3,THREAD	/* want THREAD of task */
-	lwz	r5,PT_REGS(r3)
-	cmpwi	0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r3)
-	beq	1f
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r3	/* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	lis	r4,last_task_used_math@ha
-	stw	r5,last_task_used_math@l(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-/*
  * This code is jumped to from the startup code to copy
  * the kernel image to physical address 0.
  */
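
For orientation, the code removed by the first hunk implements lazy FPU context switching on uniprocessor kernels: FP state is saved and reloaded only when a task other than last_task_used_math actually executes an FP instruction and takes the "FP unavailable" exception. giveup_fpu, removed by the second hunk, is the eager hand-off called from switch_to on SMP and whenever the kernel needs a task's FP state written back to its thread_struct. The C sketch below mirrors that logic; the struct task layout, the save_fp_state/restore_fp_state helpers, and the simplified MSR handling are illustrative stand-ins for this note, not the kernel's real definitions.

/*
 * Conceptual sketch of the removed load_up_fpu/giveup_fpu lazy-FPU
 * scheme.  Illustrative only: types, helpers and the single saved MSR
 * field are stand-ins, not kernel definitions.
 */
#define MSR_FP  (1UL << 13)            /* FP available */
#define MSR_FE0 (1UL << 11)            /* FP exception mode bits */
#define MSR_FE1 (1UL << 8)

struct task {
    unsigned long msr;                 /* MSR image in the exception frame */
    double        fpr[32];             /* saved floating-point registers */
    unsigned int  fpscr;               /* saved FP status/control register */
    unsigned long fpexc_mode;          /* task's chosen MSR_FE0/MSR_FE1 bits */
};

/* UP only: whose state currently lives in the FP register file. */
static struct task *last_task_used_math;

static void save_fp_state(struct task *t)    { (void)t; /* SAVE_32FPRS + mffs */ }
static void restore_fp_state(struct task *t) { (void)t; /* REST_32FPRS + mtfsf */ }

/* "FP unavailable" exception: the running task just touched the FPU. */
void load_up_fpu(struct task *current_task)
{
    struct task *prev = last_task_used_math;

    if (prev) {
        /* Park the old owner's state in its thread_struct and clear
         * MSR_FP so its next FP instruction traps again. */
        save_fp_state(prev);
        prev->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
    }
    restore_fp_state(current_task);
    /* No trap on return for this task, in its chosen FP exception mode. */
    current_task->msr |= MSR_FP | current_task->fpexc_mode;
    last_task_used_math = current_task;
}

/* giveup_fpu(tsk): force tsk's FP state out of the registers. */
void giveup_fpu(struct task *tsk)
{
    if (!tsk)
        return;                        /* no previous owner, nothing to do */
    save_fp_state(tsk);
    tsk->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
    last_task_used_math = NULL;        /* UP: register file no longer live */
}

As the removed comment notes, the lazy scheme is UP-only: on SMP the last user's registers may be live on another CPU, so giveup_fpu is called eagerly from switch_to instead of waiting for a trap.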
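The removed fast_exception_return path also shows the RI (recoverable interrupt) convention: if MSR_RI is clear in the saved MSR, the exception is survivable only when SRR0 points inside the window bracketed by the exc_exit_restart and exc_exit_restart_end labels, in which case execution restarts at exc_exit_restart and the fee_restarts counter is bumped; the 601, which lacks the RI bit, optimistically takes the fast path regardless. Below is a self-contained C sketch of that range test; check_restart is a made-up name, and the dummy window stands in for the real labels, which bracket the exception-exit code elsewhere in the kernel.

/*
 * Sketch of the restartable-section test at local label 1: of
 * fast_exception_return.  The dummy window is a stand-in for the
 * exc_exit_restart/exc_exit_restart_end labels so this compiles alone.
 */
static char exc_exit_window[64];       /* stand-in for the marked code */
#define exc_exit_restart     ((unsigned long)&exc_exit_window[0])
#define exc_exit_restart_end ((unsigned long)&exc_exit_window[64])

static unsigned int fee_restarts;      /* mirrors .comm fee_restarts,4 */

/* Returns the PC to restart at, or 0 for a nonrecoverable interrupt. */
unsigned long check_restart(unsigned long srr0)
{
    if (srr0 >= exc_exit_restart && srr0 < exc_exit_restart_end) {
        fee_restarts++;                /* count the restart, as the asm does */
        return exc_exit_restart;       /* mr r12,r4; b 2b */
    }
    return 0;                          /* label 3: nonrecoverable_exception */
}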
