Diffstat (limited to 'arch/ppc64/kernel/head.S')
-rw-r--r--	arch/ppc64/kernel/head.S	59
1 file changed, 2 insertions(+), 57 deletions(-)
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index f58af9c246cb..929f9f42cf7a 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -81,7 +81,7 @@ _stext:
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
 	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
@@ -747,6 +747,7 @@ bad_stack:
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+	.globl	fast_exception_return
 fast_exception_return:
 	ld	r12,_MSR(r1)
 	ld	r11,_NIP(r1)
@@ -858,62 +859,6 @@ fp_unavailable_common:
 	bl	.kernel_fp_unavailable_exception
 	BUG_OPCODE
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
 	.align	7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:
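
Note on the removed routine: the block comments in the deleted hunk describe the policy load_up_fpu implemented. On UP kernels, the FP state of last_task_used_math is saved lazily, only when some other task takes an FP-unavailable fault; on SMP, giveup_fpu in switch_to saves it eagerly at every context switch, so the FPU is always free by the time this handler runs. (The added .globl presumably keeps fast_exception_return reachable from outside head.S, since the removed code ends with "b fast_exception_return".) Below is a minimal C sketch of that lazy-versus-eager policy, with hypothetical names throughout (struct task, save_fp_state, restore_fp_state, the msr field standing in for the _MSR slot in the task's saved pt_regs); only the MSR_FP/MSR_FE0/MSR_FE1 bit values are real ppc64 constants. It illustrates the scheme, not the kernel's actual implementation.

/*
 * Hypothetical sketch only: none of these names are kernel APIs.
 * The MSR bit values match the ppc64 architecture; everything else
 * stands in for the real thread_struct/pt_regs machinery.
 */
#define MSR_FP	0x2000UL		/* FP available */
#define MSR_FE0	0x0800UL		/* FP exception mode 0 */
#define MSR_FE1	0x0100UL		/* FP exception mode 1 */

struct task {
	unsigned long	msr;		/* the _MSR slot in the task's saved regs */
	unsigned long	fpexc_mode;	/* THREAD_FPEXC_MODE in the assembly */
	double		fpr[32];	/* FP register save area */
	unsigned long	fpscr;		/* FP status/control save slot */
};

/* Stand-ins for the SAVE_32FPRS/mffs and REST_32FPRS/mtfsf sequences. */
static void save_fp_state(struct task *t)    { (void)t; }
static void restore_fp_state(struct task *t) { (void)t; }

#ifndef CONFIG_SMP
static struct task *last_task_used_math;	/* current FPU owner, UP only */
#endif

/* Runs on an FP-unavailable fault, like the removed load_up_fpu. */
void load_up_fpu_sketch(struct task *tsk)
{
	/* (the mfmsr/ori/mtmsrd/isync sequence turns the FPU on here) */
#ifndef CONFIG_SMP
	/*
	 * UP: lazily save the previous owner's FP state and clear MSR_FP
	 * in its saved MSR so its next FP use faults back into this path.
	 */
	if (last_task_used_math && last_task_used_math != tsk) {
		save_fp_state(last_task_used_math);
		last_task_used_math->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	}
#endif
	/* load this task's FP state; it returns with MSR_FP set */
	restore_fp_state(tsk);
	tsk->msr |= MSR_FP | tsk->fpexc_mode;
#ifndef CONFIG_SMP
	last_task_used_math = tsk;	/* record the new FPU owner */
#endif
}

On SMP builds, the #ifndef CONFIG_SMP blocks compile out and the handler only ever restores state. That is the trade-off the removed comment calls out: lazy switching avoids FP save/restore work on every context switch, but gets "horrendously complex" once a task can fault its FP state in on a different CPU from the one that last owned it.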
