diff options
author | Ingo Molnar <mingo@elte.hu> | 2008-07-18 16:00:54 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-18 16:00:54 -0400 |
commit | bb2c018b09b681d43f5e08124b83e362647ea82b (patch) | |
tree | d794902c78f9fdd04ed88a4b8d451ed6f9292ec0 /arch/powerpc/kernel/head_64.S | |
parent | 82638844d9a8581bbf33201cc209a14876eca167 (diff) | |
parent | 5b664cb235e97afbf34db9c4d77f08ebd725335e (diff) |
Merge branch 'linus' into cpus4096
Conflicts:
drivers/acpi/processor_throttling.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r-- | arch/powerpc/kernel/head_64.S | 82 |
1 file changed, 77 insertions, 5 deletions
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 25e84c0e1166..cc8fb474d520 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -275,7 +275,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |||
275 | . = 0xf00 | 275 | . = 0xf00 |
276 | b performance_monitor_pSeries | 276 | b performance_monitor_pSeries |
277 | 277 | ||
278 | STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) | 278 | . = 0xf20 |
279 | b altivec_unavailable_pSeries | ||
280 | |||
281 | . = 0xf40 | ||
282 | b vsx_unavailable_pSeries | ||
279 | 283 | ||
280 | #ifdef CONFIG_CBE_RAS | 284 | #ifdef CONFIG_CBE_RAS |
281 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | 285 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) |
@@ -295,6 +299,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |||
295 | 299 | ||
296 | /* moved from 0xf00 */ | 300 | /* moved from 0xf00 */ |
297 | STD_EXCEPTION_PSERIES(., performance_monitor) | 301 | STD_EXCEPTION_PSERIES(., performance_monitor) |
302 | STD_EXCEPTION_PSERIES(., altivec_unavailable) | ||
303 | STD_EXCEPTION_PSERIES(., vsx_unavailable) | ||
298 | 304 | ||
299 | /* | 305 | /* |
300 | * An interrupt came in while soft-disabled; clear EE in SRR1, | 306 | * An interrupt came in while soft-disabled; clear EE in SRR1, |
@@ -739,7 +745,8 @@ fp_unavailable_common: | |||
739 | ENABLE_INTS | 745 | ENABLE_INTS |
740 | bl .kernel_fp_unavailable_exception | 746 | bl .kernel_fp_unavailable_exception |
741 | BUG_OPCODE | 747 | BUG_OPCODE |
742 | 1: b .load_up_fpu | 748 | 1: bl .load_up_fpu |
749 | b fast_exception_return | ||
743 | 750 | ||
744 | .align 7 | 751 | .align 7 |
745 | .globl altivec_unavailable_common | 752 | .globl altivec_unavailable_common |
@@ -747,7 +754,10 @@ altivec_unavailable_common: | |||
747 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | 754 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) |
748 | #ifdef CONFIG_ALTIVEC | 755 | #ifdef CONFIG_ALTIVEC |
749 | BEGIN_FTR_SECTION | 756 | BEGIN_FTR_SECTION |
750 | bne .load_up_altivec /* if from user, just load it up */ | 757 | beq 1f |
758 | bl .load_up_altivec | ||
759 | b fast_exception_return | ||
760 | 1: | ||
751 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 761 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
752 | #endif | 762 | #endif |
753 | bl .save_nvgprs | 763 | bl .save_nvgprs |
@@ -827,9 +837,70 @@ _STATIC(load_up_altivec) | |||
827 | std r4,0(r3) | 837 | std r4,0(r3) |
828 | #endif /* CONFIG_SMP */ | 838 | #endif /* CONFIG_SMP */ |
829 | /* restore registers and return */ | 839 | /* restore registers and return */ |
830 | b fast_exception_return | 840 | blr |
831 | #endif /* CONFIG_ALTIVEC */ | 841 | #endif /* CONFIG_ALTIVEC */ |
832 | 842 | ||
843 | .align 7 | ||
844 | .globl vsx_unavailable_common | ||
845 | vsx_unavailable_common: | ||
846 | EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) | ||
847 | #ifdef CONFIG_VSX | ||
848 | BEGIN_FTR_SECTION | ||
849 | bne .load_up_vsx | ||
850 | 1: | ||
851 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | ||
852 | #endif | ||
853 | bl .save_nvgprs | ||
854 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
855 | ENABLE_INTS | ||
856 | bl .vsx_unavailable_exception | ||
857 | b .ret_from_except | ||
858 | |||
859 | #ifdef CONFIG_VSX | ||
860 | /* | ||
861 | * load_up_vsx(unused, unused, tsk) | ||
862 | * Disable VSX for the task which had it previously, | ||
863 | * and save its vector registers in its thread_struct. | ||
864 | * Reuse the fp and vsx saves, but first check to see if they have | ||
865 | * been saved already. | ||
866 | * On entry: r13 == 'current' && last_task_used_vsx != 'current' | ||
867 | */ | ||
868 | _STATIC(load_up_vsx) | ||
869 | /* Load FP and VSX registers if they haven't been done yet */ | ||
870 | andi. r5,r12,MSR_FP | ||
871 | beql+ load_up_fpu /* skip if already loaded */ | ||
872 | andis. r5,r12,MSR_VEC@h | ||
873 | beql+ load_up_altivec /* skip if already loaded */ | ||
874 | |||
875 | #ifndef CONFIG_SMP | ||
876 | ld r3,last_task_used_vsx@got(r2) | ||
877 | ld r4,0(r3) | ||
878 | cmpdi 0,r4,0 | ||
879 | beq 1f | ||
880 | /* Disable VSX for last_task_used_vsx */ | ||
881 | addi r4,r4,THREAD | ||
882 | ld r5,PT_REGS(r4) | ||
883 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
884 | lis r6,MSR_VSX@h | ||
885 | andc r6,r4,r6 | ||
886 | std r6,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
887 | 1: | ||
888 | #endif /* CONFIG_SMP */ | ||
889 | ld r4,PACACURRENT(r13) | ||
890 | addi r4,r4,THREAD /* Get THREAD */ | ||
891 | li r6,1 | ||
892 | stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */ | ||
893 | /* enable use of VSX after return */ | ||
894 | oris r12,r12,MSR_VSX@h | ||
895 | std r12,_MSR(r1) | ||
896 | #ifndef CONFIG_SMP | ||
897 | /* Update last_task_used_math to 'current' */ | ||
898 | ld r4,PACACURRENT(r13) | ||
899 | std r4,0(r3) | ||
900 | #endif /* CONFIG_SMP */ | ||
901 | b fast_exception_return | ||
902 | #endif /* CONFIG_VSX */ | ||
903 | |||
833 | /* | 904 | /* |
834 | * Hash table stuff | 905 | * Hash table stuff |
835 | */ | 906 | */ |
@@ -1127,7 +1198,6 @@ _GLOBAL(generic_secondary_smp_init) | |||
1127 | 3: HMT_LOW | 1198 | 3: HMT_LOW |
1128 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | 1199 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ |
1129 | /* start. */ | 1200 | /* start. */ |
1130 | sync | ||
1131 | 1201 | ||
1132 | #ifndef CONFIG_SMP | 1202 | #ifndef CONFIG_SMP |
1133 | b 3b /* Never go on non-SMP */ | 1203 | b 3b /* Never go on non-SMP */ |
@@ -1135,6 +1205,8 @@ _GLOBAL(generic_secondary_smp_init) | |||
1135 | cmpwi 0,r23,0 | 1205 | cmpwi 0,r23,0 |
1136 | beq 3b /* Loop until told to go */ | 1206 | beq 3b /* Loop until told to go */ |
1137 | 1207 | ||
1208 | sync /* order paca.run and cur_cpu_spec */ | ||
1209 | |||
1138 | /* See if we need to call a cpu state restore handler */ | 1210 | /* See if we need to call a cpu state restore handler */ |
1139 | LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) | 1211 | LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) |
1140 | ld r23,0(r23) | 1212 | ld r23,0(r23) |