Diffstat (limited to 'arch/ppc64/kernel/head.S'):
 -rw-r--r--  arch/ppc64/kernel/head.S | 532
 1 file changed, 215 insertions, 317 deletions
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 784f56d4684c..58c314738c99 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -23,39 +23,32 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#define SECONDARY_PROCESSORS
-
 #include <linux/config.h>
 #include <linux/threads.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/naca.h>
 #include <asm/systemcfg.h>
 #include <asm/ppc_asm.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/bug.h>
 #include <asm/cputable.h>
 #include <asm/setup.h>
 #include <asm/hvcall.h>
+#include <asm/iSeries/LparMap.h>
 
 #ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
 #endif
 
 /*
- * hcall interface to pSeries LPAR
- */
-#define H_SET_ASR 0x30
-
-/*
  * We layout physical memory as follows:
  * 0x0000 - 0x00ff : Secondary processor spin code
  * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x3fff : Interrupt support
- * 0x4000 - 0x4fff : NACA
- * 0x6000 : iSeries and common interrupt prologs
- * 0x9000 - 0x9fff : Initial segment table
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 -        : Early init and support code
  */
 
 /*
@@ -93,6 +86,7 @@ END_FTR_SECTION(0, 1)
 
         /* Catch branch to 0 in real mode */
         trap
+
 #ifdef CONFIG_PPC_ISERIES
         /*
          * At offset 0x20, there is a pointer to iSeries LPAR data.
@@ -102,12 +96,12 @@ END_FTR_SECTION(0, 1)
         .llong hvReleaseData-KERNELBASE
 
         /*
-         * At offset 0x28 and 0x30 are offsets to the msChunks
+         * At offset 0x28 and 0x30 are offsets to the mschunks_map
          * array (used by the iSeries LPAR debugger to do translation
          * between physical addresses and absolute addresses) and
          * to the pidhash table (also used by the debugger)
          */
-        .llong msChunks-KERNELBASE
+        .llong mschunks_map-KERNELBASE
         .llong 0 /* pidhash-KERNELBASE SFRXXX */
 
         /* Offset 0x38 - Pointer to start of embedded System.map */
@@ -119,7 +113,7 @@ embedded_sysmap_start:
 embedded_sysmap_end:
         .llong 0
 
-#else /* CONFIG_PPC_ISERIES */
+#endif /* CONFIG_PPC_ISERIES */
 
         /* Secondary processors spin on this value until it goes to 1. */
         .globl __secondary_hold_spinloop
@@ -154,7 +148,7 @@ _GLOBAL(__secondary_hold)
         std r24,__secondary_hold_acknowledge@l(0)
         sync
 
-        /* All secondary cpu's wait here until told to start. */
+        /* All secondary cpus wait here until told to start. */
 100:    ld r4,__secondary_hold_spinloop@l(0)
         cmpdi 0,r4,1
         bne 100b
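
[Annotation, not part of the patch] The holding-pen handshake touched by the hunk above is simple enough to paraphrase in C. The sketch below is illustrative only: the two symbol names come from head.S, everything else (the function, the volatile-global modelling of real-mode memory, the omission of sync/cache management) is an assumption for readability.

/* Illustrative C paraphrase of the __secondary_hold holding pen. */
extern volatile unsigned long __secondary_hold_spinloop;    /* set to 1 by the boot CPU */
extern volatile unsigned long __secondary_hold_acknowledge; /* polled by the boot CPU   */

static void secondary_hold_sketch(unsigned long cpu_id)
{
        /* Tell the boot CPU this secondary reached the holding pen. */
        __secondary_hold_acknowledge = cpu_id;

        /* All secondary cpus wait here until told to start. */
        while (__secondary_hold_spinloop != 1)
                ;       /* spin */

        /* head.S then branches on into the copied kernel image. */
}
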
@@ -169,7 +163,6 @@ _GLOBAL(__secondary_hold)
         BUG_OPCODE
 #endif
 #endif
-#endif
 
 /* This value is used to mark exception frames on the stack. */
         .section ".toc","aw"
@@ -501,33 +494,37 @@ system_call_pSeries:
         STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
         STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
 
+        . = 0x3000
+
+/*** pSeries interrupt support ***/
+
         /* moved from 0xf00 */
-        STD_EXCEPTION_PSERIES(0x3000, performance_monitor)
+        STD_EXCEPTION_PSERIES(., performance_monitor)
 
-        . = 0x3100
+        .align 7
 _GLOBAL(do_stab_bolted_pSeries)
         mtcrf 0x80,r12
         mfspr r12,SPRG2
         EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
-
-        /* Space for the naca. Architected to be located at real address
-         * NACA_PHYS_ADDR. Various tools rely on this location being fixed.
-         * The first dword of the naca is required by iSeries LPAR to
-         * point to itVpdAreas. On pSeries native, this value is not used.
-         */
-        . = NACA_PHYS_ADDR
-        .globl __end_interrupts
-__end_interrupts:
-#ifdef CONFIG_PPC_ISERIES
-        .globl naca
-naca:
-        .llong itVpdAreas
-        .llong 0 /* xRamDisk */
-        .llong 0 /* xRamDiskSize */
+/*
+ * Vectors for the FWNMI option. Share common code.
+ */
+        .globl system_reset_fwnmi
+system_reset_fwnmi:
+        HMT_MEDIUM
+        mtspr SPRG1,r13 /* save r13 */
+        RUNLATCH_ON(r13)
+        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
 
-        . = 0x6100
+        .globl machine_check_fwnmi
+machine_check_fwnmi:
+        HMT_MEDIUM
+        mtspr SPRG1,r13 /* save r13 */
+        RUNLATCH_ON(r13)
+        EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
+#ifdef CONFIG_PPC_ISERIES
 /*** ISeries-LPAR interrupt handlers ***/
 
         STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
@@ -625,9 +622,7 @@ system_reset_iSeries:
 
         cmpwi 0,r23,0
         beq iSeries_secondary_smp_loop /* Loop until told to go */
-#ifdef SECONDARY_PROCESSORS
         bne .__secondary_start /* Loop until told to go */
-#endif
 iSeries_secondary_smp_loop:
         /* Let the Hypervisor know we are alive */
         /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -670,45 +665,7 @@ hardware_interrupt_iSeries_masked:
         ld r13,PACA_EXGEN+EX_R13(r13)
         rfid
         b . /* prevent speculative execution */
-#endif
+#endif /* CONFIG_PPC_ISERIES */
-
-/*
- * Data area reserved for FWNMI option.
- */
-        .= 0x7000
-        .globl fwnmi_data_area
-fwnmi_data_area:
-
-/*
- * Vectors for the FWNMI option. Share common code.
- */
-        . = 0x8000
-        .globl system_reset_fwnmi
-system_reset_fwnmi:
-        HMT_MEDIUM
-        mtspr SPRG1,r13 /* save r13 */
-        RUNLATCH_ON(r13)
-        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-        .globl machine_check_fwnmi
-machine_check_fwnmi:
-        HMT_MEDIUM
-        mtspr SPRG1,r13 /* save r13 */
-        RUNLATCH_ON(r13)
-        EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-        /*
-         * Space for the initial segment table
-         * For LPAR, the hypervisor must fill in at least one entry
-         * before we get control (with relocate on)
-         */
-        . = STAB0_PHYS_ADDR
-        .globl __start_stab
-__start_stab:
-
-        . = (STAB0_PHYS_ADDR + PAGE_SIZE)
-        .globl __end_stab
-__end_stab:
-
 
 /*** Common interrupt handlers ***/
 
@@ -746,8 +703,8 @@ machine_check_common:
  * R9 contains the saved CR, r13 points to the paca,
  * r10 contains the (bad) kernel stack pointer,
  * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using the paca guard page as an emergency stack,
- * save the registers there, and call kernel_bad_stack(), which panics.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
  */
 bad_stack:
         ld r1,PACAEMERGSP(r13)
@@ -900,6 +857,62 @@ fp_unavailable_common:
         bl .kernel_fp_unavailable_exception
         BUG_OPCODE
 
+/*
+ * load_up_fpu(unused, unused, tsk)
+ * Disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ * On SMP we know the fpu is free, since we give it up every
+ * switch (ie, no lazy save of the FP registers).
+ * On entry: r13 == 'current' && last_task_used_math != 'current'
+ */
+_STATIC(load_up_fpu)
+        mfmsr r5 /* grab the current MSR */
+        ori r5,r5,MSR_FP
+        mtmsrd r5 /* enable use of fpu now */
+        isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_fpu in switch_to.
+ *
+ */
+#ifndef CONFIG_SMP
+        ld r3,last_task_used_math@got(r2)
+        ld r4,0(r3)
+        cmpdi 0,r4,0
+        beq 1f
+        /* Save FP state to last_task_used_math's THREAD struct */
+        addi r4,r4,THREAD
+        SAVE_32FPRS(0, r4)
+        mffs fr0
+        stfd fr0,THREAD_FPSCR(r4)
+        /* Disable FP for last_task_used_math */
+        ld r5,PT_REGS(r4)
+        ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+        li r6,MSR_FP|MSR_FE0|MSR_FE1
+        andc r4,r4,r6
+        std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+        /* enable use of FP after return */
+        ld r4,PACACURRENT(r13)
+        addi r5,r4,THREAD /* Get THREAD */
+        ld r4,THREAD_FPEXC_MODE(r5)
+        ori r12,r12,MSR_FP
+        or r12,r12,r4
+        std r12,_MSR(r1)
+        lfd fr0,THREAD_FPSCR(r5)
+        mtfsf 0xff,fr0
+        REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+        /* Update last_task_used_math to 'current' */
+        subi r4,r5,THREAD /* Back to 'current' */
+        std r4,0(r3)
+#endif /* CONFIG_SMP */
+        /* restore registers and return */
+        b fast_exception_return
+
         .align 7
         .globl altivec_unavailable_common
 altivec_unavailable_common:
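
[Annotation, not part of the patch] For readers less fluent in ppc64 assembler, the lazy hand-off that the relocated load_up_fpu block implements on a non-SMP kernel is roughly the following C. The struct layout, field names and helper function are simplified stand-ins invented for this sketch, not the kernel's real thread_struct/pt_regs definitions.

#include <string.h>

#define MSR_FP  0x2000UL
#define MSR_FE0 0x0800UL
#define MSR_FE1 0x0100UL

struct fp_state    { double fpr[32]; unsigned long fpscr; };
struct task_sketch {
        struct fp_state fp;         /* per-task FP save area (thread_struct)  */
        unsigned long   regs_msr;   /* stands in for pt_regs->msr             */
        unsigned long   fpexc_mode; /* FP exception mode bits for this task   */
};

static struct task_sketch *last_task_used_math;  /* UP-only lazy FPU owner    */
static struct fp_state     hw_fp;                /* models the live FPU state */

static void load_up_fpu_sketch(struct task_sketch *tsk, unsigned long *frame_msr)
{
        /* Another task still owns the FPU: spill its registers into its
         * thread struct and clear MSR_FP in its saved MSR, so it traps
         * back here the next time it touches floating point. */
        if (last_task_used_math && last_task_used_math != tsk) {
                memcpy(&last_task_used_math->fp, &hw_fp, sizeof(hw_fp));
                last_task_used_math->regs_msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
        }

        /* Restore this task's FP state and enable FP in the interrupted
         * context's MSR so it resumes with the FPU live. */
        memcpy(&hw_fp, &tsk->fp, sizeof(hw_fp));
        *frame_msr |= MSR_FP | tsk->fpexc_mode;

        last_task_used_math = tsk;  /* this task owns the FPU now */
}
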
@@ -915,6 +928,80 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         bl .altivec_unavailable_exception
         b .ret_from_except
 
+#ifdef CONFIG_ALTIVEC
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ * On entry: r13 == 'current' && last_task_used_altivec != 'current'
+ */
+_STATIC(load_up_altivec)
+        mfmsr r5 /* grab the current MSR */
+        oris r5,r5,MSR_VEC@h
+        mtmsrd r5 /* enable use of VMX now */
+        isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_altvec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+        ld r3,last_task_used_altivec@got(r2)
+        ld r4,0(r3)
+        cmpdi 0,r4,0
+        beq 1f
+        /* Save VMX state to last_task_used_altivec's THREAD struct */
+        addi r4,r4,THREAD
+        SAVE_32VRS(0,r5,r4)
+        mfvscr vr0
+        li r10,THREAD_VSCR
+        stvx vr0,r10,r4
+        /* Disable VMX for last_task_used_altivec */
+        ld r5,PT_REGS(r4)
+        ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+        lis r6,MSR_VEC@h
+        andc r4,r4,r6
+        std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+        /* Hack: if we get an altivec unavailable trap with VRSAVE
+         * set to all zeros, we assume this is a broken application
+         * that fails to set it properly, and thus we switch it to
+         * all 1's
+         */
+        mfspr r4,SPRN_VRSAVE
+        cmpdi 0,r4,0
+        bne+ 1f
+        li r4,-1
+        mtspr SPRN_VRSAVE,r4
+1:
+        /* enable use of VMX after return */
+        ld r4,PACACURRENT(r13)
+        addi r5,r4,THREAD /* Get THREAD */
+        oris r12,r12,MSR_VEC@h
+        std r12,_MSR(r1)
+        li r4,1
+        li r10,THREAD_VSCR
+        stw r4,THREAD_USED_VR(r5)
+        lvx vr0,r10,r5
+        mtvscr vr0
+        REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+        /* Update last_task_used_math to 'current' */
+        subi r4,r5,THREAD /* Back to 'current' */
+        std r4,0(r3)
+#endif /* CONFIG_SMP */
+        /* restore registers and return */
+        b fast_exception_return
+#endif /* CONFIG_ALTIVEC */
+
 /*
  * Hash table stuff
  */
@@ -1161,6 +1248,42 @@ unrecov_slb:
         bl .unrecoverable_exception
         b 1b
 
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on). The address is give to the hv
+ * as a page number (see xLparMap in LparData.c), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+        . = STAB0_PHYS_ADDR /* 0x6000 */
+        .globl initial_stab
+initial_stab:
+        .space 4096
+
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+        .= 0x7000
+        .globl fwnmi_data_area
+fwnmi_data_area:
+
+        /* iSeries does not use the FWNMI stuff, so it is safe to put
+         * this here, even if we later allow kernels that will boot on
+         * both pSeries and iSeries */
+#ifdef CONFIG_PPC_ISERIES
+        . = LPARMAP_PHYS
+#include "lparmap.s"
+/*
+ * This ".text" is here for old compilers that generate a trailing
+ * .note section when compiling .c files to .s
+ */
+        .text
+#endif /* CONFIG_PPC_ISERIES */
+
+        . = 0x8000
 
 /*
  * On pSeries, secondary processors spin in the following code.
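
[Annotation, not part of the patch] The remark that the linker cannot compute (u64)&initial_stab >> PAGE_SHIFT deserves a concrete illustration. The sketch below is hypothetical: the struct and field names are invented, not the real xLparMap layout. The point is that the hypervisor map wants the segment table as a page number in a static initializer, which only works when the address is a plain compile-time constant.

#include <stdint.h>

#define PAGE_SHIFT      12
#define STAB0_PHYS_ADDR 0x6000          /* fixed by ". = STAB0_PHYS_ADDR" in head.S */

struct lpar_map_sketch {
        uint64_t segment_table_pfn;     /* hypervisor wants a page number */
};

/* Fine: a constant shifted by a constant folds at build time. */
static const struct lpar_map_sketch lpar_map = {
        .segment_table_pfn = STAB0_PHYS_ADDR >> PAGE_SHIFT,
};

/* Not expressible: an initializer such as ((uint64_t)&initial_stab >> PAGE_SHIFT)
 * would need the linker to shift a symbol's address, which it cannot do --
 * hence the fixed physical address above.
 */
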
@@ -1194,7 +1317,7 @@ _GLOBAL(pSeries_secondary_smp_init)
         b .kexec_wait /* next kernel might do better */
 
 2:      mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
-        /* From now on, r24 is expected to be logica cpuid */
+        /* From now on, r24 is expected to be logical cpuid */
         mr r24,r5
 3:      HMT_LOW
         lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
@@ -1207,10 +1330,8 @@ _GLOBAL(pSeries_secondary_smp_init)
 
         cmpwi 0,r23,0
 #ifdef CONFIG_SMP
-#ifdef SECONDARY_PROCESSORS
         bne .__secondary_start
 #endif
-#endif
         b 3b /* Loop until told to go */
 
 #ifdef CONFIG_PPC_ISERIES
@@ -1424,228 +1545,6 @@ _GLOBAL(copy_and_flush)
 .align 8
 copy_to_here:
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-        mfmsr r5 /* grab the current MSR */
-        ori r5,r5,MSR_FP
-        mtmsrd r5 /* enable use of fpu now */
-        isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-        ld r3,last_task_used_math@got(r2)
-        ld r4,0(r3)
-        cmpdi 0,r4,0
-        beq 1f
-        /* Save FP state to last_task_used_math's THREAD struct */
-        addi r4,r4,THREAD
-        SAVE_32FPRS(0, r4)
-        mffs fr0
-        stfd fr0,THREAD_FPSCR(r4)
-        /* Disable FP for last_task_used_math */
-        ld r5,PT_REGS(r4)
-        ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-        li r6,MSR_FP|MSR_FE0|MSR_FE1
-        andc r4,r4,r6
-        std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-        /* enable use of FP after return */
-        ld r4,PACACURRENT(r13)
-        addi r5,r4,THREAD /* Get THREAD */
-        ld r4,THREAD_FPEXC_MODE(r5)
-        ori r12,r12,MSR_FP
-        or r12,r12,r4
-        std r12,_MSR(r1)
-        lfd fr0,THREAD_FPSCR(r5)
-        mtfsf 0xff,fr0
-        REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-        /* Update last_task_used_math to 'current' */
-        subi r4,r5,THREAD /* Back to 'current' */
-        std r4,0(r3)
-#endif /* CONFIG_SMP */
-        /* restore registers and return */
-        b fast_exception_return
-
-/*
- * disable_kernel_fp()
- * Disable the FPU.
- */
-_GLOBAL(disable_kernel_fp)
-        mfmsr r3
-        rldicl r0,r3,(63-MSR_FP_LG),1
-        rldicl r3,r0,(MSR_FP_LG+1),0
-        mtmsrd r3 /* disable use of fpu now */
-        isync
-        blr
-
-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-_GLOBAL(giveup_fpu)
-        mfmsr r5
-        ori r5,r5,MSR_FP
-        mtmsrd r5 /* enable use of fpu now */
-        isync
-        cmpdi 0,r3,0
-        beqlr- /* if no previous owner, done */
-        addi r3,r3,THREAD /* want THREAD of task */
-        ld r5,PT_REGS(r3)
-        cmpdi 0,r5,0
-        SAVE_32FPRS(0, r3)
-        mffs fr0
-        stfd fr0,THREAD_FPSCR(r3)
-        beq 1f
-        ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-        li r3,MSR_FP|MSR_FE0|MSR_FE1
-        andc r4,r4,r3 /* disable FP for previous task */
-        std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-        li r5,0
-        ld r4,last_task_used_math@got(r2)
-        std r5,0(r4)
-#endif /* CONFIG_SMP */
-        blr
-
-
-#ifdef CONFIG_ALTIVEC
-
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-        mfmsr r5 /* grab the current MSR */
-        oris r5,r5,MSR_VEC@h
-        mtmsrd r5 /* enable use of VMX now */
-        isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-        ld r3,last_task_used_altivec@got(r2)
-        ld r4,0(r3)
-        cmpdi 0,r4,0
-        beq 1f
-        /* Save VMX state to last_task_used_altivec's THREAD struct */
-        addi r4,r4,THREAD
-        SAVE_32VRS(0,r5,r4)
-        mfvscr vr0
-        li r10,THREAD_VSCR
-        stvx vr0,r10,r4
-        /* Disable VMX for last_task_used_altivec */
-        ld r5,PT_REGS(r4)
-        ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-        lis r6,MSR_VEC@h
-        andc r4,r4,r6
-        std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-        /* Hack: if we get an altivec unavailable trap with VRSAVE
-         * set to all zeros, we assume this is a broken application
-         * that fails to set it properly, and thus we switch it to
-         * all 1's
-         */
-        mfspr r4,SPRN_VRSAVE
-        cmpdi 0,r4,0
-        bne+ 1f
-        li r4,-1
-        mtspr SPRN_VRSAVE,r4
-1:
-        /* enable use of VMX after return */
-        ld r4,PACACURRENT(r13)
-        addi r5,r4,THREAD /* Get THREAD */
-        oris r12,r12,MSR_VEC@h
-        std r12,_MSR(r1)
-        li r4,1
-        li r10,THREAD_VSCR
-        stw r4,THREAD_USED_VR(r5)
-        lvx vr0,r10,r5
-        mtvscr vr0
-        REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-        /* Update last_task_used_math to 'current' */
-        subi r4,r5,THREAD /* Back to 'current' */
-        std r4,0(r3)
-#endif /* CONFIG_SMP */
-        /* restore registers and return */
-        b fast_exception_return
-
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
-        mfmsr r3
-        rldicl r0,r3,(63-MSR_VEC_LG),1
-        rldicl r3,r0,(MSR_VEC_LG+1),0
-        mtmsrd r3 /* disable use of VMX now */
-        isync
-        blr
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
-        mfmsr r5
-        oris r5,r5,MSR_VEC@h
-        mtmsrd r5 /* enable use of VMX now */
-        isync
-        cmpdi 0,r3,0
-        beqlr- /* if no previous owner, done */
-        addi r3,r3,THREAD /* want THREAD of task */
-        ld r5,PT_REGS(r3)
-        cmpdi 0,r5,0
-        SAVE_32VRS(0,r4,r3)
-        mfvscr vr0
-        li r4,THREAD_VSCR
-        stvx vr0,r4,r3
-        beq 1f
-        ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-        lis r3,MSR_VEC@h
-        andc r4,r4,r3 /* disable FP for previous task */
-        std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-        li r5,0
-        ld r4,last_task_used_altivec@got(r2)
-        std r5,0(r4)
-#endif /* CONFIG_SMP */
-        blr
-
-#endif /* CONFIG_ALTIVEC */
-
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC_PMAC
 /*
@@ -1747,8 +1646,9 @@ _GLOBAL(__secondary_start)
 #else
         /* set the ASR */
         ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
+        ld r3,0(r3)
         lwz r3,PLATFORM(r3) /* r3 = platform flags */
-        cmpldi r3,PLATFORM_PSERIES_LPAR
+        andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
         bne 98f
         mfspr r3,PVR
         srwi r3,r3,16
@@ -1910,8 +1810,9 @@ _STATIC(start_here_multiplatform)
         ld r3,PACASTABREAL(r13)
         ori r4,r3,1 /* turn on valid bit */
         ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
+        ld r3,0(r3)
         lwz r3,PLATFORM(r3) /* r3 = platform flags */
-        cmpldi r3,PLATFORM_PSERIES_LPAR
+        andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
         bne 98f
         mfspr r3,PVR
         srwi r3,r3,16
@@ -1929,9 +1830,10 @@ _STATIC(start_here_multiplatform)
 99:
         /* Set SDR1 (hash table pointer) */
         ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
+        ld r3,0(r3)
         lwz r3,PLATFORM(r3) /* r3 = platform flags */
         /* Test if bit 0 is set (LPAR bit) */
-        andi. r3,r3,0x1
+        andi. r3,r3,PLATFORM_LPAR
         bne 98f
         LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
         sub r6,r6,r26
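
[Annotation, not part of the patch] The three hunks above make the same change in three places: systemcfg is now a pointer that must be dereferenced, and instead of comparing the whole platform word against PLATFORM_PSERIES_LPAR the code tests only the LPAR bit. In C it amounts to roughly the following; the 0x1 value mirrors the "bit 0 is the LPAR bit" comment and should be read as an assumption, not as the header's definition.

#define PLATFORM_LPAR 0x1       /* bit 0 of systemcfg->platform, per the comment above */

static int platform_is_lpar(unsigned int platform)
{
        /* True for any LPAR flavour, not just PLATFORM_PSERIES_LPAR. */
        return (platform & PLATFORM_LPAR) != 0;
}
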
@@ -1996,9 +1898,6 @@ _STATIC(start_here_common)
 
         bl .start_kernel
 
-_GLOBAL(__setup_cpu_power3)
-        blr
-
 _GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
         LOADADDR(r5, hmt_thread_data)
@@ -2089,20 +1988,19 @@ _GLOBAL(smp_release_cpus)
 
 /*
  * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
+ * This stuff goes at the beginning of the bss, which is page-aligned.
  */
-        .data
+        .section ".bss"
+
         .align 12
-        .globl sdata
-sdata:
+
         .globl empty_zero_page
 empty_zero_page:
-        .space 4096
+        .space PAGE_SIZE
 
         .globl swapper_pg_dir
 swapper_pg_dir:
-        .space 4096
+        .space PAGE_SIZE
 
 /*
  * This space gets a copy of optional info passed to us by the bootstrap