about summary refs log tree commit diff stats
path: root/arch/powerpc/kernel/head_64.S
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r-- arch/powerpc/kernel/head_64.S 473
1 files changed, 262 insertions, 211 deletions
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index cc8fb474d52..84856bee33a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -82,7 +82,11 @@ END_FTR_SECTION(0, 1)
82 /* Catch branch to 0 in real mode */ 82 /* Catch branch to 0 in real mode */
83 trap 83 trap
84 84
85 /* Secondary processors spin on this value until it goes to 1. */ 85 /* Secondary processors spin on this value until it becomes nonzero.
86 * When it does it contains the real address of the descriptor
87 * of the function that the cpu should jump to to continue
88 * initialization.
89 */
86 .globl __secondary_hold_spinloop 90 .globl __secondary_hold_spinloop
87__secondary_hold_spinloop: 91__secondary_hold_spinloop:
88 .llong 0x0 92 .llong 0x0
@@ -109,8 +113,11 @@ __secondary_hold_acknowledge:
109 * before the bulk of the kernel has been relocated. This code 113 * before the bulk of the kernel has been relocated. This code
110 * is relocated to physical address 0x60 before prom_init is run. 114 * is relocated to physical address 0x60 before prom_init is run.
111 * All of it must fit below the first exception vector at 0x100. 115 * All of it must fit below the first exception vector at 0x100.
116 * Use .globl here not _GLOBAL because we want __secondary_hold
117 * to be the actual text address, not a descriptor.
112 */ 118 */
113_GLOBAL(__secondary_hold) 119 .globl __secondary_hold
120__secondary_hold:
114 mfmsr r24 121 mfmsr r24
115 ori r24,r24,MSR_RI 122 ori r24,r24,MSR_RI
116 mtmsrd r24 /* RI on */ 123 mtmsrd r24 /* RI on */
@@ -121,16 +128,16 @@ _GLOBAL(__secondary_hold)
121 /* Tell the master cpu we're here */ 128 /* Tell the master cpu we're here */
122 /* Relocation is off & we are located at an address less */ 129 /* Relocation is off & we are located at an address less */
123 /* than 0x100, so only need to grab low order offset. */ 130 /* than 0x100, so only need to grab low order offset. */
124 std r24,__secondary_hold_acknowledge@l(0) 131 std r24,__secondary_hold_acknowledge-_stext(0)
125 sync 132 sync
126 133
127 /* All secondary cpus wait here until told to start. */ 134 /* All secondary cpus wait here until told to start. */
128100: ld r4,__secondary_hold_spinloop@l(0) 135100: ld r4,__secondary_hold_spinloop-_stext(0)
129 cmpdi 0,r4,1 136 cmpdi 0,r4,0
130 bne 100b 137 beq 100b
131 138
132#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 139#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
133 LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init) 140 ld r4,0(r4) /* deref function descriptor */
134 mtctr r4 141 mtctr r4
135 mr r3,r24 142 mr r3,r24
136 bctr 143 bctr
@@ -147,6 +154,10 @@ exception_marker:
147/* 154/*
148 * This is the start of the interrupt handlers for pSeries 155 * This is the start of the interrupt handlers for pSeries
149 * This code runs with relocation off. 156 * This code runs with relocation off.
157 * Code from here to __end_interrupts gets copied down to real
158 * address 0x100 when we are running a relocatable kernel.
159 * Therefore any relative branches in this section must only
160 * branch to labels in this section.
150 */ 161 */
151 . = 0x100 162 . = 0x100
152 .globl __start_interrupts 163 .globl __start_interrupts
@@ -200,7 +211,20 @@ data_access_slb_pSeries:
200 mfspr r10,SPRN_SPRG1 211 mfspr r10,SPRN_SPRG1
201 std r10,PACA_EXSLB+EX_R13(r13) 212 std r10,PACA_EXSLB+EX_R13(r13)
202 mfspr r12,SPRN_SRR1 /* and SRR1 */ 213 mfspr r12,SPRN_SRR1 /* and SRR1 */
203 b .slb_miss_realmode /* Rel. branch works in real mode */ 214#ifndef CONFIG_RELOCATABLE
215 b .slb_miss_realmode
216#else
217 /*
218 * We can't just use a direct branch to .slb_miss_realmode
219 * because the distance from here to there depends on where
220 * the kernel ends up being put.
221 */
222 mfctr r11
223 ld r10,PACAKBASE(r13)
224 LOAD_HANDLER(r10, .slb_miss_realmode)
225 mtctr r10
226 bctr
227#endif
204 228
205 STD_EXCEPTION_PSERIES(0x400, instruction_access) 229 STD_EXCEPTION_PSERIES(0x400, instruction_access)
206 230
@@ -225,7 +249,15 @@ instruction_access_slb_pSeries:
225 mfspr r10,SPRN_SPRG1 249 mfspr r10,SPRN_SPRG1
226 std r10,PACA_EXSLB+EX_R13(r13) 250 std r10,PACA_EXSLB+EX_R13(r13)
227 mfspr r12,SPRN_SRR1 /* and SRR1 */ 251 mfspr r12,SPRN_SRR1 /* and SRR1 */
228 b .slb_miss_realmode /* Rel. branch works in real mode */ 252#ifndef CONFIG_RELOCATABLE
253 b .slb_miss_realmode
254#else
255 mfctr r11
256 ld r10,PACAKBASE(r13)
257 LOAD_HANDLER(r10, .slb_miss_realmode)
258 mtctr r10
259 bctr
260#endif
229 261
230 MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) 262 MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
231 STD_EXCEPTION_PSERIES(0x600, alignment) 263 STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -244,14 +276,12 @@ BEGIN_FTR_SECTION
244 beq- 1f 276 beq- 1f
245END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) 277END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
246 mr r9,r13 278 mr r9,r13
247 mfmsr r10
248 mfspr r13,SPRN_SPRG3 279 mfspr r13,SPRN_SPRG3
249 mfspr r11,SPRN_SRR0 280 mfspr r11,SPRN_SRR0
250 clrrdi r12,r13,32 281 ld r12,PACAKBASE(r13)
251 oris r12,r12,system_call_common@h 282 ld r10,PACAKMSR(r13)
252 ori r12,r12,system_call_common@l 283 LOAD_HANDLER(r12, system_call_entry)
253 mtspr SPRN_SRR0,r12 284 mtspr SPRN_SRR0,r12
254 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
255 mfspr r12,SPRN_SRR1 285 mfspr r12,SPRN_SRR1
256 mtspr SPRN_SRR1,r10 286 mtspr SPRN_SRR1,r10
257 rfid 287 rfid
@@ -325,16 +355,32 @@ do_stab_bolted_pSeries:
325 mfspr r12,SPRN_SPRG2 355 mfspr r12,SPRN_SPRG2
326 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 356 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
327 357
358#ifdef CONFIG_PPC_PSERIES
359/*
360 * Vectors for the FWNMI option. Share common code.
361 */
362 .globl system_reset_fwnmi
363 .align 7
364system_reset_fwnmi:
365 HMT_MEDIUM
366 mtspr SPRN_SPRG1,r13 /* save r13 */
367 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
368
369 .globl machine_check_fwnmi
370 .align 7
371machine_check_fwnmi:
372 HMT_MEDIUM
373 mtspr SPRN_SPRG1,r13 /* save r13 */
374 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
375
376#endif /* CONFIG_PPC_PSERIES */
377
378#ifdef __DISABLED__
328/* 379/*
329 * We have some room here we use that to put
330 * the peries slb miss user trampoline code so it's reasonably
331 * away from slb_miss_user_common to avoid problems with rfid
332 *
333 * This is used for when the SLB miss handler has to go virtual, 380 * This is used for when the SLB miss handler has to go virtual,
334 * which doesn't happen for now anymore but will once we re-implement 381 * which doesn't happen for now anymore but will once we re-implement
335 * dynamic VSIDs for shared page tables 382 * dynamic VSIDs for shared page tables
336 */ 383 */
337#ifdef __DISABLED__
338slb_miss_user_pseries: 384slb_miss_user_pseries:
339 std r10,PACA_EXGEN+EX_R10(r13) 385 std r10,PACA_EXGEN+EX_R10(r13)
340 std r11,PACA_EXGEN+EX_R11(r13) 386 std r11,PACA_EXGEN+EX_R11(r13)
@@ -357,25 +403,17 @@ slb_miss_user_pseries:
357 b . /* prevent spec. execution */ 403 b . /* prevent spec. execution */
358#endif /* __DISABLED__ */ 404#endif /* __DISABLED__ */
359 405
360#ifdef CONFIG_PPC_PSERIES 406 .align 7
407 .globl __end_interrupts
408__end_interrupts:
409
361/* 410/*
362 * Vectors for the FWNMI option. Share common code. 411 * Code from here down to __end_handlers is invoked from the
412 * exception prologs above. Because the prologs assemble the
413 * addresses of these handlers using the LOAD_HANDLER macro,
414 * which uses an addi instruction, these handlers must be in
415 * the first 32k of the kernel image.
363 */ 416 */
364 .globl system_reset_fwnmi
365 .align 7
366system_reset_fwnmi:
367 HMT_MEDIUM
368 mtspr SPRN_SPRG1,r13 /* save r13 */
369 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)
370
371 .globl machine_check_fwnmi
372 .align 7
373machine_check_fwnmi:
374 HMT_MEDIUM
375 mtspr SPRN_SPRG1,r13 /* save r13 */
376 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
377
378#endif /* CONFIG_PPC_PSERIES */
379 417
380/*** Common interrupt handlers ***/ 418/*** Common interrupt handlers ***/
381 419
@@ -414,6 +452,10 @@ machine_check_common:
414 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) 452 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
415#endif /* CONFIG_CBE_RAS */ 453#endif /* CONFIG_CBE_RAS */
416 454
455 .align 7
456system_call_entry:
457 b system_call_common
458
417/* 459/*
418 * Here we have detected that the kernel stack pointer is bad. 460 * Here we have detected that the kernel stack pointer is bad.
419 * R9 contains the saved CR, r13 points to the paca, 461 * R9 contains the saved CR, r13 points to the paca,
@@ -457,65 +499,6 @@ bad_stack:
457 b 1b 499 b 1b
458 500
459/* 501/*
460 * Return from an exception with minimal checks.
461 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
462 * If interrupts have been enabled, or anything has been
463 * done that might have changed the scheduling status of
464 * any task or sent any task a signal, you should use
465 * ret_from_except or ret_from_except_lite instead of this.
466 */
467fast_exc_return_irq: /* restores irq state too */
468 ld r3,SOFTE(r1)
469 TRACE_AND_RESTORE_IRQ(r3);
470 ld r12,_MSR(r1)
471 rldicl r4,r12,49,63 /* get MSR_EE to LSB */
472 stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
473 b 1f
474
475 .globl fast_exception_return
476fast_exception_return:
477 ld r12,_MSR(r1)
4781: ld r11,_NIP(r1)
479 andi. r3,r12,MSR_RI /* check if RI is set */
480 beq- unrecov_fer
481
482#ifdef CONFIG_VIRT_CPU_ACCOUNTING
483 andi. r3,r12,MSR_PR
484 beq 2f
485 ACCOUNT_CPU_USER_EXIT(r3, r4)
4862:
487#endif
488
489 ld r3,_CCR(r1)
490 ld r4,_LINK(r1)
491 ld r5,_CTR(r1)
492 ld r6,_XER(r1)
493 mtcr r3
494 mtlr r4
495 mtctr r5
496 mtxer r6
497 REST_GPR(0, r1)
498 REST_8GPRS(2, r1)
499
500 mfmsr r10
501 rldicl r10,r10,48,1 /* clear EE */
502 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
503 mtmsrd r10,1
504
505 mtspr SPRN_SRR1,r12
506 mtspr SPRN_SRR0,r11
507 REST_4GPRS(10, r1)
508 ld r1,GPR1(r1)
509 rfid
510 b . /* prevent speculative execution */
511
512unrecov_fer:
513 bl .save_nvgprs
5141: addi r3,r1,STACK_FRAME_OVERHEAD
515 bl .unrecoverable_exception
516 b 1b
517
518/*
519 * Here r13 points to the paca, r9 contains the saved CR, 502 * Here r13 points to the paca, r9 contains the saved CR,
520 * SRR0 and SRR1 are saved in r11 and r12, 503 * SRR0 and SRR1 are saved in r11 and r12,
521 * r9 - r13 are saved in paca->exgen. 504 * r9 - r13 are saved in paca->exgen.
@@ -616,6 +599,9 @@ unrecov_user_slb:
616 */ 599 */
617_GLOBAL(slb_miss_realmode) 600_GLOBAL(slb_miss_realmode)
618 mflr r10 601 mflr r10
602#ifdef CONFIG_RELOCATABLE
603 mtctr r11
604#endif
619 605
620 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 606 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
621 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 607 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
@@ -666,11 +652,10 @@ BEGIN_FW_FTR_SECTION
666END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 652END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
667#endif /* CONFIG_PPC_ISERIES */ 653#endif /* CONFIG_PPC_ISERIES */
668 mfspr r11,SPRN_SRR0 654 mfspr r11,SPRN_SRR0
669 clrrdi r10,r13,32 655 ld r10,PACAKBASE(r13)
670 LOAD_HANDLER(r10,unrecov_slb) 656 LOAD_HANDLER(r10,unrecov_slb)
671 mtspr SPRN_SRR0,r10 657 mtspr SPRN_SRR0,r10
672 mfmsr r10 658 ld r10,PACAKMSR(r13)
673 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
674 mtspr SPRN_SRR1,r10 659 mtspr SPRN_SRR1,r10
675 rfid 660 rfid
676 b . 661 b .
@@ -766,6 +751,85 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
766 bl .altivec_unavailable_exception 751 bl .altivec_unavailable_exception
767 b .ret_from_except 752 b .ret_from_except
768 753
754 .align 7
755 .globl vsx_unavailable_common
756vsx_unavailable_common:
757 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
758#ifdef CONFIG_VSX
759BEGIN_FTR_SECTION
760 bne .load_up_vsx
7611:
762END_FTR_SECTION_IFSET(CPU_FTR_VSX)
763#endif
764 bl .save_nvgprs
765 addi r3,r1,STACK_FRAME_OVERHEAD
766 ENABLE_INTS
767 bl .vsx_unavailable_exception
768 b .ret_from_except
769
770 .align 7
771 .globl __end_handlers
772__end_handlers:
773
774/*
775 * Return from an exception with minimal checks.
776 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
777 * If interrupts have been enabled, or anything has been
778 * done that might have changed the scheduling status of
779 * any task or sent any task a signal, you should use
780 * ret_from_except or ret_from_except_lite instead of this.
781 */
782fast_exc_return_irq: /* restores irq state too */
783 ld r3,SOFTE(r1)
784 TRACE_AND_RESTORE_IRQ(r3);
785 ld r12,_MSR(r1)
786 rldicl r4,r12,49,63 /* get MSR_EE to LSB */
787 stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
788 b 1f
789
790 .globl fast_exception_return
791fast_exception_return:
792 ld r12,_MSR(r1)
7931: ld r11,_NIP(r1)
794 andi. r3,r12,MSR_RI /* check if RI is set */
795 beq- unrecov_fer
796
797#ifdef CONFIG_VIRT_CPU_ACCOUNTING
798 andi. r3,r12,MSR_PR
799 beq 2f
800 ACCOUNT_CPU_USER_EXIT(r3, r4)
8012:
802#endif
803
804 ld r3,_CCR(r1)
805 ld r4,_LINK(r1)
806 ld r5,_CTR(r1)
807 ld r6,_XER(r1)
808 mtcr r3
809 mtlr r4
810 mtctr r5
811 mtxer r6
812 REST_GPR(0, r1)
813 REST_8GPRS(2, r1)
814
815 mfmsr r10
816 rldicl r10,r10,48,1 /* clear EE */
817 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
818 mtmsrd r10,1
819
820 mtspr SPRN_SRR1,r12
821 mtspr SPRN_SRR0,r11
822 REST_4GPRS(10, r1)
823 ld r1,GPR1(r1)
824 rfid
825 b . /* prevent speculative execution */
826
827unrecov_fer:
828 bl .save_nvgprs
8291: addi r3,r1,STACK_FRAME_OVERHEAD
830 bl .unrecoverable_exception
831 b 1b
832
769#ifdef CONFIG_ALTIVEC 833#ifdef CONFIG_ALTIVEC
770/* 834/*
771 * load_up_altivec(unused, unused, tsk) 835 * load_up_altivec(unused, unused, tsk)
@@ -840,22 +904,6 @@ _STATIC(load_up_altivec)
840 blr 904 blr
841#endif /* CONFIG_ALTIVEC */ 905#endif /* CONFIG_ALTIVEC */
842 906
843 .align 7
844 .globl vsx_unavailable_common
845vsx_unavailable_common:
846 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
847#ifdef CONFIG_VSX
848BEGIN_FTR_SECTION
849 bne .load_up_vsx
8501:
851END_FTR_SECTION_IFSET(CPU_FTR_VSX)
852#endif
853 bl .save_nvgprs
854 addi r3,r1,STACK_FRAME_OVERHEAD
855 ENABLE_INTS
856 bl .vsx_unavailable_exception
857 b .ret_from_except
858
859#ifdef CONFIG_VSX 907#ifdef CONFIG_VSX
860/* 908/*
861 * load_up_vsx(unused, unused, tsk) 909 * load_up_vsx(unused, unused, tsk)
@@ -1175,11 +1223,14 @@ _GLOBAL(generic_secondary_smp_init)
1175 /* turn on 64-bit mode */ 1223 /* turn on 64-bit mode */
1176 bl .enable_64b_mode 1224 bl .enable_64b_mode
1177 1225
1226 /* get the TOC pointer (real address) */
1227 bl .relative_toc
1228
1178 /* Set up a paca value for this processor. Since we have the 1229 /* Set up a paca value for this processor. Since we have the
1179 * physical cpu id in r24, we need to search the pacas to find 1230 * physical cpu id in r24, we need to search the pacas to find
1180 * which logical id maps to our physical one. 1231 * which logical id maps to our physical one.
1181 */ 1232 */
1182 LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */ 1233 LOAD_REG_ADDR(r13, paca) /* Get base vaddr of paca array */
1183 li r5,0 /* logical cpu id */ 1234 li r5,0 /* logical cpu id */
11841: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ 12351: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1185 cmpw r6,r24 /* Compare to our id */ 1236 cmpw r6,r24 /* Compare to our id */
@@ -1208,7 +1259,7 @@ _GLOBAL(generic_secondary_smp_init)
1208 sync /* order paca.run and cur_cpu_spec */ 1259 sync /* order paca.run and cur_cpu_spec */
1209 1260
1210 /* See if we need to call a cpu state restore handler */ 1261 /* See if we need to call a cpu state restore handler */
1211 LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) 1262 LOAD_REG_ADDR(r23, cur_cpu_spec)
1212 ld r23,0(r23) 1263 ld r23,0(r23)
1213 ld r23,CPU_SPEC_RESTORE(r23) 1264 ld r23,CPU_SPEC_RESTORE(r23)
1214 cmpdi 0,r23,0 1265 cmpdi 0,r23,0
@@ -1224,10 +1275,15 @@ _GLOBAL(generic_secondary_smp_init)
1224 b __secondary_start 1275 b __secondary_start
1225#endif 1276#endif
1226 1277
1278/*
1279 * Turn the MMU off.
1280 * Assumes we're mapped EA == RA if the MMU is on.
1281 */
1227_STATIC(__mmu_off) 1282_STATIC(__mmu_off)
1228 mfmsr r3 1283 mfmsr r3
1229 andi. r0,r3,MSR_IR|MSR_DR 1284 andi. r0,r3,MSR_IR|MSR_DR
1230 beqlr 1285 beqlr
1286 mflr r4
1231 andc r3,r3,r0 1287 andc r3,r3,r0
1232 mtspr SPRN_SRR0,r4 1288 mtspr SPRN_SRR0,r4
1233 mtspr SPRN_SRR1,r3 1289 mtspr SPRN_SRR1,r3
@@ -1248,6 +1304,18 @@ _STATIC(__mmu_off)
1248 * 1304 *
1249 */ 1305 */
1250_GLOBAL(__start_initialization_multiplatform) 1306_GLOBAL(__start_initialization_multiplatform)
1307 /* Make sure we are running in 64 bits mode */
1308 bl .enable_64b_mode
1309
1310 /* Get TOC pointer (current runtime address) */
1311 bl .relative_toc
1312
1313 /* find out where we are now */
1314 bcl 20,31,$+4
13150: mflr r26 /* r26 = runtime addr here */
1316 addis r26,r26,(_stext - 0b)@ha
1317 addi r26,r26,(_stext - 0b)@l /* current runtime base addr */
1318
1251 /* 1319 /*
1252 * Are we booted from a PROM Of-type client-interface ? 1320 * Are we booted from a PROM Of-type client-interface ?
1253 */ 1321 */
@@ -1259,9 +1327,6 @@ _GLOBAL(__start_initialization_multiplatform)
1259 mr r31,r3 1327 mr r31,r3
1260 mr r30,r4 1328 mr r30,r4
1261 1329
1262 /* Make sure we are running in 64 bits mode */
1263 bl .enable_64b_mode
1264
1265 /* Setup some critical 970 SPRs before switching MMU off */ 1330 /* Setup some critical 970 SPRs before switching MMU off */
1266 mfspr r0,SPRN_PVR 1331 mfspr r0,SPRN_PVR
1267 srwi r0,r0,16 1332 srwi r0,r0,16
@@ -1276,9 +1341,7 @@ _GLOBAL(__start_initialization_multiplatform)
12761: bl .__cpu_preinit_ppc970 13411: bl .__cpu_preinit_ppc970
12772: 13422:
1278 1343
1279 /* Switch off MMU if not already */ 1344 /* Switch off MMU if not already off */
1280 LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
1281 add r4,r4,r30
1282 bl .__mmu_off 1345 bl .__mmu_off
1283 b .__after_prom_start 1346 b .__after_prom_start
1284 1347
@@ -1293,22 +1356,15 @@ _INIT_STATIC(__boot_from_prom)
1293 /* 1356 /*
1294 * Align the stack to 16-byte boundary 1357 * Align the stack to 16-byte boundary
1295 * Depending on the size and layout of the ELF sections in the initial 1358 * Depending on the size and layout of the ELF sections in the initial
1296 * boot binary, the stack pointer will be unalignet on PowerMac 1359 * boot binary, the stack pointer may be unaligned on PowerMac
1297 */ 1360 */
1298 rldicr r1,r1,0,59 1361 rldicr r1,r1,0,59
1299 1362
1300 /* Make sure we are running in 64 bits mode */ 1363#ifdef CONFIG_RELOCATABLE
1301 bl .enable_64b_mode 1364 /* Relocate code for where we are now */
1302 1365 mr r3,r26
1303 /* put a relocation offset into r3 */ 1366 bl .relocate
1304 bl .reloc_offset 1367#endif
1305
1306 LOAD_REG_IMMEDIATE(r2,__toc_start)
1307 addi r2,r2,0x4000
1308 addi r2,r2,0x4000
1309
1310 /* Relocate the TOC from a virt addr to a real addr */
1311 add r2,r2,r3
1312 1368
1313 /* Restore parameters */ 1369 /* Restore parameters */
1314 mr r3,r31 1370 mr r3,r31
@@ -1318,60 +1374,51 @@ _INIT_STATIC(__boot_from_prom)
1318 mr r7,r27 1374 mr r7,r27
1319 1375
1320 /* Do all of the interaction with OF client interface */ 1376 /* Do all of the interaction with OF client interface */
1377 mr r8,r26
1321 bl .prom_init 1378 bl .prom_init
1322 /* We never return */ 1379 /* We never return */
1323 trap 1380 trap
1324 1381
1325_STATIC(__after_prom_start) 1382_STATIC(__after_prom_start)
1383#ifdef CONFIG_RELOCATABLE
1384 /* process relocations for the final address of the kernel */
1385 lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
1386 sldi r25,r25,32
1387 mr r3,r25
1388 bl .relocate
1389#endif
1326 1390
1327/* 1391/*
1328 * We need to run with __start at physical address PHYSICAL_START. 1392 * We need to run with _stext at physical address PHYSICAL_START.
1329 * This will leave some code in the first 256B of 1393 * This will leave some code in the first 256B of
1330 * real memory, which are reserved for software use. 1394 * real memory, which are reserved for software use.
1331 * The remainder of the first page is loaded with the fixed
1332 * interrupt vectors. The next two pages are filled with
1333 * unknown exception placeholders.
1334 * 1395 *
1335 * Note: This process overwrites the OF exception vectors. 1396 * Note: This process overwrites the OF exception vectors.
1336 * r26 == relocation offset
1337 * r27 == KERNELBASE
1338 */ 1397 */
1339 bl .reloc_offset 1398 li r3,0 /* target addr */
1340 mr r26,r3 1399 mr. r4,r26 /* In some cases the loader may */
1341 LOAD_REG_IMMEDIATE(r27, KERNELBASE) 1400 beq 9f /* have already put us at zero */
1342 1401 lis r5,(copy_to_here - _stext)@ha
1343 LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */ 1402 addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
1344
1345 // XXX FIXME: Use phys returned by OF (r30)
1346 add r4,r27,r26 /* source addr */
1347 /* current address of _start */
1348 /* i.e. where we are running */
1349 /* the source addr */
1350
1351 cmpdi r4,0 /* In some cases the loader may */
1352 bne 1f
1353 b .start_here_multiplatform /* have already put us at zero */
1354 /* so we can skip the copy. */
13551: LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
1356 sub r5,r5,r27
1357
1358 li r6,0x100 /* Start offset, the first 0x100 */ 1403 li r6,0x100 /* Start offset, the first 0x100 */
1359 /* bytes were copied earlier. */ 1404 /* bytes were copied earlier. */
1360 1405
1361 bl .copy_and_flush /* copy the first n bytes */ 1406 bl .copy_and_flush /* copy the first n bytes */
1362 /* this includes the code being */ 1407 /* this includes the code being */
1363 /* executed here. */ 1408 /* executed here. */
1364 1409 addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */
1365 LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */ 1410 addi r8,r8,(4f - _stext)@l /* that we just made */
1366 mtctr r0 /* that we just made/relocated */ 1411 mtctr r8
1367 bctr 1412 bctr
1368 1413
13694: LOAD_REG_IMMEDIATE(r5,klimit) 14144: /* Now copy the rest of the kernel up to _end */
1370 add r5,r5,r26 1415 addis r5,r26,(p_end - _stext)@ha
1371 ld r5,0(r5) /* get the value of klimit */ 1416 ld r5,(p_end - _stext)@l(r5) /* get _end */
1372 sub r5,r5,r27
1373 bl .copy_and_flush /* copy the rest */ 1417 bl .copy_and_flush /* copy the rest */
1374 b .start_here_multiplatform 1418
14199: b .start_here_multiplatform
1420
1421p_end: .llong _end - _stext
1375 1422
1376/* 1423/*
1377 * Copy routine used to copy the kernel to start at physical address 0 1424 * Copy routine used to copy the kernel to start at physical address 0
@@ -1436,6 +1483,9 @@ _GLOBAL(pmac_secondary_start)
1436 /* turn on 64-bit mode */ 1483 /* turn on 64-bit mode */
1437 bl .enable_64b_mode 1484 bl .enable_64b_mode
1438 1485
1486 /* get TOC pointer (real address) */
1487 bl .relative_toc
1488
1439 /* Copy some CPU settings from CPU 0 */ 1489 /* Copy some CPU settings from CPU 0 */
1440 bl .__restore_cpu_ppc970 1490 bl .__restore_cpu_ppc970
1441 1491
@@ -1445,10 +1495,10 @@ _GLOBAL(pmac_secondary_start)
1445 mtmsrd r3 /* RI on */ 1495 mtmsrd r3 /* RI on */
1446 1496
1447 /* Set up a paca value for this processor. */ 1497 /* Set up a paca value for this processor. */
1448 LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ 1498 LOAD_REG_ADDR(r4,paca) /* Get base vaddr of paca array */
1449 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1499 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1450 add r13,r13,r4 /* for this processor. */ 1500 add r13,r13,r4 /* for this processor. */
1451 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1501 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1452 1502
1453 /* Create a temp kernel stack for use before relocation is on. */ 1503 /* Create a temp kernel stack for use before relocation is on. */
1454 ld r1,PACAEMERGSP(r13) 1504 ld r1,PACAEMERGSP(r13)
@@ -1476,9 +1526,6 @@ __secondary_start:
1476 /* Set thread priority to MEDIUM */ 1526 /* Set thread priority to MEDIUM */
1477 HMT_MEDIUM 1527 HMT_MEDIUM
1478 1528
1479 /* Load TOC */
1480 ld r2,PACATOC(r13)
1481
1482 /* Do early setup for that CPU (stab, slb, hash table pointer) */ 1529 /* Do early setup for that CPU (stab, slb, hash table pointer) */
1483 bl .early_setup_secondary 1530 bl .early_setup_secondary
1484 1531
@@ -1515,9 +1562,11 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1515 1562
1516/* 1563/*
1517 * Running with relocation on at this point. All we want to do is 1564 * Running with relocation on at this point. All we want to do is
1518 * zero the stack back-chain pointer before going into C code. 1565 * zero the stack back-chain pointer and get the TOC virtual address
1566 * before going into C code.
1519 */ 1567 */
1520_GLOBAL(start_secondary_prolog) 1568_GLOBAL(start_secondary_prolog)
1569 ld r2,PACATOC(r13)
1521 li r3,0 1570 li r3,0
1522 std r3,0(r1) /* Zero the stack frame pointer */ 1571 std r3,0(r1) /* Zero the stack frame pointer */
1523 bl .start_secondary 1572 bl .start_secondary
@@ -1529,34 +1578,46 @@ _GLOBAL(start_secondary_prolog)
1529 */ 1578 */
1530_GLOBAL(enable_64b_mode) 1579_GLOBAL(enable_64b_mode)
1531 mfmsr r11 /* grab the current MSR */ 1580 mfmsr r11 /* grab the current MSR */
1532 li r12,1 1581 li r12,(MSR_SF | MSR_ISF)@highest
1533 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) 1582 sldi r12,r12,48
1534 or r11,r11,r12
1535 li r12,1
1536 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1537 or r11,r11,r12 1583 or r11,r11,r12
1538 mtmsrd r11 1584 mtmsrd r11
1539 isync 1585 isync
1540 blr 1586 blr
1541 1587
1542/* 1588/*
1589 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
1590 * by the toolchain). It computes the correct value for wherever we
1591 * are running at the moment, using position-independent code.
1592 */
1593_GLOBAL(relative_toc)
1594 mflr r0
1595 bcl 20,31,$+4
15960: mflr r9
1597 ld r2,(p_toc - 0b)(r9)
1598 add r2,r2,r9
1599 mtlr r0
1600 blr
1601
1602p_toc: .llong __toc_start + 0x8000 - 0b
1603
1604/*
1543 * This is where the main kernel code starts. 1605 * This is where the main kernel code starts.
1544 */ 1606 */
1545_INIT_STATIC(start_here_multiplatform) 1607_INIT_STATIC(start_here_multiplatform)
1546 /* get a new offset, now that the kernel has moved. */ 1608 /* set up the TOC (real address) */
1547 bl .reloc_offset 1609 bl .relative_toc
1548 mr r26,r3
1549 1610
1550 /* Clear out the BSS. It may have been done in prom_init, 1611 /* Clear out the BSS. It may have been done in prom_init,
1551 * already but that's irrelevant since prom_init will soon 1612 * already but that's irrelevant since prom_init will soon
1552 * be detached from the kernel completely. Besides, we need 1613 * be detached from the kernel completely. Besides, we need
1553 * to clear it now for kexec-style entry. 1614 * to clear it now for kexec-style entry.
1554 */ 1615 */
1555 LOAD_REG_IMMEDIATE(r11,__bss_stop) 1616 LOAD_REG_ADDR(r11,__bss_stop)
1556 LOAD_REG_IMMEDIATE(r8,__bss_start) 1617 LOAD_REG_ADDR(r8,__bss_start)
1557 sub r11,r11,r8 /* bss size */ 1618 sub r11,r11,r8 /* bss size */
1558 addi r11,r11,7 /* round up to an even double word */ 1619 addi r11,r11,7 /* round up to an even double word */
1559 rldicl. r11,r11,61,3 /* shift right by 3 */ 1620 srdi. r11,r11,3 /* shift right by 3 */
1560 beq 4f 1621 beq 4f
1561 addi r8,r8,-8 1622 addi r8,r8,-8
1562 li r0,0 1623 li r0,0
@@ -1569,35 +1630,35 @@ _INIT_STATIC(start_here_multiplatform)
1569 ori r6,r6,MSR_RI 1630 ori r6,r6,MSR_RI
1570 mtmsrd r6 /* RI on */ 1631 mtmsrd r6 /* RI on */
1571 1632
1572 /* The following gets the stack and TOC set up with the regs */ 1633#ifdef CONFIG_RELOCATABLE
1634 /* Save the physical address we're running at in kernstart_addr */
1635 LOAD_REG_ADDR(r4, kernstart_addr)
1636 clrldi r0,r25,2
1637 std r0,0(r4)
1638#endif
1639
1640 /* The following gets the stack set up with the regs */
1573 /* pointing to the real addr of the kernel stack. This is */ 1641 /* pointing to the real addr of the kernel stack. This is */
1574 /* all done to support the C function call below which sets */ 1642 /* all done to support the C function call below which sets */
1575 /* up the htab. This is done because we have relocated the */ 1643 /* up the htab. This is done because we have relocated the */
1576 /* kernel but are still running in real mode. */ 1644 /* kernel but are still running in real mode. */
1577 1645
1578 LOAD_REG_IMMEDIATE(r3,init_thread_union) 1646 LOAD_REG_ADDR(r3,init_thread_union)
1579 add r3,r3,r26
1580 1647
1581 /* set up a stack pointer (physical address) */ 1648 /* set up a stack pointer */
1582 addi r1,r3,THREAD_SIZE 1649 addi r1,r3,THREAD_SIZE
1583 li r0,0 1650 li r0,0
1584 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1651 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1585 1652
1586 /* set up the TOC (physical address) */
1587 LOAD_REG_IMMEDIATE(r2,__toc_start)
1588 addi r2,r2,0x4000
1589 addi r2,r2,0x4000
1590 add r2,r2,r26
1591
1592 /* Do very early kernel initializations, including initial hash table, 1653 /* Do very early kernel initializations, including initial hash table,
1593 * stab and slb setup before we turn on relocation. */ 1654 * stab and slb setup before we turn on relocation. */
1594 1655
1595 /* Restore parameters passed from prom_init/kexec */ 1656 /* Restore parameters passed from prom_init/kexec */
1596 mr r3,r31 1657 mr r3,r31
1597 bl .early_setup 1658 bl .early_setup /* also sets r13 and SPRG3 */
1598 1659
1599 LOAD_REG_IMMEDIATE(r3, .start_here_common) 1660 LOAD_REG_ADDR(r3, .start_here_common)
1600 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 1661 ld r4,PACAKMSR(r13)
1601 mtspr SPRN_SRR0,r3 1662 mtspr SPRN_SRR0,r3
1602 mtspr SPRN_SRR1,r4 1663 mtspr SPRN_SRR1,r4
1603 rfid 1664 rfid
@@ -1606,20 +1667,10 @@ _INIT_STATIC(start_here_multiplatform)
1606 /* This is where all platforms converge execution */ 1667 /* This is where all platforms converge execution */
1607_INIT_GLOBAL(start_here_common) 1668_INIT_GLOBAL(start_here_common)
1608 /* relocation is on at this point */ 1669 /* relocation is on at this point */
1670 std r1,PACAKSAVE(r13)
1609 1671
1610 /* The following code sets up the SP and TOC now that we are */ 1672 /* Load the TOC (virtual address) */
1611 /* running with translation enabled. */
1612
1613 LOAD_REG_IMMEDIATE(r3,init_thread_union)
1614
1615 /* set up the stack */
1616 addi r1,r3,THREAD_SIZE
1617 li r0,0
1618 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1619
1620 /* Load the TOC */
1621 ld r2,PACATOC(r13) 1673 ld r2,PACATOC(r13)
1622 std r1,PACAKSAVE(r13)
1623 1674
1624 bl .setup_system 1675 bl .setup_system
1625 1676