about · summary · refs · log · tree · commit · diff · stats
path: root/arch/powerpc/kernel/head_64.S
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r-- arch/powerpc/kernel/head_64.S 2011
1 file changed, 2011 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
new file mode 100644
index 000000000000..22a5ee07e1ea
--- /dev/null
+++ b/arch/powerpc/kernel/head_64.S
@@ -0,0 +1,2011 @@
1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/processor.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
31#include <asm/systemcfg.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34#include <asm/bug.h>
35#include <asm/cputable.h>
36#include <asm/setup.h>
37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h>
39
40#ifdef CONFIG_PPC_ISERIES
41#define DO_SOFT_DISABLE
42#endif
43
44/*
45 * We layout physical memory as follows:
46 * 0x0000 - 0x00ff : Secondary processor spin code
47 * 0x0100 - 0x2fff : pSeries Interrupt prologs
48 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
49 * 0x6000 - 0x6fff : Initial (CPU0) segment table
50 * 0x7000 - 0x7fff : FWNMI data area
51 * 0x8000 - : Early init and support code
52 */
53
54/*
55 * SPRG Usage
56 *
57 * Register Definition
58 *
59 * SPRG0 reserved for hypervisor
60 * SPRG1 temp - used to save gpr
61 * SPRG2 temp - used to save gpr
62 * SPRG3 virt addr of paca
63 */
64
65/*
66 * Entering into this code we make the following assumptions:
67 * For pSeries:
68 * 1. The MMU is off & open firmware is running in real mode.
69 * 2. The kernel is entered at __start
70 *
71 * For iSeries:
72 * 1. The MMU is on (as it always is for iSeries)
73 * 2. The kernel is entered at system_reset_iSeries
74 */
75
/*
 * Kernel image start. On pSeries/OpenFirmware the kernel is entered at
 * __start with the MMU off; the feature section below is patched at boot
 * (kept or NOP'd) so the branch to the multiplatform init path can be
 * disabled on platforms that enter elsewhere (e.g. iSeries).
 */
76 .text
77 .globl _stext
78_stext:
79#ifdef CONFIG_PPC_MULTIPLATFORM
80_GLOBAL(__start)
81 /* NOP this out unconditionally */
82BEGIN_FTR_SECTION
83 b .__start_initialization_multiplatform
84END_FTR_SECTION(0, 1)
85#endif /* CONFIG_PPC_MULTIPLATFORM */
86
87 /* Catch branch to 0 in real mode */
88 trap
89
/*
 * Fixed-offset data words at the very start of the image, consumed by
 * the iSeries hypervisor and its LPAR debugger. Values are stored as
 * physical offsets (symbol - KERNELBASE). These slots must stay at the
 * absolute offsets given in the comments below.
 */
90#ifdef CONFIG_PPC_ISERIES
91 /*
92 * At offset 0x20, there is a pointer to iSeries LPAR data.
93 * This is required by the hypervisor
94 */
95 . = 0x20
96 .llong hvReleaseData-KERNELBASE
97
98 /*
99 * At offset 0x28 and 0x30 are offsets to the mschunks_map
100 * array (used by the iSeries LPAR debugger to do translation
101 * between physical addresses and absolute addresses) and
102 * to the pidhash table (also used by the debugger)
103 */
104 .llong mschunks_map-KERNELBASE
105 .llong 0 /* pidhash-KERNELBASE SFRXXX */
106
107 /* Offset 0x38 - Pointer to start of embedded System.map */
108 .globl embedded_sysmap_start
109embedded_sysmap_start:
110 .llong 0
111 /* Offset 0x40 - Pointer to end of embedded System.map */
112 .globl embedded_sysmap_end
113embedded_sysmap_end:
114 .llong 0
115
116#endif /* CONFIG_PPC_ISERIES */
117
/*
 * Handshake variables for the secondary-CPU hold loop below: the master
 * releases secondaries by writing 1 to __secondary_hold_spinloop; each
 * secondary acknowledges entry by storing its cpu number.
 */
118 /* Secondary processors spin on this value until it goes to 1. */
119 .globl __secondary_hold_spinloop
120__secondary_hold_spinloop:
121 .llong 0x0
122
123 /* Secondary processors write this value with their cpu # */
124 /* after they enter the spin loop immediately below. */
125 .globl __secondary_hold_acknowledge
126__secondary_hold_acknowledge:
127 .llong 0x0
129 . = 0x60
130/*
131 * The following code is used on pSeries to hold secondary processors
132 * in a spin loop after they have been freed from OpenFirmware, but
133 * before the bulk of the kernel has been relocated. This code
134 * is relocated to physical address 0x60 before prom_init is run.
135 * All of it must fit below the first exception vector at 0x100.
136 *
137 * Entry: r3 = linux cpu number (kept in r24 across the spin loop).
138 * Runs in real mode, so only @l (low-order) parts of the handshake
139 * symbol addresses are usable.
136 */
137_GLOBAL(__secondary_hold)
138 mfmsr r24
139 ori r24,r24,MSR_RI
140 mtmsrd r24 /* RI on */
141
142 /* Grab our linux cpu number */
143 mr r24,r3
144
145 /* Tell the master cpu we're here */
146 /* Relocation is off & we are located at an address less */
147 /* than 0x100, so only need to grab low order offset. */
148 std r24,__secondary_hold_acknowledge@l(0)
149 sync
150
151 /* All secondary cpus wait here until told to start. */
152100: ld r4,__secondary_hold_spinloop@l(0)
153 cmpdi 0,r4,1
154 bne 100b
155
156#ifdef CONFIG_HMT
157 b .hmt_init
158#else
159#ifdef CONFIG_SMP
160 mr r3,r24
161 b .pSeries_secondary_smp_init
162#else
163 BUG_OPCODE
164#endif
165#endif
166
167/* This value is used to mark exception frames on the stack. */
168 .section ".toc","aw"
exception_marker:
/* 0x7265677368657265 is ASCII "regshere" */
170 .tc ID_72656773_68657265[TC],0x7265677368657265
171 .text
172
173/*
174 * The following macros define the code that appears as
175 * the prologue to each of the exception handlers. They
176 * are split into two parts to allow a single kernel binary
177 * to be used for pSeries and iSeries.
178 * LOL. One day... - paulus
179 */
180
181/*
182 * We make as much of the exception code common between native
183 * exception handlers (including pSeries LPAR) and iSeries LPAR
184 * implementations as possible.
185 */
186
187/*
188 * This is the start of the interrupt handlers for pSeries
189 * This code runs with relocation off.
190 */
/*
 * Byte offsets into the per-CPU PACA exception save areas (EX_* are
 * used with a base of PACA_EXGEN/PACA_EXMC/PACA_EXSLB). Note the
 * deliberate overlaps: SLB-miss paths reuse the SRR0 slot for R3 and
 * the DAR slot for LR.
 */
191#define EX_R9 0
192#define EX_R10 8
193#define EX_R11 16
194#define EX_R12 24
195#define EX_R13 32
196#define EX_SRR0 40
197#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
198#define EX_DAR 48
199#define EX_LR 48 /* SLB miss saves LR, but not DAR */
200#define EX_DSISR 56
201#define EX_CCR 60
202
/*
 * Real-mode prolog: stash r9-r13 and CR in the given PACA area, then
 * rfid to `label` with MSR_IR|MSR_DR|MSR_RI set, i.e. continue the
 * handler at its virtual address with relocation on. Assumes SPRG1
 * already holds the interrupted r13 and SPRG3 the paca pointer.
 */
203#define EXCEPTION_PROLOG_PSERIES(area, label) \
204 mfspr r13,SPRG3; /* get paca address into r13 */ \
205 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
206 std r10,area+EX_R10(r13); \
207 std r11,area+EX_R11(r13); \
208 std r12,area+EX_R12(r13); \
209 mfspr r9,SPRG1; \
210 std r9,area+EX_R13(r13); \
211 mfcr r9; \
212 clrrdi r12,r13,32; /* get high part of &label */ \
213 mfmsr r10; \
214 mfspr r11,SRR0; /* save SRR0 */ \
215 ori r12,r12,(label)@l; /* virt addr of handler */ \
216 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
217 mtspr SRR0,r12; \
218 mfspr r12,SRR1; /* and SRR1 */ \
219 mtspr SRR1,r10; \
220 rfid; \
221 b . /* prevent speculative execution */
222
223/*
224 * This is the start of the interrupt handlers for iSeries
225 * This code runs with relocation on.
226 */
/*
 * iSeries prologs run with relocation already on. Part 1 saves r9-r13
 * and CR into the PACA area; part 2 fetches the hypervisor-saved SRR0/
 * SRR1 from the lppaca into r11/r12 and turns MSR_RI back on.
 */
227#define EXCEPTION_PROLOG_ISERIES_1(area) \
228 mfspr r13,SPRG3; /* get paca address into r13 */ \
229 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
230 std r10,area+EX_R10(r13); \
231 std r11,area+EX_R11(r13); \
232 std r12,area+EX_R12(r13); \
233 mfspr r9,SPRG1; \
234 std r9,area+EX_R13(r13); \
235 mfcr r9
236
237#define EXCEPTION_PROLOG_ISERIES_2 \
238 mfmsr r10; \
239 ld r11,PACALPPACA+LPPACASRR0(r13); \
240 ld r12,PACALPPACA+LPPACASRR1(r13); \
241 ori r10,r10,MSR_RI; \
242 mtmsrd r10,1
243
244/*
245 * The common exception prolog is used for all except a few exceptions
246 * such as a segment miss on a kernel address. We have to be prepared
247 * to take another exception from the point where we first touch the
248 * kernel stack onwards.
249 *
250 * On entry r13 points to the paca, r9-r13 are saved in the paca,
251 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
252 * SRR1, and relocation is on.
253 */
/*
 * Build a full pt_regs frame on the kernel stack from the state the
 * earlier prolog left in the PACA `area`. If we came from userspace
 * (MSR_PR set in saved SRR1/r12) switch to the kernel stack from
 * paca->kstack; a kernel-mode r1 that looks like a user address is
 * trapped via bad_stack. NOTE(review): the trap value stored is
 * (n)+1 — the low bit appears to flag that non-volatile GPRs are not
 * saved in this frame; confirm against ret_from_except/save_nvgprs.
 */
254#define EXCEPTION_PROLOG_COMMON(n, area) \
255 andi. r10,r12,MSR_PR; /* See if coming from user */ \
256 mr r10,r1; /* Save r1 */ \
257 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
258 beq- 1f; \
259 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2601: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
261 bge- cr1,bad_stack; /* abort if it is */ \
262 std r9,_CCR(r1); /* save CR in stackframe */ \
263 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
264 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
265 std r10,0(r1); /* make stack chain pointer */ \
266 std r0,GPR0(r1); /* save r0 in stackframe */ \
267 std r10,GPR1(r1); /* save r1 in stackframe */ \
268 std r2,GPR2(r1); /* save r2 in stackframe */ \
269 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
270 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
271 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
272 ld r10,area+EX_R10(r13); \
273 std r9,GPR9(r1); \
274 std r10,GPR10(r1); \
275 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
276 ld r10,area+EX_R12(r13); \
277 ld r11,area+EX_R13(r13); \
278 std r9,GPR11(r1); \
279 std r10,GPR12(r1); \
280 std r11,GPR13(r1); \
281 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
282 mflr r9; /* save LR in stackframe */ \
283 std r9,_LINK(r1); \
284 mfctr r10; /* save CTR in stackframe */ \
285 std r10,_CTR(r1); \
286 mfspr r11,XER; /* save XER in stackframe */ \
287 std r11,_XER(r1); \
288 li r9,(n)+1; \
289 std r9,_TRAP(r1); /* set trap number */ \
290 li r10,0; \
291 ld r11,exception_marker@toc(r2); \
292 std r10,RESULT(r1); /* clear regs->result */ \
293 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
294
295/*
296 * Exception vectors.
297 */
/*
 * Vector-body generators. The pSeries variant is placed at the absolute
 * vector address `n` and runs in real mode; the iSeries variants are
 * entered with relocation on. MASKABLE_EXCEPTION_ISERIES additionally
 * honours soft-disable: if paca->proc_enabled is 0 the event is
 * deferred via label##_iSeries_masked instead of being handled now.
 */
298#define STD_EXCEPTION_PSERIES(n, label) \
299 . = n; \
300 .globl label##_pSeries; \
301label##_pSeries: \
302 HMT_MEDIUM; \
303 mtspr SPRG1,r13; /* save r13 */ \
304 RUNLATCH_ON(r13); \
305 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
306
307#define STD_EXCEPTION_ISERIES(n, label, area) \
308 .globl label##_iSeries; \
309label##_iSeries: \
310 HMT_MEDIUM; \
311 mtspr SPRG1,r13; /* save r13 */ \
312 RUNLATCH_ON(r13); \
313 EXCEPTION_PROLOG_ISERIES_1(area); \
314 EXCEPTION_PROLOG_ISERIES_2; \
315 b label##_common
316
317#define MASKABLE_EXCEPTION_ISERIES(n, label) \
318 .globl label##_iSeries; \
319label##_iSeries: \
320 HMT_MEDIUM; \
321 mtspr SPRG1,r13; /* save r13 */ \
322 RUNLATCH_ON(r13); \
323 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
324 lbz r10,PACAPROCENABLED(r13); \
325 cmpwi 0,r10,0; \
326 beq- label##_iSeries_masked; \
327 EXCEPTION_PROLOG_ISERIES_2; \
328 b label##_common; \
329
/*
 * Interrupt enable/disable for exception handlers. Under
 * DO_SOFT_DISABLE (iSeries), DISABLE_INTS records the previous
 * soft-enable state in the frame and clears paca->proc_enabled, but
 * note it sets MSR_EE: hardware interrupts stay enabled and are
 * deferred in software (see *_iSeries_masked). Without soft-disable,
 * DISABLE_INTS is a no-op (the prolog already left EE off) and
 * ENABLE_INTS restores EE from the interrupted MSR in the frame.
 */
330#ifdef DO_SOFT_DISABLE
331#define DISABLE_INTS \
332 lbz r10,PACAPROCENABLED(r13); \
333 li r11,0; \
334 std r10,SOFTE(r1); \
335 mfmsr r10; \
336 stb r11,PACAPROCENABLED(r13); \
337 ori r10,r10,MSR_EE; \
338 mtmsrd r10,1
339
340#define ENABLE_INTS \
341 lbz r10,PACAPROCENABLED(r13); \
342 mfmsr r11; \
343 std r10,SOFTE(r1); \
344 ori r11,r11,MSR_EE; \
345 mtmsrd r11,1
346
347#else /* hard enable/disable interrupts */
348#define DISABLE_INTS
349
350#define ENABLE_INTS \
351 ld r12,_MSR(r1); \
352 mfmsr r11; \
353 rlwimi r11,r12,0,MSR_EE; \
354 mtmsrd r11,1
355
356#endif
357
/*
 * Generic "common" handler bodies: build the pt_regs frame, then call
 * the C handler `hdlr` with a pt_regs pointer. The _LITE variant skips
 * saving the non-volatile GPRs and returns through the lightweight
 * exit path; use it only for handlers that never need the full regs.
 */
358#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
359 .align 7; \
360 .globl label##_common; \
361label##_common: \
362 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
363 DISABLE_INTS; \
364 bl .save_nvgprs; \
365 addi r3,r1,STACK_FRAME_OVERHEAD; \
366 bl hdlr; \
367 b .ret_from_except
368
369#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
370 .align 7; \
371 .globl label##_common; \
372label##_common: \
373 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
374 DISABLE_INTS; \
375 addi r3,r1,STACK_FRAME_OVERHEAD; \
376 bl hdlr; \
377 b .ret_from_except_lite
378
379/*
380 * Start of pSeries system interrupt routines
381 */
382 . = 0x100
383 .globl __start_interrupts
384__start_interrupts:
385
386 STD_EXCEPTION_PSERIES(0x100, system_reset)
387
388 . = 0x200
389_machine_check_pSeries:
390 HMT_MEDIUM
391 mtspr SPRG1,r13 /* save r13 */
392 RUNLATCH_ON(r13)
393 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
394
395 . = 0x300
396 .globl data_access_pSeries
397data_access_pSeries:
398 HMT_MEDIUM
399 mtspr SPRG1,r13
/*
 * On CPUs without an SLB (segment tables), detect a kernel-region
 * segment-table miss early: r13 = (DAR >> 60) with the relevant DSISR
 * bit inserted; 0x2c appears to mean "0xc kernel region + seg fault"
 * — NOTE(review): confirm bit encoding against DSISR definitions.
 */
400BEGIN_FTR_SECTION
401 mtspr SPRG2,r12
402 mfspr r13,DAR
403 mfspr r12,DSISR
404 srdi r13,r13,60
405 rlwimi r13,r12,16,0x20
406 mfcr r12
407 cmpwi r13,0x2c
408 beq .do_stab_bolted_pSeries
409 mtcrf 0x80,r12
410 mfspr r12,SPRG2
411END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
412 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
413
414 . = 0x380
415 .globl data_access_slb_pSeries
416data_access_slb_pSeries:
417 HMT_MEDIUM
418 mtspr SPRG1,r13
419 RUNLATCH_ON(r13)
420 mfspr r13,SPRG3 /* get paca address into r13 */
421 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
422 std r10,PACA_EXSLB+EX_R10(r13)
423 std r11,PACA_EXSLB+EX_R11(r13)
424 std r12,PACA_EXSLB+EX_R12(r13)
425 std r3,PACA_EXSLB+EX_R3(r13)
426 mfspr r9,SPRG1
427 std r9,PACA_EXSLB+EX_R13(r13)
428 mfcr r9
429 mfspr r12,SRR1 /* and SRR1 */
430 mfspr r3,DAR
431 b .do_slb_miss /* Rel. branch works in real mode */
432
433 STD_EXCEPTION_PSERIES(0x400, instruction_access)
434
435 . = 0x480
436 .globl instruction_access_slb_pSeries
437instruction_access_slb_pSeries:
438 HMT_MEDIUM
439 mtspr SPRG1,r13
440 RUNLATCH_ON(r13)
441 mfspr r13,SPRG3 /* get paca address into r13 */
442 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
443 std r10,PACA_EXSLB+EX_R10(r13)
444 std r11,PACA_EXSLB+EX_R11(r13)
445 std r12,PACA_EXSLB+EX_R12(r13)
446 std r3,PACA_EXSLB+EX_R3(r13)
447 mfspr r9,SPRG1
448 std r9,PACA_EXSLB+EX_R13(r13)
449 mfcr r9
450 mfspr r12,SRR1 /* and SRR1 */
451 mfspr r3,SRR0 /* SRR0 is faulting address */
452 b .do_slb_miss /* Rel. branch works in real mode */
453
454 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
455 STD_EXCEPTION_PSERIES(0x600, alignment)
456 STD_EXCEPTION_PSERIES(0x700, program_check)
457 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
458 STD_EXCEPTION_PSERIES(0x900, decrementer)
459 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
460 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
461
462 . = 0xc00
463 .globl system_call_pSeries
464system_call_pSeries:
465 HMT_MEDIUM
466 RUNLATCH_ON(r9)
/* Hand-rolled prolog: r9 <- old r13, r13 <- paca, then rfid to the
 * virtual-mode system_call_common with IR/DR/RI turned on. */
467 mr r9,r13
468 mfmsr r10
469 mfspr r13,SPRG3
470 mfspr r11,SRR0
471 clrrdi r12,r13,32
472 oris r12,r12,system_call_common@h
473 ori r12,r12,system_call_common@l
474 mtspr SRR0,r12
475 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
476 mfspr r12,SRR1
477 mtspr SRR1,r10
478 rfid
479 b . /* prevent speculative execution */
480
481 STD_EXCEPTION_PSERIES(0xd00, single_step)
482 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
483
484 /* We need to deal with the Altivec unavailable exception
485 * here which is at 0xf20, thus in the middle of the
486 * prolog code of the PerformanceMonitor one. A little
487 * trickery is thus necessary
488 */
489 . = 0xf00
490 b performance_monitor_pSeries
491
492 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
493
494 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
495 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
496
497 . = 0x3000
498
499/*** pSeries interrupt support ***/
500
 /* moved from 0xf00 (displaced by the altivec_unavailable fixup above;
  * the `.` vector argument places it at the current location) */
502 STD_EXCEPTION_PSERIES(., performance_monitor)
503
504 .align 7
/* Tail of the 0x300 fast path: restore CR/r12 saved in SPRG2, then run
 * the normal prolog into the segment-table-bolt handler. */
505_GLOBAL(do_stab_bolted_pSeries)
506 mtcrf 0x80,r12
507 mfspr r12,SPRG2
508 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
509
510/*
511 * Vectors for the FWNMI option. Share common code.
512 */
513 .globl system_reset_fwnmi
514system_reset_fwnmi:
515 HMT_MEDIUM
516 mtspr SPRG1,r13 /* save r13 */
517 RUNLATCH_ON(r13)
518 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
519
520 .globl machine_check_fwnmi
521machine_check_fwnmi:
522 HMT_MEDIUM
523 mtspr SPRG1,r13 /* save r13 */
524 RUNLATCH_ON(r13)
525 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
526
527#ifdef CONFIG_PPC_ISERIES
528/*** ISeries-LPAR interrupt handlers ***/
529
530 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
531
532 .globl data_access_iSeries
533data_access_iSeries:
534 mtspr SPRG1,r13
/* Same segment-table-miss fast-path test as the pSeries 0x300 vector */
535BEGIN_FTR_SECTION
536 mtspr SPRG2,r12
537 mfspr r13,DAR
538 mfspr r12,DSISR
539 srdi r13,r13,60
540 rlwimi r13,r12,16,0x20
541 mfcr r12
542 cmpwi r13,0x2c
543 beq .do_stab_bolted_iSeries
544 mtcrf 0x80,r12
545 mfspr r12,SPRG2
546END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
547 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
548 EXCEPTION_PROLOG_ISERIES_2
549 b data_access_common
550
551.do_stab_bolted_iSeries:
552 mtcrf 0x80,r12
553 mfspr r12,SPRG2
554 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
555 EXCEPTION_PROLOG_ISERIES_2
556 b .do_stab_bolted
557
558 .globl data_access_slb_iSeries
559data_access_slb_iSeries:
560 mtspr SPRG1,r13 /* save r13 */
561 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
562 std r3,PACA_EXSLB+EX_R3(r13)
563 ld r12,PACALPPACA+LPPACASRR1(r13)
564 mfspr r3,DAR
565 b .do_slb_miss
566
567 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
568
569 .globl instruction_access_slb_iSeries
570instruction_access_slb_iSeries:
571 mtspr SPRG1,r13 /* save r13 */
572 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
573 std r3,PACA_EXSLB+EX_R3(r13)
574 ld r12,PACALPPACA+LPPACASRR1(r13)
575 ld r3,PACALPPACA+LPPACASRR0(r13)
576 b .do_slb_miss
577
578 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
579 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
580 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
581 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
582 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
583 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
584 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
585
586 .globl system_call_iSeries
587system_call_iSeries:
588 mr r9,r13
589 mfspr r13,SPRG3
590 EXCEPTION_PROLOG_ISERIES_2
591 b system_call_common
592
593 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
594 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
595 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
596
/*
 * iSeries entry point: cpu 0 proceeds to early init; other cpus turn
 * off their run light and spin (below) until released for SMP start.
 */
597 .globl system_reset_iSeries
598system_reset_iSeries:
599 mfspr r13,SPRG3 /* Get paca address */
600 mfmsr r24
601 ori r24,r24,MSR_RI
602 mtmsrd r24 /* RI on */
603 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
604 cmpwi 0,r24,0 /* Are we processor 0? */
605 beq .__start_initialization_iSeries /* Start up the first processor */
606 mfspr r4,SPRN_CTRLF
607 li r5,CTRL_RUNLATCH /* Turn off the run light */
608 andc r4,r4,r5
609 mtspr SPRN_CTRLT,r4
610
6111:
612 HMT_LOW
613#ifdef CONFIG_SMP
614 lbz r23,PACAPROCSTART(r13) /* Test if this processor
615 * should start */
616 sync
617 LOADADDR(r3,current_set)
618 sldi r28,r24,3 /* get current_set[cpu#] */
619 ldx r3,r3,r28
620 addi r1,r3,THREAD_SIZE
621 subi r1,r1,STACK_FRAME_OVERHEAD
622
623 cmpwi 0,r23,0
624 beq iSeries_secondary_smp_loop /* Loop until told to go */
625 bne .__secondary_start /* Told to go: start this cpu */
626iSeries_secondary_smp_loop:
627 /* Let the Hypervisor know we are alive */
628 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
629 lis r3,0x8002
630 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
631#else /* CONFIG_SMP */
632 /* Yield the processor. This is required for non-SMP kernels
633 which are running on multi-threaded machines. */
634 lis r3,0x8000
635 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
636 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
637 li r4,0 /* "yield timed" */
638 li r5,-1 /* "yield forever" */
639#endif /* CONFIG_SMP */
640 li r0,-1 /* r0=-1 indicates a Hypervisor call */
641 sc /* Invoke the hypervisor via a system call */
642 mfspr r13,SPRG3 /* Put r13 back ???? */
643 b 1b /* If SMP not configured, secondaries
644 * loop forever */
645
/*
 * Soft-disabled decrementer: remember it fired in the lppaca, reload
 * the decrementer, then fall through to restore state and return.
 */
646 .globl decrementer_iSeries_masked
647decrementer_iSeries_masked:
648 li r11,1
649 stb r11,PACALPPACA+LPPACADECRINT(r13)
650 lwz r12,PACADEFAULTDECR(r13)
651 mtspr SPRN_DEC,r12
652 /* fall through */
653
654 .globl hardware_interrupt_iSeries_masked
655hardware_interrupt_iSeries_masked:
656 mtcrf 0x80,r9 /* Restore regs */
657 ld r11,PACALPPACA+LPPACASRR0(r13)
658 ld r12,PACALPPACA+LPPACASRR1(r13)
659 mtspr SRR0,r11
660 mtspr SRR1,r12
661 ld r9,PACA_EXGEN+EX_R9(r13)
662 ld r10,PACA_EXGEN+EX_R10(r13)
663 ld r11,PACA_EXGEN+EX_R11(r13)
664 ld r12,PACA_EXGEN+EX_R12(r13)
665 ld r13,PACA_EXGEN+EX_R13(r13)
666 rfid
667 b . /* prevent speculative execution */
668#endif /* CONFIG_PPC_ISERIES */
669
670/*** Common interrupt handlers ***/
671
672 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
673
674 /*
675 * Machine check is different because we use a different
676 * save area: PACA_EXMC instead of PACA_EXGEN.
677 */
678 .align 7
679 .globl machine_check_common
680machine_check_common:
681 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
682 DISABLE_INTS
683 bl .save_nvgprs
684 addi r3,r1,STACK_FRAME_OVERHEAD
685 bl .machine_check_exception
686 b .ret_from_except
687
688 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
689 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
690 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
691 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
692 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
693 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
694 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
695#ifdef CONFIG_ALTIVEC
696 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
697#else
698 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
699#endif
700
701/*
702 * Here we have detected that the kernel stack pointer is bad.
703 * R9 contains the saved CR, r13 points to the paca,
704 * r10 contains the (bad) kernel stack pointer,
705 * r11 and r12 contain the saved SRR0 and SRR1.
706 * We switch to using an emergency stack, save the registers there,
707 * and call kernel_bad_stack(), which panics.
708 */
709bad_stack:
710 ld r1,PACAEMERGSP(r13)
711 subi r1,r1,64+INT_FRAME_SIZE
712 std r9,_CCR(r1)
713 std r10,GPR1(r1)
714 std r11,_NIP(r1)
715 std r12,_MSR(r1)
716 mfspr r11,DAR
717 mfspr r12,DSISR
718 std r11,_DAR(r1)
719 std r12,_DSISR(r1)
720 mflr r10
721 mfctr r11
722 mfxer r12
723 std r10,_LINK(r1)
724 std r11,_CTR(r1)
725 std r12,_XER(r1)
726 SAVE_GPR(0,r1)
727 SAVE_GPR(2,r1)
728 SAVE_4GPRS(3,r1)
729 SAVE_2GPRS(7,r1)
730 SAVE_10GPRS(12,r1)
731 SAVE_10GPRS(22,r1)
/* Terminate the stack chain: backchain points past the frame, and the
 * word there is zeroed so unwinders stop here. */
732 addi r11,r1,INT_FRAME_SIZE
733 std r11,0(r1)
734 li r12,0
735 std r12,0(r11)
736 ld r2,PACATOC(r13)
/* kernel_bad_stack() panics and should not return; loop just in case */
7371: addi r3,r1,STACK_FRAME_OVERHEAD
738 bl .kernel_bad_stack
739 b 1b
740
741/*
742 * Return from an exception with minimal checks.
743 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
744 * If interrupts have been enabled, or anything has been
745 * done that might have changed the scheduling status of
746 * any task or sent any task a signal, you should use
747 * ret_from_except or ret_from_except_lite instead of this.
748 */
749fast_exception_return:
750 ld r12,_MSR(r1)
751 ld r11,_NIP(r1)
752 andi. r3,r12,MSR_RI /* check if RI is set */
753 beq- unrecov_fer
754 ld r3,_CCR(r1)
755 ld r4,_LINK(r1)
756 ld r5,_CTR(r1)
757 ld r6,_XER(r1)
758 mtcr r3
759 mtlr r4
760 mtctr r5
761 mtxer r6
762 REST_GPR(0, r1)
763 REST_8GPRS(2, r1)
764
/* Clear RI so a stray exception between here and rfid is detectable
 * as unrecoverable (SRR0/SRR1 are about to be overwritten). */
765 mfmsr r10
766 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
767 mtmsrd r10,1
768
769 mtspr SRR1,r12
770 mtspr SRR0,r11
771 REST_4GPRS(10, r1)
772 ld r1,GPR1(r1)
773 rfid
774 b . /* prevent speculative execution */
775
/* Interrupted context had RI clear: state is unrecoverable; report and
 * loop forever (unrecoverable_exception should not return). */
776unrecov_fer:
777 bl .save_nvgprs
7781: addi r3,r1,STACK_FRAME_OVERHEAD
779 bl .unrecoverable_exception
780 b 1b
781
782/*
783 * Here r13 points to the paca, r9 contains the saved CR,
784 * SRR0 and SRR1 are saved in r11 and r12,
785 * r9 - r13 are saved in paca->exgen.
786 */
787 .align 7
788 .globl data_access_common
789data_access_common:
790 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
/* Stash DAR/DSISR in the PACA before the common prolog clobbers regs */
791 mfspr r10,DAR
792 std r10,PACA_EXGEN+EX_DAR(r13)
793 mfspr r10,DSISR
794 stw r10,PACA_EXGEN+EX_DSISR(r13)
795 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
796 ld r3,PACA_EXGEN+EX_DAR(r13)
797 lwz r4,PACA_EXGEN+EX_DSISR(r13)
798 li r5,0x300
799 b .do_hash_page /* Try to handle as hpte fault */
800
801 .align 7
802 .globl instruction_access_common
803instruction_access_common:
804 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
/* For ISI the fault address is the saved NIP and the error bits live
 * in SRR1 (r12); 0x5820 masks the fault-reason bits into r4. */
805 ld r3,_NIP(r1)
806 andis. r4,r12,0x5820
807 li r5,0x400
808 b .do_hash_page /* Try to handle as hpte fault */
809
810 .align 7
811 .globl hardware_interrupt_common
812 .globl hardware_interrupt_entry
813hardware_interrupt_common:
814 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
815hardware_interrupt_entry:
816 DISABLE_INTS
817 addi r3,r1,STACK_FRAME_OVERHEAD
818 bl .do_IRQ
819 b .ret_from_except_lite
820
821 .align 7
822 .globl alignment_common
823alignment_common:
824 mfspr r10,DAR
825 std r10,PACA_EXGEN+EX_DAR(r13)
826 mfspr r10,DSISR
827 stw r10,PACA_EXGEN+EX_DSISR(r13)
828 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
829 ld r3,PACA_EXGEN+EX_DAR(r13)
830 lwz r4,PACA_EXGEN+EX_DSISR(r13)
831 std r3,_DAR(r1)
832 std r4,_DSISR(r1)
833 bl .save_nvgprs
834 addi r3,r1,STACK_FRAME_OVERHEAD
835 ENABLE_INTS
836 bl .alignment_exception
837 b .ret_from_except
838
839 .align 7
840 .globl program_check_common
841program_check_common:
842 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
843 bl .save_nvgprs
844 addi r3,r1,STACK_FRAME_OVERHEAD
845 ENABLE_INTS
846 bl .program_check_exception
847 b .ret_from_except
848
849 .align 7
850 .globl fp_unavailable_common
851fp_unavailable_common:
852 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
/* cr0 still reflects the MSR_PR test from the prolog: ne => user */
853 bne .load_up_fpu /* if from user, just load it up */
854 bl .save_nvgprs
855 addi r3,r1,STACK_FRAME_OVERHEAD
856 ENABLE_INTS
/* FP unavailable in kernel mode is a bug: handler should not return */
857 bl .kernel_fp_unavailable_exception
858 BUG_OPCODE
859
860/*
861 * load_up_fpu(unused, unused, tsk)
862 * Disable FP for the task which had the FPU previously,
863 * and save its floating-point registers in its thread_struct.
864 * Enables the FPU for use in the kernel on return.
865 * On SMP we know the fpu is free, since we give it up every
866 * switch (ie, no lazy save of the FP registers).
867 * On entry: r13 == 'current' && last_task_used_math != 'current'
868 */
869_STATIC(load_up_fpu)
870 mfmsr r5 /* grab the current MSR */
871 ori r5,r5,MSR_FP
872 mtmsrd r5 /* enable use of fpu now */
873 isync
874/*
875 * For SMP, we don't do lazy FPU switching because it just gets too
876 * horrendously complex, especially when a task switches from one CPU
877 * to another. Instead we call giveup_fpu in switch_to.
878 *
879 */
880#ifndef CONFIG_SMP
881 ld r3,last_task_used_math@got(r2)
882 ld r4,0(r3)
883 cmpdi 0,r4,0
884 beq 1f
885 /* Save FP state to last_task_used_math's THREAD struct */
886 addi r4,r4,THREAD
887 SAVE_32FPRS(0, r4)
888 mffs fr0
889 stfd fr0,THREAD_FPSCR(r4)
890 /* Disable FP for last_task_used_math */
891 ld r5,PT_REGS(r4)
892 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
893 li r6,MSR_FP|MSR_FE0|MSR_FE1
894 andc r4,r4,r6
895 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8961:
897#endif /* CONFIG_SMP */
 /* enable use of FP after return: set MSR_FP (plus the task's FP
  * exception mode bits) in the MSR image that will be rfid'd to */
899 ld r4,PACACURRENT(r13)
900 addi r5,r4,THREAD /* Get THREAD */
901 ld r4,THREAD_FPEXC_MODE(r5)
902 ori r12,r12,MSR_FP
903 or r12,r12,r4
904 std r12,_MSR(r1)
905 lfd fr0,THREAD_FPSCR(r5)
906 mtfsf 0xff,fr0
907 REST_32FPRS(0, r5)
908#ifndef CONFIG_SMP
909 /* Update last_task_used_math to 'current' */
910 subi r4,r5,THREAD /* Back to 'current' */
911 std r4,0(r3)
912#endif /* CONFIG_SMP */
913 /* restore registers and return */
914 b fast_exception_return
915
916 .align 7
917 .globl altivec_unavailable_common
918altivec_unavailable_common:
919 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
920#ifdef CONFIG_ALTIVEC
/* cr0 still reflects the prolog's MSR_PR test: ne => came from user,
 * so lazily load the VMX state instead of treating it as an error */
921BEGIN_FTR_SECTION
922 bne .load_up_altivec /* if from user, just load it up */
923END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
924#endif
925 bl .save_nvgprs
926 addi r3,r1,STACK_FRAME_OVERHEAD
927 ENABLE_INTS
928 bl .altivec_unavailable_exception
929 b .ret_from_except
930
931#ifdef CONFIG_ALTIVEC
932/*
933 * load_up_altivec(unused, unused, tsk)
934 * Disable VMX for the task which had it previously,
935 * and save its vector registers in its thread_struct.
936 * Enables the VMX for use in the kernel on return.
937 * On SMP we know the VMX is free, since we give it up every
938 * switch (ie, no lazy save of the vector registers).
939 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
940 */
941_STATIC(load_up_altivec)
942 mfmsr r5 /* grab the current MSR */
943 oris r5,r5,MSR_VEC@h
944 mtmsrd r5 /* enable use of VMX now */
945 isync
946
947/*
948 * For SMP, we don't do lazy VMX switching because it just gets too
949 * horrendously complex, especially when a task switches from one CPU
950 * to another. Instead we call giveup_altvec in switch_to.
951 * VRSAVE isn't dealt with here, that is done in the normal context
952 * switch code. Note that we could rely on vrsave value to eventually
953 * avoid saving all of the VREGs here...
954 */
955#ifndef CONFIG_SMP
956 ld r3,last_task_used_altivec@got(r2)
957 ld r4,0(r3)
958 cmpdi 0,r4,0
959 beq 1f
960 /* Save VMX state to last_task_used_altivec's THREAD struct */
961 addi r4,r4,THREAD
962 SAVE_32VRS(0,r5,r4)
963 mfvscr vr0
964 li r10,THREAD_VSCR
965 stvx vr0,r10,r4
966 /* Disable VMX for last_task_used_altivec */
967 ld r5,PT_REGS(r4)
968 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
969 lis r6,MSR_VEC@h
970 andc r4,r4,r6
971 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9721:
973#endif /* CONFIG_SMP */
974 /* Hack: if we get an altivec unavailable trap with VRSAVE
975 * set to all zeros, we assume this is a broken application
976 * that fails to set it properly, and thus we switch it to
977 * all 1's
978 */
979 mfspr r4,SPRN_VRSAVE
980 cmpdi 0,r4,0
981 bne+ 1f
982 li r4,-1
983 mtspr SPRN_VRSAVE,r4
9841:
985 /* enable use of VMX after return */
986 ld r4,PACACURRENT(r13)
987 addi r5,r4,THREAD /* Get THREAD */
988 oris r12,r12,MSR_VEC@h
989 std r12,_MSR(r1)
990 li r4,1
991 li r10,THREAD_VSCR
992 stw r4,THREAD_USED_VR(r5)
993 lvx vr0,r10,r5
994 mtvscr vr0
995 REST_32VRS(0,r4,r5)
996#ifndef CONFIG_SMP
997 /* Update last_task_used_altivec to 'current' */
998 subi r4,r5,THREAD /* Back to 'current' */
999 std r4,0(r3)
1000#endif /* CONFIG_SMP */
1001 /* restore registers and return */
1002 b fast_exception_return
1003#endif /* CONFIG_ALTIVEC */
1004
1005/*
1006 * Hash table stuff
1007 */
/*
 * do_hash_page: common continuation of the data/instruction storage
 * interrupts.  On entry r3 = faulting address, r4 = DSISR-style fault
 * bits, r5 = trap number, r12 = saved MSR, and the pt_regs exception
 * frame is already set up on r1.  Tries to insert a hardware PTE via
 * hash_page(); anything it cannot handle falls through to the generic
 * C page-fault path below.
 */
1008 .align 7
1009_GLOBAL(do_hash_page)
 /* preserve fault address and reason in the pt_regs frame */
1010 std r3,_DAR(r1)
1011 std r4,_DSISR(r1)
1012
 /* 0xa450 = DSISR bits that mean this is not a plain translation miss */
1013 andis. r0,r4,0xa450 /* weird error? */
1014 bne- .handle_page_fault /* if so, go straight to the C fault path */
1015BEGIN_FTR_SECTION
 /* pre-SLB (segment-table) CPUs may need a segment entry allocated */
1016 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1017 bne- .do_ste_alloc /* If so handle it */
1018END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1019
1020 /*
1021 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1022 * accessing a userspace segment (even from the kernel). We assume
1023 * kernel addresses always have the high bit set.
1024 */
1025 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1026 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1027 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1028 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1029 ori r4,r4,1 /* add _PAGE_PRESENT */
1030 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
1031
1032 /*
1033 * On iSeries, we soft-disable interrupts here, then
1034 * hard-enable interrupts so that the hash_page code can spin on
1035 * the hash_table_lock without problems on a shared processor.
1036 */
1037 DISABLE_INTS
1038
1039 /*
1040 * r3 contains the faulting address
1041 * r4 contains the required access permissions
1042 * r5 contains the trap number
1043 *
1044 * at return r3 = 0 for success
1045 */
1046 bl .hash_page /* build HPTE if possible */
1047 cmpdi r3,0 /* see if hash_page succeeded */
1048
1049#ifdef DO_SOFT_DISABLE
1050 /*
1051 * If we had interrupts soft-enabled at the point where the
1052 * DSI/ISI occurred, and an interrupt came in during hash_page,
1053 * handle it now.
1054 * We jump to ret_from_except_lite rather than fast_exception_return
1055 * because ret_from_except_lite will check for and handle pending
1056 * interrupts if necessary.
1057 */
1058 beq .ret_from_except_lite
1059 /* For a hash failure, we don't bother re-enabling interrupts */
1060 ble- 12f
1061
1062 /*
1063 * hash_page couldn't handle it, set soft interrupt enable back
1064 * to what it was before the trap. Note that .local_irq_restore
1065 * handles any interrupts pending at this point.
1066 */
1067 ld r3,SOFTE(r1)
1068 bl .local_irq_restore
1069 b 11f
1070#else
1071 beq fast_exception_return /* Return from exception on success */
1072 ble- 12f /* Failure return from hash_page */
1073
1074 /* fall through */
1075#endif
1076
1077/* Here we have a page fault that hash_page can't handle. */
/*
 * handle_page_fault: invoke the generic C page-fault handler with the
 * _DAR/_DSISR values saved in the exception frame.  A non-zero return
 * from do_page_fault() means the fault could not be resolved; report
 * it via bad_page_fault() (return value passed along in r5).
 */
1078_GLOBAL(handle_page_fault)
1079 ENABLE_INTS
108011: ld r4,_DAR(r1)
1081 ld r5,_DSISR(r1)
1082 addi r3,r1,STACK_FRAME_OVERHEAD
1083 bl .do_page_fault
1084 cmpdi r3,0
1085 beq+ .ret_from_except_lite
 /* fault was bad: save the non-volatile GPRs for a full register dump */
1086 bl .save_nvgprs
1087 mr r5,r3
1088 addi r3,r1,STACK_FRAME_OVERHEAD
1089 lwz r4,_DAR(r1)
1090 bl .bad_page_fault
1091 b .ret_from_except
1092
1093/* We have a page fault that hash_page could handle but HV refused
1094 * the PTE insertion
1095 */
/* reached via the "ble- 12f" branches above when hash_page returns < 0 */
109612: bl .save_nvgprs
1097 addi r3,r1,STACK_FRAME_OVERHEAD
1098 lwz r4,_DAR(r1)
1099 bl .low_hash_fault
1100 b .ret_from_except
1101
1102 /* here we have a segment miss */
/*
 * do_ste_alloc: allocate a segment-table entry for the faulting address
 * (segment-table CPUs only, see the FTR section above).  r3 == 0 from
 * ste_allocate() means success; otherwise fall back to the normal
 * page-fault path.
 */
1103_GLOBAL(do_ste_alloc)
1104 bl .ste_allocate /* try to insert stab entry */
1105 cmpdi r3,0
1106 beq+ fast_exception_return
1107 b .handle_page_fault
1108
1109/*
1110 * r13 points to the PACA, r9 contains the saved CR,
1111 * r11 and r12 contain the saved SRR0 and SRR1.
1112 * r9 - r13 are saved in paca->exslb.
1113 * We assume we aren't going to take any exceptions during this procedure.
1114 * We assume (DAR >> 60) == 0xc.
1115 */
/*
 * do_stab_bolted: insert a bolted segment-table entry for a kernel
 * (0xC...) address, entirely with the MMU off, then rfid back to the
 * interrupted context.  If no free slot exists in the primary STE
 * group, a pseudo-random victim (timebase-based) is cast out.
 */
1116 .align 7
1117_GLOBAL(do_stab_bolted)
1118 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1119 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1120
1121 /* Hash to the primary group */
1122 ld r10,PACASTABVIRT(r13)
1123 mfspr r11,DAR
1124 srdi r11,r11,28
1125 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1126
1127 /* Calculate VSID */
1128 /* This is a kernel address, so protovsid = ESID */
1129 ASM_VSID_SCRAMBLE(r11, r9)
1130 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1131
1132 /* Search the primary group for a free entry */
11331: ld r11,0(r10) /* Test valid bit of the current ste */
1134 andi. r11,r11,0x80
1135 beq 2f
1136 addi r10,r10,16
 /* 8 entries of 16 bytes each: stop when offset wraps past 0x70 */
1137 andi. r11,r10,0x70
1138 bne 1b
1139
1140 /* Stick for only searching the primary group for now. */
1141 /* At least for now, we use a very simple random castout scheme */
1142 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1143 mftb r11
1144 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1145 ori r11,r11,0x10
1146
1147 /* r10 currently points to an ste one past the group of interest */
1148 /* make it point to the randomly selected entry */
1149 subi r10,r10,128
1150 or r10,r10,r11 /* r10 is the entry to invalidate */
1151
1152 isync /* mark the entry invalid */
1153 ld r11,0(r10)
1154 rldicl r11,r11,56,1 /* clear the valid bit */
1155 rotldi r11,r11,8
1156 std r11,0(r10)
1157 sync
1158
1159 clrrdi r11,r11,28 /* Get the esid part of the ste */
1160 slbie r11
1161
11622: std r9,8(r10) /* Store the vsid part of the ste */
1163 eieio
1164
1165 mfspr r11,DAR /* Get the new esid */
1166 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1167 ori r11,r11,0x90 /* Turn on valid and kp */
1168 std r11,0(r10) /* Put new entry back into the stab */
1169
1170 sync
1171
1172 /* All done -- return from exception. */
1173 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1174 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1175
1176 andi. r10,r12,MSR_RI
1177 beq- unrecov_slb
1178
1179 mtcrf 0x80,r9 /* restore CR */
1180
 /* clear the low MSR bits (RI) before reloading SRR0/SRR1 so that an
  * exception in this window cannot silently corrupt them */
1181 mfmsr r10
1182 clrrdi r10,r10,2
1183 mtmsrd r10,1
1184
1185 mtspr SRR0,r11
1186 mtspr SRR1,r12
1187 ld r9,PACA_EXSLB+EX_R9(r13)
1188 ld r10,PACA_EXSLB+EX_R10(r13)
1189 ld r11,PACA_EXSLB+EX_R11(r13)
1190 ld r12,PACA_EXSLB+EX_R12(r13)
1191 ld r13,PACA_EXSLB+EX_R13(r13)
1192 rfid
1193 b . /* prevent speculative execution */
1194
1195/*
1196 * r13 points to the PACA, r9 contains the saved CR,
1197 * r11 and r12 contain the saved SRR0 and SRR1.
1198 * r3 has the faulting address
1199 * r9 - r13 are saved in paca->exslb.
1200 * r3 is saved in paca->slb_r3
1201 * We assume we aren't going to take any exceptions during this procedure.
1202 */
/*
 * do_slb_miss: SLB-miss handler body; defers the actual SLB entry
 * construction to the C helper slb_allocate(), then restores state
 * and returns via rfid.
 */
1203_GLOBAL(do_slb_miss)
1204 mflr r10
1205
1206 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1207 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1208
1209 bl .slb_allocate /* handle it */
1210
1211 /* All done -- return from exception. */
1212
1213 ld r10,PACA_EXSLB+EX_LR(r13)
1214 ld r3,PACA_EXSLB+EX_R3(r13)
1215 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1216#ifdef CONFIG_PPC_ISERIES
 /* on iSeries SRR0/SRR1 live in the lppaca, not the SPRs */
1217 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1218#endif /* CONFIG_PPC_ISERIES */
1219
1220 mtlr r10
1221
1222 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1223 beq- unrecov_slb
1224
1225.machine push
1226.machine "power4"
1227 mtcrf 0x80,r9
1228 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1229.machine pop
1230
1231#ifdef CONFIG_PPC_ISERIES
1232 mtspr SRR0,r11
1233 mtspr SRR1,r12
1234#endif /* CONFIG_PPC_ISERIES */
1235 ld r9,PACA_EXSLB+EX_R9(r13)
1236 ld r10,PACA_EXSLB+EX_R10(r13)
1237 ld r11,PACA_EXSLB+EX_R11(r13)
1238 ld r12,PACA_EXSLB+EX_R12(r13)
1239 ld r13,PACA_EXSLB+EX_R13(r13)
1240 rfid
1241 b . /* prevent speculative execution */
1242
/*
 * unrecov_slb: reached when MSR_RI was clear at SLB/STAB miss time,
 * i.e. the interrupted context cannot be resumed.  Build a full frame
 * and loop forever in unrecoverable_exception().
 */
1243unrecov_slb:
1244 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1245 DISABLE_INTS
1246 bl .save_nvgprs
12471: addi r3,r1,STACK_FRAME_OVERHEAD
1248 bl .unrecoverable_exception
1249 b 1b
1250
1251/*
1252 * Space for CPU0's segment table.
1253 *
1254 * On iSeries, the hypervisor must fill in at least one entry before
1255 * we get control (with relocate on). The address is give to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT).
1259 */
1260 . = STAB0_PHYS_ADDR /* 0x6000 */
1261 .globl initial_stab
1262initial_stab:
 /* one 4 KB page, zero-filled; the hardware/HV writes STEs here */
1263 .space 4096
1264
1265/*
1266 * Data area reserved for FWNMI option.
1267 * This address (0x7000) is fixed by the RPA.
1268 */
1269 .= 0x7000
1270 .globl fwnmi_data_area
1271fwnmi_data_area:
1272
1273 /* iSeries does not use the FWNMI stuff, so it is safe to put
1274 * this here, even if we later allow kernels that will boot on
1275 * both pSeries and iSeries */
1276#ifdef CONFIG_PPC_ISERIES
 /* the LparMap must sit at a fixed physical address known to the HV */
1277 . = LPARMAP_PHYS
1278#include "lparmap.s"
1279/*
1280 * This ".text" is here for old compilers that generate a trailing
1281 * .note section when compiling .c files to .s
1282 */
1283 .text
1284#endif /* CONFIG_PPC_ISERIES */
1285
 /* advance the location counter past the reserved low-memory areas */
1286 . = 0x8000
1287
1288/*
1289 * On pSeries, secondary processors spin in the following code.
1290 * At entry, r3 = this processor's number (physical cpu id)
1291 */
/*
 * Maps the physical cpu id to a logical one by scanning the paca
 * array, then spins (HMT_LOW) until the boot CPU sets
 * paca->cpu_start, at which point it branches to __secondary_start.
 * A cpu with no matching paca is parked in kexec_wait.
 */
1292_GLOBAL(pSeries_secondary_smp_init)
1293 mr r24,r3
1294
1295 /* turn on 64-bit mode */
1296 bl .enable_64b_mode
1297 isync
1298
1299 /* Copy some CPU settings from CPU 0 */
1300 bl .__restore_cpu_setup
1301
1302 /* Set up a paca value for this processor. Since we have the
1303 * physical cpu id in r24, we need to search the pacas to find
1304 * which logical id maps to our physical one.
1305 */
1306 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1307 li r5,0 /* logical cpu id */
13081: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1309 cmpw r6,r24 /* Compare to our id */
1310 beq 2f
1311 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1312 addi r5,r5,1
1313 cmpwi r5,NR_CPUS
1314 blt 1b
1315
1316 mr r3,r24 /* not found, copy phys to r3 */
1317 b .kexec_wait /* next kernel might do better */
1318
13192: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1320 /* From now on, r24 is expected to be logical cpuid */
1321 mr r24,r5
13223: HMT_LOW
1323 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1324 /* start. */
1325 sync
1326
1327 /* Create a temp kernel stack for use before relocation is on. */
1328 ld r1,PACAEMERGSP(r13)
1329 subi r1,r1,STACK_FRAME_OVERHEAD
1330
1331 cmpwi 0,r23,0
1332#ifdef CONFIG_SMP
1333 bne .__secondary_start
1334#endif
1335 b 3b /* Loop until told to go */
1336
1337#ifdef CONFIG_PPC_ISERIES
/*
 * iSeries entry point: relocation is already on (the hypervisor set up
 * an initial segment table).  Clear the BSS, set up the boot stack and
 * TOC, identify the CPU and hand off to the common startup path.
 */
_STATIC(__start_initialization_iSeries)
1339 /* Clear out the BSS */
1340 LOADADDR(r11,__bss_stop)
1341 LOADADDR(r8,__bss_start)
1342 sub r11,r11,r8 /* bss size */
1343 addi r11,r11,7 /* round up to an even double word */
1344 rldicl. r11,r11,61,3 /* shift right by 3 */
1345 beq 4f
1346 addi r8,r8,-8
1347 li r0,0
1348 mtctr r11 /* zero this many doublewords */
13493: stdu r0,8(r8)
1350 bdnz 3b
13514:
 /* initial kernel stack in init_thread_union, back-chain zeroed */
1352 LOADADDR(r1,init_thread_union)
1353 addi r1,r1,THREAD_SIZE
1354 li r0,0
1355 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1356
1357 LOADADDR(r3,cpu_specs)
1358 LOADADDR(r4,cur_cpu_spec)
1359 li r5,0
1360 bl .identify_cpu
1361
 /* TOC pointer = __toc_start + 0x8000 (TOC base bias) */
1362 LOADADDR(r2,__toc_start)
1363 addi r2,r2,0x4000
1364 addi r2,r2,0x4000
1365
1366 bl .iSeries_early_setup
1367
1368 /* relocation is on at this point */
1369
1370 b .start_here_common
1371#endif /* CONFIG_PPC_ISERIES */
1372
1373#ifdef CONFIG_PPC_MULTIPLATFORM
1374
/*
 * __mmu_off: turn off instruction and data relocation and continue at
 * the real address in r4.  No-op (plain return) if IR/DR are already
 * clear.  Clobbers r0, r3; does not return to the caller via LR when
 * the MMU was on — it rfids to r4 instead.
 */
_STATIC(__mmu_off)
1376 mfmsr r3
1377 andi. r0,r3,MSR_IR|MSR_DR
1378 beqlr
1379 andc r3,r3,r0
1380 mtspr SPRN_SRR0,r4
1381 mtspr SPRN_SRR1,r3
1382 sync
1383 rfid
1384 b . /* prevent speculative execution */
1385
1386
1387/*
1388 * Here is our main kernel entry point. We support currently 2 kind of entries
1389 * depending on the value of r5.
1390 *
1391 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1392 *               in r3...r7
1393 *
1394 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1395 *               DT block, r4 is a physical pointer to the kernel itself
1396 *
1397 */
_GLOBAL(__start_initialization_multiplatform)
1399 /*
1400 * Are we booted from a PROM Of-type client-interface ?
1401 */
1402 cmpldi cr0,r5,0
1403 bne .__boot_from_prom /* yes -> prom */
1404
 /* kexec-style entry: stash DT pointer and kernel base in NV regs */
1405 /* Save parameters */
1406 mr r31,r3
1407 mr r30,r4
1408
1409 /* Make sure we are running in 64 bits mode */
1410 bl .enable_64b_mode
1411
1412 /* Setup some critical 970 SPRs before switching MMU off */
1413 bl .__970_cpu_preinit
1414
1415 /* cpu # */
1416 li r24,0
1417
1418 /* Switch off MMU if not already */
 /* target = physical address of __after_prom_start inside the image */
1419 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1420 add r4,r4,r30
1421 bl .__mmu_off
1422 b .__after_prom_start
1423
/*
 * __boot_from_prom: Open Firmware entry.  Preserve the five OF client
 * interface parameters in non-volatile registers, switch to 64-bit
 * mode, relocate the TOC to a real address, and call prom_init(),
 * which never returns here (it re-enters the kernel itself).
 */
_STATIC(__boot_from_prom)
1425 /* Save parameters */
1426 mr r31,r3
1427 mr r30,r4
1428 mr r29,r5
1429 mr r28,r6
1430 mr r27,r7
1431
1432 /* Make sure we are running in 64 bits mode */
1433 bl .enable_64b_mode
1434
1435 /* put a relocation offset into r3 */
1436 bl .reloc_offset
1437
 /* TOC pointer = __toc_start + 0x8000 (TOC base bias) */
1438 LOADADDR(r2,__toc_start)
1439 addi r2,r2,0x4000
1440 addi r2,r2,0x4000
1441
1442 /* Relocate the TOC from a virt addr to a real addr */
1443 sub r2,r2,r3
1444
1445 /* Restore parameters */
1446 mr r3,r31
1447 mr r4,r30
1448 mr r5,r29
1449 mr r6,r28
1450 mr r7,r27
1451
1452 /* Do all of the interaction with OF client interface */
1453 bl .prom_init
1454 /* We never return */
1455 trap
1456
1457/*
1458 * At this point, r3 contains the physical address we are running at,
1459 * returned by prom_init()
1460 */
_STATIC(__after_prom_start)
1462
1463/*
1464 * We need to run with __start at physical address 0.
1465 * This will leave some code in the first 256B of
1466 * real memory, which are reserved for software use.
1467 * The remainder of the first page is loaded with the fixed
1468 * interrupt vectors. The next two pages are filled with
1469 * unknown exception placeholders.
1470 *
1471 * Note: This process overwrites the OF exception vectors.
1472 * r26 == relocation offset
1473 * r27 == KERNELBASE
1474 */
1475 bl .reloc_offset
1476 mr r26,r3
1477 SET_REG_TO_CONST(r27,KERNELBASE)
1478
1479 li r3,0 /* target addr */
1480
1481 // XXX FIXME: Use phys returned by OF (r30)
1482 sub r4,r27,r26 /* source addr */
1483 /* current address of _start */
1484 /* i.e. where we are running */
1485 /* the source addr */
1486
1487 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1488 sub r5,r5,r27
1489
1490 li r6,0x100 /* Start offset, the first 0x100 */
1491 /* bytes were copied earlier. */
1492
1493 bl .copy_and_flush /* copy the first n bytes */
1494 /* this includes the code being */
1495 /* executed here. */
1496
 /* jump into the freshly-made copy at physical 0 before copying the rest */
1497 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1498 mtctr r0 /* that we just made/relocated */
1499 bctr
1500
15014: LOADADDR(r5,klimit)
1502 sub r5,r5,r26
1503 ld r5,0(r5) /* get the value of klimit */
1504 sub r5,r5,r27
1505 bl .copy_and_flush /* copy the rest */
1506 b .start_here_multiplatform
1507
1508#endif /* CONFIG_PPC_MULTIPLATFORM */
1509
1510/*
1511 * Copy routine used to copy the kernel to start at physical address 0
1512 * and flush and invalidate the caches as needed.
1513 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1514 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1515 *
1516 * Note: this routine *only* clobbers r0, r6 and lr
1517 */
_GLOBAL(copy_and_flush)
 /* pre-bias r5/r6 by 8 so the stdu-style loop below works; undone at exit */
1519 addi r5,r5,-8
1520 addi r6,r6,-8
15214: li r0,16 /* Use the least common */
1522 /* denominator cache line */
1523 /* size. This results in */
1524 /* extra cache line flushes */
1525 /* but operation is correct. */
1526 /* Can't get cache line size */
1527 /* from NACA as it is being */
1528 /* moved too. */
1529
1530 mtctr r0 /* put # words/line in ctr */
15313: addi r6,r6,8 /* copy a cache line */
1532 ldx r0,r6,r4
1533 stdx r0,r6,r3
1534 bdnz 3b
1535 dcbst r6,r3 /* write it to memory */
1536 sync
1537 icbi r6,r3 /* flush the icache line */
1538 cmpld 0,r6,r5
1539 blt 4b
1540 sync
1541 addi r5,r5,8
1542 addi r6,r6,8
1543 blr
1544
/* end of the early-copied region; everything up to here runs at phys 0 */
.align 8
copy_to_here:
1548#ifdef CONFIG_SMP
1549#ifdef CONFIG_PPC_PMAC
1550/*
1551 * On PowerMac, secondary processors starts from the reset vector, which
1552 * is temporarily turned into a call to one of the functions below.
1553 */
1554 .section ".text";
1555 .align 2 ;
1556
/* per-CPU reset-vector stubs: load the cpu number and join the common path */
 .globl pmac_secondary_start_1
pmac_secondary_start_1:
1559 li r24, 1
1560 b .pmac_secondary_start
1561
1562 .globl pmac_secondary_start_2
pmac_secondary_start_2:
1564 li r24, 2
1565 b .pmac_secondary_start
1566
1567 .globl pmac_secondary_start_3
pmac_secondary_start_3:
1569 li r24, 3
1570 b .pmac_secondary_start
1571
/*
 * pmac_secondary_start: common PowerMac secondary bring-up.
 * r24 = logical cpu number (set by the stubs above); PACA layout is
 * direct-indexed here, unlike the pSeries search path.
 */
_GLOBAL(pmac_secondary_start)
1573 /* turn on 64-bit mode */
1574 bl .enable_64b_mode
1575 isync
1576
1577 /* Copy some CPU settings from CPU 0 */
1578 bl .__restore_cpu_setup
1579
1580 /* pSeries do that early though I don't think we really need it */
1581 mfmsr r3
1582 ori r3,r3,MSR_RI
1583 mtmsrd r3 /* RI on */
1584
1585 /* Set up a paca value for this processor. */
1586 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1587 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1588 add r13,r13,r4 /* for this processor. */
1589 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1590
1591 /* Create a temp kernel stack for use before relocation is on. */
1592 ld r1,PACAEMERGSP(r13)
1593 subi r1,r1,STACK_FRAME_OVERHEAD
1594
1595 b .__secondary_start
1596
1597#endif /* CONFIG_PPC_PMAC */
1598
1599/*
1600 * This function is called after the master CPU has released the
1601 * secondary processors. The execution environment is relocation off.
1602 * The paca for this processor has the following fields initialized at
1603 * this point:
1604 * 1. Processor number
1605 * 2. Segment table pointer (virtual address)
1606 * On entry the following are set:
1607 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1608 * r24 = cpu# (in Linux terms)
1609 * r13 = paca virtual address
1610 * SPRG3 = paca virtual address
1611 */
_GLOBAL(__secondary_start)
1613
1614 HMT_MEDIUM /* Set thread priority to MEDIUM */
1615
 /* TOC from the paca; mark this cpu soft-disabled until fully up */
1616 ld r2,PACATOC(r13)
1617 li r6,0
1618 stb r6,PACAPROCENABLED(r13)
1619
1620#ifndef CONFIG_PPC_ISERIES
1621 /* Initialize the page table pointer register. */
1622 LOADADDR(r6,_SDR1)
1623 ld r6,0(r6) /* get the value of _SDR1 */
1624 mtspr SDR1,r6 /* set the htab location */
1625#endif
1626 /* Initialize the first segment table (or SLB) entry */
1627 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1628 bl .stab_initialize
1629
1630 /* Initialize the kernel stack. Just a repeat for iSeries. */
1631 LOADADDR(r3,current_set)
1632 sldi r28,r24,3 /* get current_set[cpu#] */
1633 ldx r1,r3,r28
1634 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1635 std r1,PACAKSAVE(r13)
1636
1637 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1638 ori r4,r3,1 /* turn on valid bit */
1639
1640#ifdef CONFIG_PPC_ISERIES
 /* legacy hypervisor call interface: r0 = -1, r3 = HvCall_setASR token */
1641 li r0,-1 /* hypervisor call */
1642 li r3,1
1643 sldi r3,r3,63 /* 0x8000000000000000 */
1644 ori r3,r3,4 /* 0x8000000000000004 */
1645 sc /* HvCall_setASR */
1646#else
1647 /* set the ASR */
 /* on RPA-style LPAR (SStar/IStar/Pulsar) the ASR must be set via
  * the H_SET_ASR hcall; otherwise write the SPR directly */
1648 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1649 ld r3,0(r3)
1650 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1651 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1652 beq 98f /* branch if result is 0 */
1653 mfspr r3,PVR
1654 srwi r3,r3,16
1655 cmpwi r3,0x37 /* SStar */
1656 beq 97f
1657 cmpwi r3,0x36 /* IStar */
1658 beq 97f
1659 cmpwi r3,0x34 /* Pulsar */
1660 bne 98f
166197: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1662 HVSC /* Invoking hcall */
1663 b 99f
166498: /* !(rpa hypervisor) || !(star) */
1665 mtasr r4 /* set the stab location */
166699:
1667#endif
 /* zero LR so a return from start_secondary would trap, not wander */
1668 li r7,0
1669 mtlr r7
1670
1671 /* enable MMU and jump to start_secondary */
1672 LOADADDR(r3,.start_secondary_prolog)
1673 SET_REG_TO_CONST(r4, MSR_KERNEL)
1674#ifdef DO_SOFT_DISABLE
1675 ori r4,r4,MSR_EE
1676#endif
1677 mtspr SRR0,r3
1678 mtspr SRR1,r4
1679 rfid
1680 b . /* prevent speculative execution */
1681
1682/*
1683 * Running with relocation on at this point. All we want to do is
1684 * zero the stack back-chain pointer before going into C code.
1685 */
_GLOBAL(start_secondary_prolog)
1687 li r3,0
1688 std r3,0(r1) /* Zero the stack frame pointer */
1689 bl .start_secondary
1690#endif
1691
1692/*
1693 * This subroutine clobbers r11 and r12
1694 */
/*
 * enable_64b_mode: set MSR_SF (64-bit mode) and MSR_ISF (64-bit mode
 * for interrupts) in the MSR.  Safe to call whether or not they are
 * already set.
 */
_GLOBAL(enable_64b_mode)
1696 mfmsr r11 /* grab the current MSR */
1697 li r12,1
1698 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1699 or r11,r11,r12
1700 li r12,1
1701 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1702 or r11,r11,r12
1703 mtmsrd r11
1704 isync
1705 blr
1706
1707#ifdef CONFIG_PPC_MULTIPLATFORM
1708/*
1709 * This is where the main kernel code starts.
1710 */
/*
 * start_here_multiplatform: running at physical 0 with the MMU off.
 * Clears BSS, builds a real-mode stack/TOC, identifies the CPU, runs
 * early_setup(), programs ASR/SDR1 as the platform requires, then
 * rfids to start_here_common with relocation on.
 */
_STATIC(start_here_multiplatform)
1712 /* get a new offset, now that the kernel has moved. */
1713 bl .reloc_offset
1714 mr r26,r3
1715
1716 /* Clear out the BSS. It may have been done in prom_init,
1717 * already but that's irrelevant since prom_init will soon
1718 * be detached from the kernel completely. Besides, we need
1719 * to clear it now for kexec-style entry.
1720 */
1721 LOADADDR(r11,__bss_stop)
1722 LOADADDR(r8,__bss_start)
1723 sub r11,r11,r8 /* bss size */
1724 addi r11,r11,7 /* round up to an even double word */
1725 rldicl. r11,r11,61,3 /* shift right by 3 */
1726 beq 4f
1727 addi r8,r8,-8
1728 li r0,0
1729 mtctr r11 /* zero this many doublewords */
17303: stdu r0,8(r8)
1731 bdnz 3b
17324:
1733
1734 mfmsr r6
1735 ori r6,r6,MSR_RI
1736 mtmsrd r6 /* RI on */
1737
1738#ifdef CONFIG_HMT
1739 /* Start up the second thread on cpu 0 */
1740 mfspr r3,PVR
1741 srwi r3,r3,16
1742 cmpwi r3,0x34 /* Pulsar */
1743 beq 90f
1744 cmpwi r3,0x36 /* Icestar */
1745 beq 90f
1746 cmpwi r3,0x37 /* SStar */
1747 beq 90f
1748 b 91f /* HMT not supported */
174990: li r3,0
1750 bl .hmt_start_secondary
175191:
1752#endif
1753
1754 /* The following gets the stack and TOC set up with the regs */
1755 /* pointing to the real addr of the kernel stack. This is */
1756 /* all done to support the C function call below which sets */
1757 /* up the htab. This is done because we have relocated the */
1758 /* kernel but are still running in real mode. */
1759
1760 LOADADDR(r3,init_thread_union)
1761 sub r3,r3,r26
1762
1763 /* set up a stack pointer (physical address) */
1764 addi r1,r3,THREAD_SIZE
1765 li r0,0
1766 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1767
1768 /* set up the TOC (physical address) */
1769 LOADADDR(r2,__toc_start)
1770 addi r2,r2,0x4000
1771 addi r2,r2,0x4000
1772 sub r2,r2,r26
1773
 /* virt->phys conversion via r26 for every pointer passed to C code */
1774 LOADADDR(r3,cpu_specs)
1775 sub r3,r3,r26
1776 LOADADDR(r4,cur_cpu_spec)
1777 sub r4,r4,r26
1778 mr r5,r26
1779 bl .identify_cpu
1780
1781 /* Save some low level config HIDs of CPU0 to be copied to
1782 * other CPUs later on, or used for suspend/resume
1783 */
1784 bl .__save_cpu_setup
1785 sync
1786
1787 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1788 * note that boot_cpuid can always be 0 nowadays since there is
1789 * nowhere it can be initialized differently before we reach this
1790 * code
1791 */
1792 LOADADDR(r27, boot_cpuid)
1793 sub r27,r27,r26
1794 lwz r27,0(r27)
1795
1796 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1797 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1798 add r13,r13,r24 /* for this processor. */
1799 sub r13,r13,r26 /* convert to physical addr */
1800 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */
1801
1802 /* Do very early kernel initializations, including initial hash table,
1803 * stab and slb setup before we turn on relocation. */
1804
1805 /* Restore parameters passed from prom_init/kexec */
1806 mr r3,r31
1807 bl .early_setup
1808
1809 /* set the ASR */
 /* same star-CPU/LPAR dance as in __secondary_start: H_SET_ASR on
  * RPA hypervisors, direct mtasr otherwise */
1810 ld r3,PACASTABREAL(r13)
1811 ori r4,r3,1 /* turn on valid bit */
1812 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1813 ld r3,0(r3)
1814 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1815 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1816 beq 98f /* branch if result is 0 */
1817 mfspr r3,PVR
1818 srwi r3,r3,16
1819 cmpwi r3,0x37 /* SStar */
1820 beq 97f
1821 cmpwi r3,0x36 /* IStar */
1822 beq 97f
1823 cmpwi r3,0x34 /* Pulsar */
1824 bne 98f
182597: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1826 HVSC /* Invoking hcall */
1827 b 99f
182898: /* !(rpa hypervisor) || !(star) */
1829 mtasr r4 /* set the stab location */
183099:
1831 /* Set SDR1 (hash table pointer) */
 /* only bare-metal (non-LPAR) owns the hash table; LPAR leaves SDR1 to the HV */
1832 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1833 ld r3,0(r3)
1834 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1835 /* Test if bit 0 is set (LPAR bit) */
1836 andi. r3,r3,PLATFORM_LPAR
1837 bne 98f /* branch if result is !0 */
1838 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1839 sub r6,r6,r26
1840 ld r6,0(r6) /* get the value of _SDR1 */
1841 mtspr SDR1,r6 /* set the htab location */
184298:
1843 LOADADDR(r3,.start_here_common)
1844 SET_REG_TO_CONST(r4, MSR_KERNEL)
1845 mtspr SRR0,r3
1846 mtspr SRR1,r4
1847 rfid
1848 b . /* prevent speculative execution */
1849#endif /* CONFIG_PPC_MULTIPLATFORM */
1850
1851 /* This is where all platforms converge execution */
/*
 * start_here_common: first code run with relocation on, on all
 * platforms.  Sets up the virtual stack/TOC/paca, applies CPU feature
 * fixups, runs setup_system() and finally start_kernel() (no return).
 */
_STATIC(start_here_common)
1853 /* relocation is on at this point */
1854
1855 /* The following code sets up the SP and TOC now that we are */
1856 /* running with translation enabled. */
1857
1858 LOADADDR(r3,init_thread_union)
1859
1860 /* set up the stack */
1861 addi r1,r3,THREAD_SIZE
1862 li r0,0
1863 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1864
1865 /* Apply the CPUs-specific fixups (nop out sections not relevant
1866 * to this CPU
1867 */
1868 li r3,0
1869 bl .do_cpu_ftr_fixups
1870
1871 LOADADDR(r26, boot_cpuid)
1872 lwz r26,0(r26)
1873
1874 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1875 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1876 add r13,r13,r24 /* for this processor. */
1877 mtspr SPRG3,r13
1878
1879 /* ptr to current */
1880 LOADADDR(r4,init_task)
1881 std r4,PACACURRENT(r13)
1882
1883 /* Load the TOC */
1884 ld r2,PACATOC(r13)
1885 std r1,PACAKSAVE(r13)
1886
1887 bl .setup_system
1888
1889 /* Load up the kernel context */
18905:
1891#ifdef DO_SOFT_DISABLE
 /* soft-disabled, hard-enabled: external interrupts get noted but deferred */
1892 li r5,0
1893 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1894 mfmsr r5
1895 ori r5,r5,MSR_EE /* Hard Enabled */
1896 mtmsrd r5
1897#endif
1898
1899 bl .start_kernel
1900
/*
 * hmt_init: Hardware MultiThreading bring-up for Star CPUs
 * (Pulsar 0x34 / Icestar 0x36 / SStar 0x37).  Records this thread's
 * PIR-derived id in hmt_thread_data[cpu] and kicks off the secondary
 * hardware thread; non-HMT CPUs fall straight through.  Ends by
 * funnelling into pSeries_secondary_smp_init with r3 = cpu id.
 */
_GLOBAL(hmt_init)
1902#ifdef CONFIG_HMT
1903 LOADADDR(r5, hmt_thread_data)
1904 mfspr r7,PVR
1905 srwi r7,r7,16
1906 cmpwi r7,0x34 /* Pulsar */
1907 beq 90f
1908 cmpwi r7,0x36 /* Icestar */
1909 beq 91f
1910 cmpwi r7,0x37 /* SStar */
1911 beq 91f
1912 b 101f
191390: mfspr r6,PIR
1914 andi. r6,r6,0x1f
1915 b 92f
191691: mfspr r6,PIR
1917 andi. r6,r6,0x3ff
191892: sldi r4,r24,3
1919 stwx r6,r5,r4
1920 bl .hmt_start_secondary
1921 b 101f
1922
/* secondary hardware thread parks here until its entry shows up in
 * hmt_thread_data[]; run with a real-mode (high-bits-cleared) pointer */
__hmt_secondary_hold:
1924 LOADADDR(r5, hmt_thread_data)
1925 clrldi r5,r5,4
1926 li r7,0
1927 mfspr r6,PIR
1928 mfspr r8,PVR
1929 srwi r8,r8,16
1930 cmpwi r8,0x34
1931 bne 93f
1932 andi. r6,r6,0x1f
1933 b 103f
 /* NOTE(review): mask is 0x3f here but 0x3ff in the 91: path above —
  * confirm which width of PIR is intended for non-Pulsar parts */
193493: andi. r6,r6,0x3f
1935
1936103: lwzx r8,r5,r7
1937 cmpw r8,r6
1938 beq 104f
1939 addi r7,r7,8
1940 b 103b
1941
1942104: addi r7,r7,4
1943 lwzx r9,r5,r7
1944 mr r24,r9
1945101:
1946#endif
1947 mr r3,r24
1948 b .pSeries_secondary_smp_init
1949
1950#ifdef CONFIG_HMT
/*
 * hmt_start_secondary: wake the second hardware thread on a Star CPU.
 * Points the dormant thread's resume address (NIADORM) at
 * __hmt_secondary_hold, clears the dormancy bit in MSRDORM, programs
 * the thread switch control/timeout registers, and enables the second
 * thread via HID0 and CTRL.  Register values are Star-chip specific.
 */
_GLOBAL(hmt_start_secondary)
1952 LOADADDR(r4,__hmt_secondary_hold)
1953 clrldi r4,r4,4
1954 mtspr NIADORM, r4
1955 mfspr r4, MSRDORM
1956 li r5, -65
1957 and r4, r4, r5
1958 mtspr MSRDORM, r4
1959 lis r4,0xffef
1960 ori r4,r4,0x7403
1961 mtspr TSC, r4
1962 li r4,0x1f4
1963 mtspr TST, r4
1964 mfspr r4, HID0
1965 ori r4, r4, 0x1
1966 mtspr HID0, r4
1967 mfspr r4, SPRN_CTRLF
1968 oris r4, r4, 0x40
1969 mtspr SPRN_CTRLT, r4
1970 blr
1971#endif
1972
1973#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
_GLOBAL(smp_release_cpus)
1975 /* All secondary cpus are spinning on a common
1976 * spinloop, release them all now so they can start
1977 * to spin on their individual paca spinloops.
1978 * For non SMP kernels, the secondary cpus never
1979 * get out of the common spinloop.
1980 */
1981 li r3,1
1982 LOADADDR(r5,__secondary_hold_spinloop)
1983 std r3,0(r5)
 /* make the store visible before any secondary reads the flag */
1984 sync
1985 blr
1986#endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */
1987
1988
1989/*
1990 * We put a few things here that have to be page-aligned.
1991 * This stuff goes at the beginning of the bss, which is page-aligned.
1992 */
1993 .section ".bss"
1994
1995 .align PAGE_SHIFT
1996
 /* one always-zero page, e.g. for mapping read-only zero-filled data */
1997 .globl empty_zero_page
empty_zero_page:
1999 .space PAGE_SIZE
2000
 /* kernel's top-level page directory */
2001 .globl swapper_pg_dir
swapper_pg_dir:
2003 .space PAGE_SIZE
2004
2005/*
2006 * This space gets a copy of optional info passed to us by the bootstrap
2007 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
2008 */
2009 .globl cmd_line
cmd_line:
2011 .space COMMAND_LINE_SIZE