author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ppc64/kernel/head.S
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ppc64/kernel/head.S')
-rw-r--r--	arch/ppc64/kernel/head.S	2139
1 file changed, 2139 insertions(+), 0 deletions(-)
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
new file mode 100644
index 000000000000..fe05f3fbf9d0
--- /dev/null
+++ b/arch/ppc64/kernel/head.S
@@ -0,0 +1,2139 @@
1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#define SECONDARY_PROCESSORS
27
28#include <linux/config.h>
29#include <linux/threads.h>
30#include <asm/processor.h>
31#include <asm/page.h>
32#include <asm/mmu.h>
33#include <asm/naca.h>
34#include <asm/systemcfg.h>
35#include <asm/ppc_asm.h>
36#include <asm/offsets.h>
37#include <asm/bug.h>
38#include <asm/cputable.h>
39#include <asm/setup.h>
40#include <asm/hvcall.h>
41
42#ifdef CONFIG_PPC_ISERIES
43#define DO_SOFT_DISABLE
44#endif
45
46/*
47 * hcall interface to pSeries LPAR
48 */
49#define H_SET_ASR 0x30
50
51/*
52 * We lay out physical memory as follows:
53 * 0x0000 - 0x00ff : Secondary processor spin code
54 * 0x0100 - 0x2fff : pSeries Interrupt prologs
55 * 0x3000 - 0x3fff : Interrupt support
56 * 0x4000 - 0x4fff : NACA
57 * 0x6000 : iSeries and common interrupt prologs
58 * 0x9000 - 0x9fff : Initial segment table
59 */
60
61/*
62 * SPRG Usage
63 *
64 * Register Definition
65 *
66 * SPRG0 reserved for hypervisor
67 * SPRG1 temp - used to save gpr
68 * SPRG2 temp - used to save gpr
69 * SPRG3 virt addr of paca
70 */
71
72/*
73 * Entering into this code we make the following assumptions:
74 * For pSeries:
75 * 1. The MMU is off & open firmware is running in real mode.
76 * 2. The kernel is entered at __start
77 *
78 * For iSeries:
79 * 1. The MMU is on (as it always is for iSeries)
80 * 2. The kernel is entered at system_reset_iSeries
81 */
82
83 .text
84 .globl _stext
85_stext:
86#ifdef CONFIG_PPC_MULTIPLATFORM
87_GLOBAL(__start)
88 /* NOP this out unconditionally */
89BEGIN_FTR_SECTION
90 b .__start_initialization_multiplatform
91END_FTR_SECTION(0, 1)
92#endif /* CONFIG_PPC_MULTIPLATFORM */
93
94 /* Catch branch to 0 in real mode */
95 trap
96#ifdef CONFIG_PPC_ISERIES
97 /*
98 * At offset 0x20, there is a pointer to iSeries LPAR data.
99 * This is required by the hypervisor
100 */
101 . = 0x20
102 .llong hvReleaseData-KERNELBASE
103
104 /*
105 * At offset 0x28 and 0x30 are offsets to the msChunks
106 * array (used by the iSeries LPAR debugger to do translation
107 * between physical addresses and absolute addresses) and
108 * to the pidhash table (also used by the debugger)
109 */
110 .llong msChunks-KERNELBASE
111 .llong 0 /* pidhash-KERNELBASE SFRXXX */
112
113 /* Offset 0x38 - Pointer to start of embedded System.map */
114 .globl embedded_sysmap_start
115embedded_sysmap_start:
116 .llong 0
117 /* Offset 0x40 - Pointer to end of embedded System.map */
118 .globl embedded_sysmap_end
119embedded_sysmap_end:
120 .llong 0
121
122#else /* CONFIG_PPC_ISERIES */
123
124 /* Secondary processors spin on this value until it goes to 1. */
125 .globl __secondary_hold_spinloop
126__secondary_hold_spinloop:
127 .llong 0x0
128
129 /* Secondary processors write this value with their cpu # */
130 /* after they enter the spin loop immediately below. */
131 .globl __secondary_hold_acknowledge
132__secondary_hold_acknowledge:
133 .llong 0x0
134
135 . = 0x60
136/*
137 * The following code is used on pSeries to hold secondary processors
138 * in a spin loop after they have been freed from OpenFirmware, but
139 * before the bulk of the kernel has been relocated. This code
140 * is relocated to physical address 0x60 before prom_init is run.
141 * All of it must fit below the first exception vector at 0x100.
142 */
143_GLOBAL(__secondary_hold)
144 mfmsr r24
145 ori r24,r24,MSR_RI
146 mtmsrd r24 /* RI on */
147
148 /* Grab our linux cpu number */
149 mr r24,r3
150
151 /* Tell the master cpu we're here */
152 /* Relocation is off & we are located at an address less */
153 /* than 0x100, so we only need to grab the low-order offset. */
154 std r24,__secondary_hold_acknowledge@l(0)
155 sync
156
157 /* All secondary cpus wait here until told to start. */
158100: ld r4,__secondary_hold_spinloop@l(0)
159 cmpdi 0,r4,1
160 bne 100b
161
162#ifdef CONFIG_HMT
163 b .hmt_init
164#else
165#ifdef CONFIG_SMP
166 mr r3,r24
167 b .pSeries_secondary_smp_init
168#else
169 BUG_OPCODE
170#endif
171#endif
172#endif
173
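/*
 * In rough C terms, the hold protocol above is (a sketch only; the
 * symbols are the ones defined above plus smp_release_cpus() near the
 * end of this file):
 *
 *	secondary:  __secondary_hold_acknowledge = cpu;    // std + sync
 *	            while (__secondary_hold_spinloop != 1)
 *	                    ;                              // 100: spin loop
 *	            pSeries_secondary_smp_init(cpu);       // cpu # in r24
 *
 *	master:     __secondary_hold_spinloop = 1;         // std + sync
 */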
174/* This value is used to mark exception frames on the stack. */
175 .section ".toc","aw"
176exception_marker:
177 .tc ID_72656773_68657265[TC],0x7265677368657265
178 .text
179
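/*
 * (The TC value above is just the ASCII string "regshere" -- 0x72 'r',
 * 0x65 'e', 0x67 'g', 0x73 's', 0x68 'h', 0x65 'e', 0x72 'r', 0x65 'e' --
 * and the hex digits in the symbol name spell out the same bytes.)
 */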
180/*
181 * The following macros define the code that appears as
182 * the prologue to each of the exception handlers. They
183 * are split into two parts to allow a single kernel binary
184 * to be used for pSeries and iSeries.
185 * LOL. One day... - paulus
186 */
187
188/*
189 * We make as much of the exception code common between native
190 * exception handlers (including pSeries LPAR) and iSeries LPAR
191 * implementations as possible.
192 */
193
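/*
 * Concretely: the pSeries prologs below run with relocation off and
 * use mtspr SRR0/SRR1 + rfid to switch the MMU on and enter the
 * virtual-mode *_common handler, while the iSeries prologs are entered
 * with relocation already on and instead fetch the saved SRR0/SRR1
 * from the lppaca before branching to the same common code.
 */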
194/*
195 * This is the start of the interrupt handlers for pSeries
196 * This code runs with relocation off.
197 */
198#define EX_R9 0
199#define EX_R10 8
200#define EX_R11 16
201#define EX_R12 24
202#define EX_R13 32
203#define EX_SRR0 40
204#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
205#define EX_DAR 48
206#define EX_LR 48 /* SLB miss saves LR, but not DAR */
207#define EX_DSISR 56
208#define EX_CCR 60
209
210#define EXCEPTION_PROLOG_PSERIES(area, label) \
211 mfspr r13,SPRG3; /* get paca address into r13 */ \
212 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
213 std r10,area+EX_R10(r13); \
214 std r11,area+EX_R11(r13); \
215 std r12,area+EX_R12(r13); \
216 mfspr r9,SPRG1; \
217 std r9,area+EX_R13(r13); \
218 mfcr r9; \
219 clrrdi r12,r13,32; /* get high part of &label */ \
220 mfmsr r10; \
221 mfspr r11,SRR0; /* save SRR0 */ \
222 ori r12,r12,(label)@l; /* virt addr of handler */ \
223 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
224 mtspr SRR0,r12; \
225 mfspr r12,SRR1; /* and SRR1 */ \
226 mtspr SRR1,r10; \
227 rfid; \
228 b . /* prevent speculative execution */
229
230/*
231 * This is the start of the interrupt handlers for iSeries
232 * This code runs with relocation on.
233 */
234#define EXCEPTION_PROLOG_ISERIES_1(area) \
235 mfspr r13,SPRG3; /* get paca address into r13 */ \
236 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
237 std r10,area+EX_R10(r13); \
238 std r11,area+EX_R11(r13); \
239 std r12,area+EX_R12(r13); \
240 mfspr r9,SPRG1; \
241 std r9,area+EX_R13(r13); \
242 mfcr r9
243
244#define EXCEPTION_PROLOG_ISERIES_2 \
245 mfmsr r10; \
246 ld r11,PACALPPACA+LPPACASRR0(r13); \
247 ld r12,PACALPPACA+LPPACASRR1(r13); \
248 ori r10,r10,MSR_RI; \
249 mtmsrd r10,1
250
251/*
252 * The common exception prolog is used for all except a few exceptions
253 * such as a segment miss on a kernel address. We have to be prepared
254 * to take another exception from the point where we first touch the
255 * kernel stack onwards.
256 *
257 * On entry r13 points to the paca, r9-r13 are saved in the paca,
258 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
259 * SRR1, and relocation is on.
260 */
261#define EXCEPTION_PROLOG_COMMON(n, area) \
262 andi. r10,r12,MSR_PR; /* See if coming from user */ \
263 mr r10,r1; /* Save r1 */ \
264 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
265 beq- 1f; \
266 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2671: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
268 bge- cr1,bad_stack; /* abort if it is */ \
269 std r9,_CCR(r1); /* save CR in stackframe */ \
270 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
271 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
272 std r10,0(r1); /* make stack chain pointer */ \
273 std r0,GPR0(r1); /* save r0 in stackframe */ \
274 std r10,GPR1(r1); /* save r1 in stackframe */ \
275 std r2,GPR2(r1); /* save r2 in stackframe */ \
276 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
277 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
278 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
279 ld r10,area+EX_R10(r13); \
280 std r9,GPR9(r1); \
281 std r10,GPR10(r1); \
282 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
283 ld r10,area+EX_R12(r13); \
284 ld r11,area+EX_R13(r13); \
285 std r9,GPR11(r1); \
286 std r10,GPR12(r1); \
287 std r11,GPR13(r1); \
288 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
289 mflr r9; /* save LR in stackframe */ \
290 std r9,_LINK(r1); \
291 mfctr r10; /* save CTR in stackframe */ \
292 std r10,_CTR(r1); \
293 mfspr r11,XER; /* save XER in stackframe */ \
294 std r11,_XER(r1); \
295 li r9,(n)+1; \
296 std r9,_TRAP(r1); /* set trap number */ \
297 li r10,0; \
298 ld r11,exception_marker@toc(r2); \
299 std r10,RESULT(r1); /* clear regs->result */ \
300 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
301
302/*
303 * Exception vectors.
304 */
305#define STD_EXCEPTION_PSERIES(n, label) \
306 . = n; \
307 .globl label##_pSeries; \
308label##_pSeries: \
309 HMT_MEDIUM; \
310 mtspr SPRG1,r13; /* save r13 */ \
311 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
312
313#define STD_EXCEPTION_ISERIES(n, label, area) \
314 .globl label##_iSeries; \
315label##_iSeries: \
316 HMT_MEDIUM; \
317 mtspr SPRG1,r13; /* save r13 */ \
318 EXCEPTION_PROLOG_ISERIES_1(area); \
319 EXCEPTION_PROLOG_ISERIES_2; \
320 b label##_common
321
322#define MASKABLE_EXCEPTION_ISERIES(n, label) \
323 .globl label##_iSeries; \
324label##_iSeries: \
325 HMT_MEDIUM; \
326 mtspr SPRG1,r13; /* save r13 */ \
327 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
328 lbz r10,PACAPROCENABLED(r13); \
329 cmpwi 0,r10,0; \
330 beq- label##_iSeries_masked; \
331 EXCEPTION_PROLOG_ISERIES_2; \
332 b label##_common; \
333
334#ifdef DO_SOFT_DISABLE
335#define DISABLE_INTS \
336 lbz r10,PACAPROCENABLED(r13); \
337 li r11,0; \
338 std r10,SOFTE(r1); \
339 mfmsr r10; \
340 stb r11,PACAPROCENABLED(r13); \
341 ori r10,r10,MSR_EE; \
342 mtmsrd r10,1
343
344#define ENABLE_INTS \
345 lbz r10,PACAPROCENABLED(r13); \
346 mfmsr r11; \
347 std r10,SOFTE(r1); \
348 ori r11,r11,MSR_EE; \
349 mtmsrd r11,1
350
351#else /* hard enable/disable interrupts */
352#define DISABLE_INTS
353
354#define ENABLE_INTS \
355 ld r12,_MSR(r1); \
356 mfmsr r11; \
357 rlwimi r11,r12,0,MSR_EE; \
358 mtmsrd r11,1
359
360#endif
361
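/*
 * In rough C terms, the soft-disable variants above do (a sketch only;
 * the field names are illustrative):
 *
 *	DISABLE_INTS:	regs->softe = paca->proc_enabled;  // save old state
 *			paca->proc_enabled = 0;            // soft-disable
 *			hard_enable_ee();   // EE stays on, so the masked
 *					    // handlers above still run
 *
 *	ENABLE_INTS:	regs->softe = paca->proc_enabled;
 *			hard_enable_ee();
 */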
362#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
363 .align 7; \
364 .globl label##_common; \
365label##_common: \
366 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
367 DISABLE_INTS; \
368 bl .save_nvgprs; \
369 addi r3,r1,STACK_FRAME_OVERHEAD; \
370 bl hdlr; \
371 b .ret_from_except
372
373#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
374 .align 7; \
375 .globl label##_common; \
376label##_common: \
377 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
378 DISABLE_INTS; \
379 addi r3,r1,STACK_FRAME_OVERHEAD; \
380 bl hdlr; \
381 b .ret_from_except_lite
382
383/*
384 * Start of pSeries system interrupt routines
385 */
386 . = 0x100
387 .globl __start_interrupts
388__start_interrupts:
389
390 STD_EXCEPTION_PSERIES(0x100, system_reset)
391
392 . = 0x200
393_machine_check_pSeries:
394 HMT_MEDIUM
395 mtspr SPRG1,r13 /* save r13 */
396 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
397
398 . = 0x300
399 .globl data_access_pSeries
400data_access_pSeries:
401 HMT_MEDIUM
402 mtspr SPRG1,r13
403BEGIN_FTR_SECTION
404 mtspr SPRG2,r12
405 mfspr r13,DAR
406 mfspr r12,DSISR
407 srdi r13,r13,60
408 rlwimi r13,r12,16,0x20
409 mfcr r12
410 cmpwi r13,0x2c
411 beq .do_stab_bolted_pSeries
412 mtcrf 0x80,r12
413 mfspr r12,SPRG2
414END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
415 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
416
417 . = 0x380
418 .globl data_access_slb_pSeries
419data_access_slb_pSeries:
420 HMT_MEDIUM
421 mtspr SPRG1,r13
422 mfspr r13,SPRG3 /* get paca address into r13 */
423 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
424 std r10,PACA_EXSLB+EX_R10(r13)
425 std r11,PACA_EXSLB+EX_R11(r13)
426 std r12,PACA_EXSLB+EX_R12(r13)
427 std r3,PACA_EXSLB+EX_R3(r13)
428 mfspr r9,SPRG1
429 std r9,PACA_EXSLB+EX_R13(r13)
430 mfcr r9
431 mfspr r12,SRR1 /* and SRR1 */
432 mfspr r3,DAR
433 b .do_slb_miss /* Rel. branch works in real mode */
434
435 STD_EXCEPTION_PSERIES(0x400, instruction_access)
436
437 . = 0x480
438 .globl instruction_access_slb_pSeries
439instruction_access_slb_pSeries:
440 HMT_MEDIUM
441 mtspr SPRG1,r13
442 mfspr r13,SPRG3 /* get paca address into r13 */
443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
444 std r10,PACA_EXSLB+EX_R10(r13)
445 std r11,PACA_EXSLB+EX_R11(r13)
446 std r12,PACA_EXSLB+EX_R12(r13)
447 std r3,PACA_EXSLB+EX_R3(r13)
448 mfspr r9,SPRG1
449 std r9,PACA_EXSLB+EX_R13(r13)
450 mfcr r9
451 mfspr r12,SRR1 /* and SRR1 */
452 mfspr r3,SRR0 /* SRR0 is faulting address */
453 b .do_slb_miss /* Rel. branch works in real mode */
454
455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
456 STD_EXCEPTION_PSERIES(0x600, alignment)
457 STD_EXCEPTION_PSERIES(0x700, program_check)
458 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
459 STD_EXCEPTION_PSERIES(0x900, decrementer)
460 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
461 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
462
463 . = 0xc00
464 .globl system_call_pSeries
465system_call_pSeries:
466 HMT_MEDIUM
467 mr r9,r13
468 mfmsr r10
469 mfspr r13,SPRG3
470 mfspr r11,SRR0
471 clrrdi r12,r13,32
472 oris r12,r12,system_call_common@h
473 ori r12,r12,system_call_common@l
474 mtspr SRR0,r12
475 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
476 mfspr r12,SRR1
477 mtspr SRR1,r10
478 rfid
479 b . /* prevent speculative execution */
480
481 STD_EXCEPTION_PSERIES(0xd00, single_step)
482 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
483
484 /* We need to deal with the Altivec unavailable exception
485 * here, which is at 0xf20 and thus in the middle of the
486 * prolog code of the PerformanceMonitor one. A little
487 * trickery is thus necessary.
488 */
489 . = 0xf00
490 b performance_monitor_pSeries
491
492 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
493
494 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
495 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
496
497 /* moved from 0xf00 */
498 STD_EXCEPTION_PSERIES(0x3000, performance_monitor)
499
500 . = 0x3100
501_GLOBAL(do_stab_bolted_pSeries)
502 mtcrf 0x80,r12
503 mfspr r12,SPRG2
504 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
505
506
507 /* Space for the naca. Architected to be located at real address
508 * NACA_PHYS_ADDR. Various tools rely on this location being fixed.
509 * The first dword of the naca is required by iSeries LPAR to
510 * point to itVpdAreas. On pSeries native, this value is not used.
511 */
512 . = NACA_PHYS_ADDR
513 .globl __end_interrupts
514__end_interrupts:
515#ifdef CONFIG_PPC_ISERIES
516 .globl naca
517naca:
518 .llong itVpdAreas
519
520 /*
521 * The iSeries LPAR map is at this fixed address
522 * so that the HvReleaseData structure can address
523 * it with a 32-bit offset.
524 *
525 * The VSID values below are dependent on the
526 * VSID generation algorithm. See include/asm/mmu_context.h.
527 */
528
529 . = 0x4800
530
531 .llong 2 /* # ESIDs to be mapped by hypervisor */
532 .llong 1 /* # memory ranges to be mapped by hypervisor */
533 .llong STAB0_PAGE /* Page # of segment table within load area */
534 .llong 0 /* Reserved */
535 .llong 0 /* Reserved */
536 .llong 0 /* Reserved */
537 .llong 0 /* Reserved */
538 .llong 0 /* Reserved */
539 .llong (KERNELBASE>>SID_SHIFT)
540 .llong 0x408f92c94 /* KERNELBASE VSID */
541 /* We have to list the bolted VMALLOC segment here, too, so that it
542 * will be restored on shared processor switch */
543 .llong (VMALLOCBASE>>SID_SHIFT)
544 .llong 0xf09b89af5 /* VMALLOCBASE VSID */
545 .llong 8192 /* # pages to map (32 MB) */
546 .llong 0 /* Offset from start of loadarea to start of map */
547 .llong 0x408f92c940000 /* VPN of first page to map */
548
549 . = 0x6100
550
551/*** ISeries-LPAR interrupt handlers ***/
552
553 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
554
555 .globl data_access_iSeries
556data_access_iSeries:
557 mtspr SPRG1,r13
558BEGIN_FTR_SECTION
559 mtspr SPRG2,r12
560 mfspr r13,DAR
561 mfspr r12,DSISR
562 srdi r13,r13,60
563 rlwimi r13,r12,16,0x20
564 mfcr r12
565 cmpwi r13,0x2c
566 beq .do_stab_bolted_iSeries
567 mtcrf 0x80,r12
568 mfspr r12,SPRG2
569END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
570 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
571 EXCEPTION_PROLOG_ISERIES_2
572 b data_access_common
573
574.do_stab_bolted_iSeries:
575 mtcrf 0x80,r12
576 mfspr r12,SPRG2
577 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
578 EXCEPTION_PROLOG_ISERIES_2
579 b .do_stab_bolted
580
581 .globl data_access_slb_iSeries
582data_access_slb_iSeries:
583 mtspr SPRG1,r13 /* save r13 */
584 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
585 std r3,PACA_EXSLB+EX_R3(r13)
586 ld r12,PACALPPACA+LPPACASRR1(r13)
587 mfspr r3,DAR
588 b .do_slb_miss
589
590 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
591
592 .globl instruction_access_slb_iSeries
593instruction_access_slb_iSeries:
594 mtspr SPRG1,r13 /* save r13 */
595 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
596 std r3,PACA_EXSLB+EX_R3(r13)
597 ld r12,PACALPPACA+LPPACASRR1(r13)
598 ld r3,PACALPPACA+LPPACASRR0(r13)
599 b .do_slb_miss
600
601 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
602 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
603 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
604 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
605 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
606 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
607 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
608
609 .globl system_call_iSeries
610system_call_iSeries:
611 mr r9,r13
612 mfspr r13,SPRG3
613 EXCEPTION_PROLOG_ISERIES_2
614 b system_call_common
615
616 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
617 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
618 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
619
620 .globl system_reset_iSeries
621system_reset_iSeries:
622 mfspr r13,SPRG3 /* Get paca address */
623 mfmsr r24
624 ori r24,r24,MSR_RI
625 mtmsrd r24 /* RI on */
626 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
627 cmpwi 0,r24,0 /* Are we processor 0? */
628 beq .__start_initialization_iSeries /* Start up the first processor */
629 mfspr r4,CTRLF
630 li r5,RUNLATCH /* Turn off the run light */
631 andc r4,r4,r5
632 mtspr CTRLT,r4
633
6341:
635 HMT_LOW
636#ifdef CONFIG_SMP
637 lbz r23,PACAPROCSTART(r13) /* Test if this processor
638 * should start */
639 sync
640 LOADADDR(r3,current_set)
641 sldi r28,r24,3 /* get current_set[cpu#] */
642 ldx r3,r3,r28
643 addi r1,r3,THREAD_SIZE
644 subi r1,r1,STACK_FRAME_OVERHEAD
645
646 cmpwi 0,r23,0
647 beq iSeries_secondary_smp_loop /* Loop until told to go */
648#ifdef SECONDARY_PROCESSORS
649 bne .__secondary_start /* Told to go: start this cpu */
650#endif
651iSeries_secondary_smp_loop:
652 /* Let the Hypervisor know we are alive */
653 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
654 lis r3,0x8002
655 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
656#else /* CONFIG_SMP */
657 /* Yield the processor. This is required for non-SMP kernels
658 which are running on multi-threaded machines. */
659 lis r3,0x8000
660 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
661 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
662 li r4,0 /* "yield timed" */
663 li r5,-1 /* "yield forever" */
664#endif /* CONFIG_SMP */
665 li r0,-1 /* r0=-1 indicates a Hypervisor call */
666 sc /* Invoke the hypervisor via a system call */
667 mfspr r13,SPRG3 /* Put r13 back ???? */
668 b 1b /* If SMP not configured, secondaries
669 * loop forever */
670
671 .globl decrementer_iSeries_masked
672decrementer_iSeries_masked:
673 li r11,1
674 stb r11,PACALPPACA+LPPACADECRINT(r13)
675 lwz r12,PACADEFAULTDECR(r13)
676 mtspr SPRN_DEC,r12
677 /* fall through */
678
679 .globl hardware_interrupt_iSeries_masked
680hardware_interrupt_iSeries_masked:
681 mtcrf 0x80,r9 /* Restore regs */
682 ld r11,PACALPPACA+LPPACASRR0(r13)
683 ld r12,PACALPPACA+LPPACASRR1(r13)
684 mtspr SRR0,r11
685 mtspr SRR1,r12
686 ld r9,PACA_EXGEN+EX_R9(r13)
687 ld r10,PACA_EXGEN+EX_R10(r13)
688 ld r11,PACA_EXGEN+EX_R11(r13)
689 ld r12,PACA_EXGEN+EX_R12(r13)
690 ld r13,PACA_EXGEN+EX_R13(r13)
691 rfid
692 b . /* prevent speculative execution */
693#endif
694
695/*
696 * Data area reserved for FWNMI option.
697 */
698 . = 0x7000
699 .globl fwnmi_data_area
700fwnmi_data_area:
701
702/*
703 * Vectors for the FWNMI option. Share common code.
704 */
705 . = 0x8000
706 .globl system_reset_fwnmi
707system_reset_fwnmi:
708 HMT_MEDIUM
709 mtspr SPRG1,r13 /* save r13 */
710 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
711 .globl machine_check_fwnmi
712machine_check_fwnmi:
713 HMT_MEDIUM
714 mtspr SPRG1,r13 /* save r13 */
715 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
716
717 /*
718 * Space for the initial segment table
719 * For LPAR, the hypervisor must fill in at least one entry
720 * before we get control (with relocate on)
721 */
722 . = STAB0_PHYS_ADDR
723 .globl __start_stab
724__start_stab:
725
726 . = (STAB0_PHYS_ADDR + PAGE_SIZE)
727 .globl __end_stab
728__end_stab:
729
730
731/*** Common interrupt handlers ***/
732
733 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
734
735 /*
736 * Machine check is different because we use a different
737 * save area: PACA_EXMC instead of PACA_EXGEN.
738 */
739 .align 7
740 .globl machine_check_common
741machine_check_common:
742 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
743 DISABLE_INTS
744 bl .save_nvgprs
745 addi r3,r1,STACK_FRAME_OVERHEAD
746 bl .machine_check_exception
747 b .ret_from_except
748
749 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
750 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
751 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
752 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
753 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
754 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
755 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
756#ifdef CONFIG_ALTIVEC
757 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
758#else
759 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
760#endif
761
762/*
763 * Here we have detected that the kernel stack pointer is bad.
764 * R9 contains the saved CR, r13 points to the paca,
765 * r10 contains the (bad) kernel stack pointer,
766 * r11 and r12 contain the saved SRR0 and SRR1.
767 * We switch to using the paca guard page as an emergency stack,
768 * save the registers there, and call kernel_bad_stack(), which panics.
769 */
770bad_stack:
771 ld r1,PACAEMERGSP(r13)
772 subi r1,r1,64+INT_FRAME_SIZE
773 std r9,_CCR(r1)
774 std r10,GPR1(r1)
775 std r11,_NIP(r1)
776 std r12,_MSR(r1)
777 mfspr r11,DAR
778 mfspr r12,DSISR
779 std r11,_DAR(r1)
780 std r12,_DSISR(r1)
781 mflr r10
782 mfctr r11
783 mfxer r12
784 std r10,_LINK(r1)
785 std r11,_CTR(r1)
786 std r12,_XER(r1)
787 SAVE_GPR(0,r1)
788 SAVE_GPR(2,r1)
789 SAVE_4GPRS(3,r1)
790 SAVE_2GPRS(7,r1)
791 SAVE_10GPRS(12,r1)
792 SAVE_10GPRS(22,r1)
793 addi r11,r1,INT_FRAME_SIZE
794 std r11,0(r1)
795 li r12,0
796 std r12,0(r11)
797 ld r2,PACATOC(r13)
7981: addi r3,r1,STACK_FRAME_OVERHEAD
799 bl .kernel_bad_stack
800 b 1b
801
802/*
803 * Return from an exception with minimal checks.
804 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
805 * If interrupts have been enabled, or anything has been
806 * done that might have changed the scheduling status of
807 * any task or sent any task a signal, you should use
808 * ret_from_except or ret_from_except_lite instead of this.
809 */
810fast_exception_return:
811 ld r12,_MSR(r1)
812 ld r11,_NIP(r1)
813 andi. r3,r12,MSR_RI /* check if RI is set */
814 beq- unrecov_fer
815 ld r3,_CCR(r1)
816 ld r4,_LINK(r1)
817 ld r5,_CTR(r1)
818 ld r6,_XER(r1)
819 mtcr r3
820 mtlr r4
821 mtctr r5
822 mtxer r6
823 REST_GPR(0, r1)
824 REST_8GPRS(2, r1)
825
826 mfmsr r10
827 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
828 mtmsrd r10,1
829
830 mtspr SRR1,r12
831 mtspr SRR0,r11
832 REST_4GPRS(10, r1)
833 ld r1,GPR1(r1)
834 rfid
835 b . /* prevent speculative execution */
836
837unrecov_fer:
838 bl .save_nvgprs
8391: addi r3,r1,STACK_FRAME_OVERHEAD
840 bl .unrecoverable_exception
841 b 1b
842
843/*
844 * Here r13 points to the paca, r9 contains the saved CR,
845 * SRR0 and SRR1 are saved in r11 and r12,
846 * r9 - r13 are saved in paca->exgen.
847 */
848 .align 7
849 .globl data_access_common
850data_access_common:
851 mfspr r10,DAR
852 std r10,PACA_EXGEN+EX_DAR(r13)
853 mfspr r10,DSISR
854 stw r10,PACA_EXGEN+EX_DSISR(r13)
855 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
856 ld r3,PACA_EXGEN+EX_DAR(r13)
857 lwz r4,PACA_EXGEN+EX_DSISR(r13)
858 li r5,0x300
859 b .do_hash_page /* Try to handle as hpte fault */
860
861 .align 7
862 .globl instruction_access_common
863instruction_access_common:
864 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
865 ld r3,_NIP(r1)
866 andis. r4,r12,0x5820
867 li r5,0x400
868 b .do_hash_page /* Try to handle as hpte fault */
869
870 .align 7
871 .globl hardware_interrupt_common
872 .globl hardware_interrupt_entry
873hardware_interrupt_common:
874 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
875hardware_interrupt_entry:
876 DISABLE_INTS
877 addi r3,r1,STACK_FRAME_OVERHEAD
878 bl .do_IRQ
879 b .ret_from_except_lite
880
881 .align 7
882 .globl alignment_common
883alignment_common:
884 mfspr r10,DAR
885 std r10,PACA_EXGEN+EX_DAR(r13)
886 mfspr r10,DSISR
887 stw r10,PACA_EXGEN+EX_DSISR(r13)
888 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
889 ld r3,PACA_EXGEN+EX_DAR(r13)
890 lwz r4,PACA_EXGEN+EX_DSISR(r13)
891 std r3,_DAR(r1)
892 std r4,_DSISR(r1)
893 bl .save_nvgprs
894 addi r3,r1,STACK_FRAME_OVERHEAD
895 ENABLE_INTS
896 bl .alignment_exception
897 b .ret_from_except
898
899 .align 7
900 .globl program_check_common
901program_check_common:
902 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
903 bl .save_nvgprs
904 addi r3,r1,STACK_FRAME_OVERHEAD
905 ENABLE_INTS
906 bl .program_check_exception
907 b .ret_from_except
908
909 .align 7
910 .globl fp_unavailable_common
911fp_unavailable_common:
912 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
913 bne .load_up_fpu /* if from user, just load it up */
914 bl .save_nvgprs
915 addi r3,r1,STACK_FRAME_OVERHEAD
916 ENABLE_INTS
917 bl .kernel_fp_unavailable_exception
918 BUG_OPCODE
919
920 .align 7
921 .globl altivec_unavailable_common
922altivec_unavailable_common:
923 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
924#ifdef CONFIG_ALTIVEC
925 bne .load_up_altivec /* if from user, just load it up */
926#endif
927 bl .save_nvgprs
928 addi r3,r1,STACK_FRAME_OVERHEAD
929 ENABLE_INTS
930 bl .altivec_unavailable_exception
931 b .ret_from_except
932
933/*
934 * Hash table stuff
935 */
936 .align 7
937_GLOBAL(do_hash_page)
938 std r3,_DAR(r1)
939 std r4,_DSISR(r1)
940
941 andis. r0,r4,0xa450 /* weird error? */
942 bne- .handle_page_fault /* if so, skip the HPTE insertion */
943BEGIN_FTR_SECTION
944 andis. r0,r4,0x0020 /* Is it a segment table fault? */
945 bne- .do_ste_alloc /* If so handle it */
946END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
947
948 /*
949 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
950 * accessing a userspace segment (even from the kernel). We assume
951 * kernel addresses always have the high bit set.
952 */
953 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
954 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
955 orc r0,r12,r0 /* MSR_PR | ~high_bit */
956 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
957 ori r4,r4,1 /* add _PAGE_PRESENT */
958 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
959
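	/*
	 * In rough C terms, the six bit-twiddling instructions above
	 * compute (a sketch; the variable names are illustrative):
	 *
	 *	access  = dsisr_store_bit ? _PAGE_RW : 0;
	 *	if ((msr & MSR_PR) || !(ea >> 63))   // user, or EA not kernel
	 *		access |= _PAGE_USER;
	 *	access |= _PAGE_PRESENT;
	 *	if (trap == 0x400)                   // instruction fault
	 *		access |= _PAGE_EXEC;
	 */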
960 /*
961 * On iSeries, we soft-disable interrupts here, then
962 * hard-enable interrupts so that the hash_page code can spin on
963 * the hash_table_lock without problems on a shared processor.
964 */
965 DISABLE_INTS
966
967 /*
968 * r3 contains the faulting address
969 * r4 contains the required access permissions
970 * r5 contains the trap number
971 *
972 * at return r3 = 0 for success
973 */
974 bl .hash_page /* build HPTE if possible */
975 cmpdi r3,0 /* see if hash_page succeeded */
976
977#ifdef DO_SOFT_DISABLE
978 /*
979 * If we had interrupts soft-enabled at the point where the
980 * DSI/ISI occurred, and an interrupt came in during hash_page,
981 * handle it now.
982 * We jump to ret_from_except_lite rather than fast_exception_return
983 * because ret_from_except_lite will check for and handle pending
984 * interrupts if necessary.
985 */
986 beq .ret_from_except_lite
987 /* For a hash failure, we don't bother re-enabling interrupts */
988 ble- 12f
989
990 /*
991 * hash_page couldn't handle it, set soft interrupt enable back
992 * to what it was before the trap. Note that .local_irq_restore
993 * handles any interrupts pending at this point.
994 */
995 ld r3,SOFTE(r1)
996 bl .local_irq_restore
997 b 11f
998#else
999 beq fast_exception_return /* Return from exception on success */
1000 ble- 12f /* Failure return from hash_page */
1001
1002 /* fall through */
1003#endif
1004
1005/* Here we have a page fault that hash_page can't handle. */
1006_GLOBAL(handle_page_fault)
1007 ENABLE_INTS
100811: ld r4,_DAR(r1)
1009 ld r5,_DSISR(r1)
1010 addi r3,r1,STACK_FRAME_OVERHEAD
1011 bl .do_page_fault
1012 cmpdi r3,0
1013 beq+ .ret_from_except_lite
1014 bl .save_nvgprs
1015 mr r5,r3
1016 addi r3,r1,STACK_FRAME_OVERHEAD
1017 lwz r4,_DAR(r1)
1018 bl .bad_page_fault
1019 b .ret_from_except
1020
1021/* We have a page fault that hash_page could handle but HV refused
1022 * the PTE insertion
1023 */
102412: bl .save_nvgprs
1025 addi r3,r1,STACK_FRAME_OVERHEAD
1026 lwz r4,_DAR(r1)
1027 bl .low_hash_fault
1028 b .ret_from_except
1029
1030 /* here we have a segment miss */
1031_GLOBAL(do_ste_alloc)
1032 bl .ste_allocate /* try to insert stab entry */
1033 cmpdi r3,0
1034 beq+ fast_exception_return
1035 b .handle_page_fault
1036
1037/*
1038 * r13 points to the PACA, r9 contains the saved CR,
1039 * r11 and r12 contain the saved SRR0 and SRR1.
1040 * r9 - r13 are saved in paca->exslb.
1041 * We assume we aren't going to take any exceptions during this procedure.
1042 * We assume (DAR >> 60) == 0xc.
1043 */
1044 .align 7
1045_GLOBAL(do_stab_bolted)
1046 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1047 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1048
1049 /* Hash to the primary group */
1050 ld r10,PACASTABVIRT(r13)
1051 mfspr r11,DAR
1052 srdi r11,r11,28
1053 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1054
1055 /* Calculate VSID */
1056 /* This is a kernel address, so protovsid = ESID */
1057 ASM_VSID_SCRAMBLE(r11, r9)
1058 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1059
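	/*
	 * ASM_VSID_SCRAMBLE is the assembly form of the multiplicative
	 * VSID hash (see include/asm-ppc64/mmu_context.h); as a sketch:
	 *
	 *	vsid = (protovsid * VSID_MULTIPLIER) % VSID_MODULUS;
	 *
	 * and for a kernel address the protovsid is just the ESID.
	 */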
1060 /* Search the primary group for a free entry */
10611: ld r11,0(r10) /* Test valid bit of the current ste */
1062 andi. r11,r11,0x80
1063 beq 2f
1064 addi r10,r10,16
1065 andi. r11,r10,0x70
1066 bne 1b
1067
1068 /* Stick to searching only the primary group for now. */
1069 /* At least for now, we use a very simple random castout scheme */
1070 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1071 mftb r11
1072 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1073 ori r11,r11,0x10
1074
1075 /* r10 currently points to an ste one past the group of interest */
1076 /* make it point to the randomly selected entry */
1077 subi r10,r10,128
1078 or r10,r10,r11 /* r10 is the entry to invalidate */
1079
1080 isync /* mark the entry invalid */
1081 ld r11,0(r10)
1082 rldicl r11,r11,56,1 /* clear the valid bit */
1083 rotldi r11,r11,8
1084 std r11,0(r10)
1085 sync
1086
1087 clrrdi r11,r11,28 /* Get the esid part of the ste */
1088 slbie r11
1089
10902: std r9,8(r10) /* Store the vsid part of the ste */
1091 eieio
1092
1093 mfspr r11,DAR /* Get the new esid */
1094 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1095 ori r11,r11,0x90 /* Turn on valid and kp */
1096 std r11,0(r10) /* Put new entry back into the stab */
1097
1098 sync
1099
1100 /* All done -- return from exception. */
1101 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1102 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1103
1104 andi. r10,r12,MSR_RI
1105 beq- unrecov_slb
1106
1107 mtcrf 0x80,r9 /* restore CR */
1108
1109 mfmsr r10
1110 clrrdi r10,r10,2
1111 mtmsrd r10,1
1112
1113 mtspr SRR0,r11
1114 mtspr SRR1,r12
1115 ld r9,PACA_EXSLB+EX_R9(r13)
1116 ld r10,PACA_EXSLB+EX_R10(r13)
1117 ld r11,PACA_EXSLB+EX_R11(r13)
1118 ld r12,PACA_EXSLB+EX_R12(r13)
1119 ld r13,PACA_EXSLB+EX_R13(r13)
1120 rfid
1121 b . /* prevent speculative execution */
1122
1123/*
1124 * r13 points to the PACA, r9 contains the saved CR,
1125 * r11 and r12 contain the saved SRR0 and SRR1.
1126 * r3 has the faulting address
1127 * r9 - r13 are saved in paca->exslb.
1128 * r3 is saved in paca->slb_r3
1129 * We assume we aren't going to take any exceptions during this procedure.
1130 */
1131_GLOBAL(do_slb_miss)
1132 mflr r10
1133
1134 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1135 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1136
1137 bl .slb_allocate /* handle it */
1138
1139 /* All done -- return from exception. */
1140
1141 ld r10,PACA_EXSLB+EX_LR(r13)
1142 ld r3,PACA_EXSLB+EX_R3(r13)
1143 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1144#ifdef CONFIG_PPC_ISERIES
1145 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1146#endif /* CONFIG_PPC_ISERIES */
1147
1148 mtlr r10
1149
1150 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1151 beq- unrecov_slb
1152
1153.machine push
1154.machine "power4"
1155 mtcrf 0x80,r9
1156 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1157.machine pop
1158
1159#ifdef CONFIG_PPC_ISERIES
1160 mtspr SRR0,r11
1161 mtspr SRR1,r12
1162#endif /* CONFIG_PPC_ISERIES */
1163 ld r9,PACA_EXSLB+EX_R9(r13)
1164 ld r10,PACA_EXSLB+EX_R10(r13)
1165 ld r11,PACA_EXSLB+EX_R11(r13)
1166 ld r12,PACA_EXSLB+EX_R12(r13)
1167 ld r13,PACA_EXSLB+EX_R13(r13)
1168 rfid
1169 b . /* prevent speculative execution */
1170
1171unrecov_slb:
1172 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1173 DISABLE_INTS
1174 bl .save_nvgprs
11751: addi r3,r1,STACK_FRAME_OVERHEAD
1176 bl .unrecoverable_exception
1177 b 1b
1178
1179
1180/*
1181 * On pSeries, secondary processors spin in the following code.
1182 * At entry, r3 = this processor's number (physical cpu id)
1183 */
1184_GLOBAL(pSeries_secondary_smp_init)
1185 mr r24,r3
1186
1187 /* turn on 64-bit mode */
1188 bl .enable_64b_mode
1189 isync
1190
1191 /* Copy some CPU settings from CPU 0 */
1192 bl .__restore_cpu_setup
1193
1194 /* Set up a paca value for this processor. Since we have the
1195 * physical cpu id in r3, we need to search the pacas to find
1196 * which logical id maps to our physical one.
1197 */
1198 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1199 li r5,0 /* logical cpu id */
12001: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1201 cmpw r6,r24 /* Compare to our id */
1202 beq 2f
1203 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1204 addi r5,r5,1
1205 cmpwi r5,NR_CPUS
1206 blt 1b
1207
120899: HMT_LOW /* Couldn't find our CPU id */
1209 b 99b
1210
12112: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1212 /* From now on, r24 is expected to be the logical cpuid */
1213 mr r24,r5
12143: HMT_LOW
1215 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1216 /* start. */
1217 sync
1218
1219 /* Create a temp kernel stack for use before relocation is on. */
1220 ld r1,PACAEMERGSP(r13)
1221 subi r1,r1,STACK_FRAME_OVERHEAD
1222
1223 cmpwi 0,r23,0
1224#ifdef CONFIG_SMP
1225#ifdef SECONDARY_PROCESSORS
1226 bne .__secondary_start
1227#endif
1228#endif
1229 b 3b /* Loop until told to go */
1230
1231#ifdef CONFIG_PPC_ISERIES
1232_STATIC(__start_initialization_iSeries)
1233 /* Clear out the BSS */
1234 LOADADDR(r11,__bss_stop)
1235 LOADADDR(r8,__bss_start)
1236 sub r11,r11,r8 /* bss size */
1237 addi r11,r11,7 /* round up to an even double word */
1238 rldicl. r11,r11,61,3 /* shift right by 3 */
1239 beq 4f
1240 addi r8,r8,-8
1241 li r0,0
1242 mtctr r11 /* zero this many doublewords */
12433: stdu r0,8(r8)
1244 bdnz 3b
12454:
1246 LOADADDR(r1,init_thread_union)
1247 addi r1,r1,THREAD_SIZE
1248 li r0,0
1249 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1250
1251 LOADADDR(r3,cpu_specs)
1252 LOADADDR(r4,cur_cpu_spec)
1253 li r5,0
1254 bl .identify_cpu
1255
1256 LOADADDR(r2,__toc_start)
1257 addi r2,r2,0x4000
1258 addi r2,r2,0x4000
1259
1260 bl .iSeries_early_setup
1261
1262 /* relocation is on at this point */
1263
1264 b .start_here_common
1265#endif /* CONFIG_PPC_ISERIES */
1266
1267#ifdef CONFIG_PPC_MULTIPLATFORM
1268
1269_STATIC(__mmu_off)
1270 mfmsr r3
1271 andi. r0,r3,MSR_IR|MSR_DR
1272 beqlr
1273 andc r3,r3,r0
1274 mtspr SPRN_SRR0,r4
1275 mtspr SPRN_SRR1,r3
1276 sync
1277 rfid
1278 b . /* prevent speculative execution */
1279
1280
1281/*
1282 * Here is our main kernel entry point. We currently support 2 kinds of entry,
1283 * depending on the value of r5.
1284 *
1285 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1286 * in r3...r7
1287 *
1288 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1289 * DT block, r4 is a physical pointer to the kernel itself
1290 *
1291 */
1292_GLOBAL(__start_initialization_multiplatform)
1293 /*
1294 * Are we booted from a PROM OF-type client interface?
1295 */
1296 cmpldi cr0,r5,0
1297 bne .__boot_from_prom /* yes -> prom */
1298
1299 /* Save parameters */
1300 mr r31,r3
1301 mr r30,r4
1302
1303 /* Make sure we are running in 64-bit mode */
1304 bl .enable_64b_mode
1305
1306 /* Setup some critical 970 SPRs before switching MMU off */
1307 bl .__970_cpu_preinit
1308
1309 /* cpu # */
1310 li r24,0
1311
1312 /* Switch off MMU if not already */
1313 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1314 add r4,r4,r30
1315 bl .__mmu_off
1316 b .__after_prom_start
1317
1318_STATIC(__boot_from_prom)
1319 /* Save parameters */
1320 mr r31,r3
1321 mr r30,r4
1322 mr r29,r5
1323 mr r28,r6
1324 mr r27,r7
1325
1326 /* Make sure we are running in 64-bit mode */
1327 bl .enable_64b_mode
1328
1329 /* put a relocation offset into r3 */
1330 bl .reloc_offset
1331
1332 LOADADDR(r2,__toc_start)
1333 addi r2,r2,0x4000
1334 addi r2,r2,0x4000
1335
1336 /* Relocate the TOC from a virt addr to a real addr */
1337 sub r2,r2,r3
1338
1339 /* Restore parameters */
1340 mr r3,r31
1341 mr r4,r30
1342 mr r5,r29
1343 mr r6,r28
1344 mr r7,r27
1345
1346 /* Do all of the interaction with OF client interface */
1347 bl .prom_init
1348 /* We never return */
1349 trap
1350
1351/*
1352 * At this point, r3 contains the physical address we are running at,
1353 * returned by prom_init()
1354 */
1355_STATIC(__after_prom_start)
1356
1357/*
1358 * We need to run with __start at physical address 0.
1359 * This will leave some code in the first 256B of
1360 * real memory, which is reserved for software use.
1361 * The remainder of the first page is loaded with the fixed
1362 * interrupt vectors. The next two pages are filled with
1363 * unknown exception placeholders.
1364 *
1365 * Note: This process overwrites the OF exception vectors.
1366 * r26 == relocation offset
1367 * r27 == KERNELBASE
1368 */
1369 bl .reloc_offset
1370 mr r26,r3
1371 SET_REG_TO_CONST(r27,KERNELBASE)
1372
1373 li r3,0 /* target addr */
1374
1375 // XXX FIXME: Use phys returned by OF (r30)
1376 sub r4,r27,r26 /* source addr */
1377 /* current address of _start */
1378 /* i.e. where we are running */
1379 /* the source addr */
1380
1381 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1382 sub r5,r5,r27
1383
1384 li r6,0x100 /* Start offset, the first 0x100 */
1385 /* bytes were copied earlier. */
1386
1387 bl .copy_and_flush /* copy the first n bytes */
1388 /* this includes the code being */
1389 /* executed here. */
1390
1391 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1392 mtctr r0 /* that we just made/relocated */
1393 bctr
1394
13954: LOADADDR(r5,klimit)
1396 sub r5,r5,r26
1397 ld r5,0(r5) /* get the value of klimit */
1398 sub r5,r5,r27
1399 bl .copy_and_flush /* copy the rest */
1400 b .start_here_multiplatform
1401
1402#endif /* CONFIG_PPC_MULTIPLATFORM */
1403
1404/*
1405 * Copy routine used to copy the kernel to start at physical address 0
1406 * and flush and invalidate the caches as needed.
1407 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1408 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1409 *
1410 * Note: this routine *only* clobbers r0, r6 and lr
1411 */
1412_GLOBAL(copy_and_flush)
1413 addi r5,r5,-8
1414 addi r6,r6,-8
14154: li r0,16 /* Use the least common */
1416 /* denominator cache line */
1417 /* size. This results in */
1418 /* extra cache line flushes */
1419 /* but operation is correct. */
1420 /* Can't get cache line size */
1421 /* from NACA as it is being */
1422 /* moved too. */
1423
1424 mtctr r0 /* put # words/line in ctr */
14253: addi r6,r6,8 /* copy a cache line */
1426 ldx r0,r6,r4
1427 stdx r0,r6,r3
1428 bdnz 3b
1429 dcbst r6,r3 /* write it to memory */
1430 sync
1431 icbi r6,r3 /* flush the icache line */
1432 cmpld 0,r6,r5
1433 blt 4b
1434 sync
1435 addi r5,r5,8
1436 addi r6,r6,8
1437 blr
1438
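/*
 * As a rough C sketch of the loop above (16 doublewords, i.e. one
 * conservative 128-byte "line", per outer iteration):
 *
 *	while (off < limit) {
 *		for (i = 0; i < 16; i++, off += 8)
 *			*(u64 *)(dest + off) = *(u64 *)(src + off);
 *		dcbst(dest, off);   // push the copied data to memory
 *		sync();             // order the stores before the icbi
 *		icbi(dest, off);    // invalidate any stale icache line
 *	}
 */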
1439.align 8
1440copy_to_here:
1441
1442/*
1443 * load_up_fpu(unused, unused, tsk)
1444 * Disable FP for the task which had the FPU previously,
1445 * and save its floating-point registers in its thread_struct.
1446 * Enables the FPU for use in the kernel on return.
1447 * On SMP we know the fpu is free, since we give it up every
1448 * switch (ie, no lazy save of the FP registers).
1449 * On entry: r13 == 'current' && last_task_used_math != 'current'
1450 */
1451_STATIC(load_up_fpu)
1452 mfmsr r5 /* grab the current MSR */
1453 ori r5,r5,MSR_FP
1454 mtmsrd r5 /* enable use of fpu now */
1455 isync
1456/*
1457 * For SMP, we don't do lazy FPU switching because it just gets too
1458 * horrendously complex, especially when a task switches from one CPU
1459 * to another. Instead we call giveup_fpu in switch_to.
1460 *
1461 */
1462#ifndef CONFIG_SMP
1463 ld r3,last_task_used_math@got(r2)
1464 ld r4,0(r3)
1465 cmpdi 0,r4,0
1466 beq 1f
1467 /* Save FP state to last_task_used_math's THREAD struct */
1468 addi r4,r4,THREAD
1469 SAVE_32FPRS(0, r4)
1470 mffs fr0
1471 stfd fr0,THREAD_FPSCR(r4)
1472 /* Disable FP for last_task_used_math */
1473 ld r5,PT_REGS(r4)
1474 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1475 li r6,MSR_FP|MSR_FE0|MSR_FE1
1476 andc r4,r4,r6
1477 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
14781:
1479#endif /* CONFIG_SMP */
1480 /* enable use of FP after return */
1481 ld r4,PACACURRENT(r13)
1482 addi r5,r4,THREAD /* Get THREAD */
1483 ld r4,THREAD_FPEXC_MODE(r5)
1484 ori r12,r12,MSR_FP
1485 or r12,r12,r4
1486 std r12,_MSR(r1)
1487 lfd fr0,THREAD_FPSCR(r5)
1488 mtfsf 0xff,fr0
1489 REST_32FPRS(0, r5)
1490#ifndef CONFIG_SMP
1491 /* Update last_task_used_math to 'current' */
1492 subi r4,r5,THREAD /* Back to 'current' */
1493 std r4,0(r3)
1494#endif /* CONFIG_SMP */
1495 /* restore registers and return */
1496 b fast_exception_return
1497
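/*
 * The !CONFIG_SMP path above is classic lazy FPU switching; as a C
 * sketch (the helper names are illustrative):
 *
 *	if (last_task_used_math) {
 *		save_fprs_and_fpscr(&last_task_used_math->thread);
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);
 *	}
 *	regs->msr |= MSR_FP | current->thread.fpexc_mode;  // FP on return
 *	restore_fpscr_and_fprs(&current->thread);
 *	last_task_used_math = current;
 */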
1498/*
1499 * disable_kernel_fp()
1500 * Disable the FPU.
1501 */
1502_GLOBAL(disable_kernel_fp)
1503 mfmsr r3
1504 rldicl r0,r3,(63-MSR_FP_LG),1
1505 rldicl r3,r0,(MSR_FP_LG+1),0
1506 mtmsrd r3 /* disable use of fpu now */
1507 isync
1508 blr
1509
1510/*
1511 * giveup_fpu(tsk)
1512 * Disable FP for the task given as the argument,
1513 * and save the floating-point registers in its thread_struct.
1514 * Enables the FPU for use in the kernel on return.
1515 */
1516_GLOBAL(giveup_fpu)
1517 mfmsr r5
1518 ori r5,r5,MSR_FP
1519 mtmsrd r5 /* enable use of fpu now */
1520 isync
1521 cmpdi 0,r3,0
1522 beqlr- /* if no previous owner, done */
1523 addi r3,r3,THREAD /* want THREAD of task */
1524 ld r5,PT_REGS(r3)
1525 cmpdi 0,r5,0
1526 SAVE_32FPRS(0, r3)
1527 mffs fr0
1528 stfd fr0,THREAD_FPSCR(r3)
1529 beq 1f
1530 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1531 li r3,MSR_FP|MSR_FE0|MSR_FE1
1532 andc r4,r4,r3 /* disable FP for previous task */
1533 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15341:
1535#ifndef CONFIG_SMP
1536 li r5,0
1537 ld r4,last_task_used_math@got(r2)
1538 std r5,0(r4)
1539#endif /* CONFIG_SMP */
1540 blr
1541
1542
1543#ifdef CONFIG_ALTIVEC
1544
1545/*
1546 * load_up_altivec(unused, unused, tsk)
1547 * Disable VMX for the task which had it previously,
1548 * and save its vector registers in its thread_struct.
1549 * Enables the VMX for use in the kernel on return.
1550 * On SMP we know the VMX is free, since we give it up every
1551 * switch (ie, no lazy save of the vector registers).
1552 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1553 */
1554_STATIC(load_up_altivec)
1555 mfmsr r5 /* grab the current MSR */
1556 oris r5,r5,MSR_VEC@h
1557 mtmsrd r5 /* enable use of VMX now */
1558 isync
1559
1560/*
1561 * For SMP, we don't do lazy VMX switching because it just gets too
1562 * horrendously complex, especially when a task switches from one CPU
1563 * to another. Instead we call giveup_altvec in switch_to.
1564 * VRSAVE isn't dealt with here, that is done in the normal context
1565 * switch code. Note that we could rely on vrsave value to eventually
1566 * avoid saving all of the VREGs here...
1567 */
1568#ifndef CONFIG_SMP
1569 ld r3,last_task_used_altivec@got(r2)
1570 ld r4,0(r3)
1571 cmpdi 0,r4,0
1572 beq 1f
1573 /* Save VMX state to last_task_used_altivec's THREAD struct */
1574 addi r4,r4,THREAD
1575 SAVE_32VRS(0,r5,r4)
1576 mfvscr vr0
1577 li r10,THREAD_VSCR
1578 stvx vr0,r10,r4
1579 /* Disable VMX for last_task_used_altivec */
1580 ld r5,PT_REGS(r4)
1581 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1582 lis r6,MSR_VEC@h
1583 andc r4,r4,r6
1584 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15851:
1586#endif /* CONFIG_SMP */
1587 /* Hack: if we get an altivec unavailable trap with VRSAVE
1588 * set to all zeros, we assume this is a broken application
1589 * that fails to set it properly, and thus we switch it to
1590 * all 1's
1591 */
1592 mfspr r4,SPRN_VRSAVE
1593 cmpdi 0,r4,0
1594 bne+ 1f
1595 li r4,-1
1596 mtspr SPRN_VRSAVE,r4
15971:
1598 /* enable use of VMX after return */
1599 ld r4,PACACURRENT(r13)
1600 addi r5,r4,THREAD /* Get THREAD */
1601 oris r12,r12,MSR_VEC@h
1602 std r12,_MSR(r1)
1603 li r4,1
1604 li r10,THREAD_VSCR
1605 stw r4,THREAD_USED_VR(r5)
1606 lvx vr0,r10,r5
1607 mtvscr vr0
1608 REST_32VRS(0,r4,r5)
1609#ifndef CONFIG_SMP
1610 /* Update last_task_used_altivec to 'current' */
1611 subi r4,r5,THREAD /* Back to 'current' */
1612 std r4,0(r3)
1613#endif /* CONFIG_SMP */
1614 /* restore registers and return */
1615 b fast_exception_return
1616
1617/*
1618 * disable_kernel_altivec()
1619 * Disable the VMX.
1620 */
1621_GLOBAL(disable_kernel_altivec)
1622 mfmsr r3
1623 rldicl r0,r3,(63-MSR_VEC_LG),1
1624 rldicl r3,r0,(MSR_VEC_LG+1),0
1625 mtmsrd r3 /* disable use of VMX now */
1626 isync
1627 blr
1628
1629/*
1630 * giveup_altivec(tsk)
1631 * Disable VMX for the task given as the argument,
1632 * and save the vector registers in its thread_struct.
1633 * Enables the VMX for use in the kernel on return.
1634 */
1635_GLOBAL(giveup_altivec)
1636 mfmsr r5
1637 oris r5,r5,MSR_VEC@h
1638 mtmsrd r5 /* enable use of VMX now */
1639 isync
1640 cmpdi 0,r3,0
1641 beqlr- /* if no previous owner, done */
1642 addi r3,r3,THREAD /* want THREAD of task */
1643 ld r5,PT_REGS(r3)
1644 cmpdi 0,r5,0
1645 SAVE_32VRS(0,r4,r3)
1646 mfvscr vr0
1647 li r4,THREAD_VSCR
1648 stvx vr0,r4,r3
1649 beq 1f
1650 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1651 lis r3,MSR_VEC@h
1652 andc r4,r4,r3 /* disable VMX for previous task */
1653 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
16541:
1655#ifndef CONFIG_SMP
1656 li r5,0
1657 ld r4,last_task_used_altivec@got(r2)
1658 std r5,0(r4)
1659#endif /* CONFIG_SMP */
1660 blr
1661
1662#endif /* CONFIG_ALTIVEC */
1663
1664#ifdef CONFIG_SMP
1665#ifdef CONFIG_PPC_PMAC
1666/*
1667 * On PowerMac, secondary processors start from the reset vector, which
1668 * is temporarily turned into a call to one of the functions below.
1669 */
1670 .section ".text";
1671 .align 2 ;
1672
1673 .globl pmac_secondary_start_1
1674pmac_secondary_start_1:
1675 li r24, 1
1676 b .pmac_secondary_start
1677
1678 .globl pmac_secondary_start_2
1679pmac_secondary_start_2:
1680 li r24, 2
1681 b .pmac_secondary_start
1682
1683 .globl pmac_secondary_start_3
1684pmac_secondary_start_3:
1685 li r24, 3
1686 b .pmac_secondary_start
1687
1688_GLOBAL(pmac_secondary_start)
1689 /* turn on 64-bit mode */
1690 bl .enable_64b_mode
1691 isync
1692
1693 /* Copy some CPU settings from CPU 0 */
1694 bl .__restore_cpu_setup
1695
1696 /* pSeries does this earlier, though I don't think we really need it */
1697 mfmsr r3
1698 ori r3,r3,MSR_RI
1699 mtmsrd r3 /* RI on */
1700
1701 /* Set up a paca value for this processor. */
1702 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1703 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1704 add r13,r13,r4 /* for this processor. */
1705 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1706
1707 /* Create a temp kernel stack for use before relocation is on. */
1708 ld r1,PACAEMERGSP(r13)
1709 subi r1,r1,STACK_FRAME_OVERHEAD
1710
1711 b .__secondary_start
1712
1713#endif /* CONFIG_PPC_PMAC */
1714
1715/*
1716 * This function is called after the master CPU has released the
1717 * secondary processors. The execution environment is relocation off.
1718 * The paca for this processor has the following fields initialized at
1719 * this point:
1720 * 1. Processor number
1721 * 2. Segment table pointer (virtual address)
1722 * On entry the following are set:
1723 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1724 * r24 = cpu# (in Linux terms)
1725 * r13 = paca virtual address
1726 * SPRG3 = paca virtual address
1727 */
1728_GLOBAL(__secondary_start)
1729
1730 HMT_MEDIUM /* Set thread priority to MEDIUM */
1731
1732 ld r2,PACATOC(r13)
1733 li r6,0
1734 stb r6,PACAPROCENABLED(r13)
1735
1736#ifndef CONFIG_PPC_ISERIES
1737 /* Initialize the page table pointer register. */
1738 LOADADDR(r6,_SDR1)
1739 ld r6,0(r6) /* get the value of _SDR1 */
1740 mtspr SDR1,r6 /* set the htab location */
1741#endif
1742 /* Initialize the first segment table (or SLB) entry */
1743 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1744 bl .stab_initialize
1745
1746 /* Initialize the kernel stack. Just a repeat for iSeries. */
1747 LOADADDR(r3,current_set)
1748 sldi r28,r24,3 /* get current_set[cpu#] */
1749 ldx r1,r3,r28
1750 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1751 std r1,PACAKSAVE(r13)
1752
1753 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1754 ori r4,r3,1 /* turn on valid bit */
1755
1756#ifdef CONFIG_PPC_ISERIES
1757 li r0,-1 /* hypervisor call */
1758 li r3,1
1759 sldi r3,r3,63 /* 0x8000000000000000 */
1760 ori r3,r3,4 /* 0x8000000000000004 */
1761 sc /* HvCall_setASR */
1762#else
1763 /* set the ASR */
1764 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1765 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1766 cmpldi r3,PLATFORM_PSERIES_LPAR
1767 bne 98f
1768 mfspr r3,PVR
1769 srwi r3,r3,16
1770 cmpwi r3,0x37 /* SStar */
1771 beq 97f
1772 cmpwi r3,0x36 /* IStar */
1773 beq 97f
1774 cmpwi r3,0x34 /* Pulsar */
1775 bne 98f
177697: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1777 HVSC /* Invoking hcall */
1778 b 99f
177998: /* !(rpa hypervisor) || !(star) */
1780 mtasr r4 /* set the stab location */
178199:
1782#endif
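	/*
	 * Sketch of the branch maze above: on a pSeries LPAR whose PVR
	 * identifies an SStar/IStar/Pulsar CPU, the segment table address
	 * must be installed via the H_SET_ASR hcall; on everything else a
	 * plain mtasr suffices.
	 */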
1783 li r7,0
1784 mtlr r7
1785
1786 /* enable MMU and jump to start_secondary */
1787 LOADADDR(r3,.start_secondary_prolog)
1788 SET_REG_TO_CONST(r4, MSR_KERNEL)
1789#ifdef DO_SOFT_DISABLE
1790 ori r4,r4,MSR_EE
1791#endif
1792 mtspr SRR0,r3
1793 mtspr SRR1,r4
1794 rfid
1795 b . /* prevent speculative execution */
1796
1797/*
1798 * Running with relocation on at this point. All we want to do is
1799 * zero the stack back-chain pointer before going into C code.
1800 */
1801_GLOBAL(start_secondary_prolog)
1802 li r3,0
1803 std r3,0(r1) /* Zero the stack frame pointer */
1804 bl .start_secondary
1805#endif
1806
1807/*
1808 * This subroutine clobbers r11 and r12
1809 */
1810_GLOBAL(enable_64b_mode)
1811 mfmsr r11 /* grab the current MSR */
1812 li r12,1
1813 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1814 or r11,r11,r12
1815 li r12,1
1816 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1817 or r11,r11,r12
1818 mtmsrd r11
1819 isync
1820 blr
1821
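/*
 * (The two li/rldicr/or pairs above just OR single bits into the MSR:
 * MSR_SF to execute in 64-bit mode, and MSR_ISF so exceptions are also
 * taken in 64-bit mode.)
 */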
1822#ifdef CONFIG_PPC_MULTIPLATFORM
1823/*
1824 * This is where the main kernel code starts.
1825 */
1826_STATIC(start_here_multiplatform)
1827 /* get a new offset, now that the kernel has moved. */
1828 bl .reloc_offset
1829 mr r26,r3
1830
1831 /* Clear out the BSS. It may have been done already in
1832 * prom_init, but that's irrelevant since prom_init will soon
1833 * be detached from the kernel completely. Besides, we need
1834 * to clear it now for kexec-style entry.
1835 */
1836 LOADADDR(r11,__bss_stop)
1837 LOADADDR(r8,__bss_start)
1838 sub r11,r11,r8 /* bss size */
1839 addi r11,r11,7 /* round up to an even double word */
1840 rldicl. r11,r11,61,3 /* shift right by 3 */
1841 beq 4f
1842 addi r8,r8,-8
1843 li r0,0
1844 mtctr r11 /* zero this many doublewords */
18453: stdu r0,8(r8)
1846 bdnz 3b
18474:
1848
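	/*
	 * (The loop above is just memset(__bss_start, 0,
	 * __bss_stop - __bss_start), eight bytes at a time, with the
	 * byte count rounded up to a whole number of doublewords.)
	 */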
1849 mfmsr r6
1850 ori r6,r6,MSR_RI
1851 mtmsrd r6 /* RI on */
1852
1853#ifdef CONFIG_HMT
1854 /* Start up the second thread on cpu 0 */
1855 mfspr r3,PVR
1856 srwi r3,r3,16
1857 cmpwi r3,0x34 /* Pulsar */
1858 beq 90f
1859 cmpwi r3,0x36 /* Icestar */
1860 beq 90f
1861 cmpwi r3,0x37 /* SStar */
1862 beq 90f
1863 b 91f /* HMT not supported */
186490: li r3,0
1865 bl .hmt_start_secondary
186691:
1867#endif
1868
1869 /* The following gets the stack and TOC set up with the regs */
1870 /* pointing to the real addr of the kernel stack. This is */
1871 /* all done to support the C function call below which sets */
1872 /* up the htab. This is done because we have relocated the */
1873 /* kernel but are still running in real mode. */
1874
1875 LOADADDR(r3,init_thread_union)
1876 sub r3,r3,r26
1877
1878 /* set up a stack pointer (physical address) */
1879 addi r1,r3,THREAD_SIZE
1880 li r0,0
1881 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1882
1883 /* set up the TOC (physical address) */
1884 LOADADDR(r2,__toc_start)
1885 addi r2,r2,0x4000
1886 addi r2,r2,0x4000
1887 sub r2,r2,r26
1888
1889 LOADADDR(r3,cpu_specs)
1890 sub r3,r3,r26
1891 LOADADDR(r4,cur_cpu_spec)
1892 sub r4,r4,r26
1893 mr r5,r26
1894 bl .identify_cpu
1895
1896 /* Save some low level config HIDs of CPU0 to be copied to
1897 * other CPUs later on, or used for suspend/resume
1898 */
1899 bl .__save_cpu_setup
1900 sync
1901
1902 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1903 * note that boot_cpuid can always be 0 nowadays since there is
1904 * nowhere it can be initialized differently before we reach this
1905 * code
1906 */
1907 LOADADDR(r27, boot_cpuid)
1908 sub r27,r27,r26
1909 lwz r27,0(r27)
1910
1911 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1912 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1913 add r13,r13,r24 /* for this processor. */
1914 sub r13,r13,r26 /* convert to physical addr */
1915 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */
1916
1917 /* Do very early kernel initializations, including initial hash table,
1918 * stab and slb setup before we turn on relocation. */
1919
1920 /* Restore parameters passed from prom_init/kexec */
1921 mr r3,r31
1922 bl .early_setup
1923
1924 /* set the ASR */
1925 ld r3,PACASTABREAL(r13)
1926 ori r4,r3,1 /* turn on valid bit */
1927 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1928 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1929 cmpldi r3,PLATFORM_PSERIES_LPAR
1930 bne 98f
1931 mfspr r3,PVR
1932 srwi r3,r3,16
1933 cmpwi r3,0x37 /* SStar */
1934 beq 97f
1935 cmpwi r3,0x36 /* IStar */
1936 beq 97f
1937 cmpwi r3,0x34 /* Pulsar */
1938 bne 98f
193997: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1940 HVSC /* Invoking hcall */
1941 b 99f
194298: /* !(rpa hypervisor) || !(star) */
1943 mtasr r4 /* set the stab location */
194499:
1945 /* Set SDR1 (hash table pointer) */
1946 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1947 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1948 /* Test if bit 0 is set (LPAR bit) */
1949 andi. r3,r3,0x1
1950 bne 98f
1951 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1952 sub r6,r6,r26
1953 ld r6,0(r6) /* get the value of _SDR1 */
1954 mtspr SDR1,r6 /* set the htab location */
195598:
1956 LOADADDR(r3,.start_here_common)
1957 SET_REG_TO_CONST(r4, MSR_KERNEL)
1958 mtspr SRR0,r3
1959 mtspr SRR1,r4
1960 rfid
1961 b . /* prevent speculative execution */
1962#endif /* CONFIG_PPC_MULTIPLATFORM */
1963
1964 /* This is where all platforms converge execution */
1965_STATIC(start_here_common)
1966 /* relocation is on at this point */
1967
1968 /* The following code sets up the SP and TOC now that we are */
1969 /* running with translation enabled. */
1970
1971 LOADADDR(r3,init_thread_union)
1972
1973 /* set up the stack */
1974 addi r1,r3,THREAD_SIZE
1975 li r0,0
1976 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1977
1978 /* Apply the CPU-specific fixups (nop out sections not relevant
1979 * to this CPU).
1980 */
1981 li r3,0
1982 bl .do_cpu_ftr_fixups
1983
1984 LOADADDR(r26, boot_cpuid)
1985 lwz r26,0(r26)
1986
1987 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1988 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1989 add r13,r13,r24 /* for this processor. */
1990 mtspr SPRG3,r13
1991
1992 /* ptr to current */
1993 LOADADDR(r4,init_task)
1994 std r4,PACACURRENT(r13)
1995
1996 /* Load the TOC */
1997 ld r2,PACATOC(r13)
1998 std r1,PACAKSAVE(r13)
1999
2000 bl .setup_system
2001
2002 /* Load up the kernel context */
20035:
2004#ifdef DO_SOFT_DISABLE
2005 li r5,0
2006 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
2007 mfmsr r5
2008 ori r5,r5,MSR_EE /* Hard Enabled */
2009 mtmsrd r5
2010#endif
2011
2012 bl .start_kernel
2013
2014_GLOBAL(__setup_cpu_power3)
2015 blr
2016
2017_GLOBAL(hmt_init)
2018#ifdef CONFIG_HMT
2019 LOADADDR(r5, hmt_thread_data)
2020 mfspr r7,PVR
2021 srwi r7,r7,16
2022 cmpwi r7,0x34 /* Pulsar */
2023 beq 90f
2024 cmpwi r7,0x36 /* Icestar */
2025 beq 91f
2026 cmpwi r7,0x37 /* SStar */
2027 beq 91f
2028 b 101f
202990: mfspr r6,PIR
2030 andi. r6,r6,0x1f
2031 b 92f
203291: mfspr r6,PIR
2033 andi. r6,r6,0x3ff
203492: sldi r4,r24,3
2035 stwx r6,r5,r4
2036 bl .hmt_start_secondary
2037 b 101f
2038
2039__hmt_secondary_hold:
2040 LOADADDR(r5, hmt_thread_data)
2041 clrldi r5,r5,4
2042 li r7,0
2043 mfspr r6,PIR
2044 mfspr r8,PVR
2045 srwi r8,r8,16
2046 cmpwi r8,0x34
2047 bne 93f
2048 andi. r6,r6,0x1f
2049 b 103f
205093: andi. r6,r6,0x3f
2051
2052103: lwzx r8,r5,r7
2053 cmpw r8,r6
2054 beq 104f
2055 addi r7,r7,8
2056 b 103b
2057
2058104: addi r7,r7,4
2059 lwzx r9,r5,r7
2060 mr r24,r9
2061101:
2062#endif
2063 mr r3,r24
2064 b .pSeries_secondary_smp_init
2065
2066#ifdef CONFIG_HMT
2067_GLOBAL(hmt_start_secondary)
2068 LOADADDR(r4,__hmt_secondary_hold)
2069 clrldi r4,r4,4
2070 mtspr NIADORM, r4
2071 mfspr r4, MSRDORM
2072 li r5, -65
2073 and r4, r4, r5
2074 mtspr MSRDORM, r4
2075 lis r4,0xffef
2076 ori r4,r4,0x7403
2077 mtspr TSC, r4
2078 li r4,0x1f4
2079 mtspr TST, r4
2080 mfspr r4, HID0
2081 ori r4, r4, 0x1
2082 mtspr HID0, r4
2083 mfspr r4, CTRLF
2084 oris r4, r4, 0x40
2085 mtspr CTRLT, r4
2086 blr
2087#endif
2088
2089#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
2090_GLOBAL(smp_release_cpus)
2091 /* All secondary cpus are spinning on a common
2092 * spinloop, release them all now so they can start
2093 * to spin on their individual paca spinloops.
2094 * For non-SMP kernels, the secondary cpus never
2095 * get out of the common spinloop.
2096 */
2097 li r3,1
2098 LOADADDR(r5,__secondary_hold_spinloop)
2099 std r3,0(r5)
2100 sync
2101 blr
2102#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
2103
2104
2105/*
2106 * We put a few things here that have to be page-aligned.
2107 * This stuff goes at the beginning of the data segment,
2108 * which is page-aligned.
2109 */
2110 .data
2111 .align 12
2112 .globl sdata
2113sdata:
2114 .globl empty_zero_page
2115empty_zero_page:
2116 .space 4096
2117
2118 .globl swapper_pg_dir
2119swapper_pg_dir:
2120 .space 4096
2121
2122 .globl ioremap_dir
2123ioremap_dir:
2124 .space 4096
2125
2126#ifdef CONFIG_SMP
2127/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
2128 .globl stab_array
2129stab_array:
2130 .space 4096 * 48
2131#endif
2132
2133/*
2134 * This space gets a copy of optional info passed to us by the bootstrap
2135 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
2136 */
2137 .globl cmd_line
2138cmd_line:
2139 .space COMMAND_LINE_SIZE