Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r-- | arch/powerpc/kernel/head_64.S | 977
1 files changed, 17 insertions, 960 deletions
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 382495fa90b0..012505ebd9f9 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -12,8 +12,9 @@
12 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | 12 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and |
13 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | 13 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com |
14 | * | 14 | * |
15 | * This file contains the low-level support and setup for the | 15 | * This file contains the entry point for the 64-bit kernel along |
16 | * PowerPC-64 platform, including trap and interrupt dispatch. | 16 | * with some early initialization code common to all 64-bit powerpc |
17 | * variants. | ||
17 | * | 18 | * |
18 | * This program is free software; you can redistribute it and/or | 19 | * This program is free software; you can redistribute it and/or |
19 | * modify it under the terms of the GNU General Public License | 20 | * modify it under the terms of the GNU General Public License |
@@ -38,36 +39,25 @@
38 | #include <asm/exception.h> | 39 | #include <asm/exception.h> |
39 | #include <asm/irqflags.h> | 40 | #include <asm/irqflags.h> |
40 | 41 | ||
41 | /* | 42 | /* The physical memory is layed out such that the secondary processor |
42 | * We layout physical memory as follows: | 43 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow |
43 | * 0x0000 - 0x00ff : Secondary processor spin code | 44 | * using the layout described in exceptions-64s.S |
44 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | ||
45 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | ||
46 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | ||
47 | * 0x7000 - 0x7fff : FWNMI data area | ||
48 | * 0x8000 - : Early init and support code | ||
49 | */ | ||
50 | |||
51 | /* | ||
52 | * SPRG Usage | ||
53 | * | ||
54 | * Register Definition | ||
55 | * | ||
56 | * SPRG0 reserved for hypervisor | ||
57 | * SPRG1 temp - used to save gpr | ||
58 | * SPRG2 temp - used to save gpr | ||
59 | * SPRG3 virt addr of paca | ||
60 | */ | 45 | */ |
61 | 46 | ||
62 | /* | 47 | /* |
63 | * Entering into this code we make the following assumptions: | 48 | * Entering into this code we make the following assumptions: |
64 | * For pSeries: | 49 | * |
50 | * For pSeries or server processors: | ||
65 | * 1. The MMU is off & open firmware is running in real mode. | 51 | * 1. The MMU is off & open firmware is running in real mode. |
66 | * 2. The kernel is entered at __start | 52 | * 2. The kernel is entered at __start |
67 | * | 53 | * |
68 | * For iSeries: | 54 | * For iSeries: |
69 | * 1. The MMU is on (as it always is for iSeries) | 55 | * 1. The MMU is on (as it always is for iSeries) |
70 | * 2. The kernel is entered at system_reset_iSeries | 56 | * 2. The kernel is entered at system_reset_iSeries |
57 | * | ||
58 | * For Book3E processors: | ||
59 | * 1. The MMU is on running in AS0 in a state defined in ePAPR | ||
60 | * 2. The kernel is entered at __start | ||
71 | */ | 61 | */ |
72 | 62 | ||
73 | .text | 63 | .text |
@@ -166,947 +156,14 @@ exception_marker:
166 | .text | 156 | .text |
167 | 157 | ||
168 | /* | 158 | /* |
169 | * This is the start of the interrupt handlers for pSeries | 159 | * On server, we include the exception vectors code here as it |
170 | * This code runs with relocation off. | 160 | * relies on absolute addressing which is only possible within |
171 | * Code from here to __end_interrupts gets copied down to real | 161 | * this compilation unit |
172 | * address 0x100 when we are running a relocatable kernel. | ||
173 | * Therefore any relative branches in this section must only | ||
174 | * branch to labels in this section. | ||
175 | */ | ||
176 | . = 0x100 | ||
177 | .globl __start_interrupts | ||
178 | __start_interrupts: | ||
179 | |||
180 | STD_EXCEPTION_PSERIES(0x100, system_reset) | ||
181 | |||
182 | . = 0x200 | ||
183 | _machine_check_pSeries: | ||
184 | HMT_MEDIUM | ||
185 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
186 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
187 | |||
188 | . = 0x300 | ||
189 | .globl data_access_pSeries | ||
190 | data_access_pSeries: | ||
191 | HMT_MEDIUM | ||
192 | mtspr SPRN_SPRG1,r13 | ||
193 | BEGIN_FTR_SECTION | ||
194 | mtspr SPRN_SPRG2,r12 | ||
195 | mfspr r13,SPRN_DAR | ||
196 | mfspr r12,SPRN_DSISR | ||
197 | srdi r13,r13,60 | ||
198 | rlwimi r13,r12,16,0x20 | ||
199 | mfcr r12 | ||
200 | cmpwi r13,0x2c | ||
201 | beq do_stab_bolted_pSeries | ||
202 | mtcrf 0x80,r12 | ||
203 | mfspr r12,SPRN_SPRG2 | ||
204 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
205 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | ||
206 | |||
207 | . = 0x380 | ||
208 | .globl data_access_slb_pSeries | ||
209 | data_access_slb_pSeries: | ||
210 | HMT_MEDIUM | ||
211 | mtspr SPRN_SPRG1,r13 | ||
212 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | ||
213 | std r3,PACA_EXSLB+EX_R3(r13) | ||
214 | mfspr r3,SPRN_DAR | ||
215 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
216 | mfcr r9 | ||
217 | #ifdef __DISABLED__ | ||
218 | /* Keep that around for when we re-implement dynamic VSIDs */ | ||
219 | cmpdi r3,0 | ||
220 | bge slb_miss_user_pseries | ||
221 | #endif /* __DISABLED__ */ | ||
222 | std r10,PACA_EXSLB+EX_R10(r13) | ||
223 | std r11,PACA_EXSLB+EX_R11(r13) | ||
224 | std r12,PACA_EXSLB+EX_R12(r13) | ||
225 | mfspr r10,SPRN_SPRG1 | ||
226 | std r10,PACA_EXSLB+EX_R13(r13) | ||
227 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | ||
228 | #ifndef CONFIG_RELOCATABLE | ||
229 | b .slb_miss_realmode | ||
230 | #else | ||
231 | /* | ||
232 | * We can't just use a direct branch to .slb_miss_realmode | ||
233 | * because the distance from here to there depends on where | ||
234 | * the kernel ends up being put. | ||
235 | */ | ||
236 | mfctr r11 | ||
237 | ld r10,PACAKBASE(r13) | ||
238 | LOAD_HANDLER(r10, .slb_miss_realmode) | ||
239 | mtctr r10 | ||
240 | bctr | ||
241 | #endif | ||
242 | |||
243 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | ||
244 | |||
245 | . = 0x480 | ||
246 | .globl instruction_access_slb_pSeries | ||
247 | instruction_access_slb_pSeries: | ||
248 | HMT_MEDIUM | ||
249 | mtspr SPRN_SPRG1,r13 | ||
250 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | ||
251 | std r3,PACA_EXSLB+EX_R3(r13) | ||
252 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | ||
253 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
254 | mfcr r9 | ||
255 | #ifdef __DISABLED__ | ||
256 | /* Keep that around for when we re-implement dynamic VSIDs */ | ||
257 | cmpdi r3,0 | ||
258 | bge slb_miss_user_pseries | ||
259 | #endif /* __DISABLED__ */ | ||
260 | std r10,PACA_EXSLB+EX_R10(r13) | ||
261 | std r11,PACA_EXSLB+EX_R11(r13) | ||
262 | std r12,PACA_EXSLB+EX_R12(r13) | ||
263 | mfspr r10,SPRN_SPRG1 | ||
264 | std r10,PACA_EXSLB+EX_R13(r13) | ||
265 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | ||
266 | #ifndef CONFIG_RELOCATABLE | ||
267 | b .slb_miss_realmode | ||
268 | #else | ||
269 | mfctr r11 | ||
270 | ld r10,PACAKBASE(r13) | ||
271 | LOAD_HANDLER(r10, .slb_miss_realmode) | ||
272 | mtctr r10 | ||
273 | bctr | ||
274 | #endif | ||
275 | |||
276 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) | ||
277 | STD_EXCEPTION_PSERIES(0x600, alignment) | ||
278 | STD_EXCEPTION_PSERIES(0x700, program_check) | ||
279 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | ||
280 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) | ||
281 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | ||
282 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | ||
283 | |||
284 | . = 0xc00 | ||
285 | .globl system_call_pSeries | ||
286 | system_call_pSeries: | ||
287 | HMT_MEDIUM | ||
288 | BEGIN_FTR_SECTION | ||
289 | cmpdi r0,0x1ebe | ||
290 | beq- 1f | ||
291 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | ||
292 | mr r9,r13 | ||
293 | mfspr r13,SPRN_SPRG3 | ||
294 | mfspr r11,SPRN_SRR0 | ||
295 | ld r12,PACAKBASE(r13) | ||
296 | ld r10,PACAKMSR(r13) | ||
297 | LOAD_HANDLER(r12, system_call_entry) | ||
298 | mtspr SPRN_SRR0,r12 | ||
299 | mfspr r12,SPRN_SRR1 | ||
300 | mtspr SPRN_SRR1,r10 | ||
301 | rfid | ||
302 | b . /* prevent speculative execution */ | ||
303 | |||
304 | /* Fast LE/BE switch system call */ | ||
305 | 1: mfspr r12,SPRN_SRR1 | ||
306 | xori r12,r12,MSR_LE | ||
307 | mtspr SPRN_SRR1,r12 | ||
308 | rfid /* return to userspace */ | ||
309 | b . | ||
310 | |||
311 | STD_EXCEPTION_PSERIES(0xd00, single_step) | ||
312 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | ||
313 | |||
314 | /* We need to deal with the Altivec unavailable exception | ||
315 | * here which is at 0xf20, thus in the middle of the | ||
316 | * prolog code of the PerformanceMonitor one. A little | ||
317 | * trickery is thus necessary | ||
318 | */ | ||
319 | . = 0xf00 | ||
320 | b performance_monitor_pSeries | ||
321 | |||
322 | . = 0xf20 | ||
323 | b altivec_unavailable_pSeries | ||
324 | |||
325 | . = 0xf40 | ||
326 | b vsx_unavailable_pSeries | ||
327 | |||
328 | #ifdef CONFIG_CBE_RAS | ||
329 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | ||
330 | #endif /* CONFIG_CBE_RAS */ | ||
331 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | ||
332 | #ifdef CONFIG_CBE_RAS | ||
333 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | ||
334 | #endif /* CONFIG_CBE_RAS */ | ||
335 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | ||
336 | #ifdef CONFIG_CBE_RAS | ||
337 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | ||
338 | #endif /* CONFIG_CBE_RAS */ | ||
339 | |||
340 | . = 0x3000 | ||
341 | |||
342 | /*** pSeries interrupt support ***/ | ||
343 | |||
344 | /* moved from 0xf00 */ | ||
345 | STD_EXCEPTION_PSERIES(., performance_monitor) | ||
346 | STD_EXCEPTION_PSERIES(., altivec_unavailable) | ||
347 | STD_EXCEPTION_PSERIES(., vsx_unavailable) | ||
348 | |||
349 | /* | ||
350 | * An interrupt came in while soft-disabled; clear EE in SRR1, | ||
351 | * clear paca->hard_enabled and return. | ||
352 | */ | ||
353 | masked_interrupt: | ||
354 | stb r10,PACAHARDIRQEN(r13) | ||
355 | mtcrf 0x80,r9 | ||
356 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
357 | mfspr r10,SPRN_SRR1 | ||
358 | rldicl r10,r10,48,1 /* clear MSR_EE */ | ||
359 | rotldi r10,r10,16 | ||
360 | mtspr SPRN_SRR1,r10 | ||
361 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
362 | mfspr r13,SPRN_SPRG1 | ||
363 | rfid | ||
364 | b . | ||
365 | |||
366 | .align 7 | ||
367 | do_stab_bolted_pSeries: | ||
368 | mtcrf 0x80,r12 | ||
369 | mfspr r12,SPRN_SPRG2 | ||
370 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) | ||
371 | |||
372 | #ifdef CONFIG_PPC_PSERIES | ||
373 | /* | ||
374 | * Vectors for the FWNMI option. Share common code. | ||
375 | */ | ||
376 | .globl system_reset_fwnmi | ||
377 | .align 7 | ||
378 | system_reset_fwnmi: | ||
379 | HMT_MEDIUM | ||
380 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
381 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | ||
382 | |||
383 | .globl machine_check_fwnmi | ||
384 | .align 7 | ||
385 | machine_check_fwnmi: | ||
386 | HMT_MEDIUM | ||
387 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
388 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
389 | |||
390 | #endif /* CONFIG_PPC_PSERIES */ | ||
391 | |||
392 | #ifdef __DISABLED__ | ||
393 | /* | ||
394 | * This is used for when the SLB miss handler has to go virtual, | ||
395 | * which doesn't happen for now anymore but will once we re-implement | ||
396 | * dynamic VSIDs for shared page tables | ||
397 | */ | ||
398 | slb_miss_user_pseries: | ||
399 | std r10,PACA_EXGEN+EX_R10(r13) | ||
400 | std r11,PACA_EXGEN+EX_R11(r13) | ||
401 | std r12,PACA_EXGEN+EX_R12(r13) | ||
402 | mfspr r10,SPRG1 | ||
403 | ld r11,PACA_EXSLB+EX_R9(r13) | ||
404 | ld r12,PACA_EXSLB+EX_R3(r13) | ||
405 | std r10,PACA_EXGEN+EX_R13(r13) | ||
406 | std r11,PACA_EXGEN+EX_R9(r13) | ||
407 | std r12,PACA_EXGEN+EX_R3(r13) | ||
408 | clrrdi r12,r13,32 | ||
409 | mfmsr r10 | ||
410 | mfspr r11,SRR0 /* save SRR0 */ | ||
411 | ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ | ||
412 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | ||
413 | mtspr SRR0,r12 | ||
414 | mfspr r12,SRR1 /* and SRR1 */ | ||
415 | mtspr SRR1,r10 | ||
416 | rfid | ||
417 | b . /* prevent spec. execution */ | ||
418 | #endif /* __DISABLED__ */ | ||
419 | |||
420 | .align 7 | ||
421 | .globl __end_interrupts | ||
422 | __end_interrupts: | ||
423 | |||
424 | /* | ||
425 | * Code from here down to __end_handlers is invoked from the | ||
426 | * exception prologs above. Because the prologs assemble the | ||
427 | * addresses of these handlers using the LOAD_HANDLER macro, | ||
428 | * which uses an addi instruction, these handlers must be in | ||
429 | * the first 32k of the kernel image. | ||
430 | */ | ||
431 | |||
432 | /*** Common interrupt handlers ***/ | ||
433 | |||
434 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | ||
435 | |||
436 | /* | ||
437 | * Machine check is different because we use a different | ||
438 | * save area: PACA_EXMC instead of PACA_EXGEN. | ||
439 | */ | ||
440 | .align 7 | ||
441 | .globl machine_check_common | ||
442 | machine_check_common: | ||
443 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | ||
444 | FINISH_NAP | ||
445 | DISABLE_INTS | ||
446 | bl .save_nvgprs | ||
447 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
448 | bl .machine_check_exception | ||
449 | b .ret_from_except | ||
450 | |||
451 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | ||
452 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | ||
453 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | ||
454 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | ||
455 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | ||
456 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) | ||
457 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | ||
458 | #ifdef CONFIG_ALTIVEC | ||
459 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | ||
460 | #else | ||
461 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | ||
462 | #endif | ||
463 | #ifdef CONFIG_CBE_RAS | ||
464 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) | ||
465 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) | ||
466 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) | ||
467 | #endif /* CONFIG_CBE_RAS */ | ||
468 | |||
469 | .align 7 | ||
470 | system_call_entry: | ||
471 | b system_call_common | ||
472 | |||
473 | /* | ||
474 | * Here we have detected that the kernel stack pointer is bad. | ||
475 | * R9 contains the saved CR, r13 points to the paca, | ||
476 | * r10 contains the (bad) kernel stack pointer, | ||
477 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
478 | * We switch to using an emergency stack, save the registers there, | ||
479 | * and call kernel_bad_stack(), which panics. | ||
480 | */ | ||
481 | bad_stack: | ||
482 | ld r1,PACAEMERGSP(r13) | ||
483 | subi r1,r1,64+INT_FRAME_SIZE | ||
484 | std r9,_CCR(r1) | ||
485 | std r10,GPR1(r1) | ||
486 | std r11,_NIP(r1) | ||
487 | std r12,_MSR(r1) | ||
488 | mfspr r11,SPRN_DAR | ||
489 | mfspr r12,SPRN_DSISR | ||
490 | std r11,_DAR(r1) | ||
491 | std r12,_DSISR(r1) | ||
492 | mflr r10 | ||
493 | mfctr r11 | ||
494 | mfxer r12 | ||
495 | std r10,_LINK(r1) | ||
496 | std r11,_CTR(r1) | ||
497 | std r12,_XER(r1) | ||
498 | SAVE_GPR(0,r1) | ||
499 | SAVE_GPR(2,r1) | ||
500 | SAVE_4GPRS(3,r1) | ||
501 | SAVE_2GPRS(7,r1) | ||
502 | SAVE_10GPRS(12,r1) | ||
503 | SAVE_10GPRS(22,r1) | ||
504 | lhz r12,PACA_TRAP_SAVE(r13) | ||
505 | std r12,_TRAP(r1) | ||
506 | addi r11,r1,INT_FRAME_SIZE | ||
507 | std r11,0(r1) | ||
508 | li r12,0 | ||
509 | std r12,0(r11) | ||
510 | ld r2,PACATOC(r13) | ||
511 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
512 | bl .kernel_bad_stack | ||
513 | b 1b | ||
514 | |||
515 | /* | ||
516 | * Here r13 points to the paca, r9 contains the saved CR, | ||
517 | * SRR0 and SRR1 are saved in r11 and r12, | ||
518 | * r9 - r13 are saved in paca->exgen. | ||
519 | */ | ||
520 | .align 7 | ||
521 | .globl data_access_common | ||
522 | data_access_common: | ||
523 | mfspr r10,SPRN_DAR | ||
524 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
525 | mfspr r10,SPRN_DSISR | ||
526 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
527 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | ||
528 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
529 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
530 | li r5,0x300 | ||
531 | b .do_hash_page /* Try to handle as hpte fault */ | ||
532 | |||
533 | .align 7 | ||
534 | .globl instruction_access_common | ||
535 | instruction_access_common: | ||
536 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | ||
537 | ld r3,_NIP(r1) | ||
538 | andis. r4,r12,0x5820 | ||
539 | li r5,0x400 | ||
540 | b .do_hash_page /* Try to handle as hpte fault */ | ||
541 | |||
542 | /* | ||
543 | * Here is the common SLB miss user that is used when going to virtual | ||
544 | * mode for SLB misses, that is currently not used | ||
545 | */ | ||
546 | #ifdef __DISABLED__ | ||
547 | .align 7 | ||
548 | .globl slb_miss_user_common | ||
549 | slb_miss_user_common: | ||
550 | mflr r10 | ||
551 | std r3,PACA_EXGEN+EX_DAR(r13) | ||
552 | stw r9,PACA_EXGEN+EX_CCR(r13) | ||
553 | std r10,PACA_EXGEN+EX_LR(r13) | ||
554 | std r11,PACA_EXGEN+EX_SRR0(r13) | ||
555 | bl .slb_allocate_user | ||
556 | |||
557 | ld r10,PACA_EXGEN+EX_LR(r13) | ||
558 | ld r3,PACA_EXGEN+EX_R3(r13) | ||
559 | lwz r9,PACA_EXGEN+EX_CCR(r13) | ||
560 | ld r11,PACA_EXGEN+EX_SRR0(r13) | ||
561 | mtlr r10 | ||
562 | beq- slb_miss_fault | ||
563 | |||
564 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
565 | beq- unrecov_user_slb | ||
566 | mfmsr r10 | ||
567 | |||
568 | .machine push | ||
569 | .machine "power4" | ||
570 | mtcrf 0x80,r9 | ||
571 | .machine pop | ||
572 | |||
573 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ | ||
574 | mtmsrd r10,1 | ||
575 | |||
576 | mtspr SRR0,r11 | ||
577 | mtspr SRR1,r12 | ||
578 | |||
579 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
580 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
581 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
582 | ld r12,PACA_EXGEN+EX_R12(r13) | ||
583 | ld r13,PACA_EXGEN+EX_R13(r13) | ||
584 | rfid | ||
585 | b . | ||
586 | |||
587 | slb_miss_fault: | ||
588 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) | ||
589 | ld r4,PACA_EXGEN+EX_DAR(r13) | ||
590 | li r5,0 | ||
591 | std r4,_DAR(r1) | ||
592 | std r5,_DSISR(r1) | ||
593 | b handle_page_fault | ||
594 | |||
595 | unrecov_user_slb: | ||
596 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | ||
597 | DISABLE_INTS | ||
598 | bl .save_nvgprs | ||
599 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
600 | bl .unrecoverable_exception | ||
601 | b 1b | ||
602 | |||
603 | #endif /* __DISABLED__ */ | ||
604 | |||
605 | |||
606 | /* | ||
607 | * r13 points to the PACA, r9 contains the saved CR, | ||
608 | * r12 contain the saved SRR1, SRR0 is still ready for return | ||
609 | * r3 has the faulting address | ||
610 | * r9 - r13 are saved in paca->exslb. | ||
611 | * r3 is saved in paca->slb_r3 | ||
612 | * We assume we aren't going to take any exceptions during this procedure. | ||
613 | */ | ||
614 | _GLOBAL(slb_miss_realmode) | ||
615 | mflr r10 | ||
616 | #ifdef CONFIG_RELOCATABLE | ||
617 | mtctr r11 | ||
618 | #endif | ||
619 | |||
620 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
621 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
622 | |||
623 | bl .slb_allocate_realmode | ||
624 | |||
625 | /* All done -- return from exception. */ | ||
626 | |||
627 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
628 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
629 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
630 | #ifdef CONFIG_PPC_ISERIES | ||
631 | BEGIN_FW_FTR_SECTION | ||
632 | ld r11,PACALPPACAPTR(r13) | ||
633 | ld r11,LPPACASRR0(r11) /* get SRR0 value */ | ||
634 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
635 | #endif /* CONFIG_PPC_ISERIES */ | ||
636 | |||
637 | mtlr r10 | ||
638 | |||
639 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
640 | beq- 2f | ||
641 | |||
642 | .machine push | ||
643 | .machine "power4" | ||
644 | mtcrf 0x80,r9 | ||
645 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
646 | .machine pop | ||
647 | |||
648 | #ifdef CONFIG_PPC_ISERIES | ||
649 | BEGIN_FW_FTR_SECTION | ||
650 | mtspr SPRN_SRR0,r11 | ||
651 | mtspr SPRN_SRR1,r12 | ||
652 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
653 | #endif /* CONFIG_PPC_ISERIES */ | ||
654 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
655 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
656 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
657 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
658 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
659 | rfid | ||
660 | b . /* prevent speculative execution */ | ||
661 | |||
662 | 2: | ||
663 | #ifdef CONFIG_PPC_ISERIES | ||
664 | BEGIN_FW_FTR_SECTION | ||
665 | b unrecov_slb | ||
666 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
667 | #endif /* CONFIG_PPC_ISERIES */ | ||
668 | mfspr r11,SPRN_SRR0 | ||
669 | ld r10,PACAKBASE(r13) | ||
670 | LOAD_HANDLER(r10,unrecov_slb) | ||
671 | mtspr SPRN_SRR0,r10 | ||
672 | ld r10,PACAKMSR(r13) | ||
673 | mtspr SPRN_SRR1,r10 | ||
674 | rfid | ||
675 | b . | ||
676 | |||
677 | unrecov_slb: | ||
678 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
679 | DISABLE_INTS | ||
680 | bl .save_nvgprs | ||
681 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
682 | bl .unrecoverable_exception | ||
683 | b 1b | ||
684 | |||
685 | .align 7 | ||
686 | .globl hardware_interrupt_common | ||
687 | .globl hardware_interrupt_entry | ||
688 | hardware_interrupt_common: | ||
689 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | ||
690 | FINISH_NAP | ||
691 | hardware_interrupt_entry: | ||
692 | DISABLE_INTS | ||
693 | BEGIN_FTR_SECTION | ||
694 | bl .ppc64_runlatch_on | ||
695 | END_FTR_SECTION_IFSET(CPU_FTR_CTRL) | ||
696 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
697 | bl .do_IRQ | ||
698 | b .ret_from_except_lite | ||
699 | |||
700 | #ifdef CONFIG_PPC_970_NAP | ||
701 | power4_fixup_nap: | ||
702 | andc r9,r9,r10 | ||
703 | std r9,TI_LOCAL_FLAGS(r11) | ||
704 | ld r10,_LINK(r1) /* make idle task do the */ | ||
705 | std r10,_NIP(r1) /* equivalent of a blr */ | ||
706 | blr | ||
707 | #endif | ||
708 | |||
709 | .align 7 | ||
710 | .globl alignment_common | ||
711 | alignment_common: | ||
712 | mfspr r10,SPRN_DAR | ||
713 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
714 | mfspr r10,SPRN_DSISR | ||
715 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
716 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | ||
717 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
718 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
719 | std r3,_DAR(r1) | ||
720 | std r4,_DSISR(r1) | ||
721 | bl .save_nvgprs | ||
722 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
723 | ENABLE_INTS | ||
724 | bl .alignment_exception | ||
725 | b .ret_from_except | ||
726 | |||
727 | .align 7 | ||
728 | .globl program_check_common | ||
729 | program_check_common: | ||
730 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | ||
731 | bl .save_nvgprs | ||
732 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
733 | ENABLE_INTS | ||
734 | bl .program_check_exception | ||
735 | b .ret_from_except | ||
736 | |||
737 | .align 7 | ||
738 | .globl fp_unavailable_common | ||
739 | fp_unavailable_common: | ||
740 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | ||
741 | bne 1f /* if from user, just load it up */ | ||
742 | bl .save_nvgprs | ||
743 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
744 | ENABLE_INTS | ||
745 | bl .kernel_fp_unavailable_exception | ||
746 | BUG_OPCODE | ||
747 | 1: bl .load_up_fpu | ||
748 | b fast_exception_return | ||
749 | |||
750 | .align 7 | ||
751 | .globl altivec_unavailable_common | ||
752 | altivec_unavailable_common: | ||
753 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | ||
754 | #ifdef CONFIG_ALTIVEC | ||
755 | BEGIN_FTR_SECTION | ||
756 | beq 1f | ||
757 | bl .load_up_altivec | ||
758 | b fast_exception_return | ||
759 | 1: | ||
760 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
761 | #endif | ||
762 | bl .save_nvgprs | ||
763 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
764 | ENABLE_INTS | ||
765 | bl .altivec_unavailable_exception | ||
766 | b .ret_from_except | ||
767 | |||
768 | .align 7 | ||
769 | .globl vsx_unavailable_common | ||
770 | vsx_unavailable_common: | ||
771 | EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) | ||
772 | #ifdef CONFIG_VSX | ||
773 | BEGIN_FTR_SECTION | ||
774 | bne .load_up_vsx | ||
775 | 1: | ||
776 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | ||
777 | #endif | ||
778 | bl .save_nvgprs | ||
779 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
780 | ENABLE_INTS | ||
781 | bl .vsx_unavailable_exception | ||
782 | b .ret_from_except | ||
783 | |||
784 | .align 7 | ||
785 | .globl __end_handlers | ||
786 | __end_handlers: | ||
787 | |||
788 | /* | ||
789 | * Return from an exception with minimal checks. | ||
790 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | ||
791 | * If interrupts have been enabled, or anything has been | ||
792 | * done that might have changed the scheduling status of | ||
793 | * any task or sent any task a signal, you should use | ||
794 | * ret_from_except or ret_from_except_lite instead of this. | ||
795 | */ | 162 | */ |
796 | fast_exc_return_irq: /* restores irq state too */ | 163 | #ifdef CONFIG_PPC_BOOK3S |
797 | ld r3,SOFTE(r1) | 164 | #include "exceptions-64s.S" |
798 | TRACE_AND_RESTORE_IRQ(r3); | ||
799 | ld r12,_MSR(r1) | ||
800 | rldicl r4,r12,49,63 /* get MSR_EE to LSB */ | ||
801 | stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ | ||
802 | b 1f | ||
803 | |||
804 | .globl fast_exception_return | ||
805 | fast_exception_return: | ||
806 | ld r12,_MSR(r1) | ||
807 | 1: ld r11,_NIP(r1) | ||
808 | andi. r3,r12,MSR_RI /* check if RI is set */ | ||
809 | beq- unrecov_fer | ||
810 | |||
811 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
812 | andi. r3,r12,MSR_PR | ||
813 | beq 2f | ||
814 | ACCOUNT_CPU_USER_EXIT(r3, r4) | ||
815 | 2: | ||
816 | #endif | 165 | #endif |
817 | 166 | ||
818 | ld r3,_CCR(r1) | ||
819 | ld r4,_LINK(r1) | ||
820 | ld r5,_CTR(r1) | ||
821 | ld r6,_XER(r1) | ||
822 | mtcr r3 | ||
823 | mtlr r4 | ||
824 | mtctr r5 | ||
825 | mtxer r6 | ||
826 | REST_GPR(0, r1) | ||
827 | REST_8GPRS(2, r1) | ||
828 | |||
829 | mfmsr r10 | ||
830 | rldicl r10,r10,48,1 /* clear EE */ | ||
831 | rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ | ||
832 | mtmsrd r10,1 | ||
833 | |||
834 | mtspr SPRN_SRR1,r12 | ||
835 | mtspr SPRN_SRR0,r11 | ||
836 | REST_4GPRS(10, r1) | ||
837 | ld r1,GPR1(r1) | ||
838 | rfid | ||
839 | b . /* prevent speculative execution */ | ||
840 | |||
841 | unrecov_fer: | ||
842 | bl .save_nvgprs | ||
843 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
844 | bl .unrecoverable_exception | ||
845 | b 1b | ||
846 | |||
847 | |||
848 | /* | ||
849 | * Hash table stuff | ||
850 | */ | ||
851 | .align 7 | ||
852 | _STATIC(do_hash_page) | ||
853 | std r3,_DAR(r1) | ||
854 | std r4,_DSISR(r1) | ||
855 | |||
856 | andis. r0,r4,0xa450 /* weird error? */ | ||
857 | bne- handle_page_fault /* if not, try to insert a HPTE */ | ||
858 | BEGIN_FTR_SECTION | ||
859 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | ||
860 | bne- do_ste_alloc /* If so handle it */ | ||
861 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
862 | |||
863 | /* | ||
864 | * On iSeries, we soft-disable interrupts here, then | ||
865 | * hard-enable interrupts so that the hash_page code can spin on | ||
866 | * the hash_table_lock without problems on a shared processor. | ||
867 | */ | ||
868 | DISABLE_INTS | ||
869 | |||
870 | /* | ||
871 | * Currently, trace_hardirqs_off() will be called by DISABLE_INTS | ||
872 | * and will clobber volatile registers when irq tracing is enabled | ||
873 | * so we need to reload them. It may be possible to be smarter here | ||
874 | * and move the irq tracing elsewhere but let's keep it simple for | ||
875 | * now | ||
876 | */ | ||
877 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
878 | ld r3,_DAR(r1) | ||
879 | ld r4,_DSISR(r1) | ||
880 | ld r5,_TRAP(r1) | ||
881 | ld r12,_MSR(r1) | ||
882 | clrrdi r5,r5,4 | ||
883 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
884 | /* | ||
885 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | ||
886 | * accessing a userspace segment (even from the kernel). We assume | ||
887 | * kernel addresses always have the high bit set. | ||
888 | */ | ||
889 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | ||
890 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | ||
891 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | ||
892 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | ||
893 | ori r4,r4,1 /* add _PAGE_PRESENT */ | ||
894 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | ||
895 | |||
896 | /* | ||
897 | * r3 contains the faulting address | ||
898 | * r4 contains the required access permissions | ||
899 | * r5 contains the trap number | ||
900 | * | ||
901 | * at return r3 = 0 for success | ||
902 | */ | ||
903 | bl .hash_page /* build HPTE if possible */ | ||
904 | cmpdi r3,0 /* see if hash_page succeeded */ | ||
905 | |||
906 | BEGIN_FW_FTR_SECTION | ||
907 | /* | ||
908 | * If we had interrupts soft-enabled at the point where the | ||
909 | * DSI/ISI occurred, and an interrupt came in during hash_page, | ||
910 | * handle it now. | ||
911 | * We jump to ret_from_except_lite rather than fast_exception_return | ||
912 | * because ret_from_except_lite will check for and handle pending | ||
913 | * interrupts if necessary. | ||
914 | */ | ||
915 | beq 13f | ||
916 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
917 | |||
918 | BEGIN_FW_FTR_SECTION | ||
919 | /* | ||
920 | * Here we have interrupts hard-disabled, so it is sufficient | ||
921 | * to restore paca->{soft,hard}_enable and get out. | ||
922 | */ | ||
923 | beq fast_exc_return_irq /* Return from exception on success */ | ||
924 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | ||
925 | |||
926 | /* For a hash failure, we don't bother re-enabling interrupts */ | ||
927 | ble- 12f | ||
928 | |||
929 | /* | ||
930 | * hash_page couldn't handle it, set soft interrupt enable back | ||
931 | * to what it was before the trap. Note that .raw_local_irq_restore | ||
932 | * handles any interrupts pending at this point. | ||
933 | */ | ||
934 | ld r3,SOFTE(r1) | ||
935 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) | ||
936 | bl .raw_local_irq_restore | ||
937 | b 11f | ||
938 | |||
939 | /* Here we have a page fault that hash_page can't handle. */ | ||
940 | handle_page_fault: | ||
941 | ENABLE_INTS | ||
942 | 11: ld r4,_DAR(r1) | ||
943 | ld r5,_DSISR(r1) | ||
944 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
945 | bl .do_page_fault | ||
946 | cmpdi r3,0 | ||
947 | beq+ 13f | ||
948 | bl .save_nvgprs | ||
949 | mr r5,r3 | ||
950 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
951 | lwz r4,_DAR(r1) | ||
952 | bl .bad_page_fault | ||
953 | b .ret_from_except | ||
954 | |||
955 | 13: b .ret_from_except_lite | ||
956 | |||
957 | /* We have a page fault that hash_page could handle but HV refused | ||
958 | * the PTE insertion | ||
959 | */ | ||
960 | 12: bl .save_nvgprs | ||
961 | mr r5,r3 | ||
962 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
963 | ld r4,_DAR(r1) | ||
964 | bl .low_hash_fault | ||
965 | b .ret_from_except | ||
966 | |||
967 | /* here we have a segment miss */ | ||
968 | do_ste_alloc: | ||
969 | bl .ste_allocate /* try to insert stab entry */ | ||
970 | cmpdi r3,0 | ||
971 | bne- handle_page_fault | ||
972 | b fast_exception_return | ||
973 | |||
974 | /* | ||
975 | * r13 points to the PACA, r9 contains the saved CR, | ||
976 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
977 | * r9 - r13 are saved in paca->exslb. | ||
978 | * We assume we aren't going to take any exceptions during this procedure. | ||
979 | * We assume (DAR >> 60) == 0xc. | ||
980 | */ | ||
981 | .align 7 | ||
982 | _GLOBAL(do_stab_bolted) | ||
983 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
984 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | ||
985 | |||
986 | /* Hash to the primary group */ | ||
987 | ld r10,PACASTABVIRT(r13) | ||
988 | mfspr r11,SPRN_DAR | ||
989 | srdi r11,r11,28 | ||
990 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | ||
991 | |||
992 | /* Calculate VSID */ | ||
993 | /* This is a kernel address, so protovsid = ESID */ | ||
994 | ASM_VSID_SCRAMBLE(r11, r9, 256M) | ||
995 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | ||
996 | |||
997 | /* Search the primary group for a free entry */ | ||
998 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | ||
999 | andi. r11,r11,0x80 | ||
1000 | beq 2f | ||
1001 | addi r10,r10,16 | ||
1002 | andi. r11,r10,0x70 | ||
1003 | bne 1b | ||
1004 | |||
1005 | /* Stick for only searching the primary group for now. */ | ||
1006 | /* At least for now, we use a very simple random castout scheme */ | ||
1007 | /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ | ||
1008 | mftb r11 | ||
1009 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | ||
1010 | ori r11,r11,0x10 | ||
1011 | |||
1012 | /* r10 currently points to an ste one past the group of interest */ | ||
1013 | /* make it point to the randomly selected entry */ | ||
1014 | subi r10,r10,128 | ||
1015 | or r10,r10,r11 /* r10 is the entry to invalidate */ | ||
1016 | |||
1017 | isync /* mark the entry invalid */ | ||
1018 | ld r11,0(r10) | ||
1019 | rldicl r11,r11,56,1 /* clear the valid bit */ | ||
1020 | rotldi r11,r11,8 | ||
1021 | std r11,0(r10) | ||
1022 | sync | ||
1023 | |||
1024 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | ||
1025 | slbie r11 | ||
1026 | |||
1027 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | ||
1028 | eieio | ||
1029 | |||
1030 | mfspr r11,SPRN_DAR /* Get the new esid */ | ||
1031 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ | ||
1032 | ori r11,r11,0x90 /* Turn on valid and kp */ | ||
1033 | std r11,0(r10) /* Put new entry back into the stab */ | ||
1034 | |||
1035 | sync | ||
1036 | |||
1037 | /* All done -- return from exception. */ | ||
1038 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1039 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | ||
1040 | |||
1041 | andi. r10,r12,MSR_RI | ||
1042 | beq- unrecov_slb | ||
1043 | |||
1044 | mtcrf 0x80,r9 /* restore CR */ | ||
1045 | |||
1046 | mfmsr r10 | ||
1047 | clrrdi r10,r10,2 | ||
1048 | mtmsrd r10,1 | ||
1049 | |||
1050 | mtspr SPRN_SRR0,r11 | ||
1051 | mtspr SPRN_SRR1,r12 | ||
1052 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1053 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1054 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1055 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1056 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1057 | rfid | ||
1058 | b . /* prevent speculative execution */ | ||
1059 | |||
1060 | /* | ||
1061 | * Space for CPU0's segment table. | ||
1062 | * | ||
1063 | * On iSeries, the hypervisor must fill in at least one entry before | ||
1064 | * we get control (with relocate on). The address is given to the hv | ||
1065 | * as a page number (see xLparMap below), so this must be at a | ||
1066 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
1067 | * PAGE_SHIFT). | ||
1068 | */ | ||
1069 | . = STAB0_OFFSET /* 0x6000 */ | ||
1070 | .globl initial_stab | ||
1071 | initial_stab: | ||
1072 | .space 4096 | ||
1073 | |||
1074 | #ifdef CONFIG_PPC_PSERIES | ||
1075 | /* | ||
1076 | * Data area reserved for FWNMI option. | ||
1077 | * This address (0x7000) is fixed by the RPA. | ||
1078 | */ | ||
1079 | . = 0x7000 | ||
1080 | .globl fwnmi_data_area | ||
1081 | fwnmi_data_area: | ||
1082 | #endif /* CONFIG_PPC_PSERIES */ | ||
1083 | |||
1084 | /* iSeries does not use the FWNMI stuff, so it is safe to put | ||
1085 | * this here, even if we later allow kernels that will boot on | ||
1086 | * both pSeries and iSeries */ | ||
1087 | #ifdef CONFIG_PPC_ISERIES | ||
1088 | . = LPARMAP_PHYS | ||
1089 | .globl xLparMap | ||
1090 | xLparMap: | ||
1091 | .quad HvEsidsToMap /* xNumberEsids */ | ||
1092 | .quad HvRangesToMap /* xNumberRanges */ | ||
1093 | .quad STAB0_PAGE /* xSegmentTableOffs */ | ||
1094 | .zero 40 /* xRsvd */ | ||
1095 | /* xEsids (HvEsidsToMap entries of 2 quads) */ | ||
1096 | .quad PAGE_OFFSET_ESID /* xKernelEsid */ | ||
1097 | .quad PAGE_OFFSET_VSID /* xKernelVsid */ | ||
1098 | .quad VMALLOC_START_ESID /* xKernelEsid */ | ||
1099 | .quad VMALLOC_START_VSID /* xKernelVsid */ | ||
1100 | /* xRanges (HvRangesToMap entries of 3 quads) */ | ||
1101 | .quad HvPagesToMap /* xPages */ | ||
1102 | .quad 0 /* xOffset */ | ||
1103 | .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ | ||
1104 | |||
1105 | #endif /* CONFIG_PPC_ISERIES */ | ||
1106 | |||
1107 | #ifdef CONFIG_PPC_PSERIES | ||
1108 | . = 0x8000 | ||
1109 | #endif /* CONFIG_PPC_PSERIES */ | ||
1110 | 167 | ||
1111 | /* | 168 | /* |
1112 | * On pSeries and most other platforms, secondary processors spin | 169 | * On pSeries and most other platforms, secondary processors spin |