Diffstat (limited to 'arch/powerpc/kernel/head_fsl_booke.S')
-rw-r--r--	arch/powerpc/kernel/head_fsl_booke.S	99
1 file changed, 44 insertions(+), 55 deletions(-)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 1f155d399d57..4b9822728aea 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -2,27 +2,27 @@
  * Kernel execution entry point code.
  *
  * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
  *	Initial PowerPC version.
  * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
  *	Rewritten for PReP
  * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
  *	Low-level exception handers, MMU support, and rewrite.
  * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
  *	PowerPC 8xx modifications.
  * Copyright (c) 1998-1999 TiVo, Inc.
  *	PowerPC 403GCX modifications.
  * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
  *	PowerPC 403GCX/405GP modifications.
  * Copyright 2000 MontaVista Software Inc.
  *	PPC405 modifications
  *	PowerPC 403GCX/405GP modifications.
  * Author: MontaVista Software, Inc.
  *	frank_rowand@mvista.com or source@mvista.com
  *	debbie_chu@mvista.com
  * Copyright 2002-2004 MontaVista Software, Inc.
  *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
  * Copyright 2004 Freescale Semiconductor, Inc
  *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -52,9 +52,9 @@
  * r7 - End of kernel command line string
  *
  */
-	.text
-_GLOBAL(_stext)
-_GLOBAL(_start)
+	.section .text.head, "ax"
+_ENTRY(_stext);
+_ENTRY(_start);
 	/*
 	 * Reserve a word at a fixed location to store the address
 	 * of abatron_pteptrs
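A note on the switch from _GLOBAL() to _ENTRY() above: the two macros
differ in how they treat the current section. A minimal sketch of the
assumed expansions (the real definitions live in
include/asm-powerpc/ppc_asm.h and carry extra debug directives omitted
here):

	/* Sketch only, assumed shape of the ppc_asm.h helpers. */
	#define _ENTRY(n)	\
		.globl n;	\
	n:

	#define _GLOBAL(n)	\
		.text;		\
		.globl n;	\
	n:

Because _GLOBAL() begins with a .text directive, it would pull _stext and
_start back out of .text.head; _ENTRY() leaves the section alone, letting
the linker script keep this entry code at the front of the image.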
@@ -146,13 +146,13 @@ skpinv: addi r6,r6,1 /* Increment */
 	bne	1b			/* If not, repeat */
 
 /* Invalidate TLB0 */
 	li	r6,0x04
 	tlbivax 0,r6
 #ifdef CONFIG_SMP
 	tlbsync
 #endif
 /* Invalidate TLB1 */
 	li	r6,0x0c
 	tlbivax 0,r6
 #ifdef CONFIG_SMP
 	tlbsync
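For readers new to e500, the operand of tlbivax here is a command word,
not a translated address. A hedged sketch of the encoding these magic
constants rely on (bit positions as recalled from the e500 core manual;
verify before reuse):

	/* Assumed e500 tlbivax EA layout (sketch, not authoritative): */
	#define TLBIVAX_INV_ALL	0x04	/* invalidate whole selected array */
	#define TLBIVAX_TLBSEL1	0x08	/* 0 = TLB0 (set-assoc), 1 = TLB1 (CAM) */

So 0x04 flushes all of TLB0, 0x0c (0x08 | 0x04) flushes all of TLB1, and
the tlbsync under CONFIG_SMP orders the broadcast invalidation against
the other processors.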
@@ -211,7 +211,7 @@ skpinv: addi r6,r6,1 /* Increment */
 	mtspr	SPRN_MAS1,r6
 	tlbwe
 /* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax 0,r9
 #ifdef CONFIG_SMP
 	tlbsync
@@ -254,7 +254,7 @@ skpinv: addi r6,r6,1 /* Increment */
 	mtspr	SPRN_MAS1,r8
 	tlbwe
 /* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax 0,r9
 #ifdef CONFIG_SMP
 	tlbsync
@@ -294,7 +294,7 @@ skpinv: addi r6,r6,1 /* Increment */
 #ifdef CONFIG_E200
 	oris	r2,r2,MAS4_TLBSELD(1)@h
 #endif
 	mtspr	SPRN_MAS4, r2
 
 #if 0
 /* Enable DOZE */
@@ -305,7 +305,7 @@ skpinv: addi r6,r6,1 /* Increment */
 #ifdef CONFIG_E200
 	/* enable dedicated debug exception handling resources (Debug APU) */
 	mfspr	r2,SPRN_HID0
 	ori	r2,r2,HID0_DAPUEN@l
 	mtspr	SPRN_HID0,r2
 #endif
 
@@ -391,7 +391,7 @@ skpinv: addi r6,r6,1 /* Increment */
 #ifdef CONFIG_PTE_64BIT
 #define PTE_FLAGS_OFFSET	4
 #define FIND_PTE	\
 	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
 	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
 	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
 	beq	2f;			/* Bail if no table */		\
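The FIND_PTE macro is a hand-scheduled two-level page-table walk. A rough
C rendering of what the lines above compute, with the fault address from
r10 and the pgdir base from r11 (helper name and masks are illustrative,
not the kernel's):

	/* Sketch: index the pgd, extract the page-table base (the low
	 * 11 bits of the entry hold flags here, hence the 0, 0, 20 mask
	 * in rlwinm.), bail if the table is absent, then index the PTE. */
	static unsigned long *walk_pte(unsigned long *pgdir, unsigned long ea)
	{
		unsigned long pmd = pgdir[ea >> PGDIR_SHIFT];	/* lwzx   */
		unsigned long *pt = (unsigned long *)(pmd & ~0x7ffUL);
		if (!pt)
			return NULL;				/* beq 2f */
		return pt + ((ea >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
	}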
@@ -461,8 +461,7 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	lis	r11, TASK_SIZE@h
-	ori	r11, r11, TASK_SIZE@l
+	lis	r11, PAGE_OFFSET@h
 	cmplw	0, r10, r11
 	bge	2f
 
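Two things happen in this hunk and its twins below: the boundary for
"kernel address" becomes PAGE_OFFSET rather than TASK_SIZE, and since
PAGE_OFFSET's low half is zero the ori disappears, so the check costs a
single lis. In C terms the dispatch reads roughly (pointer names
assumed):

	/* Sketch of the test the lis/cmplw/bge sequence implements: */
	if (ea >= PAGE_OFFSET)		/* kernel half of the address space */
		pgdir = swapper_pg_dir;	/* kernel page tables */
	else
		pgdir = current_pgdir;	/* per-task tables, found via SPRG3 */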
@@ -487,7 +486,7 @@ interrupt_base:
 	 */
 	andi.	r11, r11, _PAGE_HWEXEC
 	rlwimi	r11, r11, 31, 27, 27	/* SX <- _PAGE_HWEXEC */
 	ori	r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
 
 	/* update search PID in MAS6, AS = 0 */
 	mfspr	r12, SPRN_PID0
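For reference, the permission-bit assembly above maps the Linux PTE onto
MAS3 like the following C sketch (bit names from the kernel headers,
logic inferred from the inline comments):

	/* andi. keeps _PAGE_HWEXEC, rlwimi rotates it into the SX slot,
	 * and the ori grants static user/supervisor read and write. */
	u32 mas3 = 0;
	if (pte & _PAGE_HWEXEC)
		mas3 |= MAS3_SX;
	mas3 |= MAS3_UW | MAS3_SW | MAS3_UR | MAS3_SR;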
@@ -584,8 +583,7 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	lis	r11, TASK_SIZE@h
-	ori	r11, r11, TASK_SIZE@l
+	lis	r11, PAGE_OFFSET@h
 	cmplw	5, r10, r11
 	blt	5, 3f
 	lis	r11, swapper_pg_dir@h
@@ -645,8 +643,7 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	lis	r11, TASK_SIZE@h
-	ori	r11, r11, TASK_SIZE@l
+	lis	r11, PAGE_OFFSET@h
 	cmplw	5, r10, r11
 	blt	5, 3f
 	lis	r11, swapper_pg_dir@h
@@ -694,7 +691,7 @@ interrupt_base:
 	START_EXCEPTION(SPEUnavailable)
 	NORMAL_EXCEPTION_PROLOG
 	bne	load_up_spe
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	EXC_XFER_EE_LITE(0x2010, KernelSPE)
 #else
 	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
@@ -741,10 +738,10 @@ data_access:
 
  * Both the instruction and data TLB miss get to this
  * point to load the TLB.
  *	r10 - EA of fault
  *	r11 - TLB (info from Linux PTE)
  *	r12, r13 - available to use
- *	CR5 - results of addr < TASK_SIZE
+ *	CR5 - results of addr >= PAGE_OFFSET
  *	MAS0, MAS1 - loaded with proper value when we get here
  *	MAS2, MAS3 - will need additional info from Linux PTE
  *	Upon exit, we reload everything and RFI.
@@ -813,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
 	lwz	r13, tlbcam_index@l(r13)
 	rlwimi	r12, r13, 0, 20, 31
 7:
 	mtspr	SPRN_MAS0,r12
 #endif /* CONFIG_E200 */
 
 	tlbwe
@@ -855,17 +852,17 @@ load_up_spe:
 	beq	1f
 	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
 	SAVE_32EVRS(0,r10,r4)
 	evxor	evr10, evr10, evr10	/* clear out evr10 */
 	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
 	li	r5,THREAD_ACC
 	evstddx	evr10, r4, r5		/* save off accumulator */
 	lwz	r5,PT_REGS(r4)
 	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	lis	r10,MSR_SPE@h
 	andc	r4,r4,r10	/* disable SPE for previous task */
 	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 	/* enable use of SPE after return */
 	oris	r9,r9,MSR_SPE@h
 	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
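This block is the classic lazy-switch policy for the SPE unit on
uniprocessor kernels, which is why the comment now reads !CONFIG_SMP: the
guarded code only exists when CONFIG_SMP is not defined. A rough C
rendering (save_evrs_and_acc is a made-up name standing in for
SAVE_32EVRS plus the accumulator spill):

	/* Sketch of the UP lazy-SPE handoff performed above: */
	if (last_task_used_spe) {
		struct thread_struct *t = &last_task_used_spe->thread;
		save_evrs_and_acc(t);		/* park the old owner's state */
		t->regs->msr &= ~MSR_SPE;	/* it re-faults on next SPE use */
	}
	regs->msr |= MSR_SPE;			/* current task owns SPE after rfi */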
@@ -878,7 +875,7 @@ load_up_spe:
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	stw	r4,last_task_used_spe@l(r3)
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 	/* restore registers and return */
 2:	REST_4GPRS(3, r11)
 	lwz	r10,_CCR(r11)
@@ -963,10 +960,10 @@ _GLOBAL(giveup_spe)
 	lwz	r5,PT_REGS(r3)
 	cmpi	0,r5,0
 	SAVE_32EVRS(0, r4, r3)
 	evxor	evr6, evr6, evr6	/* clear out evr6 */
 	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
 	li	r4,THREAD_ACC
 	evstddx	evr6, r4, r3		/* save off accumulator */
 	mfspr	r6,SPRN_SPEFSCR
 	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
 	beq	1f
@@ -979,7 +976,7 @@ _GLOBAL(giveup_spe)
 	li	r5,0
 	lis	r4,last_task_used_spe@ha
 	stw	r5,last_task_used_spe@l(r4)
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 	blr
 #endif /* CONFIG_SPE */
 
@@ -1000,15 +997,15 @@ _GLOBAL(giveup_fpu)
  */
 _GLOBAL(abort)
 	li	r13,0
 	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
 	isync
 	mfmsr	r13
 	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
 	mtmsr	r13
 	isync
 	mfspr	r13,SPRN_DBCR0
 	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
 	mtspr	SPRN_DBCR0,r13
 	isync
 
 _GLOBAL(set_context)
@@ -1043,21 +1040,13 @@ swapper_pg_dir:
 /* Reserved 4k for the critical exception stack & 4k for the machine
  * check stack per CPU for kernel mode exceptions */
 	.section .bss
 	.align	12
 exception_stack_bottom:
 	.space	BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
 	.globl	exception_stack_top
 exception_stack_top:
 
 /*
- * This space gets a copy of optional info passed to us by the bootstrap
- * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
- */
-	.globl	cmd_line
-cmd_line:
-	.space	512
-
-/*
  * Room for two PTE pointers, usually the kernel and current user pointers
  * to their respective root page table.
  */
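The last hunk's substance is the removal of the in-image cmd_line buffer.
Presumably (this diff only shows the deletion) the buffer is now supplied
from C instead, along the lines of:

	/* Assumed replacement, defined in common C setup code: */
	char cmd_line[COMMAND_LINE_SIZE];	/* boot args, e.g. root=/dev/sda1 */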