author     Michal Simek <monstr@monstr.eu>   2009-05-26 10:30:09 -0400
committer  Michal Simek <monstr@monstr.eu>   2009-05-26 10:45:14 -0400
commit     5846cc608fd42cd3645ff9f841888832c6ef9b6e (patch)
tree       d8927ce608c2a74bc471fe7500f5eff292098430 /arch/microblaze/kernel/head.S
parent     2c65b4665f3f1651a7ef652d86eeb23be95dcdb9 (diff)
microblaze_mmu_v2: MMU update for startup code
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze/kernel/head.S')
-rw-r--r--   arch/microblaze/kernel/head.S   190
1 file changed, 190 insertions, 0 deletions
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 319dc35fc922..e568d6ec621b 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -3,6 +3,26 @@
  * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
+ * MMU code derived from arch/ppc/kernel/head_4xx.S:
+ *	Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *		Initial PowerPC version.
+ *	Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *		Rewritten for PReP
+ *	Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *		Low-level exception handlers, MMU support, and rewrite.
+ *	Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *		PowerPC 8xx modifications.
+ *	Copyright (c) 1998-1999 TiVo, Inc.
+ *		PowerPC 403GCX modifications.
+ *	Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *		PowerPC 403GCX/405GP modifications.
+ *	Copyright 2000 MontaVista Software Inc.
+ *		PPC405 modifications
+ *		PowerPC 403GCX/405GP modifications.
+ *		Author: MontaVista Software, Inc.
+ *		frank_rowand@mvista.com or source@mvista.com
+ *		debbie_chu@mvista.com
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -12,6 +32,22 @@
 #include <asm/thread_info.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_MMU
+#include <asm/setup.h> /* COMMAND_LINE_SIZE */
+#include <asm/mmu.h>
+#include <asm/processor.h>
+
+.data
+.global empty_zero_page
+.align 12
+empty_zero_page:
+	.space 4096
+.global swapper_pg_dir
+swapper_pg_dir:
+	.space 4096
+
+#endif /* CONFIG_MMU */
+
 	.text
 ENTRY(_start)
 	mfs r1, rmsr
@@ -32,6 +68,123 @@ _copy_fdt:
 	addik r3, r3, -4 /* decrement loop */
 no_fdt_arg:
 
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_CMDLINE_BOOL
+/*
+ * Handle the command line:
+ * copy it to __init_end, where space is reserved for it.
+ */
+	or r6, r0, r0 /* increment */
+	ori r4, r0, __init_end /* load address of command line */
+	tophys(r4,r4) /* convert to phys address */
+	ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
+_copy_command_line:
+	lbu r7, r5, r6 /* r7=r5+r6 - r5 contains pointer to command line */
+	sb r7, r4, r6 /* addr[r4+r6] = r7 */
+	addik r6, r6, 1 /* increment counter */
+	bgtid r3, _copy_command_line /* loop for all entries */
+	addik r3, r3, -1 /* decrement loop */
+	addik r5, r4, 0 /* r5 points to the new command line copy */
+	tovirt(r5,r5)
+#endif /* CONFIG_CMDLINE_BOOL */
+
+#ifdef NOT_COMPILE
+/* save bram context */
+	or r6, r0, r0 /* increment */
+	ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
+	ori r3, r0, (LMB_SIZE - 4)
+_copy_bram:
+	lw r7, r0, r6 /* r7 = r0 + r6 */
+	sw r7, r4, r6 /* addr[r4 + r6] = r7 */
+	addik r6, r6, 4 /* increment counter */
+	bgtid r3, _copy_bram /* loop for all entries */
+	addik r3, r3, -4 /* decrement loop */
+#endif
+	/* We have to turn on the MMU right away. */
+
+	/*
+	 * Set up the initial MMU state so we can do the first level of
+	 * kernel initialization. This maps the first 32 MBytes of memory 1:1
+	 * virtual to physical.
+	 */
+	nop
+	addik r3, r0, 63 /* Invalidate all TLB entries */
+_invalidate:
+	mts rtlbx, r3
+	mts rtlbhi, r0 /* flush: ensure V is clear */
+	bgtid r3, _invalidate /* loop for all entries */
+	addik r3, r3, -1
+	/* sync */
+
+	/*
+	 * We should still be executing code at the physical address
+	 * RAM_BASEADDR at this point. However, kernel code is at
+	 * a virtual address. So, set up a TLB mapping to cover this once
+	 * translation is enabled.
+	 */
+
+	addik r3, r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
+	tophys(r4,r3) /* Load the kernel physical address */
+
+	mts rpid, r0 /* Load the kernel PID */
+	nop
+	bri 4
+
+	/*
+	 * Configure and load two entries into TLB slots 0 and 1.
+	 * In case we are pinning TLBs, these are reserved by the
+	 * other TLB functions. If not reserving, then it doesn't
+	 * matter where they are loaded.
+	 */
+	andi r4, r4, 0xfffffc00 /* Mask off the real page number */
+	ori r4, r4, (TLB_WR | TLB_EX) /* Set the write and execute bits */
+
+	andi r3, r3, 0xfffffc00 /* Mask off the effective page number */
+	ori r3, r3, (TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
+
+	mts rtlbx, r0 /* TLB slot 0 */
+
+	mts rtlblo, r4 /* Load the data portion of the entry */
+	mts rtlbhi, r3 /* Load the tag portion of the entry */
+
+	addik r4, r4, 0x01000000 /* Map the next 16 MB */
+	addik r3, r3, 0x01000000
+
+	ori r6, r0, 1 /* TLB slot 1 */
+	mts rtlbx, r6
+
+	mts rtlblo, r4 /* Load the data portion of the entry */
+	mts rtlbhi, r3 /* Load the tag portion of the entry */
+
+	/*
+	 * Load a TLB entry for LMB, since we need access to
+	 * the exception vectors, using a 4k real==virtual mapping.
+	 */
+	ori r6, r0, 3 /* TLB slot 3 */
+	mts rtlbx, r6
+
+	ori r4, r0, (TLB_WR | TLB_EX)
+	ori r3, r0, (TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+	mts rtlblo, r4 /* Load the data portion of the entry */
+	mts rtlbhi, r3 /* Load the tag portion of the entry */
+
+	/*
+	 * We now have the lower 32 MB of RAM mapped into TLB entries, and the
+	 * caches ready to work.
+	 */
+turn_on_mmu:
+	ori r15, r0, start_here
+	ori r4, r0, MSR_KERNEL_VMS
+	mts rmsr, r4
+	nop
+	rted r15, 0 /* enables MMU */
+	nop
+
+start_here:
+#endif /* CONFIG_MMU */
+
 	/* Initialize small data anchors */
 	la r13, r0, _KERNEL_SDA_BASE_
 	la r2, r0, _KERNEL_SDA2_BASE_
@@ -51,6 +204,43 @@ no_fdt_arg:
 	brald r15, r8
 	nop
 
+#ifndef CONFIG_MMU
 	la r15, r0, machine_halt
 	braid start_kernel
 	nop
+#else
+	/*
+	 * Initialize the MMU.
+	 */
+	bralid r15, mmu_init
+	nop
+
+	/* Go back to running unmapped so we can load up new values
+	 * and change to using our exception vectors.
+	 * On the MicroBlaze, all we do is invalidate the used TLB entries
+	 * to clear the old 16M byte TLB mappings.
+	 */
+	ori r15, r0, TOPHYS(kernel_load_context)
+	ori r4, r0, MSR_KERNEL
+	mts rmsr, r4
+	nop
+	bri 4
+	rted r15, 0
+	nop
+
+	/* Load up the kernel context */
+kernel_load_context:
+	# Keep entries 0 and 1 valid. Entry 3 mapped to LMB can go away.
+	ori r5, r0, 3
+	mts rtlbx, r5
+	nop
+	mts rtlbhi, r0
+	nop
+	addi r15, r0, machine_halt
+	ori r17, r0, start_kernel
+	ori r4, r0, MSR_KERNEL_VMS
+	mts rmsr, r4
+	nop
+	rted r17, 0 /* enable MMU and jump to start_kernel */
+	nop
+#endif /* CONFIG_MMU */
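
A few notes on the new code follow, with small C sketches where those read more easily than the assembly.

The .data additions reserve two page-aligned 4 KB objects: .align 12 forces 2^12 = 4096-byte alignment, and each .space 4096 reserves one page, giving the MMU code a zero page (empty_zero_page) and an initial page directory (swapper_pg_dir). Expressed as C, purely as a sketch; the real definitions are the assembly directives in the patch:

/* Sketch only: what the .data block above reserves. */
#define PAGE_SIZE 4096	/* .align 12 means 2^12-byte alignment */

static char empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static char swapper_pg_dir[PAGE_SIZE]  __attribute__((aligned(PAGE_SIZE)));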
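
The _copy_command_line loop copies the command line handed over in r5, one byte at a time, into the space after __init_end, then points r5 at the virtual address of the copy. A rough C equivalent, where relocate_cmdline is a made-up name used only for illustration and 256 merely stands in for the COMMAND_LINE_SIZE value that really comes from <asm/setup.h>:

#include <string.h>

#define COMMAND_LINE_SIZE 256	/* placeholder; the real value is in <asm/setup.h> */

static char *relocate_cmdline(const char *from_bootloader, char *init_end)
{
	/* byte-for-byte copy, which is all the lbu/sb loop does */
	memcpy(init_end, from_bootloader, COMMAND_LINE_SIZE);
	return init_end;	/* the caller continues with the copy */
}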
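
Before translation is switched on, the startup code pins three TLB entries: slots 0 and 1 map the first 32 MB of the kernel (CONFIG_KERNEL_START onto its physical address, one 16 MB entry each), and slot 3 adds a 4 KB real==virtual window so the LMB exception vectors stay reachable. Each entry is a pair of words: a data word holding the real page number plus TLB_WR and TLB_EX, and a tag word holding the effective page number plus TLB_VALID and the page size. A C sketch of that composition; the TLB_* and PAGESZ_* values below are placeholders, the real definitions come from asm/mmu.h, which the patch includes:

/* Placeholder bit values for illustration only; the real definitions are in
 * arch/microblaze/include/asm/mmu.h. */
#define TLB_WR		0x1UL
#define TLB_EX		0x2UL
#define TLB_VALID	0x4UL
#define TLB_PAGESZ(x)	((unsigned long)(x) << 3)
#define PAGESZ_4K	0
#define PAGESZ_16M	7

struct tlb_words {
	unsigned long lo;	/* data word: real page number + access bits */
	unsigned long hi;	/* tag word: effective page number + valid + size */
};

static struct tlb_words make_pinned_entry(unsigned long phys, unsigned long virt, int pagesz)
{
	struct tlb_words w;

	w.lo = (phys & 0xfffffc00UL) | TLB_WR | TLB_EX;			/* the andi/ori on r4 */
	w.hi = (virt & 0xfffffc00UL) | TLB_VALID | TLB_PAGESZ(pagesz);	/* the andi/ori on r3 */
	return w;
}

Slot 1 is simply slot 0 with both addresses advanced by 0x01000000 (16 MB), which is what the paired addik instructions do.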
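
Turning the MMU on and off follows the same pattern in both hunks: put the target address in a register, write the wanted mode into rmsr (MSR_KERNEL for untranslated, MSR_KERNEL_VMS for virtual mode), and rted to the target so the mode change takes effect at the jump (the rted lines are commented "enables MMU" above). After mmu_init() only the temporary slot 3 LMB mapping is dropped; slots 0 and 1 stay pinned so the kernel remains mapped. A loose sketch of that cleanup, with write_tlb_index() and write_tlb_tag() as hypothetical stand-ins for the mts rtlbx / mts rtlbhi instructions:

/* write_tlb_index()/write_tlb_tag() are hypothetical stand-ins for the
 * mts rtlbx / mts rtlbhi instructions; the patch defines no such C API. */
static void write_tlb_index(unsigned int slot) { (void)slot; }
static void write_tlb_tag(unsigned long tag)   { (void)tag; }

static void drop_temporary_lmb_mapping(void)
{
	/* Slots 0 and 1 (the pinned 32 MB kernel mapping) stay valid;
	 * only the 4 KB real==virtual LMB window in slot 3 is dropped. */
	write_tlb_index(3);
	write_tlb_tag(0);	/* clearing the tag word clears the valid bit */
}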