Diffstat (limited to 'arch/arm/kernel/head.S')
 -rw-r--r--  arch/arm/kernel/head.S | 250
 1 file changed, 139 insertions(+), 111 deletions(-)

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index b44d21e1e34..767390449e0 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -98,113 +98,15 @@ ENTRY(stext)
  * above.  On return, the CPU will be ready for the MMU to be
  * turned on, and r0 will hold the CPU control register value.
  */
-	ldr	r13, __switch_data		@ address to jump to after
+	ldr	r13, =__mmap_switched		@ address to jump to after
 						@ mmu has been enabled
-	adr	lr, BSYM(__enable_mmu)		@ return (PIC) address
+	adr	lr, BSYM(1f)			@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
+1:	b	__enable_mmu
 ENDPROC(stext)
+	.ltorg
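
The change above parks the post-MMU continuation (__mmap_switched) in r13 via a literal-pool load (hence the added .ltorg after ENDPROC) and makes the per-CPU init function return to the local label 1:, which branches to __enable_mmu. A toy C model of that hand-off, purely illustrative; none of the names or values below come from the kernel beyond the comments in the diff:

    #include <stdio.h>

    typedef void (*continuation_t)(void);

    static continuation_t parked;                 /* plays the role of r13 */

    static void mmap_switched(void)
    {
        puts("MMU on, continuing at __mmap_switched");
    }

    static unsigned long cpu_init(void)           /* PROCINFO_INITFUNC */
    {
        return 0x1005;                            /* pretend CP15 control value */
    }

    static void enable_mmu(unsigned long ctrl)    /* __enable_mmu + __turn_mmu_on */
    {
        (void)ctrl;                               /* would be written to CP15 c1 here */
        parked();                                 /* jump to the parked address */
    }

    int main(void)
    {
        parked = mmap_switched;                   /* ldr r13, =__mmap_switched */
        enable_mmu(cpu_init());                   /* 1: b __enable_mmu */
        return 0;
    }
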
-
-#if defined(CONFIG_SMP)
-ENTRY(secondary_startup)
-	/*
-	 * Common entry point for secondary CPUs.
-	 *
-	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
-	 * the processor type - there is no need to check the machine type
-	 * as it has already been validated by the primary processor.
-	 */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
-	mrc	p15, 0, r9, c0, c0		@ get processor id
-	bl	__lookup_processor_type
-	movs	r10, r5				@ invalid processor?
-	moveq	r0, #'p'			@ yes, error 'p'
-	beq	__error
-
-	/*
-	 * Use the page tables supplied from __cpu_up.
-	 */
-	adr	r4, __secondary_data
-	ldmia	r4, {r5, r7, r12}		@ address to jump to after
-	sub	r4, r4, r5			@ mmu has been enabled
-	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
-	adr	lr, BSYM(__enable_mmu)		@ return address
-	mov	r13, r12			@ __secondary_switched address
- ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
-						  @ (return control reg)
- THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
- THUMB(	mov	pc, r12				)
-ENDPROC(secondary_startup)
-
-	/*
-	 * r6  = &secondary_data
-	 */
-ENTRY(__secondary_switched)
-	ldr	sp, [r7, #4]			@ get secondary_data.stack
-	mov	fp, #0
-	b	secondary_start_kernel
-ENDPROC(__secondary_switched)
-
-	.type	__secondary_data, %object
-__secondary_data:
-	.long	.
-	.long	secondary_data
-	.long	__secondary_switched
-#endif /* defined(CONFIG_SMP) */
155 | |||
156 | |||
157 | |||
158 | /* | ||
159 | * Setup common bits before finally enabling the MMU. Essentially | ||
160 | * this is just loading the page table pointer and domain access | ||
161 | * registers. | ||
162 | */ | ||
163 | __enable_mmu: | ||
164 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
165 | orr r0, r0, #CR_A | ||
166 | #else | ||
167 | bic r0, r0, #CR_A | ||
168 | #endif | ||
169 | #ifdef CONFIG_CPU_DCACHE_DISABLE | ||
170 | bic r0, r0, #CR_C | ||
171 | #endif | ||
172 | #ifdef CONFIG_CPU_BPREDICT_DISABLE | ||
173 | bic r0, r0, #CR_Z | ||
174 | #endif | ||
175 | #ifdef CONFIG_CPU_ICACHE_DISABLE | ||
176 | bic r0, r0, #CR_I | ||
177 | #endif | ||
178 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | ||
179 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | ||
180 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | ||
181 | domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | ||
182 | mcr p15, 0, r5, c3, c0, 0 @ load domain access register | ||
183 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | ||
184 | b __turn_mmu_on | ||
185 | ENDPROC(__enable_mmu) | ||
186 | |||
-/*
- * Enable the MMU.  This completely changes the structure of the visible
- * memory space.  You will not be able to trace execution through this.
- * If you have an enquiry about this, *please* check the linux-arm-kernel
- * mailing list archives BEFORE sending another post to the list.
- *
- *  r0  = cp#15 control register
- *  r13 = *virtual* address to jump to upon completion
- *
- * other registers depend on the function called upon completion
- */
-	.align	5
-__turn_mmu_on:
-	mov	r0, r0
-	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
-	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
-	mov	r3, r3
-	mov	r3, r13
-	mov	pc, r3
-ENDPROC(__turn_mmu_on)
-
 
 /*
  * Setup the initial page tables.  We only setup the barest
@@ -216,7 +118,7 @@ ENDPROC(__turn_mmu_on)
  * r10 = procinfo
  *
  * Returns:
- *  r0, r3, r6, r7 corrupted
+ *  r0, r3, r5-r7 corrupted
  *  r4 = physical page table address
  */
 __create_page_tables:
@@ -238,20 +140,30 @@ __create_page_tables:
 	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
 
 	/*
-	 * Create identity mapping for first MB of kernel to
-	 * cater for the MMU enable.  This identity mapping
-	 * will be removed by paging_init().  We use our current program
-	 * counter to determine corresponding section base address.
+	 * Create identity mapping to cater for __enable_mmu.
+	 * This identity mapping will be removed by paging_init().
 	 */
-	mov	r6, pc
-	mov	r6, r6, lsr #20			@ start of kernel section
-	orr	r3, r7, r6, lsl #20		@ flags + kernel base
-	str	r3, [r4, r6, lsl #2]		@ identity mapping
+	adr	r0, __enable_mmu_loc
+	ldmia	r0, {r3, r5, r6}
+	sub	r0, r0, r3			@ virt->phys offset
+	add	r5, r5, r0			@ phys __enable_mmu
+	add	r6, r6, r0			@ phys __enable_mmu_end
+	mov	r5, r5, lsr #20
+	mov	r6, r6, lsr #20
+
+1:	orr	r3, r7, r5, lsl #20		@ flags + kernel base
+	str	r3, [r4, r5, lsl #2]		@ identity mapping
+	teq	r5, r6
+	addne	r5, r5, #1			@ next section
+	bne	1b
 
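The replacement loop above covers every 1MB section spanned by __enable_mmu..__enable_mmu_end rather than only the section containing the current pc: the physical start and end addresses are shifted right by 20 to get section indices, and one first-level descriptor (mm_mmuflags OR'd with the section base) is stored per index, 4 bytes apart. A minimal C sketch of the same arithmetic, with made-up addresses and flag values:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTION_SHIFT 20                            /* 1MB sections */

    static uint32_t pgd[4096];                          /* stands in for r4, the 16KB table */

    static void identity_map(uint32_t phys_start, uint32_t phys_end, uint32_t mmuflags)
    {
        uint32_t s = phys_start >> SECTION_SHIFT;       /* mov r5, r5, lsr #20 */
        uint32_t e = phys_end   >> SECTION_SHIFT;       /* mov r6, r6, lsr #20 */

        do {
            /* orr r3, r7, r5, lsl #20 ; str r3, [r4, r5, lsl #2] */
            pgd[s] = mmuflags | (s << SECTION_SHIFT);
        } while (s++ != e);                             /* teq / addne / bne 1b */
    }

    int main(void)
    {
        identity_map(0x80008000u, 0x800082ffu, 0xc0eu); /* example values only */
        printf("pgd[0x%03x] = 0x%08x\n",
               0x80008000u >> SECTION_SHIFT, pgd[0x80008000u >> SECTION_SHIFT]);
        return 0;
    }
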
 	/*
 	 * Now setup the pagetables for our kernel direct
 	 * mapped region.
 	 */
+	mov	r3, pc
+	mov	r3, r3, lsr #20
+	orr	r3, r7, r3, lsl #20
 	add	r0, r4, #(KERNEL_START & 0xff000000) >> 18
 	str	r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
 	ldr	r6, =(KERNEL_END - 1)
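
In the context lines above, the first-level entry for a virtual address VA sits at byte offset (VA >> 20) * 4 from the table base; that equals ((VA & 0xff000000) >> 18) + ((VA & 0x00f00000) >> 18), and splitting the mask this way lets each half be encoded as an ARM immediate. A small check of that identity with an illustrative KERNEL_START (the real value depends on PAGE_OFFSET and TEXT_OFFSET):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t kernel_start = 0xc0008000u;    /* illustrative, not authoritative */

        uint32_t entry_offset = (kernel_start >> 20) * 4;   /* section index * 4 bytes */
        uint32_t split        = ((kernel_start & 0xff000000u) >> 18) +
                                ((kernel_start & 0x00f00000u) >> 18);

        assert(entry_offset == split);
        printf("KERNEL_START 0x%08x -> table byte offset 0x%x\n",
               kernel_start, entry_offset);
        return 0;
    }
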
@@ -335,6 +247,122 @@ __create_page_tables:
 	mov	pc, lr
 ENDPROC(__create_page_tables)
 	.ltorg
+__enable_mmu_loc:
+	.long	.
+	.long	__enable_mmu
+	.long	__enable_mmu_end
+
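__enable_mmu_loc is the usual head.S idiom for translating link-time (virtual) addresses into run-time (physical) ones before the MMU is on: ".long ." records the table's own virtual address, adr yields its current physical address, and the difference is the virt->phys delta applied to the other entries. A hedged C model of that delta calculation; all addresses below are illustrative placeholders:

    #include <stdint.h>
    #include <stdio.h>

    /* Pretend link-time (virtual) and run-time (physical) addresses. */
    #define VIRT_ANCHOR  0xc0008180u   /* what ".long ." records at link time */
    #define PHYS_ANCHOR  0x80008180u   /* what "adr" sees before the MMU is on */
    #define VIRT_ENABLE  0xc00081a0u   /* link-time address of __enable_mmu */

    int main(void)
    {
        /* sub r0, r0, r3: run-time minus link-time address of the anchor. */
        int32_t delta = (int32_t)(PHYS_ANCHOR - VIRT_ANCHOR);

        /* add r5, r5, r0: rebase any other link-time address by the same delta. */
        uint32_t phys_enable = VIRT_ENABLE + (uint32_t)delta;

        printf("virt->phys delta  = 0x%08x\n", (uint32_t)delta);
        printf("phys __enable_mmu = 0x%08x\n", phys_enable);
        return 0;
    }
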
+#if defined(CONFIG_SMP)
+	__CPUINIT
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+	bl	__lookup_processor_type
+	movs	r10, r5				@ invalid processor?
+	moveq	r0, #'p'			@ yes, error 'p'
+	beq	__error_p
+
+	/*
+	 * Use the page tables supplied from __cpu_up.
+	 */
+	adr	r4, __secondary_data
+	ldmia	r4, {r5, r7, r12}		@ address to jump to after
+	sub	r4, r4, r5			@ mmu has been enabled
+	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
+	adr	lr, BSYM(__enable_mmu)		@ return address
+	mov	r13, r12			@ __secondary_switched address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
+						  @ (return control reg)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
+ENDPROC(secondary_startup)
+
+	/*
+	 * r6  = &secondary_data
+	 */
+ENTRY(__secondary_switched)
+	ldr	sp, [r7, #4]			@ get secondary_data.stack
+	mov	fp, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	.
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* defined(CONFIG_SMP) */
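
secondary_startup reuses the same anchor trick: __secondary_data records its own link-time address plus the addresses of secondary_data and __secondary_switched, so the physical address of secondary_data can be derived and its pgdir read before the MMU is on, while the stack is read afterwards at offset 4. A rough C illustration; the struct layout below merely mirrors the offsets used by the assembly, and every address is made up:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Layout implied by the assembly: pgdir at +0 ("ldr r4, [r7, r4]"),
     * stack at +4 ("ldr sp, [r7, #4]").  Field names follow the comments. */
    struct secondary_data {
        uint32_t pgdir;
        uint32_t stack;
    };

    #define VIRT_TABLE 0xc0008200u   /* value recorded by ".long ." */
    #define PHYS_TABLE 0x80008200u   /* value produced by "adr r4, __secondary_data" */

    int main(void)
    {
        uint32_t virt_sd = 0xc0571f40u;                       /* &secondary_data, virtual */
        int32_t  delta   = (int32_t)(PHYS_TABLE - VIRT_TABLE);/* sub r4, r4, r5 */
        uint32_t phys_sd = virt_sd + (uint32_t)delta;         /* where pgdir is fetched pre-MMU */

        printf("secondary_data: virt 0x%08x, phys 0x%08x\n", virt_sd, phys_sd);
        printf("pgdir offset %zu, stack offset %zu\n",
               offsetof(struct secondary_data, pgdir),
               offsetof(struct secondary_data, stack));
        return 0;
    }
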
302 | |||
303 | |||
304 | |||
305 | /* | ||
306 | * Setup common bits before finally enabling the MMU. Essentially | ||
307 | * this is just loading the page table pointer and domain access | ||
308 | * registers. | ||
309 | * | ||
310 | * r0 = cp#15 control register | ||
311 | * r1 = machine ID | ||
312 | * r2 = atags pointer | ||
313 | * r4 = page table pointer | ||
314 | * r9 = processor ID | ||
315 | * r13 = *virtual* address to jump to upon completion | ||
316 | */ | ||
317 | __enable_mmu: | ||
318 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
319 | orr r0, r0, #CR_A | ||
320 | #else | ||
321 | bic r0, r0, #CR_A | ||
322 | #endif | ||
323 | #ifdef CONFIG_CPU_DCACHE_DISABLE | ||
324 | bic r0, r0, #CR_C | ||
325 | #endif | ||
326 | #ifdef CONFIG_CPU_BPREDICT_DISABLE | ||
327 | bic r0, r0, #CR_Z | ||
328 | #endif | ||
329 | #ifdef CONFIG_CPU_ICACHE_DISABLE | ||
330 | bic r0, r0, #CR_I | ||
331 | #endif | ||
332 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | ||
333 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | ||
334 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | ||
335 | domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | ||
336 | mcr p15, 0, r5, c3, c0, 0 @ load domain access register | ||
337 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | ||
338 | b __turn_mmu_on | ||
339 | ENDPROC(__enable_mmu) | ||
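
The DACR value loaded into r5 packs a 2-bit access type per domain; domain_val(dom, type) is the type shifted left by twice the domain number. A quick C reconstruction of that value, with the domain numbers and access codes written out as assumptions (the authoritative definitions live in arch/arm/include/asm/domain.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed encodings mirroring asm/domain.h of this era: each domain owns a
     * 2-bit field in the DACR, and domain_val(dom, type) == type << (2 * dom). */
    #define DOMAIN_KERNEL  0
    #define DOMAIN_TABLE   0
    #define DOMAIN_USER    1
    #define DOMAIN_IO      2

    #define DOMAIN_CLIENT  1   /* accesses checked against page table permissions */
    #define DOMAIN_MANAGER 3   /* accesses never faulted */

    #define domain_val(dom, type)  ((uint32_t)(type) << (2 * (dom)))

    int main(void)
    {
        uint32_t dacr = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |
                        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
                        domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) |
                        domain_val(DOMAIN_IO, DOMAIN_CLIENT);

        printf("DACR = 0x%08x\n", dacr);   /* 0x0000001f with the values above */
        return 0;
    }
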
+
+/*
+ * Enable the MMU.  This completely changes the structure of the visible
+ * memory space.  You will not be able to trace execution through this.
+ * If you have an enquiry about this, *please* check the linux-arm-kernel
+ * mailing list archives BEFORE sending another post to the list.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r2  = atags pointer
+ *  r9  = processor ID
+ *  r13 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
+ */
+	.align	5
+__turn_mmu_on:
+	mov	r0, r0
+	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
+	mov	r3, r3
+	mov	r3, r13
+	mov	pc, r3
+__enable_mmu_end:
+ENDPROC(__turn_mmu_on)
+
 
 #ifdef CONFIG_SMP_ON_UP
 __fixup_smp: