diff options
Diffstat (limited to 'arch/arm/kernel/head.S')
-rw-r--r-- | arch/arm/kernel/head.S | 323 |
1 file changed, 208 insertions, 115 deletions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index eb62bf947212..dd6b369ac69c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -22,6 +22,10 @@ | |||
22 | #include <asm/thread_info.h> | 22 | #include <asm/thread_info.h> |
23 | #include <asm/system.h> | 23 | #include <asm/system.h> |
24 | 24 | ||
25 | #ifdef CONFIG_DEBUG_LL | ||
26 | #include <mach/debug-macro.S> | ||
27 | #endif | ||
28 | |||
25 | #if (PHYS_OFFSET & 0x001fffff) | 29 | #if (PHYS_OFFSET & 0x001fffff) |
26 | #error "PHYS_OFFSET must be at an even 2MiB boundary!" | 30 | #error "PHYS_OFFSET must be at an even 2MiB boundary!" |
27 | #endif | 31 | #endif |
@@ -86,6 +90,9 @@ ENTRY(stext) | |||
86 | movs r8, r5 @ invalid machine (r5=0)? | 90 | movs r8, r5 @ invalid machine (r5=0)? |
87 | beq __error_a @ yes, error 'a' | 91 | beq __error_a @ yes, error 'a' |
88 | bl __vet_atags | 92 | bl __vet_atags |
93 | #ifdef CONFIG_SMP_ON_UP | ||
94 | bl __fixup_smp | ||
95 | #endif | ||
89 | bl __create_page_tables | 96 | bl __create_page_tables |
90 | 97 | ||
91 | /* | 98 | /* |
@@ -95,113 +102,15 @@ ENTRY(stext) | |||
95 | * above. On return, the CPU will be ready for the MMU to be | 102 | * above. On return, the CPU will be ready for the MMU to be |
96 | * turned on, and r0 will hold the CPU control register value. | 103 | * turned on, and r0 will hold the CPU control register value. |
97 | */ | 104 | */ |
98 | ldr r13, __switch_data @ address to jump to after | 105 | ldr r13, =__mmap_switched @ address to jump to after |
99 | @ mmu has been enabled | 106 | @ mmu has been enabled |
100 | adr lr, BSYM(__enable_mmu) @ return (PIC) address | 107 | adr lr, BSYM(1f) @ return (PIC) address |
101 | ARM( add pc, r10, #PROCINFO_INITFUNC ) | 108 | ARM( add pc, r10, #PROCINFO_INITFUNC ) |
102 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | 109 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) |
103 | THUMB( mov pc, r12 ) | 110 | THUMB( mov pc, r12 ) |
111 | 1: b __enable_mmu | ||
104 | ENDPROC(stext) | 112 | ENDPROC(stext) |
105 | 113 | .ltorg | |
106 | #if defined(CONFIG_SMP) | ||
107 | ENTRY(secondary_startup) | ||
108 | /* | ||
109 | * Common entry point for secondary CPUs. | ||
110 | * | ||
111 | * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | ||
112 | * the processor type - there is no need to check the machine type | ||
113 | * as it has already been validated by the primary processor. | ||
114 | */ | ||
115 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | ||
116 | mrc p15, 0, r9, c0, c0 @ get processor id | ||
117 | bl __lookup_processor_type | ||
118 | movs r10, r5 @ invalid processor? | ||
119 | moveq r0, #'p' @ yes, error 'p' | ||
120 | beq __error | ||
121 | |||
122 | /* | ||
123 | * Use the page tables supplied from __cpu_up. | ||
124 | */ | ||
125 | adr r4, __secondary_data | ||
126 | ldmia r4, {r5, r7, r12} @ address to jump to after | ||
127 | sub r4, r4, r5 @ mmu has been enabled | ||
128 | ldr r4, [r7, r4] @ get secondary_data.pgdir | ||
129 | adr lr, BSYM(__enable_mmu) @ return address | ||
130 | mov r13, r12 @ __secondary_switched address | ||
131 | ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor | ||
132 | @ (return control reg) | ||
133 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | ||
134 | THUMB( mov pc, r12 ) | ||
135 | ENDPROC(secondary_startup) | ||
136 | |||
137 | /* | ||
138 | * r6 = &secondary_data | ||
139 | */ | ||
140 | ENTRY(__secondary_switched) | ||
141 | ldr sp, [r7, #4] @ get secondary_data.stack | ||
142 | mov fp, #0 | ||
143 | b secondary_start_kernel | ||
144 | ENDPROC(__secondary_switched) | ||
145 | |||
146 | .type __secondary_data, %object | ||
147 | __secondary_data: | ||
148 | .long . | ||
149 | .long secondary_data | ||
150 | .long __secondary_switched | ||
151 | #endif /* defined(CONFIG_SMP) */ | ||
152 | |||
153 | |||
154 | |||
155 | /* | ||
156 | * Setup common bits before finally enabling the MMU. Essentially | ||
157 | * this is just loading the page table pointer and domain access | ||
158 | * registers. | ||
159 | */ | ||
160 | __enable_mmu: | ||
161 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
162 | orr r0, r0, #CR_A | ||
163 | #else | ||
164 | bic r0, r0, #CR_A | ||
165 | #endif | ||
166 | #ifdef CONFIG_CPU_DCACHE_DISABLE | ||
167 | bic r0, r0, #CR_C | ||
168 | #endif | ||
169 | #ifdef CONFIG_CPU_BPREDICT_DISABLE | ||
170 | bic r0, r0, #CR_Z | ||
171 | #endif | ||
172 | #ifdef CONFIG_CPU_ICACHE_DISABLE | ||
173 | bic r0, r0, #CR_I | ||
174 | #endif | ||
175 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | ||
176 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | ||
177 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | ||
178 | domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | ||
179 | mcr p15, 0, r5, c3, c0, 0 @ load domain access register | ||
180 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | ||
181 | b __turn_mmu_on | ||
182 | ENDPROC(__enable_mmu) | ||
183 | |||
184 | /* | ||
185 | * Enable the MMU. This completely changes the structure of the visible | ||
186 | * memory space. You will not be able to trace execution through this. | ||
187 | * If you have an enquiry about this, *please* check the linux-arm-kernel | ||
188 | * mailing list archives BEFORE sending another post to the list. | ||
189 | * | ||
190 | * r0 = cp#15 control register | ||
191 | * r13 = *virtual* address to jump to upon completion | ||
192 | * | ||
193 | * other registers depend on the function called upon completion | ||
194 | */ | ||
195 | .align 5 | ||
196 | __turn_mmu_on: | ||
197 | mov r0, r0 | ||
198 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | ||
199 | mrc p15, 0, r3, c0, c0, 0 @ read id reg | ||
200 | mov r3, r3 | ||
201 | mov r3, r13 | ||
202 | mov pc, r3 | ||
203 | ENDPROC(__turn_mmu_on) | ||
204 | |||
205 | 114 | ||
206 | /* | 115 | /* |
207 | * Setup the initial page tables. We only setup the barest | 116 | * Setup the initial page tables. We only setup the barest |
@@ -213,7 +122,7 @@ ENDPROC(__turn_mmu_on) | |||
213 | * r10 = procinfo | 122 | * r10 = procinfo |
214 | * | 123 | * |
215 | * Returns: | 124 | * Returns: |
216 | * r0, r3, r6, r7 corrupted | 125 | * r0, r3, r5-r7 corrupted |
217 | * r4 = physical page table address | 126 | * r4 = physical page table address |
218 | */ | 127 | */ |
219 | __create_page_tables: | 128 | __create_page_tables: |
@@ -235,20 +144,30 @@ __create_page_tables: | |||
235 | ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags | 144 | ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags |
236 | 145 | ||
237 | /* | 146 | /* |
238 | * Create identity mapping for first MB of kernel to | 147 | * Create identity mapping to cater for __enable_mmu. |
239 | * cater for the MMU enable. This identity mapping | 148 | * This identity mapping will be removed by paging_init(). |
240 | * will be removed by paging_init(). We use our current program | ||
241 | * counter to determine corresponding section base address. | ||
242 | */ | 149 | */ |
243 | mov r6, pc | 150 | adr r0, __enable_mmu_loc |
244 | mov r6, r6, lsr #20 @ start of kernel section | 151 | ldmia r0, {r3, r5, r6} |
245 | orr r3, r7, r6, lsl #20 @ flags + kernel base | 152 | sub r0, r0, r3 @ virt->phys offset |
246 | str r3, [r4, r6, lsl #2] @ identity mapping | 153 | add r5, r5, r0 @ phys __enable_mmu |
154 | add r6, r6, r0 @ phys __enable_mmu_end | ||
155 | mov r5, r5, lsr #20 | ||
156 | mov r6, r6, lsr #20 | ||
157 | |||
158 | 1: orr r3, r7, r5, lsl #20 @ flags + kernel base | ||
159 | str r3, [r4, r5, lsl #2] @ identity mapping | ||
160 | teq r5, r6 | ||
161 | addne r5, r5, #1 @ next section | ||
162 | bne 1b | ||
247 | 163 | ||
248 | /* | 164 | /* |
249 | * Now setup the pagetables for our kernel direct | 165 | * Now setup the pagetables for our kernel direct |
250 | * mapped region. | 166 | * mapped region. |
251 | */ | 167 | */ |
168 | mov r3, pc | ||
169 | mov r3, r3, lsr #20 | ||
170 | orr r3, r7, r3, lsl #20 | ||
252 | add r0, r4, #(KERNEL_START & 0xff000000) >> 18 | 171 | add r0, r4, #(KERNEL_START & 0xff000000) >> 18 |
253 | str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]! | 172 | str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]! |
254 | ldr r6, =(KERNEL_END - 1) | 173 | ldr r6, =(KERNEL_END - 1) |
@@ -289,24 +208,35 @@ __create_page_tables: | |||
289 | str r6, [r0] | 208 | str r6, [r0] |
290 | 209 | ||
291 | #ifdef CONFIG_DEBUG_LL | 210 | #ifdef CONFIG_DEBUG_LL |
292 | ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags | 211 | #ifndef CONFIG_DEBUG_ICEDCC |
293 | /* | 212 | /* |
294 | * Map in IO space for serial debugging. | 213 | * Map in IO space for serial debugging. |
295 | * This allows debug messages to be output | 214 | * This allows debug messages to be output |
296 | * via a serial console before paging_init. | 215 | * via a serial console before paging_init. |
297 | */ | 216 | */ |
298 | ldr r3, [r8, #MACHINFO_PGOFFIO] | 217 | addruart r7, r3 |
218 | |||
219 | mov r3, r3, lsr #20 | ||
220 | mov r3, r3, lsl #2 | ||
221 | |||
299 | add r0, r4, r3 | 222 | add r0, r4, r3 |
300 | rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long) | 223 | rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long) |
301 | cmp r3, #0x0800 @ limit to 512MB | 224 | cmp r3, #0x0800 @ limit to 512MB |
302 | movhi r3, #0x0800 | 225 | movhi r3, #0x0800 |
303 | add r6, r0, r3 | 226 | add r6, r0, r3 |
304 | ldr r3, [r8, #MACHINFO_PHYSIO] | 227 | mov r3, r7, lsr #20 |
305 | orr r3, r3, r7 | 228 | ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags |
229 | orr r3, r7, r3, lsl #20 | ||
306 | 1: str r3, [r0], #4 | 230 | 1: str r3, [r0], #4 |
307 | add r3, r3, #1 << 20 | 231 | add r3, r3, #1 << 20 |
308 | teq r0, r6 | 232 | teq r0, r6 |
309 | bne 1b | 233 | bne 1b |
234 | |||
235 | #else /* CONFIG_DEBUG_ICEDCC */ | ||
236 | /* we don't need any serial debugging mappings for ICEDCC */ | ||
237 | ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags | ||
238 | #endif /* !CONFIG_DEBUG_ICEDCC */ | ||
239 | |||
310 | #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) | 240 | #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) |
311 | /* | 241 | /* |
312 | * If we're using the NetWinder or CATS, we also need to map | 242 | * If we're using the NetWinder or CATS, we also need to map |
@@ -332,5 +262,168 @@ __create_page_tables: | |||
332 | mov pc, lr | 262 | mov pc, lr |
333 | ENDPROC(__create_page_tables) | 263 | ENDPROC(__create_page_tables) |
334 | .ltorg | 264 | .ltorg |
265 | __enable_mmu_loc: | ||
266 | .long . | ||
267 | .long __enable_mmu | ||
268 | .long __enable_mmu_end | ||
269 | |||
270 | #if defined(CONFIG_SMP) | ||
271 | __CPUINIT | ||
272 | ENTRY(secondary_startup) | ||
273 | /* | ||
274 | * Common entry point for secondary CPUs. | ||
275 | * | ||
276 | * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | ||
277 | * the processor type - there is no need to check the machine type | ||
278 | * as it has already been validated by the primary processor. | ||
279 | */ | ||
280 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | ||
281 | mrc p15, 0, r9, c0, c0 @ get processor id | ||
282 | bl __lookup_processor_type | ||
283 | movs r10, r5 @ invalid processor? | ||
284 | moveq r0, #'p' @ yes, error 'p' | ||
285 | beq __error_p | ||
286 | |||
287 | /* | ||
288 | * Use the page tables supplied from __cpu_up. | ||
289 | */ | ||
290 | adr r4, __secondary_data | ||
291 | ldmia r4, {r5, r7, r12} @ address to jump to after | ||
292 | sub r4, r4, r5 @ mmu has been enabled | ||
293 | ldr r4, [r7, r4] @ get secondary_data.pgdir | ||
294 | adr lr, BSYM(__enable_mmu) @ return address | ||
295 | mov r13, r12 @ __secondary_switched address | ||
296 | ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor | ||
297 | @ (return control reg) | ||
298 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | ||
299 | THUMB( mov pc, r12 ) | ||
300 | ENDPROC(secondary_startup) | ||
301 | |||
302 | /* | ||
303 | * r6 = &secondary_data | ||
304 | */ | ||
305 | ENTRY(__secondary_switched) | ||
306 | ldr sp, [r7, #4] @ get secondary_data.stack | ||
307 | mov fp, #0 | ||
308 | b secondary_start_kernel | ||
309 | ENDPROC(__secondary_switched) | ||
310 | |||
311 | .type __secondary_data, %object | ||
312 | __secondary_data: | ||
313 | .long . | ||
314 | .long secondary_data | ||
315 | .long __secondary_switched | ||
316 | #endif /* defined(CONFIG_SMP) */ | ||
317 | |||
318 | |||
319 | |||
320 | /* | ||
321 | * Setup common bits before finally enabling the MMU. Essentially | ||
322 | * this is just loading the page table pointer and domain access | ||
323 | * registers. | ||
324 | * | ||
325 | * r0 = cp#15 control register | ||
326 | * r1 = machine ID | ||
327 | * r2 = atags pointer | ||
328 | * r4 = page table pointer | ||
329 | * r9 = processor ID | ||
330 | * r13 = *virtual* address to jump to upon completion | ||
331 | */ | ||
332 | __enable_mmu: | ||
333 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
334 | orr r0, r0, #CR_A | ||
335 | #else | ||
336 | bic r0, r0, #CR_A | ||
337 | #endif | ||
338 | #ifdef CONFIG_CPU_DCACHE_DISABLE | ||
339 | bic r0, r0, #CR_C | ||
340 | #endif | ||
341 | #ifdef CONFIG_CPU_BPREDICT_DISABLE | ||
342 | bic r0, r0, #CR_Z | ||
343 | #endif | ||
344 | #ifdef CONFIG_CPU_ICACHE_DISABLE | ||
345 | bic r0, r0, #CR_I | ||
346 | #endif | ||
347 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | ||
348 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | ||
349 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | ||
350 | domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | ||
351 | mcr p15, 0, r5, c3, c0, 0 @ load domain access register | ||
352 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | ||
353 | b __turn_mmu_on | ||
354 | ENDPROC(__enable_mmu) | ||
355 | |||
356 | /* | ||
357 | * Enable the MMU. This completely changes the structure of the visible | ||
358 | * memory space. You will not be able to trace execution through this. | ||
359 | * If you have an enquiry about this, *please* check the linux-arm-kernel | ||
360 | * mailing list archives BEFORE sending another post to the list. | ||
361 | * | ||
362 | * r0 = cp#15 control register | ||
363 | * r1 = machine ID | ||
364 | * r2 = atags pointer | ||
365 | * r9 = processor ID | ||
366 | * r13 = *virtual* address to jump to upon completion | ||
367 | * | ||
368 | * other registers depend on the function called upon completion | ||
369 | */ | ||
370 | .align 5 | ||
371 | __turn_mmu_on: | ||
372 | mov r0, r0 | ||
373 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | ||
374 | mrc p15, 0, r3, c0, c0, 0 @ read id reg | ||
375 | mov r3, r3 | ||
376 | mov r3, r13 | ||
377 | mov pc, r3 | ||
378 | __enable_mmu_end: | ||
379 | ENDPROC(__turn_mmu_on) | ||
380 | |||
381 | |||
382 | #ifdef CONFIG_SMP_ON_UP | ||
383 | __fixup_smp: | ||
384 | mov r7, #0x00070000 | ||
385 | orr r6, r7, #0xff000000 @ mask 0xff070000 | ||
386 | orr r7, r7, #0x41000000 @ val 0x41070000 | ||
387 | and r0, r9, r6 | ||
388 | teq r0, r7 @ ARM CPU and ARMv6/v7? | ||
389 | bne __fixup_smp_on_up @ no, assume UP | ||
390 | |||
391 | orr r6, r6, #0x0000ff00 | ||
392 | orr r6, r6, #0x000000f0 @ mask 0xff07fff0 | ||
393 | orr r7, r7, #0x0000b000 | ||
394 | orr r7, r7, #0x00000020 @ val 0x4107b020 | ||
395 | and r0, r9, r6 | ||
396 | teq r0, r7 @ ARM 11MPCore? | ||
397 | moveq pc, lr @ yes, assume SMP | ||
398 | |||
399 | mrc p15, 0, r0, c0, c0, 5 @ read MPIDR | ||
400 | tst r0, #1 << 31 | ||
401 | movne pc, lr @ bit 31 => SMP | ||
402 | |||
403 | __fixup_smp_on_up: | ||
404 | adr r0, 1f | ||
405 | ldmia r0, {r3, r6, r7} | ||
406 | sub r3, r0, r3 | ||
407 | add r6, r6, r3 | ||
408 | add r7, r7, r3 | ||
409 | 2: cmp r6, r7 | ||
410 | ldmia r6!, {r0, r4} | ||
411 | strlo r4, [r0, r3] | ||
412 | blo 2b | ||
413 | mov pc, lr | ||
414 | ENDPROC(__fixup_smp) | ||
415 | |||
416 | 1: .word . | ||
417 | .word __smpalt_begin | ||
418 | .word __smpalt_end | ||
419 | |||
420 | .pushsection .data | ||
421 | .globl smp_on_up | ||
422 | smp_on_up: | ||
423 | ALT_SMP(.long 1) | ||
424 | ALT_UP(.long 0) | ||
425 | .popsection | ||
426 | |||
427 | #endif | ||
335 | 428 | ||
336 | #include "head-common.S" | 429 | #include "head-common.S" |