author	Russell King <rmk+kernel@arm.linux.org.uk>	2010-10-04 12:56:13 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-10-08 05:07:35 -0400
commit	00945010c063b95e813b966f44bf58ffa1955a38 (patch)
tree	b5f85a613747fb62c5f2e00d5aecd9a3c9fec69e /arch/arm/kernel/head.S
parent	786f1b73f7d5cad5c88dc75a96d53a74160aa7d7 (diff)
ARM: hotplug cpu: move secondary_startup, __enable_mmu to cpuinit
Move these two functions, both of which are required for secondary
CPU booting, into the cpuinit section.

Ensure bad processors call __error_p for better diagnostics, rather
than just __error.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
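For readers unfamiliar with the section annotation this patch introduces: __CPUINIT is a preprocessor marker that redirects the assembly that follows it into the .cpuinit.text section. A minimal sketch of its assembly-side definition, roughly as it appeared in include/linux/init.h for kernels of this era (shown for context only; it is not part of this patch):

	/* include/linux/init.h, assembly side (approximate, 2.6.36-era) */
	#define __CPUINIT	.section	".cpuinit.text", "ax"

When CONFIG_HOTPLUG_CPU is enabled, the linker script keeps .cpuinit.text in the regular kernel text, since secondary CPUs may be brought up again after boot; otherwise it is grouped with init text and freed once boot completes. That is why secondary_startup and __enable_mmu, which are only needed while bringing CPUs up, are moved under this marker.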
Diffstat (limited to 'arch/arm/kernel/head.S')
-rw-r--r--	arch/arm/kernel/head.S	206
1 file changed, 104 insertions(+), 102 deletions(-)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 17414e299482..ed9ebe59178b 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -97,114 +97,14 @@ ENTRY(stext)
 	 */
 	ldr	r13, =__mmap_switched		@ address to jump to after
 						@ mmu has been enabled
-	adr	lr, BSYM(__enable_mmu)		@ return (PIC) address
+	adr	lr, BSYM(1f)			@ return (PIC) address
  ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
  THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
  THUMB(	mov	pc, r12				)
+1:	b	__enable_mmu
 ENDPROC(stext)
 	.ltorg
 
-#if defined(CONFIG_SMP)
-ENTRY(secondary_startup)
-	/*
-	 * Common entry point for secondary CPUs.
-	 *
-	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
-	 * the processor type - there is no need to check the machine type
-	 * as it has already been validated by the primary processor.
-	 */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
-	mrc	p15, 0, r9, c0, c0		@ get processor id
-	bl	__lookup_processor_type
-	movs	r10, r5				@ invalid processor?
-	moveq	r0, #'p'			@ yes, error 'p'
-	beq	__error
-
-	/*
-	 * Use the page tables supplied from __cpu_up.
-	 */
-	adr	r4, __secondary_data
-	ldmia	r4, {r5, r7, r12}		@ address to jump to after
-	sub	r4, r4, r5			@ mmu has been enabled
-	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
-	adr	lr, BSYM(__enable_mmu)		@ return address
-	mov	r13, r12			@ __secondary_switched address
- ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
-						  @ (return control reg)
- THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
- THUMB(	mov	pc, r12				)
-ENDPROC(secondary_startup)
-
-	/*
-	 * r6  = &secondary_data
-	 */
-ENTRY(__secondary_switched)
-	ldr	sp, [r7, #4]			@ get secondary_data.stack
-	mov	fp, #0
-	b	secondary_start_kernel
-ENDPROC(__secondary_switched)
-
-	.type	__secondary_data, %object
-__secondary_data:
-	.long	.
-	.long	secondary_data
-	.long	__secondary_switched
-#endif /* defined(CONFIG_SMP) */
-
-
-
-/*
- * Setup common bits before finally enabling the MMU.  Essentially
- * this is just loading the page table pointer and domain access
- * registers.
- */
-__enable_mmu:
-#ifdef CONFIG_ALIGNMENT_TRAP
-	orr	r0, r0, #CR_A
-#else
-	bic	r0, r0, #CR_A
-#endif
-#ifdef CONFIG_CPU_DCACHE_DISABLE
-	bic	r0, r0, #CR_C
-#endif
-#ifdef CONFIG_CPU_BPREDICT_DISABLE
-	bic	r0, r0, #CR_Z
-#endif
-#ifdef CONFIG_CPU_ICACHE_DISABLE
-	bic	r0, r0, #CR_I
-#endif
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
-	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
-	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
-	b	__turn_mmu_on
-ENDPROC(__enable_mmu)
-
-/*
- * Enable the MMU.  This completely changes the structure of the visible
- * memory space.  You will not be able to trace execution through this.
- * If you have an enquiry about this, *please* check the linux-arm-kernel
- * mailing list archives BEFORE sending another post to the list.
- *
- *  r0  = cp#15 control register
- *  r13 = *virtual* address to jump to upon completion
- *
- * other registers depend on the function called upon completion
- */
-	.align	5
-__turn_mmu_on:
-	mov	r0, r0
-	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
-	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
-	mov	r3, r3
-	mov	r3, r13
-	mov	pc, r3
-__enable_mmu_end:
-ENDPROC(__turn_mmu_on)
-
-
 /*
  * Setup the initial page tables.  We only setup the barest
  * amount which are required to get the kernel running, which
@@ -349,4 +249,106 @@ __enable_mmu_loc:
 	.long	__enable_mmu
 	.long	__enable_mmu_end
 
+#if defined(CONFIG_SMP)
+	__CPUINIT
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+	bl	__lookup_processor_type
+	movs	r10, r5				@ invalid processor?
+	moveq	r0, #'p'			@ yes, error 'p'
+	beq	__error_p
+
+	/*
+	 * Use the page tables supplied from __cpu_up.
+	 */
+	adr	r4, __secondary_data
+	ldmia	r4, {r5, r7, r12}		@ address to jump to after
+	sub	r4, r4, r5			@ mmu has been enabled
+	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
+	adr	lr, BSYM(__enable_mmu)		@ return address
+	mov	r13, r12			@ __secondary_switched address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
+						  @ (return control reg)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
+ENDPROC(secondary_startup)
+
+	/*
+	 * r6  = &secondary_data
+	 */
+ENTRY(__secondary_switched)
+	ldr	sp, [r7, #4]			@ get secondary_data.stack
+	mov	fp, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	.
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* defined(CONFIG_SMP) */
+
+
+
+/*
+ * Setup common bits before finally enabling the MMU.  Essentially
+ * this is just loading the page table pointer and domain access
+ * registers.
+ */
+__enable_mmu:
+#ifdef CONFIG_ALIGNMENT_TRAP
+	orr	r0, r0, #CR_A
+#else
+	bic	r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+	bic	r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+	bic	r0, r0, #CR_I
+#endif
+	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
+	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
+	b	__turn_mmu_on
+ENDPROC(__enable_mmu)
+
+/*
+ * Enable the MMU.  This completely changes the structure of the visible
+ * memory space.  You will not be able to trace execution through this.
+ * If you have an enquiry about this, *please* check the linux-arm-kernel
+ * mailing list archives BEFORE sending another post to the list.
+ *
+ *  r0  = cp#15 control register
+ *  r13 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
+ */
+	.align	5
+__turn_mmu_on:
+	mov	r0, r0
+	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
+	mov	r3, r3
+	mov	r3, r13
+	mov	pc, r3
+__enable_mmu_end:
+ENDPROC(__turn_mmu_on)
+
+
 #include "head-common.S"
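The secondary_startup code above consumes a small descriptor that the boot CPU fills in from __cpu_up() before waking each secondary; the __secondary_data literal block gives the assembly the addresses it needs both before and after the MMU comes on. A rough C-side sketch of that structure and its producer, based on arch/arm/include/asm/smp.h and arch/arm/kernel/smp.c from the same kernel generation (background only, not part of this patch; field order matters, since the assembly reads pgdir at offset 0 and the stack pointer at offset 4):

	/* arch/arm/include/asm/smp.h (approximate) */
	struct secondary_data {
		unsigned long pgdir;	/* physical address of the page tables the secondary should use */
		void *stack;		/* kernel stack of the secondary's idle task */
	};
	extern struct secondary_data secondary_data;

	/* __cpu_up() in arch/arm/kernel/smp.c, roughly: point the secondary at an
	 * idle-task stack and a pgdir, then ask the platform code to release it. */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	ret = boot_secondary(cpu, idle);

The patch does not change this handshake; it only moves the consumer code (secondary_startup, __enable_mmu, __turn_mmu_on) into .cpuinit.text and routes the invalid-processor case to __error_p so a diagnostic is printed.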