author:     Russell King <rmk+kernel@arm.linux.org.uk>    2010-10-18 17:34:47 -0400
committer:  Russell King <rmk+kernel@arm.linux.org.uk>    2010-10-18 17:34:47 -0400
commit:     a0a55682b83fd5f012afadcf415b030d7424ae68
tree:       97ad3cb7e3443f09fbe55f57d3c36994b20a8e52 /arch/arm/kernel
parent:     23beab76b490172a9ff3d52843e4d27a35b2a4c6
parent:     865a4fae7793b80e2b8bca76e279d6dfecbeac17
Merge branch 'hotplug' into devel
Conflicts:
arch/arm/kernel/head-common.S
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/head-common.S  | 305
-rw-r--r--  arch/arm/kernel/head-nommu.S   |   5
-rw-r--r--  arch/arm/kernel/head.S         | 250
-rw-r--r--  arch/arm/kernel/smp.c          |  63
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S  |  24
5 files changed, 371 insertions, 276 deletions
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 58a3e632b6d..bbecaac1e01 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -15,55 +15,6 @@
15 | #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) | 15 | #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) |
16 | #define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2) | 16 | #define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2) |
17 | 17 | ||
18 | .align 2 | ||
19 | .type __switch_data, %object | ||
20 | __switch_data: | ||
21 | .long __mmap_switched | ||
22 | .long __data_loc @ r4 | ||
23 | .long _sdata @ r5 | ||
24 | .long __bss_start @ r6 | ||
25 | .long _end @ r7 | ||
26 | .long processor_id @ r4 | ||
27 | .long __machine_arch_type @ r5 | ||
28 | .long __atags_pointer @ r6 | ||
29 | .long cr_alignment @ r7 | ||
30 | .long init_thread_union + THREAD_START_SP @ sp | ||
31 | |||
32 | /* | ||
33 | * The following fragment of code is executed with the MMU on in MMU mode, | ||
34 | * and uses absolute addresses; this is not position independent. | ||
35 | * | ||
36 | * r0 = cp#15 control register | ||
37 | * r1 = machine ID | ||
38 | * r2 = atags pointer | ||
39 | * r9 = processor ID | ||
40 | */ | ||
41 | __mmap_switched: | ||
42 | adr r3, __switch_data + 4 | ||
43 | |||
44 | ldmia r3!, {r4, r5, r6, r7} | ||
45 | cmp r4, r5 @ Copy data segment if needed | ||
46 | 1: cmpne r5, r6 | ||
47 | ldrne fp, [r4], #4 | ||
48 | strne fp, [r5], #4 | ||
49 | bne 1b | ||
50 | |||
51 | mov fp, #0 @ Clear BSS (and zero fp) | ||
52 | 1: cmp r6, r7 | ||
53 | strcc fp, [r6],#4 | ||
54 | bcc 1b | ||
55 | |||
56 | ARM( ldmia r3, {r4, r5, r6, r7, sp}) | ||
57 | THUMB( ldmia r3, {r4, r5, r6, r7} ) | ||
58 | THUMB( ldr sp, [r3, #16] ) | ||
59 | str r9, [r4] @ Save processor ID | ||
60 | str r1, [r5] @ Save machine type | ||
61 | str r2, [r6] @ Save atags pointer | ||
62 | bic r4, r0, #CR_A @ Clear 'A' bit | ||
63 | stmia r7, {r0, r4} @ Save control register values | ||
64 | b start_kernel | ||
65 | ENDPROC(__mmap_switched) | ||
66 | |||
67 | /* | 18 | /* |
68 | * Exception handling. Something went wrong and we can't proceed. We | 19 | * Exception handling. Something went wrong and we can't proceed. We |
69 | * ought to tell the user, but since we don't have any guarantee that | 20 | * ought to tell the user, but since we don't have any guarantee that |
@@ -73,21 +24,7 @@ ENDPROC(__mmap_switched)
73 | * and hope for the best (useful if bootloader fails to pass a proper | 24 | * and hope for the best (useful if bootloader fails to pass a proper |
74 | * machine ID for example). | 25 | * machine ID for example). |
75 | */ | 26 | */ |
76 | __error_p: | 27 | __HEAD |
77 | #ifdef CONFIG_DEBUG_LL | ||
78 | adr r0, str_p1 | ||
79 | bl printascii | ||
80 | mov r0, r9 | ||
81 | bl printhex8 | ||
82 | adr r0, str_p2 | ||
83 | bl printascii | ||
84 | b __error | ||
85 | str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" | ||
86 | str_p2: .asciz ").\n" | ||
87 | .align | ||
88 | #endif | ||
89 | ENDPROC(__error_p) | ||
90 | |||
91 | __error_a: | 28 | __error_a: |
92 | #ifdef CONFIG_DEBUG_LL | 29 | #ifdef CONFIG_DEBUG_LL |
93 | mov r4, r1 @ preserve machine ID | 30 | mov r4, r1 @ preserve machine ID |
@@ -97,7 +34,7 @@ __error_a:
97 | bl printhex8 | 34 | bl printhex8 |
98 | adr r0, str_a2 | 35 | adr r0, str_a2 |
99 | bl printascii | 36 | bl printascii |
100 | adr r3, 4f | 37 | adr r3, __lookup_machine_type_data |
101 | ldmia r3, {r4, r5, r6} @ get machine desc list | 38 | ldmia r3, {r4, r5, r6} @ get machine desc list |
102 | sub r4, r3, r4 @ get offset between virt&phys | 39 | sub r4, r3, r4 @ get offset between virt&phys |
103 | add r5, r5, r4 @ convert virt addresses to | 40 | add r5, r5, r4 @ convert virt addresses to |
@@ -125,78 +62,6 @@ str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
125 | .align | 62 | .align |
126 | #endif | 63 | #endif |
127 | 64 | ||
128 | __error: | ||
129 | #ifdef CONFIG_ARCH_RPC | ||
130 | /* | ||
131 | * Turn the screen red on a error - RiscPC only. | ||
132 | */ | ||
133 | mov r0, #0x02000000 | ||
134 | mov r3, #0x11 | ||
135 | orr r3, r3, r3, lsl #8 | ||
136 | orr r3, r3, r3, lsl #16 | ||
137 | str r3, [r0], #4 | ||
138 | str r3, [r0], #4 | ||
139 | str r3, [r0], #4 | ||
140 | str r3, [r0], #4 | ||
141 | #endif | ||
142 | 1: mov r0, r0 | ||
143 | b 1b | ||
144 | ENDPROC(__error) | ||
145 | |||
146 | |||
147 | /* | ||
148 | * Read processor ID register (CP#15, CR0), and look up in the linker-built | ||
149 | * supported processor list. Note that we can't use the absolute addresses | ||
150 | * for the __proc_info lists since we aren't running with the MMU on | ||
151 | * (and therefore, we are not in the correct address space). We have to | ||
152 | * calculate the offset. | ||
153 | * | ||
154 | * r9 = cpuid | ||
155 | * Returns: | ||
156 | * r3, r4, r6 corrupted | ||
157 | * r5 = proc_info pointer in physical address space | ||
158 | * r9 = cpuid (preserved) | ||
159 | */ | ||
160 | __lookup_processor_type: | ||
161 | adr r3, 3f | ||
162 | ldmia r3, {r5 - r7} | ||
163 | add r3, r3, #8 | ||
164 | sub r3, r3, r7 @ get offset between virt&phys | ||
165 | add r5, r5, r3 @ convert virt addresses to | ||
166 | add r6, r6, r3 @ physical address space | ||
167 | 1: ldmia r5, {r3, r4} @ value, mask | ||
168 | and r4, r4, r9 @ mask wanted bits | ||
169 | teq r3, r4 | ||
170 | beq 2f | ||
171 | add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) | ||
172 | cmp r5, r6 | ||
173 | blo 1b | ||
174 | mov r5, #0 @ unknown processor | ||
175 | 2: mov pc, lr | ||
176 | ENDPROC(__lookup_processor_type) | ||
177 | |||
178 | /* | ||
179 | * This provides a C-API version of the above function. | ||
180 | */ | ||
181 | ENTRY(lookup_processor_type) | ||
182 | stmfd sp!, {r4 - r7, r9, lr} | ||
183 | mov r9, r0 | ||
184 | bl __lookup_processor_type | ||
185 | mov r0, r5 | ||
186 | ldmfd sp!, {r4 - r7, r9, pc} | ||
187 | ENDPROC(lookup_processor_type) | ||
188 | |||
189 | /* | ||
190 | * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for | ||
191 | * more information about the __proc_info and __arch_info structures. | ||
192 | */ | ||
193 | .align 2 | ||
194 | 3: .long __proc_info_begin | ||
195 | .long __proc_info_end | ||
196 | 4: .long . | ||
197 | .long __arch_info_begin | ||
198 | .long __arch_info_end | ||
199 | |||
200 | /* | 65 | /* |
201 | * Lookup machine architecture in the linker-build list of architectures. | 66 | * Lookup machine architecture in the linker-build list of architectures. |
202 | * Note that we can't use the absolute addresses for the __arch_info | 67 | * Note that we can't use the absolute addresses for the __arch_info |
@@ -209,7 +74,7 @@ ENDPROC(lookup_processor_type)
209 | * r5 = mach_info pointer in physical address space | 74 | * r5 = mach_info pointer in physical address space |
210 | */ | 75 | */ |
211 | __lookup_machine_type: | 76 | __lookup_machine_type: |
212 | adr r3, 4b | 77 | adr r3, __lookup_machine_type_data |
213 | ldmia r3, {r4, r5, r6} | 78 | ldmia r3, {r4, r5, r6} |
214 | sub r3, r3, r4 @ get offset between virt&phys | 79 | sub r3, r3, r4 @ get offset between virt&phys |
215 | add r5, r5, r3 @ convert virt addresses to | 80 | add r5, r5, r3 @ convert virt addresses to |
@@ -225,15 +90,16 @@ __lookup_machine_type:
225 | ENDPROC(__lookup_machine_type) | 90 | ENDPROC(__lookup_machine_type) |
226 | 91 | ||
227 | /* | 92 | /* |
228 | * This provides a C-API version of the above function. | 93 | * Look in arch/arm/kernel/arch.[ch] for information about the |
94 | * __arch_info structures. | ||
229 | */ | 95 | */ |
230 | ENTRY(lookup_machine_type) | 96 | .align 2 |
231 | stmfd sp!, {r4 - r6, lr} | 97 | .type __lookup_machine_type_data, %object |
232 | mov r1, r0 | 98 | __lookup_machine_type_data: |
233 | bl __lookup_machine_type | 99 | .long . |
234 | mov r0, r5 | 100 | .long __arch_info_begin |
235 | ldmfd sp!, {r4 - r6, pc} | 101 | .long __arch_info_end |
236 | ENDPROC(lookup_machine_type) | 102 | .size __lookup_machine_type_data, . - __lookup_machine_type_data |
237 | 103 | ||
238 | /* Determine validity of the r2 atags pointer. The heuristic requires | 104 | /* Determine validity of the r2 atags pointer. The heuristic requires |
239 | * that the pointer be aligned, in the first 16k of physical RAM and | 105 | * that the pointer be aligned, in the first 16k of physical RAM and |
@@ -265,3 +131,150 @@ __vet_atags:
265 | 1: mov r2, #0 | 131 | 1: mov r2, #0 |
266 | mov pc, lr | 132 | mov pc, lr |
267 | ENDPROC(__vet_atags) | 133 | ENDPROC(__vet_atags) |
134 | |||
135 | /* | ||
136 | * The following fragment of code is executed with the MMU on in MMU mode, | ||
137 | * and uses absolute addresses; this is not position independent. | ||
138 | * | ||
139 | * r0 = cp#15 control register | ||
140 | * r1 = machine ID | ||
141 | * r2 = atags pointer | ||
142 | * r9 = processor ID | ||
143 | */ | ||
144 | __INIT | ||
145 | __mmap_switched: | ||
146 | adr r3, __mmap_switched_data | ||
147 | |||
148 | ldmia r3!, {r4, r5, r6, r7} | ||
149 | cmp r4, r5 @ Copy data segment if needed | ||
150 | 1: cmpne r5, r6 | ||
151 | ldrne fp, [r4], #4 | ||
152 | strne fp, [r5], #4 | ||
153 | bne 1b | ||
154 | |||
155 | mov fp, #0 @ Clear BSS (and zero fp) | ||
156 | 1: cmp r6, r7 | ||
157 | strcc fp, [r6],#4 | ||
158 | bcc 1b | ||
159 | |||
160 | ARM( ldmia r3, {r4, r5, r6, r7, sp}) | ||
161 | THUMB( ldmia r3, {r4, r5, r6, r7} ) | ||
162 | THUMB( ldr sp, [r3, #16] ) | ||
163 | str r9, [r4] @ Save processor ID | ||
164 | str r1, [r5] @ Save machine type | ||
165 | str r2, [r6] @ Save atags pointer | ||
166 | bic r4, r0, #CR_A @ Clear 'A' bit | ||
167 | stmia r7, {r0, r4} @ Save control register values | ||
168 | b start_kernel | ||
169 | ENDPROC(__mmap_switched) | ||
170 | |||
171 | .align 2 | ||
172 | .type __mmap_switched_data, %object | ||
173 | __mmap_switched_data: | ||
174 | .long __data_loc @ r4 | ||
175 | .long _sdata @ r5 | ||
176 | .long __bss_start @ r6 | ||
177 | .long _end @ r7 | ||
178 | .long processor_id @ r4 | ||
179 | .long __machine_arch_type @ r5 | ||
180 | .long __atags_pointer @ r6 | ||
181 | .long cr_alignment @ r7 | ||
182 | .long init_thread_union + THREAD_START_SP @ sp | ||
183 | .size __mmap_switched_data, . - __mmap_switched_data | ||
184 | |||
185 | /* | ||
186 | * This provides a C-API version of __lookup_machine_type | ||
187 | */ | ||
188 | ENTRY(lookup_machine_type) | ||
189 | stmfd sp!, {r4 - r6, lr} | ||
190 | mov r1, r0 | ||
191 | bl __lookup_machine_type | ||
192 | mov r0, r5 | ||
193 | ldmfd sp!, {r4 - r6, pc} | ||
194 | ENDPROC(lookup_machine_type) | ||
195 | |||
196 | /* | ||
197 | * This provides a C-API version of __lookup_processor_type | ||
198 | */ | ||
199 | ENTRY(lookup_processor_type) | ||
200 | stmfd sp!, {r4 - r6, r9, lr} | ||
201 | mov r9, r0 | ||
202 | bl __lookup_processor_type | ||
203 | mov r0, r5 | ||
204 | ldmfd sp!, {r4 - r6, r9, pc} | ||
205 | ENDPROC(lookup_processor_type) | ||
206 | |||
207 | /* | ||
208 | * Read processor ID register (CP#15, CR0), and look up in the linker-built | ||
209 | * supported processor list. Note that we can't use the absolute addresses | ||
210 | * for the __proc_info lists since we aren't running with the MMU on | ||
211 | * (and therefore, we are not in the correct address space). We have to | ||
212 | * calculate the offset. | ||
213 | * | ||
214 | * r9 = cpuid | ||
215 | * Returns: | ||
216 | * r3, r4, r6 corrupted | ||
217 | * r5 = proc_info pointer in physical address space | ||
218 | * r9 = cpuid (preserved) | ||
219 | */ | ||
220 | __CPUINIT | ||
221 | __lookup_processor_type: | ||
222 | adr r3, __lookup_processor_type_data | ||
223 | ldmia r3, {r4 - r6} | ||
224 | sub r3, r3, r4 @ get offset between virt&phys | ||
225 | add r5, r5, r3 @ convert virt addresses to | ||
226 | add r6, r6, r3 @ physical address space | ||
227 | 1: ldmia r5, {r3, r4} @ value, mask | ||
228 | and r4, r4, r9 @ mask wanted bits | ||
229 | teq r3, r4 | ||
230 | beq 2f | ||
231 | add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) | ||
232 | cmp r5, r6 | ||
233 | blo 1b | ||
234 | mov r5, #0 @ unknown processor | ||
235 | 2: mov pc, lr | ||
236 | ENDPROC(__lookup_processor_type) | ||
237 | |||
238 | /* | ||
239 | * Look in <asm/procinfo.h> for information about the __proc_info structure. | ||
240 | */ | ||
241 | .align 2 | ||
242 | .type __lookup_processor_type_data, %object | ||
243 | __lookup_processor_type_data: | ||
244 | .long . | ||
245 | .long __proc_info_begin | ||
246 | .long __proc_info_end | ||
247 | .size __lookup_processor_type_data, . - __lookup_processor_type_data | ||
248 | |||
249 | __error_p: | ||
250 | #ifdef CONFIG_DEBUG_LL | ||
251 | adr r0, str_p1 | ||
252 | bl printascii | ||
253 | mov r0, r9 | ||
254 | bl printhex8 | ||
255 | adr r0, str_p2 | ||
256 | bl printascii | ||
257 | b __error | ||
258 | str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" | ||
259 | str_p2: .asciz ").\n" | ||
260 | .align | ||
261 | #endif | ||
262 | ENDPROC(__error_p) | ||
263 | |||
264 | __error: | ||
265 | #ifdef CONFIG_ARCH_RPC | ||
266 | /* | ||
267 | * Turn the screen red on a error - RiscPC only. | ||
268 | */ | ||
269 | mov r0, #0x02000000 | ||
270 | mov r3, #0x11 | ||
271 | orr r3, r3, r3, lsl #8 | ||
272 | orr r3, r3, r3, lsl #16 | ||
273 | str r3, [r0], #4 | ||
274 | str r3, [r0], #4 | ||
275 | str r3, [r0], #4 | ||
276 | str r3, [r0], #4 | ||
277 | #endif | ||
278 | 1: mov r0, r0 | ||
279 | b 1b | ||
280 | ENDPROC(__error) | ||
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 573b803dc6b..814ce1a7327 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -48,8 +48,6 @@ ENTRY(stext)
48 | movs r8, r5 @ invalid machine (r5=0)? | 48 | movs r8, r5 @ invalid machine (r5=0)? |
49 | beq __error_a @ yes, error 'a' | 49 | beq __error_a @ yes, error 'a' |
50 | 50 | ||
51 | ldr r13, __switch_data @ address to jump to after | ||
52 | @ the initialization is done | ||
53 | adr lr, BSYM(__after_proc_init) @ return (PIC) address | 51 | adr lr, BSYM(__after_proc_init) @ return (PIC) address |
54 | ARM( add pc, r10, #PROCINFO_INITFUNC ) | 52 | ARM( add pc, r10, #PROCINFO_INITFUNC ) |
55 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | 53 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) |
@@ -87,8 +85,7 @@ __after_proc_init:
87 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | 85 | mcr p15, 0, r0, c1, c0, 0 @ write control reg |
88 | #endif /* CONFIG_CPU_CP15 */ | 86 | #endif /* CONFIG_CPU_CP15 */ |
89 | 87 | ||
90 | mov r3, r13 | 88 | b __mmap_switched @ clear the BSS and jump |
91 | mov pc, r3 @ clear the BSS and jump | ||
92 | @ to start_kernel | 89 | @ to start_kernel |
93 | ENDPROC(__after_proc_init) | 90 | ENDPROC(__after_proc_init) |
94 | .ltorg | 91 | .ltorg |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index b44d21e1e34..767390449e0 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -98,113 +98,15 @@ ENTRY(stext)
98 | * above. On return, the CPU will be ready for the MMU to be | 98 | * above. On return, the CPU will be ready for the MMU to be |
99 | * turned on, and r0 will hold the CPU control register value. | 99 | * turned on, and r0 will hold the CPU control register value. |
100 | */ | 100 | */ |
101 | ldr r13, __switch_data @ address to jump to after | 101 | ldr r13, =__mmap_switched @ address to jump to after |
102 | @ mmu has been enabled | 102 | @ mmu has been enabled |
103 | adr lr, BSYM(__enable_mmu) @ return (PIC) address | 103 | adr lr, BSYM(1f) @ return (PIC) address |
104 | ARM( add pc, r10, #PROCINFO_INITFUNC ) | 104 | ARM( add pc, r10, #PROCINFO_INITFUNC ) |
105 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | 105 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) |
106 | THUMB( mov pc, r12 ) | 106 | THUMB( mov pc, r12 ) |
107 | 1: b __enable_mmu | ||
107 | ENDPROC(stext) | 108 | ENDPROC(stext) |
108 | 109 | .ltorg | |
109 | #if defined(CONFIG_SMP) | ||
110 | ENTRY(secondary_startup) | ||
111 | /* | ||
112 | * Common entry point for secondary CPUs. | ||
113 | * | ||
114 | * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | ||
115 | * the processor type - there is no need to check the machine type | ||
116 | * as it has already been validated by the primary processor. | ||
117 | */ | ||
118 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | ||
119 | mrc p15, 0, r9, c0, c0 @ get processor id | ||
120 | bl __lookup_processor_type | ||
121 | movs r10, r5 @ invalid processor? | ||
122 | moveq r0, #'p' @ yes, error 'p' | ||
123 | beq __error | ||
124 | |||
125 | /* | ||
126 | * Use the page tables supplied from __cpu_up. | ||
127 | */ | ||
128 | adr r4, __secondary_data | ||
129 | ldmia r4, {r5, r7, r12} @ address to jump to after | ||
130 | sub r4, r4, r5 @ mmu has been enabled | ||
131 | ldr r4, [r7, r4] @ get secondary_data.pgdir | ||
132 | adr lr, BSYM(__enable_mmu) @ return address | ||
133 | mov r13, r12 @ __secondary_switched address | ||
134 | ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor | ||
135 | @ (return control reg) | ||
136 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | ||
137 | THUMB( mov pc, r12 ) | ||
138 | ENDPROC(secondary_startup) | ||
139 | |||
140 | /* | ||
141 | * r6 = &secondary_data | ||
142 | */ | ||
143 | ENTRY(__secondary_switched) | ||
144 | ldr sp, [r7, #4] @ get secondary_data.stack | ||
145 | mov fp, #0 | ||
146 | b secondary_start_kernel | ||
147 | ENDPROC(__secondary_switched) | ||
148 | |||
149 | .type __secondary_data, %object | ||
150 | __secondary_data: | ||
151 | .long . | ||
152 | .long secondary_data | ||
153 | .long __secondary_switched | ||
154 | #endif /* defined(CONFIG_SMP) */ | ||
155 | |||
156 | |||
157 | |||
158 | /* | ||
159 | * Setup common bits before finally enabling the MMU. Essentially | ||
160 | * this is just loading the page table pointer and domain access | ||
161 | * registers. | ||
162 | */ | ||
163 | __enable_mmu: | ||
164 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
165 | orr r0, r0, #CR_A | ||
166 | #else | ||
167 | bic r0, r0, #CR_A | ||
168 | #endif | ||
169 | #ifdef CONFIG_CPU_DCACHE_DISABLE | ||
170 | bic r0, r0, #CR_C | ||
171 | #endif | ||
172 | #ifdef CONFIG_CPU_BPREDICT_DISABLE | ||
173 | bic r0, r0, #CR_Z | ||
174 | #endif | ||
175 | #ifdef CONFIG_CPU_ICACHE_DISABLE | ||
176 | bic r0, r0, #CR_I | ||
177 | #endif | ||
178 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | ||
179 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | ||
180 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | ||
181 | domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | ||
182 | mcr p15, 0, r5, c3, c0, 0 @ load domain access register | ||
183 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | ||
184 | b __turn_mmu_on | ||
185 | ENDPROC(__enable_mmu) | ||
186 | |||
187 | /* | ||
188 | * Enable the MMU. This completely changes the structure of the visible | ||
189 | * memory space. You will not be able to trace execution through this. | ||
190 | * If you have an enquiry about this, *please* check the linux-arm-kernel | ||
191 | * mailing list archives BEFORE sending another post to the list. | ||
192 | * | ||
193 | * r0 = cp#15 control register | ||
194 | * r13 = *virtual* address to jump to upon completion | ||
195 | * | ||
196 | * other registers depend on the function called upon completion | ||
197 | */ | ||
198 | .align 5 | ||
199 | __turn_mmu_on: | ||
200 | mov r0, r0 | ||
201 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | ||
202 | mrc p15, 0, r3, c0, c0, 0 @ read id reg | ||
203 | mov r3, r3 | ||
204 | mov r3, r13 | ||
205 | mov pc, r3 | ||
206 | ENDPROC(__turn_mmu_on) | ||
207 | |||
208 | 110 | ||
209 | /* | 111 | /* |
210 | * Setup the initial page tables. We only setup the barest | 112 | * Setup the initial page tables. We only setup the barest |
@@ -216,7 +118,7 @@ ENDPROC(__turn_mmu_on)
216 | * r10 = procinfo | 118 | * r10 = procinfo |
217 | * | 119 | * |
218 | * Returns: | 120 | * Returns: |
219 | * r0, r3, r6, r7 corrupted | 121 | * r0, r3, r5-r7 corrupted |
220 | * r4 = physical page table address | 122 | * r4 = physical page table address |
221 | */ | 123 | */ |
222 | __create_page_tables: | 124 | __create_page_tables: |
@@ -238,20 +140,30 @@ __create_page_tables:
238 | ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags | 140 | ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags |
239 | 141 | ||
240 | /* | 142 | /* |
241 | * Create identity mapping for first MB of kernel to | 143 | * Create identity mapping to cater for __enable_mmu. |
242 | * cater for the MMU enable. This identity mapping | 144 | * This identity mapping will be removed by paging_init(). |
243 | * will be removed by paging_init(). We use our current program | ||
244 | * counter to determine corresponding section base address. | ||
245 | */ | 145 | */ |
246 | mov r6, pc | 146 | adr r0, __enable_mmu_loc |
247 | mov r6, r6, lsr #20 @ start of kernel section | 147 | ldmia r0, {r3, r5, r6} |
248 | orr r3, r7, r6, lsl #20 @ flags + kernel base | 148 | sub r0, r0, r3 @ virt->phys offset |
249 | str r3, [r4, r6, lsl #2] @ identity mapping | 149 | add r5, r5, r0 @ phys __enable_mmu |
150 | add r6, r6, r0 @ phys __enable_mmu_end | ||
151 | mov r5, r5, lsr #20 | ||
152 | mov r6, r6, lsr #20 | ||
153 | |||
154 | 1: orr r3, r7, r5, lsl #20 @ flags + kernel base | ||
155 | str r3, [r4, r5, lsl #2] @ identity mapping | ||
156 | teq r5, r6 | ||
157 | addne r5, r5, #1 @ next section | ||
158 | bne 1b | ||
250 | 159 | ||
251 | /* | 160 | /* |
252 | * Now setup the pagetables for our kernel direct | 161 | * Now setup the pagetables for our kernel direct |
253 | * mapped region. | 162 | * mapped region. |
254 | */ | 163 | */ |
164 | mov r3, pc | ||
165 | mov r3, r3, lsr #20 | ||
166 | orr r3, r7, r3, lsl #20 | ||
255 | add r0, r4, #(KERNEL_START & 0xff000000) >> 18 | 167 | add r0, r4, #(KERNEL_START & 0xff000000) >> 18 |
256 | str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]! | 168 | str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]! |
257 | ldr r6, =(KERNEL_END - 1) | 169 | ldr r6, =(KERNEL_END - 1) |
@@ -335,6 +247,122 @@ __create_page_tables:
335 | mov pc, lr | 247 | mov pc, lr |
336 | ENDPROC(__create_page_tables) | 248 | ENDPROC(__create_page_tables) |
337 | .ltorg | 249 | .ltorg |
250 | __enable_mmu_loc: | ||
251 | .long . | ||
252 | .long __enable_mmu | ||
253 | .long __enable_mmu_end | ||
254 | |||
255 | #if defined(CONFIG_SMP) | ||
256 | __CPUINIT | ||
257 | ENTRY(secondary_startup) | ||
258 | /* | ||
259 | * Common entry point for secondary CPUs. | ||
260 | * | ||
261 | * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | ||
262 | * the processor type - there is no need to check the machine type | ||
263 | * as it has already been validated by the primary processor. | ||
264 | */ | ||
265 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | ||
266 | mrc p15, 0, r9, c0, c0 @ get processor id | ||
267 | bl __lookup_processor_type | ||
268 | movs r10, r5 @ invalid processor? | ||
269 | moveq r0, #'p' @ yes, error 'p' | ||
270 | beq __error_p | ||
271 | |||
272 | /* | ||
273 | * Use the page tables supplied from __cpu_up. | ||
274 | */ | ||
275 | adr r4, __secondary_data | ||
276 | ldmia r4, {r5, r7, r12} @ address to jump to after | ||
277 | sub r4, r4, r5 @ mmu has been enabled | ||
278 | ldr r4, [r7, r4] @ get secondary_data.pgdir | ||
279 | adr lr, BSYM(__enable_mmu) @ return address | ||
280 | mov r13, r12 @ __secondary_switched address | ||
281 | ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor | ||
282 | @ (return control reg) | ||
283 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | ||
284 | THUMB( mov pc, r12 ) | ||
285 | ENDPROC(secondary_startup) | ||
286 | |||
287 | /* | ||
288 | * r6 = &secondary_data | ||
289 | */ | ||
290 | ENTRY(__secondary_switched) | ||
291 | ldr sp, [r7, #4] @ get secondary_data.stack | ||
292 | mov fp, #0 | ||
293 | b secondary_start_kernel | ||
294 | ENDPROC(__secondary_switched) | ||
295 | |||
296 | .type __secondary_data, %object | ||
297 | __secondary_data: | ||
298 | .long . | ||
299 | .long secondary_data | ||
300 | .long __secondary_switched | ||
301 | #endif /* defined(CONFIG_SMP) */ | ||
302 | |||
303 | |||
304 | |||
305 | /* | ||
306 | * Setup common bits before finally enabling the MMU. Essentially | ||
307 | * this is just loading the page table pointer and domain access | ||
308 | * registers. | ||
309 | * | ||
310 | * r0 = cp#15 control register | ||
311 | * r1 = machine ID | ||
312 | * r2 = atags pointer | ||
313 | * r4 = page table pointer | ||
314 | * r9 = processor ID | ||
315 | * r13 = *virtual* address to jump to upon completion | ||
316 | */ | ||
317 | __enable_mmu: | ||
318 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
319 | orr r0, r0, #CR_A | ||
320 | #else | ||
321 | bic r0, r0, #CR_A | ||
322 | #endif | ||
323 | #ifdef CONFIG_CPU_DCACHE_DISABLE | ||
324 | bic r0, r0, #CR_C | ||
325 | #endif | ||
326 | #ifdef CONFIG_CPU_BPREDICT_DISABLE | ||
327 | bic r0, r0, #CR_Z | ||
328 | #endif | ||
329 | #ifdef CONFIG_CPU_ICACHE_DISABLE | ||
330 | bic r0, r0, #CR_I | ||
331 | #endif | ||
332 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | ||
333 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | ||
334 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | ||
335 | domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | ||
336 | mcr p15, 0, r5, c3, c0, 0 @ load domain access register | ||
337 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | ||
338 | b __turn_mmu_on | ||
339 | ENDPROC(__enable_mmu) | ||
340 | |||
341 | /* | ||
342 | * Enable the MMU. This completely changes the structure of the visible | ||
343 | * memory space. You will not be able to trace execution through this. | ||
344 | * If you have an enquiry about this, *please* check the linux-arm-kernel | ||
345 | * mailing list archives BEFORE sending another post to the list. | ||
346 | * | ||
347 | * r0 = cp#15 control register | ||
348 | * r1 = machine ID | ||
349 | * r2 = atags pointer | ||
350 | * r9 = processor ID | ||
351 | * r13 = *virtual* address to jump to upon completion | ||
352 | * | ||
353 | * other registers depend on the function called upon completion | ||
354 | */ | ||
355 | .align 5 | ||
356 | __turn_mmu_on: | ||
357 | mov r0, r0 | ||
358 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | ||
359 | mrc p15, 0, r3, c0, c0, 0 @ read id reg | ||
360 | mov r3, r3 | ||
361 | mov r3, r13 | ||
362 | mov pc, r3 | ||
363 | __enable_mmu_end: | ||
364 | ENDPROC(__turn_mmu_on) | ||
365 | |||
338 | 366 | ||
339 | #ifdef CONFIG_SMP_ON_UP | 367 | #ifdef CONFIG_SMP_ON_UP |
340 | __fixup_smp: | 368 | __fixup_smp: |
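For readability, the new identity-mapping loop that __create_page_tables gains above can be read as the following C sketch. It only restates the assembly: every 1MB section from __enable_mmu to __enable_mmu_end is mapped onto itself using the mm_mmuflags pulled from the proc_info entry. The function and parameter names are invented for the sketch.

```c
/*
 * C rendering of the loop at label "1:" in __create_page_tables:
 * 'pgd' corresponds to r4 (physical page-table base, one word per 1MB
 * section), 'mmuflags' to r7, and the section numbers to r5/r6 after
 * the ">> 20" shifts.  Both ends are mapped inclusively, matching the
 * "teq r5, r6 / addne r5, r5, #1" sequence.
 */
static void identity_map_enable_mmu(unsigned long *pgd,
                                    unsigned long phys_start,
                                    unsigned long phys_end,
                                    unsigned long mmuflags)
{
        unsigned long sect;

        for (sect = phys_start >> 20; sect <= phys_end >> 20; sect++)
                pgd[sect] = (sect << 20) | mmuflags;  /* 1:1 section entry */
}
```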
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 32e16da5cbc..8c195959025 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -33,6 +33,7 @@
33 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
34 | #include <asm/pgalloc.h> | 34 | #include <asm/pgalloc.h> |
35 | #include <asm/processor.h> | 35 | #include <asm/processor.h> |
36 | #include <asm/sections.h> | ||
36 | #include <asm/tlbflush.h> | 37 | #include <asm/tlbflush.h> |
37 | #include <asm/ptrace.h> | 38 | #include <asm/ptrace.h> |
38 | #include <asm/localtimer.h> | 39 | #include <asm/localtimer.h> |
@@ -67,12 +68,47 @@ enum ipi_msg_type {
67 | IPI_CPU_STOP, | 68 | IPI_CPU_STOP, |
68 | }; | 69 | }; |
69 | 70 | ||
71 | static inline void identity_mapping_add(pgd_t *pgd, unsigned long start, | ||
72 | unsigned long end) | ||
73 | { | ||
74 | unsigned long addr, prot; | ||
75 | pmd_t *pmd; | ||
76 | |||
77 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; | ||
78 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | ||
79 | prot |= PMD_BIT4; | ||
80 | |||
81 | for (addr = start & PGDIR_MASK; addr < end;) { | ||
82 | pmd = pmd_offset(pgd + pgd_index(addr), addr); | ||
83 | pmd[0] = __pmd(addr | prot); | ||
84 | addr += SECTION_SIZE; | ||
85 | pmd[1] = __pmd(addr | prot); | ||
86 | addr += SECTION_SIZE; | ||
87 | flush_pmd_entry(pmd); | ||
88 | outer_clean_range(__pa(pmd), __pa(pmd + 1)); | ||
89 | } | ||
90 | } | ||
91 | |||
92 | static inline void identity_mapping_del(pgd_t *pgd, unsigned long start, | ||
93 | unsigned long end) | ||
94 | { | ||
95 | unsigned long addr; | ||
96 | pmd_t *pmd; | ||
97 | |||
98 | for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) { | ||
99 | pmd = pmd_offset(pgd + pgd_index(addr), addr); | ||
100 | pmd[0] = __pmd(0); | ||
101 | pmd[1] = __pmd(0); | ||
102 | clean_pmd_entry(pmd); | ||
103 | outer_clean_range(__pa(pmd), __pa(pmd + 1)); | ||
104 | } | ||
105 | } | ||
106 | |||
70 | int __cpuinit __cpu_up(unsigned int cpu) | 107 | int __cpuinit __cpu_up(unsigned int cpu) |
71 | { | 108 | { |
72 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); | 109 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); |
73 | struct task_struct *idle = ci->idle; | 110 | struct task_struct *idle = ci->idle; |
74 | pgd_t *pgd; | 111 | pgd_t *pgd; |
75 | pmd_t *pmd; | ||
76 | int ret; | 112 | int ret; |
77 | 113 | ||
78 | /* | 114 | /* |
@@ -101,11 +137,16 @@ int __cpuinit __cpu_up(unsigned int cpu)
101 | * a 1:1 mapping for the physical address of the kernel. | 137 | * a 1:1 mapping for the physical address of the kernel. |
102 | */ | 138 | */ |
103 | pgd = pgd_alloc(&init_mm); | 139 | pgd = pgd_alloc(&init_mm); |
104 | pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET); | 140 | if (!pgd) |
105 | *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | | 141 | return -ENOMEM; |
106 | PMD_TYPE_SECT | PMD_SECT_AP_WRITE); | 142 | |
107 | flush_pmd_entry(pmd); | 143 | if (PHYS_OFFSET != PAGE_OFFSET) { |
108 | outer_clean_range(__pa(pmd), __pa(pmd + 1)); | 144 | #ifndef CONFIG_HOTPLUG_CPU |
145 | identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); | ||
146 | #endif | ||
147 | identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); | ||
148 | identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); | ||
149 | } | ||
109 | 150 | ||
110 | /* | 151 | /* |
111 | * We need to tell the secondary core where to find | 152 | * We need to tell the secondary core where to find |
@@ -143,8 +184,14 @@ int __cpuinit __cpu_up(unsigned int cpu)
143 | secondary_data.stack = NULL; | 184 | secondary_data.stack = NULL; |
144 | secondary_data.pgdir = 0; | 185 | secondary_data.pgdir = 0; |
145 | 186 | ||
146 | *pmd = __pmd(0); | 187 | if (PHYS_OFFSET != PAGE_OFFSET) { |
147 | clean_pmd_entry(pmd); | 188 | #ifndef CONFIG_HOTPLUG_CPU |
189 | identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); | ||
190 | #endif | ||
191 | identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); | ||
192 | identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); | ||
193 | } | ||
194 | |||
148 | pgd_free(&init_mm, pgd); | 195 | pgd_free(&init_mm, pgd); |
149 | 196 | ||
150 | if (ret) { | 197 | if (ret) { |
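Note that identity_mapping_add() above works at pgd-slot granularity, i.e. a pair of 1MB sections per iteration, which is why it writes pmd[0] and pmd[1] before advancing. The small host-side demo below prints the address ranges such a walk would touch for a kernel text segment; the start/end values and the program itself are illustrative only, not kernel code.

```c
#include <stdio.h>

#define SECTION_SIZE  0x00100000UL          /* one 1MB ARM section         */
#define PGDIR_SIZE    (2 * SECTION_SIZE)    /* one pgd slot = two sections */
#define PGDIR_MASK    (~(PGDIR_SIZE - 1))

int main(void)
{
        /* stand-ins for __pa(_stext) and __pa(_etext) */
        unsigned long start = 0x80008000UL;
        unsigned long end   = 0x80300000UL;
        unsigned long addr;

        for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE)
                printf("1:1 map sections at %#lx and %#lx\n",
                       addr, addr + SECTION_SIZE);
        return 0;
}
```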
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 065d35de0e0..1953e3d21ab 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,19 @@
8 | #include <asm/memory.h> | 8 | #include <asm/memory.h> |
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | 10 | ||
11 | #define PROC_INFO \ | ||
12 | VMLINUX_SYMBOL(__proc_info_begin) = .; \ | ||
13 | *(.proc.info.init) \ | ||
14 | VMLINUX_SYMBOL(__proc_info_end) = .; | ||
15 | |||
16 | #ifdef CONFIG_HOTPLUG_CPU | ||
17 | #define ARM_CPU_DISCARD(x) | ||
18 | #define ARM_CPU_KEEP(x) x | ||
19 | #else | ||
20 | #define ARM_CPU_DISCARD(x) x | ||
21 | #define ARM_CPU_KEEP(x) | ||
22 | #endif | ||
23 | |||
11 | OUTPUT_ARCH(arm) | 24 | OUTPUT_ARCH(arm) |
12 | ENTRY(stext) | 25 | ENTRY(stext) |
13 | 26 | ||
@@ -31,9 +44,7 @@ SECTIONS
31 | HEAD_TEXT | 44 | HEAD_TEXT |
32 | INIT_TEXT | 45 | INIT_TEXT |
33 | _einittext = .; | 46 | _einittext = .; |
34 | __proc_info_begin = .; | 47 | ARM_CPU_DISCARD(PROC_INFO) |
35 | *(.proc.info.init) | ||
36 | __proc_info_end = .; | ||
37 | __arch_info_begin = .; | 48 | __arch_info_begin = .; |
38 | *(.arch.info.init) | 49 | *(.arch.info.init) |
39 | __arch_info_end = .; | 50 | __arch_info_end = .; |
@@ -73,10 +84,8 @@ SECTIONS
73 | /DISCARD/ : { | 84 | /DISCARD/ : { |
74 | *(.ARM.exidx.exit.text) | 85 | *(.ARM.exidx.exit.text) |
75 | *(.ARM.extab.exit.text) | 86 | *(.ARM.extab.exit.text) |
76 | #ifndef CONFIG_HOTPLUG_CPU | 87 | ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) |
77 | *(.ARM.exidx.cpuexit.text) | 88 | ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) |
78 | *(.ARM.extab.cpuexit.text) | ||
79 | #endif | ||
80 | #ifndef CONFIG_HOTPLUG | 89 | #ifndef CONFIG_HOTPLUG |
81 | *(.ARM.exidx.devexit.text) | 90 | *(.ARM.exidx.devexit.text) |
82 | *(.ARM.extab.devexit.text) | 91 | *(.ARM.extab.devexit.text) |
@@ -105,6 +114,7 @@ SECTIONS
105 | *(.glue_7) | 114 | *(.glue_7) |
106 | *(.glue_7t) | 115 | *(.glue_7t) |
107 | *(.got) /* Global offset table */ | 116 | *(.got) /* Global offset table */ |
117 | ARM_CPU_KEEP(PROC_INFO) | ||
108 | } | 118 | } |
109 | 119 | ||
110 | RO_DATA(PAGE_SIZE) | 120 | RO_DATA(PAGE_SIZE) |
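The ARM_CPU_KEEP(PROC_INFO) placement in the .text output section is what lets a CPU brought up after boot still find its proc_info entry once init memory has been freed. Roughly how the resident table gets consumed is sketched below; this is a paraphrase of the setup path rather than code from this diff, and it assumes read_cpuid_id() and struct proc_info_list from asm/cputype.h and asm/procinfo.h.

```c
struct proc_info_list *list;

/* resolve this CPU's entry in the linker-built .proc.info.init table */
list = lookup_processor_type(read_cpuid_id());
if (list == NULL)
        pr_crit("CPU: unrecognised processor ID %08x\n", read_cpuid_id());
```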