Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/asm-offsets.c    |   3
-rw-r--r--  arch/arm/kernel/entry-armv.S     | 277
-rw-r--r--  arch/arm/kernel/entry-header.S   |  19
-rw-r--r--  arch/arm/kernel/head-nommu.S     |   8
-rw-r--r--  arch/arm/kernel/head.S           |   8
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  |  12
-rw-r--r--  arch/arm/kernel/irq.c            |  51
-rw-r--r--  arch/arm/kernel/perf_event.c     |  10
-rw-r--r--  arch/arm/kernel/pmu.c            |  87
-rw-r--r--  arch/arm/kernel/setup.c          | 101
-rw-r--r--  arch/arm/kernel/sleep.S          |  84
-rw-r--r--  arch/arm/kernel/smp.c            |  11
-rw-r--r--  arch/arm/kernel/smp_scu.c        |   2
-rw-r--r--  arch/arm/kernel/smp_twd.c        |   2
-rw-r--r--  arch/arm/kernel/tcm.c            |  68
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S    | 126
16 files changed, 464 insertions(+), 405 deletions(-)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 927522cfc12..16baba2e436 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -59,6 +59,9 @@ int main(void)
59 DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); 59 DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
60 DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); 60 DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
61 DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); 61 DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
62#ifdef CONFIG_SMP
63 DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
64#endif
62#ifdef CONFIG_ARM_THUMBEE 65#ifdef CONFIG_ARM_THUMBEE
63 DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); 66 DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
64#endif 67#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd51ca..fa02a22a4c4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -29,21 +29,53 @@
29#include <asm/entry-macro-multi.S> 29#include <asm/entry-macro-multi.S>
30 30
31/* 31/*
32 * Interrupt handling. Preserves r7, r8, r9 32 * Interrupt handling.
33 */ 33 */
34 .macro irq_handler 34 .macro irq_handler
35#ifdef CONFIG_MULTI_IRQ_HANDLER 35#ifdef CONFIG_MULTI_IRQ_HANDLER
36 ldr r5, =handle_arch_irq 36 ldr r1, =handle_arch_irq
37 mov r0, sp 37 mov r0, sp
38 ldr r5, [r5] 38 ldr r1, [r1]
39 adr lr, BSYM(9997f) 39 adr lr, BSYM(9997f)
40 teq r5, #0 40 teq r1, #0
41 movne pc, r5 41 movne pc, r1
42#endif 42#endif
43 arch_irq_handler_default 43 arch_irq_handler_default
449997: 449997:
45 .endm 45 .endm
46 46
47 .macro pabt_helper
48 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
49#ifdef MULTI_PABORT
50 ldr ip, .LCprocfns
51 mov lr, pc
52 ldr pc, [ip, #PROCESSOR_PABT_FUNC]
53#else
54 bl CPU_PABORT_HANDLER
55#endif
56 .endm
57
58 .macro dabt_helper
59
60 @
61 @ Call the processor-specific abort handler:
62 @
63 @ r2 - pt_regs
64 @ r4 - aborted context pc
65 @ r5 - aborted context psr
66 @
67 @ The abort handler must return the aborted address in r0, and
68 @ the fault status register in r1. r9 must be preserved.
69 @
70#ifdef MULTI_DABORT
71 ldr ip, .LCprocfns
72 mov lr, pc
73 ldr pc, [ip, #PROCESSOR_DABT_FUNC]
74#else
75 bl CPU_DABORT_HANDLER
76#endif
77 .endm
78
47#ifdef CONFIG_KPROBES 79#ifdef CONFIG_KPROBES
48 .section .kprobes.text,"ax",%progbits 80 .section .kprobes.text,"ax",%progbits
49#else 81#else
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid)
126 SPFIX( subeq sp, sp, #4 ) 158 SPFIX( subeq sp, sp, #4 )
127 stmia sp, {r1 - r12} 159 stmia sp, {r1 - r12}
128 160
129 ldmia r0, {r1 - r3} 161 ldmia r0, {r3 - r5}
130 add r5, sp, #S_SP - 4 @ here for interlock avoidance 162 add r7, sp, #S_SP - 4 @ here for interlock avoidance
131 mov r4, #-1 @ "" "" "" "" 163 mov r6, #-1 @ "" "" "" ""
132 add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) 164 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
133 SPFIX( addeq r0, r0, #4 ) 165 SPFIX( addeq r2, r2, #4 )
134 str r1, [sp, #-4]! @ save the "real" r0 copied 166 str r3, [sp, #-4]! @ save the "real" r0 copied
135 @ from the exception stack 167 @ from the exception stack
136 168
137 mov r1, lr 169 mov r3, lr
138 170
139 @ 171 @
140 @ We are now ready to fill in the remaining blanks on the stack: 172 @ We are now ready to fill in the remaining blanks on the stack:
141 @ 173 @
142 @ r0 - sp_svc 174 @ r2 - sp_svc
143 @ r1 - lr_svc 175 @ r3 - lr_svc
144 @ r2 - lr_<exception>, already fixed up for correct return/restart 176 @ r4 - lr_<exception>, already fixed up for correct return/restart
145 @ r3 - spsr_<exception> 177 @ r5 - spsr_<exception>
146 @ r4 - orig_r0 (see pt_regs definition in ptrace.h) 178 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
147 @ 179 @
148 stmia r5, {r0 - r4} 180 stmia r7, {r2 - r6}
181
182#ifdef CONFIG_TRACE_IRQFLAGS
183 bl trace_hardirqs_off
184#endif
149 .endm 185 .endm
150 186
151 .align 5 187 .align 5
152__dabt_svc: 188__dabt_svc:
153 svc_entry 189 svc_entry
154
155 @
156 @ get ready to re-enable interrupts if appropriate
157 @
158 mrs r9, cpsr
159 tst r3, #PSR_I_BIT
160 biceq r9, r9, #PSR_I_BIT
161
162 @
163 @ Call the processor-specific abort handler:
164 @
165 @ r2 - aborted context pc
166 @ r3 - aborted context cpsr
167 @
168 @ The abort handler must return the aborted address in r0, and
169 @ the fault status register in r1. r9 must be preserved.
170 @
171#ifdef MULTI_DABORT
172 ldr r4, .LCprocfns
173 mov lr, pc
174 ldr pc, [r4, #PROCESSOR_DABT_FUNC]
175#else
176 bl CPU_DABORT_HANDLER
177#endif
178
179 @
180 @ set desired IRQ state, then call main handler
181 @
182 debug_entry r1
183 msr cpsr_c, r9
184 mov r2, sp 190 mov r2, sp
185 bl do_DataAbort 191 dabt_helper
186 192
187 @ 193 @
188 @ IRQs off again before pulling preserved data off the stack 194 @ IRQs off again before pulling preserved data off the stack
189 @ 195 @
190 disable_irq_notrace 196 disable_irq_notrace
191 197
192 @ 198#ifdef CONFIG_TRACE_IRQFLAGS
193 @ restore SPSR and restart the instruction 199 tst r5, #PSR_I_BIT
194 @ 200 bleq trace_hardirqs_on
195 ldr r2, [sp, #S_PSR] 201 tst r5, #PSR_I_BIT
196 svc_exit r2 @ return from exception 202 blne trace_hardirqs_off
203#endif
204 svc_exit r5 @ return from exception
197 UNWIND(.fnend ) 205 UNWIND(.fnend )
198ENDPROC(__dabt_svc) 206ENDPROC(__dabt_svc)
199 207
200 .align 5 208 .align 5
201__irq_svc: 209__irq_svc:
202 svc_entry 210 svc_entry
211 irq_handler
203 212
204#ifdef CONFIG_TRACE_IRQFLAGS
205 bl trace_hardirqs_off
206#endif
207#ifdef CONFIG_PREEMPT 213#ifdef CONFIG_PREEMPT
208 get_thread_info tsk 214 get_thread_info tsk
209 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count 215 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
210 add r7, r8, #1 @ increment it
211 str r7, [tsk, #TI_PREEMPT]
212#endif
213
214 irq_handler
215#ifdef CONFIG_PREEMPT
216 str r8, [tsk, #TI_PREEMPT] @ restore preempt count
217 ldr r0, [tsk, #TI_FLAGS] @ get flags 216 ldr r0, [tsk, #TI_FLAGS] @ get flags
218 teq r8, #0 @ if preempt count != 0 217 teq r8, #0 @ if preempt count != 0
219 movne r0, #0 @ force flags to 0 218 movne r0, #0 @ force flags to 0
220 tst r0, #_TIF_NEED_RESCHED 219 tst r0, #_TIF_NEED_RESCHED
221 blne svc_preempt 220 blne svc_preempt
222#endif 221#endif
223 ldr r4, [sp, #S_PSR] @ irqs are already disabled 222
224#ifdef CONFIG_TRACE_IRQFLAGS 223#ifdef CONFIG_TRACE_IRQFLAGS
225 tst r4, #PSR_I_BIT 224 @ The parent context IRQs must have been enabled to get here in
226 bleq trace_hardirqs_on 225 @ the first place, so there's no point checking the PSR I bit.
226 bl trace_hardirqs_on
227#endif 227#endif
228 svc_exit r4 @ return from exception 228 svc_exit r5 @ return from exception
229 UNWIND(.fnend ) 229 UNWIND(.fnend )
230ENDPROC(__irq_svc) 230ENDPROC(__irq_svc)
231 231
@@ -251,7 +251,6 @@ __und_svc:
251#else 251#else
252 svc_entry 252 svc_entry
253#endif 253#endif
254
255 @ 254 @
256 @ call emulation code, which returns using r9 if it has emulated 255 @ call emulation code, which returns using r9 if it has emulated
257 @ the instruction, or the more conventional lr if we are to treat 256 @ the instruction, or the more conventional lr if we are to treat
@@ -260,15 +259,16 @@ __und_svc:
260 @ r0 - instruction 259 @ r0 - instruction
261 @ 260 @
262#ifndef CONFIG_THUMB2_KERNEL 261#ifndef CONFIG_THUMB2_KERNEL
263 ldr r0, [r2, #-4] 262 ldr r0, [r4, #-4]
264#else 263#else
265 ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2 264 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
266 and r9, r0, #0xf800 265 and r9, r0, #0xf800
267 cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 266 cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
268 ldrhhs r9, [r2] @ bottom 16 bits 267 ldrhhs r9, [r4] @ bottom 16 bits
269 orrhs r0, r9, r0, lsl #16 268 orrhs r0, r9, r0, lsl #16
270#endif 269#endif
271 adr r9, BSYM(1f) 270 adr r9, BSYM(1f)
271 mov r2, r4
272 bl call_fpe 272 bl call_fpe
273 273
274 mov r0, sp @ struct pt_regs *regs 274 mov r0, sp @ struct pt_regs *regs
@@ -282,45 +282,35 @@ __und_svc:
282 @ 282 @
283 @ restore SPSR and restart the instruction 283 @ restore SPSR and restart the instruction
284 @ 284 @
285 ldr r2, [sp, #S_PSR] @ Get SVC cpsr 285 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
286 svc_exit r2 @ return from exception 286#ifdef CONFIG_TRACE_IRQFLAGS
287 tst r5, #PSR_I_BIT
288 bleq trace_hardirqs_on
289 tst r5, #PSR_I_BIT
290 blne trace_hardirqs_off
291#endif
292 svc_exit r5 @ return from exception
287 UNWIND(.fnend ) 293 UNWIND(.fnend )
288ENDPROC(__und_svc) 294ENDPROC(__und_svc)
289 295
290 .align 5 296 .align 5
291__pabt_svc: 297__pabt_svc:
292 svc_entry 298 svc_entry
293
294 @
295 @ re-enable interrupts if appropriate
296 @
297 mrs r9, cpsr
298 tst r3, #PSR_I_BIT
299 biceq r9, r9, #PSR_I_BIT
300
301 mov r0, r2 @ pass address of aborted instruction.
302#ifdef MULTI_PABORT
303 ldr r4, .LCprocfns
304 mov lr, pc
305 ldr pc, [r4, #PROCESSOR_PABT_FUNC]
306#else
307 bl CPU_PABORT_HANDLER
308#endif
309 debug_entry r1
310 msr cpsr_c, r9 @ Maybe enable interrupts
311 mov r2, sp @ regs 299 mov r2, sp @ regs
312 bl do_PrefetchAbort @ call abort handler 300 pabt_helper
313 301
314 @ 302 @
315 @ IRQs off again before pulling preserved data off the stack 303 @ IRQs off again before pulling preserved data off the stack
316 @ 304 @
317 disable_irq_notrace 305 disable_irq_notrace
318 306
319 @ 307#ifdef CONFIG_TRACE_IRQFLAGS
320 @ restore SPSR and restart the instruction 308 tst r5, #PSR_I_BIT
321 @ 309 bleq trace_hardirqs_on
322 ldr r2, [sp, #S_PSR] 310 tst r5, #PSR_I_BIT
323 svc_exit r2 @ return from exception 311 blne trace_hardirqs_off
312#endif
313 svc_exit r5 @ return from exception
324 UNWIND(.fnend ) 314 UNWIND(.fnend )
325ENDPROC(__pabt_svc) 315ENDPROC(__pabt_svc)
326 316
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc)
351 ARM( stmib sp, {r1 - r12} ) 341 ARM( stmib sp, {r1 - r12} )
352 THUMB( stmia sp, {r0 - r12} ) 342 THUMB( stmia sp, {r0 - r12} )
353 343
354 ldmia r0, {r1 - r3} 344 ldmia r0, {r3 - r5}
355 add r0, sp, #S_PC @ here for interlock avoidance 345 add r0, sp, #S_PC @ here for interlock avoidance
356 mov r4, #-1 @ "" "" "" "" 346 mov r6, #-1 @ "" "" "" ""
357 347
358 str r1, [sp] @ save the "real" r0 copied 348 str r3, [sp] @ save the "real" r0 copied
359 @ from the exception stack 349 @ from the exception stack
360 350
361 @ 351 @
362 @ We are now ready to fill in the remaining blanks on the stack: 352 @ We are now ready to fill in the remaining blanks on the stack:
363 @ 353 @
364 @ r2 - lr_<exception>, already fixed up for correct return/restart 354 @ r4 - lr_<exception>, already fixed up for correct return/restart
365 @ r3 - spsr_<exception> 355 @ r5 - spsr_<exception>
366 @ r4 - orig_r0 (see pt_regs definition in ptrace.h) 356 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
367 @ 357 @
368 @ Also, separately save sp_usr and lr_usr 358 @ Also, separately save sp_usr and lr_usr
369 @ 359 @
370 stmia r0, {r2 - r4} 360 stmia r0, {r4 - r6}
371 ARM( stmdb r0, {sp, lr}^ ) 361 ARM( stmdb r0, {sp, lr}^ )
372 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) 362 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
373 363
@@ -380,6 +370,10 @@ ENDPROC(__pabt_svc)
380 @ Clear FP to mark the first stack frame 370 @ Clear FP to mark the first stack frame
381 @ 371 @
382 zero_fp 372 zero_fp
373
374#ifdef CONFIG_IRQSOFF_TRACER
375 bl trace_hardirqs_off
376#endif
383 .endm 377 .endm
384 378
385 .macro kuser_cmpxchg_check 379 .macro kuser_cmpxchg_check
@@ -391,7 +385,7 @@ ENDPROC(__pabt_svc)
391 @ if it was interrupted in a critical region. Here we 385 @ if it was interrupted in a critical region. Here we
392 @ perform a quick test inline since it should be false 386 @ perform a quick test inline since it should be false
393 @ 99.9999% of the time. The rest is done out of line. 387 @ 99.9999% of the time. The rest is done out of line.
394 cmp r2, #TASK_SIZE 388 cmp r4, #TASK_SIZE
395 blhs kuser_cmpxchg_fixup 389 blhs kuser_cmpxchg_fixup
396#endif 390#endif
397#endif 391#endif
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc)
401__dabt_usr: 395__dabt_usr:
402 usr_entry 396 usr_entry
403 kuser_cmpxchg_check 397 kuser_cmpxchg_check
404
405 @
406 @ Call the processor-specific abort handler:
407 @
408 @ r2 - aborted context pc
409 @ r3 - aborted context cpsr
410 @
411 @ The abort handler must return the aborted address in r0, and
412 @ the fault status register in r1.
413 @
414#ifdef MULTI_DABORT
415 ldr r4, .LCprocfns
416 mov lr, pc
417 ldr pc, [r4, #PROCESSOR_DABT_FUNC]
418#else
419 bl CPU_DABORT_HANDLER
420#endif
421
422 @
423 @ IRQs on, then call the main handler
424 @
425 debug_entry r1
426 enable_irq
427 mov r2, sp 398 mov r2, sp
428 adr lr, BSYM(ret_from_exception) 399 dabt_helper
429 b do_DataAbort 400 b ret_from_exception
430 UNWIND(.fnend ) 401 UNWIND(.fnend )
431ENDPROC(__dabt_usr) 402ENDPROC(__dabt_usr)
432 403
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr)
434__irq_usr: 405__irq_usr:
435 usr_entry 406 usr_entry
436 kuser_cmpxchg_check 407 kuser_cmpxchg_check
437
438#ifdef CONFIG_IRQSOFF_TRACER
439 bl trace_hardirqs_off
440#endif
441
442 get_thread_info tsk
443#ifdef CONFIG_PREEMPT
444 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
445 add r7, r8, #1 @ increment it
446 str r7, [tsk, #TI_PREEMPT]
447#endif
448
449 irq_handler 408 irq_handler
450#ifdef CONFIG_PREEMPT 409 get_thread_info tsk
451 ldr r0, [tsk, #TI_PREEMPT]
452 str r8, [tsk, #TI_PREEMPT]
453 teq r0, r7
454 ARM( strne r0, [r0, -r0] )
455 THUMB( movne r0, #0 )
456 THUMB( strne r0, [r0] )
457#endif
458
459 mov why, #0 410 mov why, #0
460 b ret_to_user_from_irq 411 b ret_to_user_from_irq
461 UNWIND(.fnend ) 412 UNWIND(.fnend )
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr)
467__und_usr: 418__und_usr:
468 usr_entry 419 usr_entry
469 420
421 mov r2, r4
422 mov r3, r5
423
470 @ 424 @
471 @ fall through to the emulation code, which returns using r9 if 425 @ fall through to the emulation code, which returns using r9 if
472 @ it has emulated the instruction, or the more conventional lr 426 @ it has emulated the instruction, or the more conventional lr
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown)
682 .align 5 636 .align 5
683__pabt_usr: 637__pabt_usr:
684 usr_entry 638 usr_entry
685
686 mov r0, r2 @ pass address of aborted instruction.
687#ifdef MULTI_PABORT
688 ldr r4, .LCprocfns
689 mov lr, pc
690 ldr pc, [r4, #PROCESSOR_PABT_FUNC]
691#else
692 bl CPU_PABORT_HANDLER
693#endif
694 debug_entry r1
695 enable_irq @ Enable interrupts
696 mov r2, sp @ regs 639 mov r2, sp @ regs
697 bl do_PrefetchAbort @ call abort handler 640 pabt_helper
698 UNWIND(.fnend ) 641 UNWIND(.fnend )
699 /* fall through */ 642 /* fall through */
700/* 643/*
@@ -927,13 +870,13 @@ __kuser_cmpxchg: @ 0xffff0fc0
927 .text 870 .text
928kuser_cmpxchg_fixup: 871kuser_cmpxchg_fixup:
929 @ Called from kuser_cmpxchg_check macro. 872 @ Called from kuser_cmpxchg_check macro.
930 @ r2 = address of interrupted insn (must be preserved). 873 @ r4 = address of interrupted insn (must be preserved).
931 @ sp = saved regs. r7 and r8 are clobbered. 874 @ sp = saved regs. r7 and r8 are clobbered.
932 @ 1b = first critical insn, 2b = last critical insn. 875 @ 1b = first critical insn, 2b = last critical insn.
933 @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b. 876 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
934 mov r7, #0xffff0fff 877 mov r7, #0xffff0fff
935 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) 878 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
936 subs r8, r2, r7 879 subs r8, r4, r7
937 rsbcss r8, r8, #(2b - 1b) 880 rsbcss r8, r8, #(2b - 1b)
938 strcs r7, [sp, #S_PC] 881 strcs r7, [sp, #S_PC]
939 mov pc, lr 882 mov pc, lr
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 051166c2a93..4d6ad8348e8 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -165,25 +165,6 @@
165 .endm 165 .endm
166#endif /* !CONFIG_THUMB2_KERNEL */ 166#endif /* !CONFIG_THUMB2_KERNEL */
167 167
168 @
169 @ Debug exceptions are taken as prefetch or data aborts.
170 @ We must disable preemption during the handler so that
171 @ we can access the debug registers safely.
172 @
173 .macro debug_entry, fsr
174#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
175 ldr r4, =0x40f @ mask out fsr.fs
176 and r5, r4, \fsr
177 cmp r5, #2 @ debug exception
178 bne 1f
179 get_thread_info r10
180 ldr r6, [r10, #TI_PREEMPT] @ get preempt count
181 add r11, r6, #1 @ increment it
182 str r11, [r10, #TI_PREEMPT]
1831:
184#endif
185 .endm
186
187/* 168/*
188 * These are the registers used in the syscall handler, and allow us to 169 * These are the registers used in the syscall handler, and allow us to
189 * have in theory up to 7 arguments to a function - r0 to r6. 170 * have in theory up to 7 arguments to a function - r0 to r6.
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6b1e0ad9ec3..d46f25968be 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,8 +32,16 @@
32 * numbers for r1. 32 * numbers for r1.
33 * 33 *
34 */ 34 */
35 .arm
36
35 __HEAD 37 __HEAD
36ENTRY(stext) 38ENTRY(stext)
39
40 THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
41 THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
42 THUMB( .thumb ) @ switch to Thumb now.
43 THUMB(1: )
44
37 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode 45 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
38 @ and irqs disabled 46 @ and irqs disabled
39#ifndef CONFIG_CPU_CP15 47#ifndef CONFIG_CPU_CP15
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0ebb2..742b6108a00 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,8 +71,16 @@
71 * crap here - that's what the boot loader (or in extreme, well justified 71 * crap here - that's what the boot loader (or in extreme, well justified
72 * circumstances, zImage) is for. 72 * circumstances, zImage) is for.
73 */ 73 */
74 .arm
75
74 __HEAD 76 __HEAD
75ENTRY(stext) 77ENTRY(stext)
78
79 THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
80 THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
81 THUMB( .thumb ) @ switch to Thumb now.
82 THUMB(1: )
83
76 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode 84 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
77 @ and irqs disabled 85 @ and irqs disabled
78 mrc p15, 0, r9, c0, c0 @ get processor id 86 mrc p15, 0, r9, c0, c0 @ get processor id
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 87acc25d7a3..a927ca1f556 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -796,7 +796,7 @@ unlock:
796 796
797/* 797/*
798 * Called from either the Data Abort Handler [watchpoint] or the 798 * Called from either the Data Abort Handler [watchpoint] or the
799 * Prefetch Abort Handler [breakpoint] with preemption disabled. 799 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
800 */ 800 */
801static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, 801static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
802 struct pt_regs *regs) 802 struct pt_regs *regs)
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
804 int ret = 0; 804 int ret = 0;
805 u32 dscr; 805 u32 dscr;
806 806
807 /* We must be called with preemption disabled. */ 807 preempt_disable();
808 WARN_ON(preemptible()); 808
809 if (interrupts_enabled(regs))
810 local_irq_enable();
809 811
810 /* We only handle watchpoints and hardware breakpoints. */ 812 /* We only handle watchpoints and hardware breakpoints. */
811 ARM_DBG_READ(c1, 0, dscr); 813 ARM_DBG_READ(c1, 0, dscr);
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
824 ret = 1; /* Unhandled fault. */ 826 ret = 1; /* Unhandled fault. */
825 } 827 }
826 828
827 /*
828 * Re-enable preemption after it was disabled in the
829 * low-level exception handling code.
830 */
831 preempt_enable(); 829 preempt_enable();
832 830
833 return ret; 831 return ret;
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad03fcc..0f928a131af 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void)
131 131
132#ifdef CONFIG_HOTPLUG_CPU 132#ifdef CONFIG_HOTPLUG_CPU
133 133
134static bool migrate_one_irq(struct irq_data *d) 134static bool migrate_one_irq(struct irq_desc *desc)
135{ 135{
136 unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); 136 struct irq_data *d = irq_desc_get_irq_data(desc);
137 const struct cpumask *affinity = d->affinity;
138 struct irq_chip *c;
137 bool ret = false; 139 bool ret = false;
138 140
139 if (cpu >= nr_cpu_ids) { 141 /*
140 cpu = cpumask_any(cpu_online_mask); 142 * If this is a per-CPU interrupt, or the affinity does not
143 * include this CPU, then we have nothing to do.
144 */
145 if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
146 return false;
147
148 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
149 affinity = cpu_online_mask;
141 ret = true; 150 ret = true;
142 } 151 }
143 152
144 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); 153 c = irq_data_get_irq_chip(d);
145 154 if (c->irq_set_affinity)
146 d->chip->irq_set_affinity(d, cpumask_of(cpu), true); 155 c->irq_set_affinity(d, affinity, true);
156 else
157 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
147 158
148 return ret; 159 return ret;
149} 160}
150 161
151/* 162/*
152 * The CPU has been marked offline. Migrate IRQs off this CPU. If 163 * The current CPU has been marked offline. Migrate IRQs off this CPU.
153 * the affinity settings do not allow other CPUs, force them onto any 164 * If the affinity settings do not allow other CPUs, force them onto any
154 * available CPU. 165 * available CPU.
166 *
167 * Note: we must iterate over all IRQs, whether they have an attached
168 * action structure or not, as we need to get chained interrupts too.
155 */ 169 */
156void migrate_irqs(void) 170void migrate_irqs(void)
157{ 171{
158 unsigned int i, cpu = smp_processor_id(); 172 unsigned int i;
159 struct irq_desc *desc; 173 struct irq_desc *desc;
160 unsigned long flags; 174 unsigned long flags;
161 175
162 local_irq_save(flags); 176 local_irq_save(flags);
163 177
164 for_each_irq_desc(i, desc) { 178 for_each_irq_desc(i, desc) {
165 struct irq_data *d = &desc->irq_data;
166 bool affinity_broken = false; 179 bool affinity_broken = false;
167 180
168 raw_spin_lock(&desc->lock); 181 if (!desc)
169 do { 182 continue;
170 if (desc->action == NULL)
171 break;
172
173 if (d->node != cpu)
174 break;
175 183
176 affinity_broken = migrate_one_irq(d); 184 raw_spin_lock(&desc->lock);
177 } while (0); 185 affinity_broken = migrate_one_irq(desc);
178 raw_spin_unlock(&desc->lock); 186 raw_spin_unlock(&desc->lock);
179 187
180 if (affinity_broken && printk_ratelimit()) 188 if (affinity_broken && printk_ratelimit())
181 pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); 189 pr_warning("IRQ%u no longer affine to CPU%u\n", i,
190 smp_processor_id());
182 } 191 }
183 192
184 local_irq_restore(flags); 193 local_irq_restore(flags);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d53c0abc4dd..8d8507858e5 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void)
435 if (irq >= 0) 435 if (irq >= 0)
436 free_irq(irq, NULL); 436 free_irq(irq, NULL);
437 } 437 }
438 release_pmu(pmu_device); 438 release_pmu(ARM_PMU_DEVICE_CPU);
439 pmu_device = NULL; 439 pmu_device = NULL;
440 } 440 }
441 441
@@ -454,7 +454,7 @@ armpmu_release_hardware(void)
454 } 454 }
455 armpmu->stop(); 455 armpmu->stop();
456 456
457 release_pmu(pmu_device); 457 release_pmu(ARM_PMU_DEVICE_CPU);
458 pmu_device = NULL; 458 pmu_device = NULL;
459} 459}
460 460
@@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event)
583static void armpmu_enable(struct pmu *pmu) 583static void armpmu_enable(struct pmu *pmu)
584{ 584{
585 /* Enable all of the perf events on hardware. */ 585 /* Enable all of the perf events on hardware. */
586 int idx; 586 int idx, enabled = 0;
587 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 587 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
588 588
589 if (!armpmu) 589 if (!armpmu)
@@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu)
596 continue; 596 continue;
597 597
598 armpmu->enable(&event->hw, idx); 598 armpmu->enable(&event->hw, idx);
599 enabled = 1;
599 } 600 }
600 601
601 armpmu->start(); 602 if (enabled)
603 armpmu->start();
602} 604}
603 605
604static void armpmu_disable(struct pmu *pmu) 606static void armpmu_disable(struct pmu *pmu)
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c79eec1926..2b70709376c 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -17,6 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/of_device.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21 22
22#include <asm/pmu.h> 23#include <asm/pmu.h>
@@ -25,36 +26,88 @@ static volatile long pmu_lock;
25 26
26static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; 27static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
27 28
28static int __devinit pmu_device_probe(struct platform_device *pdev) 29static int __devinit pmu_register(struct platform_device *pdev,
30 enum arm_pmu_type type)
29{ 31{
30 32 if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
31 if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
32 pr_warning("received registration request for unknown " 33 pr_warning("received registration request for unknown "
33 "device %d\n", pdev->id); 34 "device %d\n", type);
34 return -EINVAL; 35 return -EINVAL;
35 } 36 }
36 37
37 if (pmu_devices[pdev->id]) 38 if (pmu_devices[type]) {
38 pr_warning("registering new PMU device type %d overwrites " 39 pr_warning("rejecting duplicate registration of PMU device "
39 "previous registration!\n", pdev->id); 40 "type %d.", type);
40 else 41 return -ENOSPC;
41 pr_info("registered new PMU device of type %d\n", 42 }
42 pdev->id);
43 43
44 pmu_devices[pdev->id] = pdev; 44 pr_info("registered new PMU device of type %d\n", type);
45 pmu_devices[type] = pdev;
45 return 0; 46 return 0;
46} 47}
47 48
48static struct platform_driver pmu_driver = { 49#define OF_MATCH_PMU(_name, _type) { \
50 .compatible = _name, \
51 .data = (void *)_type, \
52}
53
54#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
55
56static struct of_device_id armpmu_of_device_ids[] = {
57 OF_MATCH_CPU("arm,cortex-a9-pmu"),
58 OF_MATCH_CPU("arm,cortex-a8-pmu"),
59 OF_MATCH_CPU("arm,arm1136-pmu"),
60 OF_MATCH_CPU("arm,arm1176-pmu"),
61 {},
62};
63
64#define PLAT_MATCH_PMU(_name, _type) { \
65 .name = _name, \
66 .driver_data = _type, \
67}
68
69#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
70
71static struct platform_device_id armpmu_plat_device_ids[] = {
72 PLAT_MATCH_CPU("arm-pmu"),
73 {},
74};
75
76enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
77{
78 const struct of_device_id *of_id;
79 const struct platform_device_id *pdev_id;
80
81 /* provided by of_device_id table */
82 if (pdev->dev.of_node) {
83 of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
84 BUG_ON(!of_id);
85 return (enum arm_pmu_type)of_id->data;
86 }
87
88 /* Provided by platform_device_id table */
89 pdev_id = platform_get_device_id(pdev);
90 BUG_ON(!pdev_id);
91 return pdev_id->driver_data;
92}
93
94static int __devinit armpmu_device_probe(struct platform_device *pdev)
95{
96 return pmu_register(pdev, armpmu_device_type(pdev));
97}
98
99static struct platform_driver armpmu_driver = {
49 .driver = { 100 .driver = {
50 .name = "arm-pmu", 101 .name = "arm-pmu",
102 .of_match_table = armpmu_of_device_ids,
51 }, 103 },
52 .probe = pmu_device_probe, 104 .probe = armpmu_device_probe,
105 .id_table = armpmu_plat_device_ids,
53}; 106};
54 107
55static int __init register_pmu_driver(void) 108static int __init register_pmu_driver(void)
56{ 109{
57 return platform_driver_register(&pmu_driver); 110 return platform_driver_register(&armpmu_driver);
58} 111}
59device_initcall(register_pmu_driver); 112device_initcall(register_pmu_driver);
60 113
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
77EXPORT_SYMBOL_GPL(reserve_pmu); 130EXPORT_SYMBOL_GPL(reserve_pmu);
78 131
79int 132int
80release_pmu(struct platform_device *pdev) 133release_pmu(enum arm_pmu_type device)
81{ 134{
82 if (WARN_ON(pdev != pmu_devices[pdev->id])) 135 if (WARN_ON(!pmu_devices[device]))
83 return -EINVAL; 136 return -EINVAL;
84 clear_bit_unlock(pdev->id, &pmu_lock); 137 clear_bit_unlock(device, &pmu_lock);
85 return 0; 138 return 0;
86} 139}
87EXPORT_SYMBOL_GPL(release_pmu); 140EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ed11fb08b05..9c3278f3779 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup);
73#endif 73#endif
74 74
75extern void paging_init(struct machine_desc *desc); 75extern void paging_init(struct machine_desc *desc);
76extern void sanity_check_meminfo(void);
76extern void reboot_setup(char *str); 77extern void reboot_setup(char *str);
77 78
78unsigned int processor_id; 79unsigned int processor_id;
@@ -342,54 +343,6 @@ static void __init feat_v6_fixup(void)
342 elf_hwcap &= ~HWCAP_TLS; 343 elf_hwcap &= ~HWCAP_TLS;
343} 344}
344 345
345static void __init setup_processor(void)
346{
347 struct proc_info_list *list;
348
349 /*
350 * locate processor in the list of supported processor
351 * types. The linker builds this table for us from the
352 * entries in arch/arm/mm/proc-*.S
353 */
354 list = lookup_processor_type(read_cpuid_id());
355 if (!list) {
356 printk("CPU configuration botched (ID %08x), unable "
357 "to continue.\n", read_cpuid_id());
358 while (1);
359 }
360
361 cpu_name = list->cpu_name;
362
363#ifdef MULTI_CPU
364 processor = *list->proc;
365#endif
366#ifdef MULTI_TLB
367 cpu_tlb = *list->tlb;
368#endif
369#ifdef MULTI_USER
370 cpu_user = *list->user;
371#endif
372#ifdef MULTI_CACHE
373 cpu_cache = *list->cache;
374#endif
375
376 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
377 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
378 proc_arch[cpu_architecture()], cr_alignment);
379
380 sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
381 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
382 elf_hwcap = list->elf_hwcap;
383#ifndef CONFIG_ARM_THUMB
384 elf_hwcap &= ~HWCAP_THUMB;
385#endif
386
387 feat_v6_fixup();
388
389 cacheid_init();
390 cpu_proc_init();
391}
392
393/* 346/*
394 * cpu_init - initialise one CPU. 347 * cpu_init - initialise one CPU.
395 * 348 *
@@ -405,6 +358,8 @@ void cpu_init(void)
405 BUG(); 358 BUG();
406 } 359 }
407 360
361 cpu_proc_init();
362
408 /* 363 /*
409 * Define the placement constraint for the inline asm directive below. 364 * Define the placement constraint for the inline asm directive below.
410 * In Thumb-2, msr with an immediate value is not allowed. 365 * In Thumb-2, msr with an immediate value is not allowed.
@@ -441,6 +396,54 @@ void cpu_init(void)
441 : "r14"); 396 : "r14");
442} 397}
443 398
399static void __init setup_processor(void)
400{
401 struct proc_info_list *list;
402
403 /*
404 * locate processor in the list of supported processor
405 * types. The linker builds this table for us from the
406 * entries in arch/arm/mm/proc-*.S
407 */
408 list = lookup_processor_type(read_cpuid_id());
409 if (!list) {
410 printk("CPU configuration botched (ID %08x), unable "
411 "to continue.\n", read_cpuid_id());
412 while (1);
413 }
414
415 cpu_name = list->cpu_name;
416
417#ifdef MULTI_CPU
418 processor = *list->proc;
419#endif
420#ifdef MULTI_TLB
421 cpu_tlb = *list->tlb;
422#endif
423#ifdef MULTI_USER
424 cpu_user = *list->user;
425#endif
426#ifdef MULTI_CACHE
427 cpu_cache = *list->cache;
428#endif
429
430 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
431 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
432 proc_arch[cpu_architecture()], cr_alignment);
433
434 sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
435 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
436 elf_hwcap = list->elf_hwcap;
437#ifndef CONFIG_ARM_THUMB
438 elf_hwcap &= ~HWCAP_THUMB;
439#endif
440
441 feat_v6_fixup();
442
443 cacheid_init();
444 cpu_init();
445}
446
444void __init dump_machine_table(void) 447void __init dump_machine_table(void)
445{ 448{
446 struct machine_desc *p; 449 struct machine_desc *p;
@@ -900,6 +903,7 @@ void __init setup_arch(char **cmdline_p)
900 903
901 parse_early_param(); 904 parse_early_param();
902 905
906 sanity_check_meminfo();
903 arm_memblock_init(&meminfo, mdesc); 907 arm_memblock_init(&meminfo, mdesc);
904 908
905 paging_init(mdesc); 909 paging_init(mdesc);
@@ -913,7 +917,6 @@ void __init setup_arch(char **cmdline_p)
913#endif 917#endif
914 reserve_crashkernel(); 918 reserve_crashkernel();
915 919
916 cpu_init();
917 tcm_init(); 920 tcm_init();
918 921
919#ifdef CONFIG_MULTI_IRQ_HANDLER 922#ifdef CONFIG_MULTI_IRQ_HANDLER
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 6398ead9d1c..dc902f2c684 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -10,64 +10,61 @@
10/* 10/*
11 * Save CPU state for a suspend 11 * Save CPU state for a suspend
12 * r1 = v:p offset 12 * r1 = v:p offset
13 * r3 = virtual return function 13 * r2 = suspend function arg0
14 * Note: sp is decremented to allocate space for CPU state on stack 14 * r3 = suspend function
15 * r0-r3,r9,r10,lr corrupted
16 */ 15 */
17ENTRY(cpu_suspend) 16ENTRY(__cpu_suspend)
18 mov r9, lr 17 stmfd sp!, {r4 - r11, lr}
19#ifdef MULTI_CPU 18#ifdef MULTI_CPU
20 ldr r10, =processor 19 ldr r10, =processor
21 mov r2, sp @ current virtual SP 20 ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
22 ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
23 ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function 21 ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
24 sub sp, sp, r0 @ allocate CPU state on stack 22#else
25 mov r0, sp @ save pointer 23 ldr r5, =cpu_suspend_size
24 ldr ip, =cpu_do_resume
25#endif
26 mov r6, sp @ current virtual SP
27 sub sp, sp, r5 @ allocate CPU state on stack
28 mov r0, sp @ save pointer to CPU save block
26 add ip, ip, r1 @ convert resume fn to phys 29 add ip, ip, r1 @ convert resume fn to phys
27 stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn 30 stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn
28 ldr r3, =sleep_save_sp 31 ldr r5, =sleep_save_sp
29 add r2, sp, r1 @ convert SP to phys 32 add r6, sp, r1 @ convert SP to phys
33 stmfd sp!, {r2, r3} @ save suspend func arg and pointer
30#ifdef CONFIG_SMP 34#ifdef CONFIG_SMP
31 ALT_SMP(mrc p15, 0, lr, c0, c0, 5) 35 ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
32 ALT_UP(mov lr, #0) 36 ALT_UP(mov lr, #0)
33 and lr, lr, #15 37 and lr, lr, #15
34 str r2, [r3, lr, lsl #2] @ save phys SP 38 str r6, [r5, lr, lsl #2] @ save phys SP
35#else 39#else
36 str r2, [r3] @ save phys SP 40 str r6, [r5] @ save phys SP
37#endif 41#endif
42#ifdef MULTI_CPU
38 mov lr, pc 43 mov lr, pc
39 ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state 44 ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
40#else 45#else
41 mov r2, sp @ current virtual SP
42 ldr r0, =cpu_suspend_size
43 sub sp, sp, r0 @ allocate CPU state on stack
44 mov r0, sp @ save pointer
45 stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
46 ldr r3, =sleep_save_sp
47 add r2, sp, r1 @ convert SP to phys
48#ifdef CONFIG_SMP
49 ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
50 ALT_UP(mov lr, #0)
51 and lr, lr, #15
52 str r2, [r3, lr, lsl #2] @ save phys SP
53#else
54 str r2, [r3] @ save phys SP
55#endif
56 bl cpu_do_suspend 46 bl cpu_do_suspend
57#endif 47#endif
58 48
59 @ flush data cache 49 @ flush data cache
60#ifdef MULTI_CACHE 50#ifdef MULTI_CACHE
61 ldr r10, =cpu_cache 51 ldr r10, =cpu_cache
62 mov lr, r9 52 mov lr, pc
63 ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] 53 ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
64#else 54#else
65 mov lr, r9 55 bl __cpuc_flush_kern_all
66 b __cpuc_flush_kern_all
67#endif 56#endif
68ENDPROC(cpu_suspend) 57 adr lr, BSYM(cpu_suspend_abort)
58 ldmfd sp!, {r0, pc} @ call suspend fn
59ENDPROC(__cpu_suspend)
69 .ltorg 60 .ltorg
70 61
62cpu_suspend_abort:
63 ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn
64 mov sp, r2
65 ldmfd sp!, {r4 - r11, pc}
66ENDPROC(cpu_suspend_abort)
67
71/* 68/*
72 * r0 = control register value 69 * r0 = control register value
73 * r1 = v:p offset (preserved by cpu_do_resume) 70 * r1 = v:p offset (preserved by cpu_do_resume)
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on)
97cpu_resume_after_mmu: 94cpu_resume_after_mmu:
98 str r5, [r2, r4, lsl #2] @ restore old mapping 95 str r5, [r2, r4, lsl #2] @ restore old mapping
99 mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache 96 mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
100 mov pc, lr 97 bl cpu_init @ restore the und/abt/irq banked regs
98 mov r0, #0 @ return zero on success
99 ldmfd sp!, {r4 - r11, pc}
101ENDPROC(cpu_resume_after_mmu) 100ENDPROC(cpu_resume_after_mmu)
102 101
103/* 102/*
@@ -120,20 +119,11 @@ ENTRY(cpu_resume)
120 ldr r0, sleep_save_sp @ stack phys addr 119 ldr r0, sleep_save_sp @ stack phys addr
121#endif 120#endif
122 setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off 121 setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
123#ifdef MULTI_CPU 122 @ load v:p, stack, resume fn
124 @ load v:p, stack, return fn, resume fn 123 ARM( ldmia r0!, {r1, sp, pc} )
125 ARM( ldmia r0!, {r1, sp, lr, pc} ) 124THUMB( ldmia r0!, {r1, r2, r3} )
126THUMB( ldmia r0!, {r1, r2, r3, r4} )
127THUMB( mov sp, r2 ) 125THUMB( mov sp, r2 )
128THUMB( mov lr, r3 ) 126THUMB( bx r3 )
129THUMB( bx r4 )
130#else
131 @ load v:p, stack, return fn
132 ARM( ldmia r0!, {r1, sp, lr} )
133THUMB( ldmia r0!, {r1, r2, lr} )
134THUMB( mov sp, r2 )
135 b cpu_do_resume
136#endif
137ENDPROC(cpu_resume) 127ENDPROC(cpu_resume)
138 128
139sleep_save_sp: 129sleep_save_sp:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e7f92a4321f..167e3cbe1f2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -365,8 +365,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
365 */ 365 */
366 if (max_cpus > ncores) 366 if (max_cpus > ncores)
367 max_cpus = ncores; 367 max_cpus = ncores;
368 368 if (ncores > 1 && max_cpus) {
369 if (max_cpus > 1) {
370 /* 369 /*
371 * Enable the local timer or broadcast device for the 370 * Enable the local timer or broadcast device for the
372 * boot CPU, but only if we have more than one CPU. 371 * boot CPU, but only if we have more than one CPU.
@@ -374,6 +373,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
374 percpu_timer_setup(); 373 percpu_timer_setup();
375 374
376 /* 375 /*
376 * Initialise the present map, which describes the set of CPUs
377 * actually populated at the present time. A platform should
378 * re-initialize the map in platform_smp_prepare_cpus() if
379 * present != possible (e.g. physical hotplug).
380 */
381 init_cpu_present(&cpu_possible_map);
382
383 /*
377 * Initialise the SCU if there are more than one CPU 384 * Initialise the SCU if there are more than one CPU
378 * and let them know where to start. 385 * and let them know where to start.
379 */ 386 */
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index a1e757c3439..79ed5e7f204 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -20,6 +20,7 @@
20#define SCU_INVALIDATE 0x0c 20#define SCU_INVALIDATE 0x0c
21#define SCU_FPGA_REVISION 0x10 21#define SCU_FPGA_REVISION 0x10
22 22
23#ifdef CONFIG_SMP
23/* 24/*
24 * Get the number of CPU cores from the SCU configuration 25 * Get the number of CPU cores from the SCU configuration
25 */ 26 */
@@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base)
50 */ 51 */
51 flush_cache_all(); 52 flush_cache_all();
52} 53}
54#endif
53 55
54/* 56/*
55 * Set the executing CPUs power mode as defined. This will be in 57 * Set the executing CPUs power mode as defined. This will be in
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 60636f499cb..2c277d40cee 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void)
115 twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); 115 twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
116 116
117 printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, 117 printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
118 (twd_timer_rate / 1000000) % 100); 118 (twd_timer_rate / 10000) % 100);
119 } 119 }
120} 120}
121 121
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index f5cf660eefc..30e302d33e0 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -19,6 +19,8 @@
19#include "tcm.h" 19#include "tcm.h"
20 20
21static struct gen_pool *tcm_pool; 21static struct gen_pool *tcm_pool;
22static bool dtcm_present;
23static bool itcm_present;
22 24
23/* TCM section definitions from the linker */ 25/* TCM section definitions from the linker */
24extern char __itcm_start, __sitcm_text, __eitcm_text; 26extern char __itcm_start, __sitcm_text, __eitcm_text;
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len)
90} 92}
91EXPORT_SYMBOL(tcm_free); 93EXPORT_SYMBOL(tcm_free);
92 94
95bool tcm_dtcm_present(void)
96{
97 return dtcm_present;
98}
99EXPORT_SYMBOL(tcm_dtcm_present);
100
101bool tcm_itcm_present(void)
102{
103 return itcm_present;
104}
105EXPORT_SYMBOL(tcm_itcm_present);
106
93static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, 107static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
94 u32 *offset) 108 u32 *offset)
95{ 109{
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
134 (tcm_region & 1) ? "" : "not "); 148 (tcm_region & 1) ? "" : "not ");
135 } 149 }
136 150
151 /* Not much fun you can do with a size 0 bank */
152 if (tcm_size == 0)
153 return 0;
154
137 /* Force move the TCM bank to where we want it, enable */ 155 /* Force move the TCM bank to where we want it, enable */
138 tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; 156 tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
139 157
@@ -165,12 +183,20 @@ void __init tcm_init(void)
165 u32 tcm_status = read_cpuid_tcmstatus(); 183 u32 tcm_status = read_cpuid_tcmstatus();
166 u8 dtcm_banks = (tcm_status >> 16) & 0x03; 184 u8 dtcm_banks = (tcm_status >> 16) & 0x03;
167 u8 itcm_banks = (tcm_status & 0x03); 185 u8 itcm_banks = (tcm_status & 0x03);
186 size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
187 size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
168 char *start; 188 char *start;
169 char *end; 189 char *end;
170 char *ram; 190 char *ram;
171 int ret; 191 int ret;
172 int i; 192 int i;
173 193
194 /* Values greater than 2 for D/ITCM banks are "reserved" */
195 if (dtcm_banks > 2)
196 dtcm_banks = 0;
197 if (itcm_banks > 2)
198 itcm_banks = 0;
199
174 /* Setup DTCM if present */ 200 /* Setup DTCM if present */
175 if (dtcm_banks > 0) { 201 if (dtcm_banks > 0) {
176 for (i = 0; i < dtcm_banks; i++) { 202 for (i = 0; i < dtcm_banks; i++) {
@@ -178,6 +204,13 @@ void __init tcm_init(void)
178 if (ret) 204 if (ret)
179 return; 205 return;
180 } 206 }
207 /* This means you compiled more code than fits into DTCM */
208 if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
209 pr_info("CPU DTCM: %u bytes of code compiled to "
210 "DTCM but only %lu bytes of DTCM present\n",
211 dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
212 goto no_dtcm;
213 }
181 dtcm_res.end = dtcm_end - 1; 214 dtcm_res.end = dtcm_end - 1;
182 request_resource(&iomem_resource, &dtcm_res); 215 request_resource(&iomem_resource, &dtcm_res);
183 dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; 216 dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@@ -186,12 +219,16 @@ void __init tcm_init(void)
186 start = &__sdtcm_data; 219 start = &__sdtcm_data;
187 end = &__edtcm_data; 220 end = &__edtcm_data;
188 ram = &__dtcm_start; 221 ram = &__dtcm_start;
189 /* This means you compiled more code than fits into DTCM */ 222 memcpy(start, ram, dtcm_code_sz);
190 BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); 223 pr_debug("CPU DTCM: copied data from %p - %p\n",
191 memcpy(start, ram, (end-start)); 224 start, end);
192 pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); 225 dtcm_present = true;
226 } else if (dtcm_code_sz) {
227 pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no "
228 "DTCM banks present in CPU\n", dtcm_code_sz);
193 } 229 }
194 230
231no_dtcm:
195 /* Setup ITCM if present */ 232 /* Setup ITCM if present */
196 if (itcm_banks > 0) { 233 if (itcm_banks > 0) {
197 for (i = 0; i < itcm_banks; i++) { 234 for (i = 0; i < itcm_banks; i++) {
@@ -199,6 +236,13 @@ void __init tcm_init(void)
199 if (ret) 236 if (ret)
200 return; 237 return;
201 } 238 }
239 /* This means you compiled more code than fits into ITCM */
240 if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
241 pr_info("CPU ITCM: %u bytes of code compiled to "
242 "ITCM but only %lu bytes of ITCM present\n",
243 itcm_code_sz, (itcm_end - ITCM_OFFSET));
244 return;
245 }
202 itcm_res.end = itcm_end - 1; 246 itcm_res.end = itcm_end - 1;
203 request_resource(&iomem_resource, &itcm_res); 247 request_resource(&iomem_resource, &itcm_res);
204 itcm_iomap[0].length = itcm_end - ITCM_OFFSET; 248 itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@@ -207,10 +251,13 @@ void __init tcm_init(void)
207 start = &__sitcm_text; 251 start = &__sitcm_text;
208 end = &__eitcm_text; 252 end = &__eitcm_text;
209 ram = &__itcm_start; 253 ram = &__itcm_start;
210 /* This means you compiled more code than fits into ITCM */ 254 memcpy(start, ram, itcm_code_sz);
211 BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); 255 pr_debug("CPU ITCM: copied code from %p - %p\n",
212 memcpy(start, ram, (end-start)); 256 start, end);
213 pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); 257 itcm_present = true;
258 } else if (itcm_code_sz) {
259 pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
260 "ITCM banks present in CPU\n", itcm_code_sz);
214 } 261 }
215} 262}
216 263
@@ -221,7 +268,6 @@ void __init tcm_init(void)
221 */ 268 */
222static int __init setup_tcm_pool(void) 269static int __init setup_tcm_pool(void)
223{ 270{
224 u32 tcm_status = read_cpuid_tcmstatus();
225 u32 dtcm_pool_start = (u32) &__edtcm_data; 271 u32 dtcm_pool_start = (u32) &__edtcm_data;
226 u32 itcm_pool_start = (u32) &__eitcm_text; 272 u32 itcm_pool_start = (u32) &__eitcm_text;
227 int ret; 273 int ret;
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void)
236 pr_debug("Setting up TCM memory pool\n"); 282 pr_debug("Setting up TCM memory pool\n");
237 283
238 /* Add the rest of DTCM to the TCM pool */ 284 /* Add the rest of DTCM to the TCM pool */
239 if (tcm_status & (0x03 << 16)) { 285 if (dtcm_present) {
240 if (dtcm_pool_start < dtcm_end) { 286 if (dtcm_pool_start < dtcm_end) {
241 ret = gen_pool_add(tcm_pool, dtcm_pool_start, 287 ret = gen_pool_add(tcm_pool, dtcm_pool_start,
242 dtcm_end - dtcm_pool_start, -1); 288 dtcm_end - dtcm_pool_start, -1);
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void)
253 } 299 }
254 300
255 /* Add the rest of ITCM to the TCM pool */ 301 /* Add the rest of ITCM to the TCM pool */
256 if (tcm_status & 0x03) { 302 if (itcm_present) {
257 if (itcm_pool_start < itcm_end) { 303 if (itcm_pool_start < itcm_end) {
258 ret = gen_pool_add(tcm_pool, itcm_pool_start, 304 ret = gen_pool_add(tcm_pool, itcm_pool_start,
259 itcm_end - itcm_pool_start, -1); 305 itcm_end - itcm_pool_start, -1);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index e5287f21bad..bf977f8514f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4;
38 38
39SECTIONS 39SECTIONS
40{ 40{
41#ifdef CONFIG_XIP_KERNEL
42 . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
43#else
44 . = PAGE_OFFSET + TEXT_OFFSET;
45#endif
46
47 .init : { /* Init code and data */
48 _stext = .;
49 _sinittext = .;
50 HEAD_TEXT
51 INIT_TEXT
52 ARM_EXIT_KEEP(EXIT_TEXT)
53 _einittext = .;
54 ARM_CPU_DISCARD(PROC_INFO)
55 __arch_info_begin = .;
56 *(.arch.info.init)
57 __arch_info_end = .;
58 __tagtable_begin = .;
59 *(.taglist.init)
60 __tagtable_end = .;
61#ifdef CONFIG_SMP_ON_UP
62 __smpalt_begin = .;
63 *(.alt.smp.init)
64 __smpalt_end = .;
65#endif
66
67 __pv_table_begin = .;
68 *(.pv_table)
69 __pv_table_end = .;
70
71 INIT_SETUP(16)
72
73 INIT_CALLS
74 CON_INITCALL
75 SECURITY_INITCALL
76 INIT_RAM_FS
77
78#ifndef CONFIG_XIP_KERNEL
79 __init_begin = _stext;
80 INIT_DATA
81 ARM_EXIT_KEEP(EXIT_DATA)
82#endif
83 }
84
85 PERCPU_SECTION(32)
86
87#ifndef CONFIG_XIP_KERNEL
88 . = ALIGN(PAGE_SIZE);
89 __init_end = .;
90#endif
91
92 /* 41 /*
93 * unwind exit sections must be discarded before the rest of the 42 * unwind exit sections must be discarded before the rest of the
94 * unwind sections get included. 43 * unwind sections get included.
@@ -106,10 +55,22 @@ SECTIONS
106 *(.fixup) 55 *(.fixup)
107 *(__ex_table) 56 *(__ex_table)
108#endif 57#endif
58#ifndef CONFIG_SMP_ON_UP
59 *(.alt.smp.init)
60#endif
109 } 61 }
110 62
63#ifdef CONFIG_XIP_KERNEL
64 . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
65#else
66 . = PAGE_OFFSET + TEXT_OFFSET;
67#endif
68 .head.text : {
69 _text = .;
70 HEAD_TEXT
71 }
111 .text : { /* Real text segment */ 72 .text : { /* Real text segment */
112 _text = .; /* Text and read-only data */ 73 _stext = .; /* Text and read-only data */
113 __exception_text_start = .; 74 __exception_text_start = .;
114 *(.exception.text) 75 *(.exception.text)
115 __exception_text_end = .; 76 __exception_text_end = .;
@@ -122,8 +83,6 @@ SECTIONS
122 *(.fixup) 83 *(.fixup)
123#endif 84#endif
124 *(.gnu.warning) 85 *(.gnu.warning)
125 *(.rodata)
126 *(.rodata.*)
127 *(.glue_7) 86 *(.glue_7)
128 *(.glue_7t) 87 *(.glue_7t)
129 . = ALIGN(4); 88 . = ALIGN(4);
@@ -152,10 +111,63 @@ SECTIONS
152 111
153 _etext = .; /* End of text and rodata section */ 112 _etext = .; /* End of text and rodata section */
154 113
114#ifndef CONFIG_XIP_KERNEL
115 . = ALIGN(PAGE_SIZE);
116 __init_begin = .;
117#endif
118
119 INIT_TEXT_SECTION(8)
120 .exit.text : {
121 ARM_EXIT_KEEP(EXIT_TEXT)
122 }
123 .init.proc.info : {
124 ARM_CPU_DISCARD(PROC_INFO)
125 }
126 .init.arch.info : {
127 __arch_info_begin = .;
128 *(.arch.info.init)
129 __arch_info_end = .;
130 }
131 .init.tagtable : {
132 __tagtable_begin = .;
133 *(.taglist.init)
134 __tagtable_end = .;
135 }
136#ifdef CONFIG_SMP_ON_UP
137 .init.smpalt : {
138 __smpalt_begin = .;
139 *(.alt.smp.init)
140 __smpalt_end = .;
141 }
142#endif
143 .init.pv_table : {
144 __pv_table_begin = .;
145 *(.pv_table)
146 __pv_table_end = .;
147 }
148 .init.data : {
149#ifndef CONFIG_XIP_KERNEL
150 INIT_DATA
151#endif
152 INIT_SETUP(16)
153 INIT_CALLS
154 CON_INITCALL
155 SECURITY_INITCALL
156 INIT_RAM_FS
157 }
158#ifndef CONFIG_XIP_KERNEL
159 .exit.data : {
160 ARM_EXIT_KEEP(EXIT_DATA)
161 }
162#endif
163
164 PERCPU_SECTION(32)
165
155#ifdef CONFIG_XIP_KERNEL 166#ifdef CONFIG_XIP_KERNEL
156 __data_loc = ALIGN(4); /* location in binary */ 167 __data_loc = ALIGN(4); /* location in binary */
157 . = PAGE_OFFSET + TEXT_OFFSET; 168 . = PAGE_OFFSET + TEXT_OFFSET;
158#else 169#else
170 __init_end = .;
159 . = ALIGN(THREAD_SIZE); 171 . = ALIGN(THREAD_SIZE);
160 __data_loc = .; 172 __data_loc = .;
161#endif 173#endif
@@ -270,12 +282,6 @@ SECTIONS
270 282
271 /* Default discards */ 283 /* Default discards */
272 DISCARDS 284 DISCARDS
273
274#ifndef CONFIG_SMP_ON_UP
275 /DISCARD/ : {
276 *(.alt.smp.init)
277 }
278#endif
279} 285}
280 286
281/* 287/*