Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/arch_timer.c         |  13
-rw-r--r--  arch/arm/kernel/armksyms.c           |   7
-rw-r--r--  arch/arm/kernel/bios32.c             |   4
-rw-r--r--  arch/arm/kernel/entry-armv.S         | 112
-rw-r--r--  arch/arm/kernel/entry-common.S       |  44
-rw-r--r--  arch/arm/kernel/fiq.c                |   9
-rw-r--r--  arch/arm/kernel/ftrace.c             |  17
-rw-r--r--  arch/arm/kernel/head.S               |  59
-rw-r--r--  arch/arm/kernel/irq.c                |  10
-rw-r--r--  arch/arm/kernel/kprobes-test-arm.c   |   4
-rw-r--r--  arch/arm/kernel/kprobes-thumb.c      |   2
-rw-r--r--  arch/arm/kernel/perf_event.c         |  17
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      |   2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      |   5
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  |   2
-rw-r--r--  arch/arm/kernel/process.c            |   2
-rw-r--r--  arch/arm/kernel/ptrace.c             |  36
-rw-r--r--  arch/arm/kernel/sched_clock.c        |  24
-rw-r--r--  arch/arm/kernel/setup.c              |   6
-rw-r--r--  arch/arm/kernel/signal.c             |  76
-rw-r--r--  arch/arm/kernel/smp.c                |   5
-rw-r--r--  arch/arm/kernel/topology.c           | 241
-rw-r--r--  arch/arm/kernel/traps.c              |  86
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S        |   2
24 files changed, 528 insertions, 257 deletions
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index dd58035621f7..cf258807160d 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -32,6 +32,8 @@ static int arch_timer_ppi2;
 
 static struct clock_event_device __percpu **arch_timer_evt;
 
+extern void init_current_timer_delay(unsigned long freq);
+
 /*
  * Architected system timer support.
  */
@@ -137,7 +139,7 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
 	/* Be safe... */
 	arch_timer_disable();
 
-	clk->features = CLOCK_EVT_FEAT_ONESHOT;
+	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
 	clk->name = "arch_sys_timer";
 	clk->rating = 450;
 	clk->set_mode = arch_timer_set_mode;
@@ -223,6 +225,14 @@ static cycle_t arch_counter_read(struct clocksource *cs)
 	return arch_counter_get_cntpct();
 }
 
+int read_current_timer(unsigned long *timer_val)
+{
+	if (!arch_timer_rate)
+		return -ENXIO;
+	*timer_val = arch_counter_get_cntpct();
+	return 0;
+}
+
 static struct clocksource clocksource_counter = {
 	.name	= "arch_sys_counter",
 	.rating	= 400,
@@ -296,6 +306,7 @@ static int __init arch_timer_register(void)
 	if (err)
 		goto out_free_irq;
 
+	init_current_timer_delay(arch_timer_rate);
 	return 0;
 
 out_free_irq:
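
The read_current_timer()/init_current_timer_delay() additions let the delay
loop run off the architected counter instead of the calibrated bogomips loop.
A minimal, self-contained sketch of the idea (delay_us, read_counter and
timer_rate_hz are illustrative names, not from this patch):

	#include <stdint.h>

	static uint64_t (*read_counter)(void);	/* e.g. backed by CNTPCT */
	static unsigned long timer_rate_hz;	/* e.g. arch_timer_rate */

	static void delay_us(unsigned long us)
	{
		uint64_t start = read_counter();
		uint64_t ticks = ((uint64_t)us * timer_rate_hz) / 1000000;

		while (read_counter() - start < ticks)
			;	/* busy-wait on the counter */
	}
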
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index b57c75e0b01f..60d3b738d420 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,8 +49,7 @@ extern void __aeabi_ulcmp(void);
 extern void fpundefinstr(void);
 
 	/* platform dependent support */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(arm_delay_ops);
 
 	/* networking */
 EXPORT_SYMBOL(csum_partial);
@@ -87,10 +86,6 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
-	/* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
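
The single arm_delay_ops export replaces the per-function __udelay and
__const_udelay exports. For context, a sketch of the ops indirection this
series introduces in asm/delay.h (layout quoted from memory; treat it as
illustrative rather than authoritative):

	struct arm_delay_ops {
		void (*delay)(unsigned long);
		void (*const_udelay)(unsigned long);
		void (*udelay)(unsigned long);
	};

	extern struct arm_delay_ops arm_delay_ops;

Callers keep using udelay(); modules only need the one exported structure,
and the backend can be switched at runtime to a timer-based implementation.
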
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 25552508c3fd..2b2f25e7fef5 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -253,7 +253,7 @@ static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
 
-static void __init pci_fixup_it8152(struct pci_dev *dev)
+static void __devinit pci_fixup_it8152(struct pci_dev *dev)
 {
 	int i;
 	/* fixup for ITE 8152 devices */
@@ -461,7 +461,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		if (!sys->bus)
 			panic("PCI: unable to scan bus!");
 
-		busnr = sys->bus->subordinate + 1;
+		busnr = sys->bus->busn_res.end + 1;
 
 		list_add(&sys->node, head);
 	} else {
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 437f0c426517..0f82098c9bfe 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -244,6 +244,19 @@ svc_preempt:
 	b	1b
 #endif
 
+__und_fault:
+	@ Correct the PC such that it is pointing at the instruction
+	@ which caused the fault. If the faulting instruction was ARM
+	@ the PC will be pointing at the next instruction, and have to
+	@ subtract 4. Otherwise, it is Thumb, and the PC will be
+	@ pointing at the second half of the Thumb instruction. We
+	@ have to subtract 2.
+	ldr	r2, [r0, #S_PC]
+	sub	r2, r2, r1
+	str	r2, [r0, #S_PC]
+	b	do_undefinstr
+ENDPROC(__und_fault)
+
 	.align	5
 __und_svc:
 #ifdef CONFIG_KPROBES
@@ -261,25 +274,32 @@ __und_svc:
 	@
 	@ r0 - instruction
 	@
 #ifndef CONFIG_THUMB2_KERNEL
 	ldr	r0, [r4, #-4]
 #else
+	mov	r1, #2
 	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
-	ldrhhs	r9, [r4]			@ bottom 16 bits
-	orrhs	r0, r9, r0, lsl #16
+	blo	__und_svc_fault
+	ldrh	r9, [r4]			@ bottom 16 bits
+	add	r4, r4, #2
+	str	r4, [sp, #S_PC]
+	orr	r0, r9, r0, lsl #16
 #endif
-	adr	r9, BSYM(1f)
+	adr	r9, BSYM(__und_svc_finish)
 	mov	r2, r4
 	bl	call_fpe
 
+	mov	r1, #4				@ PC correction to apply
+__und_svc_fault:
 	mov	r0, sp				@ struct pt_regs *regs
-	bl	do_undefinstr
+	bl	__und_fault
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-1:	disable_irq_notrace
+__und_svc_finish:
+	disable_irq_notrace
 
 	@
 	@ restore SPSR and restart the instruction
@@ -423,25 +443,33 @@ __und_usr:
 	mov	r2, r4
 	mov	r3, r5
 
+	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
+	@      faulting instruction depending on Thumb mode.
+	@ r3 = regs->ARM_cpsr
 	@
-	@ fall through to the emulation code, which returns using r9 if
-	@ it has emulated the instruction, or the more conventional lr
-	@ if we are to treat this as a real undefined instruction
-	@
-	@ r0 - instruction
+	@ The emulation code returns using r9 if it has emulated the
+	@ instruction, or the more conventional lr if we are to treat
+	@ this as a real undefined instruction
 	@
 	adr	r9, BSYM(ret_from_exception)
-	adr	lr, BSYM(__und_usr_unknown)
+
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
-	itet	eq				@ explicit IT needed for the 1f label
-	subeq	r4, r2, #4			@ ARM instr at LR - 4
-	subne	r4, r2, #2			@ Thumb instr at LR - 2
-1:	ldreqt	r0, [r4]
+	bne	__und_usr_thumb
+	sub	r4, r2, #4			@ ARM instr at LR - 4
+1:	ldrt	r0, [r4]
 #ifdef CONFIG_CPU_ENDIAN_BE8
-	reveq	r0, r0				@ little endian instruction
+	rev	r0, r0				@ little endian instruction
 #endif
-	beq	call_fpe
+	@ r0 = 32-bit ARM instruction which caused the exception
+	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the faulting instruction
+	@ lr = 32-bit undefined instruction function
+	adr	lr, BSYM(__und_usr_fault_32)
+	b	call_fpe
+
+__und_usr_thumb:
 	@ Thumb instruction
+	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
 #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 /*
  * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
@@ -455,7 +483,7 @@ __und_usr:
 	ldr	r5, .LCcpu_architecture
 	ldr	r5, [r5]
 	cmp	r5, #CPU_ARCH_ARMv7
-	blo	__und_usr_unknown
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
 /*
  * The following code won't get run unless the running CPU really is v7, so
  * coding round the lack of ldrht on older arches is pointless.  Temporarily
@@ -463,15 +491,18 @@ __und_usr:
  */
 	.arch	armv6t2
 #endif
-2:
- ARM(	ldrht	r5, [r4], #2	)
- THUMB(	ldrht	r5, [r4]	)
- THUMB(	add	r4, r4, #2	)
+2:	ldrht	r5, [r4]
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_unknown
-3:	ldrht	r0, [r4]
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
+3:	ldrht	r0, [r2]
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
+	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
+	adr	lr, BSYM(__und_usr_fault_32)
+	@ r0 = the two 16-bit Thumb instructions which caused the exception
+	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the first 16-bit Thumb instruction
+	@ lr = 32bit undefined instruction function
 
 #if __LINUX_ARM_ARCH__ < 7
 /* If the target arch was overridden, change it back: */
@@ -482,19 +513,16 @@ __und_usr:
 #endif
 #endif /* __LINUX_ARM_ARCH__ < 7 */
 #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
-	b	__und_usr_unknown
+	b	__und_usr_fault_16
 #endif
- UNWIND(.fnend		)
+ UNWIND(.fnend)
 ENDPROC(__und_usr)
 
-	@
-	@ fallthrough to call_fpe
-	@
-
 /*
- * The out of line fixup for the ldrt above.
+ * The out of line fixup for the ldrt instructions above.
  */
 	.pushsection .fixup, "ax"
+	.align	2
4:	mov	pc, r9
 	.popsection
 	.pushsection __ex_table,"a"
@@ -523,11 +551,12 @@ ENDPROC(__und_usr)
  * NEON handler code.
  *
  * Emulators may wish to make use of the following registers:
- *  r0  = instruction opcode.
- *  r2  = PC+4
+ *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
- *  r10 = this threads thread_info structure.
+ *  r10 = this threads thread_info structure
 *  lr  = unrecognised instruction return address
+ * IRQs disabled, FIQs enabled.
 */
 	@
 	@ Fall-through from Thumb-2 __und_usr
@@ -658,12 +687,17 @@ ENTRY(no_fp)
 	mov	pc, lr
 ENDPROC(no_fp)
 
-__und_usr_unknown:
-	enable_irq
+__und_usr_fault_32:
+	mov	r1, #4
+	b	1f
+__und_usr_fault_16:
+	mov	r1, #2
+1:	enable_irq
 	mov	r0, sp
 	adr	lr, BSYM(ret_from_exception)
-	b	do_undefinstr
-ENDPROC(__und_usr_unknown)
+	b	__und_fault
+ENDPROC(__und_usr_fault_32)
+ENDPROC(__und_usr_fault_16)
 
 	.align	5
 __pabt_usr:
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 4afed88d250a..978eac57e04a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,23 +51,15 @@ ret_fast_syscall:
 fast_work_pending:
 	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
 work_pending:
-	tst	r1, #_TIF_NEED_RESCHED
-	bne	work_resched
-	/*
-	 * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
-	 */
-	ldr	r2, [sp, #S_PSR]
 	mov	r0, sp				@ 'regs'
-	tst	r2, #15				@ are we returning to user mode?
-	bne	no_work_pending			@ no?  just leave, then...
 	mov	r2, why				@ 'syscall'
-	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
-	movne	why, #0				@ prevent further restarts
-	bl	do_notify_resume
-	b	ret_slow_syscall		@ Check work again
+	bl	do_work_pending
+	cmp	r0, #0
+	beq	no_work_pending
+	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
+	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
+	b	local_restart			@ ... and off we go
 
-work_resched:
-	bl	schedule
 /*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
@@ -95,13 +87,7 @@ ENDPROC(ret_to_user)
 ENTRY(ret_from_fork)
 	bl	schedule_tail
 	get_thread_info tsk
-	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	mov	why, #1
-	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
-	beq	ret_slow_syscall
-	mov	r1, sp
-	mov	r0, #1				@ trace exit [IP = 1]
-	bl	syscall_trace
 	b	ret_slow_syscall
 ENDPROC(ret_from_fork)
 
@@ -415,6 +401,7 @@ ENTRY(vector_swi)
 	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
 
+local_restart:
 	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
 
@@ -448,25 +435,24 @@ ENDPROC(vector_swi)
 * context switches, and waiting for our parent to respond.
 */
 __sys_trace:
-	mov	r2, scno
-	add	r1, sp, #S_OFF
-	mov	r0, #0				@ trace entry [IP = 0]
-	bl	syscall_trace
+	mov	r1, scno
+	add	r0, sp, #S_OFF
+	bl	syscall_trace_enter
 
 	adr	lr, BSYM(__sys_trace_return)	@ return address
 	mov	scno, r0			@ syscall number (possibly new)
 	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
+	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
+	stmccia	sp, {r4, r5}			@ and update the stack args
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 	b	2b
 
 __sys_trace_return:
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
-	mov	r2, scno
-	mov	r1, sp
-	mov	r0, #1				@ trace exit [IP = 1]
-	bl	syscall_trace
+	mov	r1, scno
+	mov	r0, sp
+	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
 	.align	5
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index c32f8456aa09..2adda11f712f 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -122,14 +122,16 @@ void release_fiq(struct fiq_handler *f)
 	while (current_fiq->fiq_op(current_fiq->dev_id, 0));
 }
 
+static int fiq_start;
+
 void enable_fiq(int fiq)
 {
-	enable_irq(fiq + FIQ_START);
+	enable_irq(fiq + fiq_start);
 }
 
 void disable_fiq(int fiq)
 {
-	disable_irq(fiq + FIQ_START);
+	disable_irq(fiq + fiq_start);
 }
 
 EXPORT_SYMBOL(set_fiq_handler);
@@ -140,7 +142,8 @@ EXPORT_SYMBOL(release_fiq);
 EXPORT_SYMBOL(enable_fiq);
 EXPORT_SYMBOL(disable_fiq);
 
-void __init init_FIQ(void)
+void __init init_FIQ(int start)
 {
 	no_fiq_insn = *(unsigned long *)0xffff001c;
+	fiq_start = start;
 }
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index df0bf0c8cb79..34e56647dcee 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -179,19 +179,20 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	old = *parent;
 	*parent = return_hooker;
 
-	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
-				       frame_pointer);
-	if (err == -EBUSY) {
-		*parent = old;
-		return;
-	}
-
 	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
 		*parent = old;
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+				       frame_pointer);
+	if (err == -EBUSY) {
+		*parent = old;
+		return;
 	}
 }
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 835898e7d704..3db960e20cb8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -55,14 +55,6 @@
 	add	\rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
 	.endm
 
-#ifdef CONFIG_XIP_KERNEL
-#define KERNEL_START	XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
-#define KERNEL_END	_edata_loc
-#else
-#define KERNEL_START	KERNEL_RAM_VADDR
-#define KERNEL_END	_end
-#endif
-
 /*
 * Kernel startup entry point.
 * ---------------------------
@@ -218,51 +210,46 @@ __create_page_tables:
 	blo	1b
 
 	/*
-	 * Now setup the pagetables for our kernel direct
-	 * mapped region.
+	 * Map our RAM from the start to the end of the kernel .bss section.
 	 */
-	mov	r3, pc
-	mov	r3, r3, lsr #SECTION_SHIFT
-	orr	r3, r7, r3, lsl #SECTION_SHIFT
-	add	r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
-	str	r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
-	ldr	r6, =(KERNEL_END - 1)
-	add	r0, r0, #1 << PMD_ORDER
+	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+	ldr	r6, =(_end - 1)
+	orr	r3, r8, r7
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
-1:	cmp	r0, r6
+1:	str	r3, [r0], #1 << PMD_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
-	strls	r3, [r0], #1 << PMD_ORDER
+	cmp	r0, r6
 	bls	1b
 
 #ifdef CONFIG_XIP_KERNEL
 	/*
-	 * Map some ram to cover our .data and .bss areas.
+	 * Map the kernel image separately as it is not located in RAM.
 	 */
-	add	r3, r8, #TEXT_OFFSET
-	orr	r3, r3, r7
-	add	r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
-	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]!
-	ldr	r6, =(_end - 1)
-	add	r0, r0, #4
+#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
+	mov	r3, pc
+	mov	r3, r3, lsr #SECTION_SHIFT
+	orr	r3, r7, r3, lsl #SECTION_SHIFT
+	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+	ldr	r6, =(_edata_loc - 1)
+	add	r0, r0, #1 << PMD_ORDER
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:	cmp	r0, r6
-	add	r3, r3, #1 << 20
-	strls	r3, [r0], #4
+	add	r3, r3, #1 << SECTION_SHIFT
+	strls	r3, [r0], #1 << PMD_ORDER
 	bls	1b
 #endif
 
 	/*
-	 * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
-	 * of ram if boot params address is not specified.
+	 * Then map boot params address in r2 if specified.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
-	moveq	r0, r8
-	sub	r3, r0, r8
-	add	r3, r3, #PAGE_OFFSET
-	add	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
-	orr	r6, r7, r0
-	str	r6, [r3]
+	subne	r3, r0, r8
+	addne	r3, r3, #PAGE_OFFSET
+	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
+	orrne	r6, r7, r0
+	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
 #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
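
For reference, the index arithmetic used by the rewritten mapping loop: with
classic (non-LPAE) page tables, SECTION_SHIFT is 20 and PMD_ORDER is 2, so
each 1 MiB section is described by a 4-byte entry and the table offset of a
virtual address is addr >> (SECTION_SHIFT - PMD_ORDER). A worked example
(values are the usual defaults, shown here only for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_offset = 0xC0000000UL;	/* PAGE_OFFSET */
		int section_shift = 20, pmd_order = 2;

		/* the first kernel entry lives at pgdir + 0x3000 */
		printf("pgdir byte offset = %#lx\n",
		       page_offset >> (section_shift - pmd_order));
		return 0;
	}
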
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 8349d4e97e2b..16cedb42c0c3 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -40,13 +40,6 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
-/*
- * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
- */
-#ifndef irq_finish
-#define irq_finish(irq) do { } while (0)
-#endif
-
 unsigned long irq_err_count;
 
 int arch_show_interrupts(struct seq_file *p, int prec)
@@ -85,9 +78,6 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
 		generic_handle_irq(irq);
 	}
 
-	/* AT91 specific workaround */
-	irq_finish(irq);
-
 	irq_exit();
 	set_irq_regs(old_regs);
 }
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index ba32b393b3f0..38c1a3b103a0 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
 	TEST_BF_R ("mov	pc, r",0,2f,"")
 	TEST_BF_RR("mov	pc, r",0,2f,", asl r",1,0,"")
 	TEST_BB(   "sub	pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
-	TEST_BB(   "sub	pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+	TEST_BB(   "sub	pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
 	TEST_BB_R( "sub	pc, pc, r",14, 1f-2f+8,"")
 	TEST_BB_R( "rsb	pc, r",14,1f-2f+8,", pc")
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
index 8f96ec778e8d..6123daf397a7 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -660,7 +660,7 @@ static const union decode_item t32_table_1111_100x[] = {
 	/* LDRSB (literal)	1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
 	/* LDRH (literal)	1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
 	/* LDRSH (literal)	1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
-	DECODE_EMULATEX	(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+	DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
						 REGS(PC, NOSPPCX, 0, 0, 0)),
 
 	/* STRB (immediate)	1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 186c8cb982c5..ab243b87118d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -47,17 +47,14 @@ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
-enum arm_perf_pmu_ids
-armpmu_get_pmu_id(void)
+const char *perf_pmu_name(void)
 {
-	int id = -ENODEV;
-
-	if (cpu_pmu != NULL)
-		id = cpu_pmu->id;
+	if (!cpu_pmu)
+		return NULL;
 
-	return id;
+	return cpu_pmu->pmu.name;
 }
-EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+EXPORT_SYMBOL_GPL(perf_pmu_name);
 
 int perf_num_counters(void)
 {
@@ -503,7 +500,7 @@ __hw_perf_event_init(struct perf_event *event)
 	    event_requires_mode_exclusion(&event->attr)) {
 		pr_debug("ARM performance counters do not support "
 			 "mode exclusion\n");
-		return -EPERM;
+		return -EOPNOTSUPP;
 	}
 
 	/*
@@ -760,7 +757,7 @@ init_hw_perf_events(void)
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
 		register_cpu_notifier(&pmu_cpu_notifier);
-		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
+		armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index ab627a740fa3..c90fcb2b6967 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -650,7 +650,6 @@ static int armv6_map_event(struct perf_event *event)
 }
 
 static struct arm_pmu armv6pmu = {
-	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
 	.handle_irq		= armv6pmu_handle_irq,
 	.enable			= armv6pmu_enable_event,
@@ -685,7 +684,6 @@ static int armv6mpcore_map_event(struct perf_event *event)
 }
 
 static struct arm_pmu armv6mpcore_pmu = {
-	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
 	.handle_irq		= armv6pmu_handle_irq,
 	.enable			= armv6pmu_enable_event,
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index d3c536068162..f04070bd2183 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1258,7 +1258,6 @@ static u32 __init armv7_read_num_pmnc_events(void)
 
 static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
 	armv7pmu.map_event	= armv7_a8_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1267,7 +1266,6 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
 	armv7pmu.map_event	= armv7_a9_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1276,7 +1274,6 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
 	armv7pmu.map_event	= armv7_a5_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1285,7 +1282,6 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
 	armv7pmu.map_event	= armv7_a15_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1295,7 +1291,6 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a7_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA7;
 	armv7pmu.name		= "ARMv7 Cortex-A7";
 	armv7pmu.map_event	= armv7_a7_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e34e7254e652..f759fe0bab63 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -435,7 +435,6 @@ static int xscale_map_event(struct perf_event *event)
 }
 
 static struct arm_pmu xscale1pmu = {
-	.id		= ARM_PERF_PMU_ID_XSCALE1,
 	.name		= "xscale1",
 	.handle_irq	= xscale1pmu_handle_irq,
 	.enable		= xscale1pmu_enable_event,
@@ -803,7 +802,6 @@ xscale2pmu_write_counter(int counter, u32 val)
 }
 
 static struct arm_pmu xscale2pmu = {
-	.id		= ARM_PERF_PMU_ID_XSCALE2,
 	.name		= "xscale2",
 	.handle_irq	= xscale2pmu_handle_irq,
 	.enable		= xscale2pmu_enable_event,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 19c95ea65b2f..693b744fd572 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -247,6 +247,7 @@ void machine_shutdown(void)
 void machine_halt(void)
 {
 	machine_shutdown();
+	local_irq_disable();
 	while (1);
 }
 
@@ -268,6 +269,7 @@ void machine_restart(char *cmd)
 
 	/* Whoops - the platform was unable to reboot. Tell the user! */
 	printk("Reboot failed -- System halted\n");
+	local_irq_disable();
 	while (1);
 }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5700a7ae7f0b..3e0fc5f7ed4b 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -908,18 +908,16 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }
 
-asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+enum ptrace_syscall_dir {
+	PTRACE_SYSCALL_ENTER = 0,
+	PTRACE_SYSCALL_EXIT,
+};
+
+static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
+				enum ptrace_syscall_dir dir)
 {
 	unsigned long ip;
 
-	if (why)
-		audit_syscall_exit(regs);
-	else
-		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
-				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
-
-	if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
-		scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
 
@@ -930,14 +928,28 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 	 * IP = 0 -> entry, =1 -> exit
 	 */
 	ip = regs->ARM_ip;
-	regs->ARM_ip = why;
+	regs->ARM_ip = dir;
 
-	if (why)
+	if (dir == PTRACE_SYSCALL_EXIT)
 		tracehook_report_syscall_exit(regs, 0);
 	else if (tracehook_report_syscall_entry(regs))
 		current_thread_info()->syscall = -1;
 
 	regs->ARM_ip = ip;
-
 	return current_thread_info()->syscall;
 }
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+{
+	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+	audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
+			    regs->ARM_r2, regs->ARM_r3);
+	return ret;
+}
+
+asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
+{
+	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
+	audit_syscall_exit(regs);
+	return ret;
+}
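
The split gives the assembler glue two dedicated entry points instead of the
multiplexed syscall_trace(why, ...). A compilable sketch of the resulting
call shape (all types and the one-entry table are stand-ins; only the
enter/exit split mirrors the patch):

	#include <stdio.h>

	struct pt_regs { unsigned long ARM_r0; };

	/* stand-ins: the real helpers run tracehook and audit */
	static int syscall_trace_enter(struct pt_regs *regs, int scno)
	{ return scno; }	/* a tracer may return a new number */
	static int syscall_trace_exit(struct pt_regs *regs, int scno)
	{ return scno; }

	static long sys_getpid(void) { return 42; }
	static long (*sys_call_table[])(void) = { sys_getpid };

	int main(void)
	{
		struct pt_regs regs = { 0 };
		int scno = syscall_trace_enter(&regs, 0);

		regs.ARM_r0 = sys_call_table[scno]();
		syscall_trace_exit(&regs, scno);
		printf("%lu\n", regs.ARM_r0);
		return 0;
	}
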
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 27d186abbc06..f4515393248d 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -21,6 +21,8 @@ struct clock_data {
 	u32 epoch_cyc_copy;
 	u32 mult;
 	u32 shift;
+	bool suspended;
+	bool needs_suspend;
 };
 
 static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
 	u64 epoch_ns;
 	u32 epoch_cyc;
 
+	if (cd.suspended)
+		return cd.epoch_ns;
+
 	/*
 	 * Load the epoch_cyc and epoch_ns atomically. We do this by
 	 * ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)
 	update_sched_clock();
 }
 
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate)
+{
+	setup_sched_clock(read, bits, rate);
+	cd.needs_suspend = true;
+}
+
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
@@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)
 static int sched_clock_suspend(void)
 {
 	sched_clock_poll(sched_clock_timer.data);
+	if (cd.needs_suspend)
+		cd.suspended = true;
 	return 0;
 }
 
+static void sched_clock_resume(void)
+{
+	if (cd.needs_suspend) {
+		cd.epoch_cyc = read_sched_clock();
+		cd.epoch_cyc_copy = cd.epoch_cyc;
+		cd.suspended = false;
+	}
+}
+
 static struct syscore_ops sched_clock_ops = {
 	.suspend = sched_clock_suspend,
+	.resume = sched_clock_resume,
 };
 
 static int __init sched_clock_syscore_init(void)
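
The suspended flag short-circuits cyc_to_sched_clock() to the saved epoch
because the counter may stop or reset across suspend. For reference, the
fixed-point conversion that the epoch logic is built on (a self-contained
sketch; variable names are illustrative):

	#include <stdint.h>

	/* ns = (cyc * mult) >> shift, applied to the delta since the epoch */
	static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult,
					 uint32_t shift)
	{
		return (cyc * mult) >> shift;
	}

	static uint64_t epoch_ns;	/* ns at the last update_sched_clock() */
	static uint32_t epoch_cyc;	/* counter value at that moment */

	static uint64_t sched_clock_ns(uint32_t cyc, uint32_t mask,
				       uint32_t mult, uint32_t shift)
	{
		/* mask handles counters narrower than 32 bits wrapping */
		return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask,
					    mult, shift);
	}
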
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e15d83bb4ea3..a81dcecc7343 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -508,7 +508,7 @@ void __init dump_machine_table(void)
 		/* can't use cpu_relax() here as it may require MMU setup */;
 }
 
-int __init arm_add_memory(phys_addr_t start, unsigned long size)
+int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 {
 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
 
@@ -538,7 +538,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size)
 	}
 #endif
 
-	bank->size = size & PAGE_MASK;
+	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
 
 	/*
 	 * Check whether this memory region has non-zero size or
@@ -558,7 +558,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size)
 static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
-	unsigned long size;
+	phys_addr_t size;
 	phys_addr_t start;
 	char *endp;
 
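
The masking change matters once size is a 64-bit phys_addr_t: PAGE_MASK is
derived from an unsigned long, so on a 32-bit kernel with LPAE it silently
clears the upper bits of the bank size. A small demonstration (the numbers
are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t size = 0x100000000ULL + 0x1234;  /* 4 GiB + change */
		uint32_t page_mask32 = ~(uint32_t)(4096 - 1);

		/* zero-extends to 0x00000000fffff000, losing the high bits */
		uint64_t wrong = size & page_mask32;
		uint64_t right = size & ~(uint64_t)(4096 - 1);

		printf("wrong=%#llx right=%#llx\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}
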
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index fd2392a17ac1..f27789e4e38a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -569,12 +569,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
-static void do_signal(struct pt_regs *regs, int syscall)
+static int do_signal(struct pt_regs *regs, int syscall)
 {
 	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
 	struct k_sigaction ka;
 	siginfo_t info;
 	int signr;
+	int restart = 0;
 
 	/*
 	 * If we were from a system call, check for system call restarting...
@@ -589,10 +590,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
 		 * debugger will see the already changed PSW.
 		 */
 		switch (retval) {
+		case -ERESTART_RESTARTBLOCK:
+			restart -= 2;
 		case -ERESTARTNOHAND:
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
-		case -ERESTART_RESTARTBLOCK:
+			restart++;
 			regs->ARM_r0 = regs->ARM_ORIG_r0;
 			regs->ARM_pc = restart_addr;
 			break;
@@ -604,13 +607,15 @@ static void do_signal(struct pt_regs *regs, int syscall)
 	 * point the debugger may change all our registers ...
 	 */
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+	/*
+	 * Depending on the signal settings we may need to revert the
+	 * decision to restart the system call. But skip this if a
+	 * debugger has chosen to restart at a different PC.
+	 */
+	if (regs->ARM_pc != restart_addr)
+		restart = 0;
 	if (signr > 0) {
-		/*
-		 * Depending on the signal settings we may need to revert the
-		 * decision to restart the system call. But skip this if a
-		 * debugger has chosen to restart at a different PC.
-		 */
-		if (regs->ARM_pc == restart_addr) {
+		if (unlikely(restart)) {
 			if (retval == -ERESTARTNOHAND ||
 			    retval == -ERESTART_RESTARTBLOCK
 			    || (retval == -ERESTARTSYS
@@ -618,35 +623,46 @@ static void do_signal(struct pt_regs *regs, int syscall)
 				regs->ARM_r0 = -EINTR;
 				regs->ARM_pc = continue_addr;
 			}
-			clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
 		}
 
 		handle_signal(signr, &ka, &info, regs);
-		return;
-	}
-
-	if (syscall) {
-		/*
-		 * Handle restarting a different system call. As above,
-		 * if a debugger has chosen to restart at a different PC,
-		 * ignore the restart.
-		 */
-		if (retval == -ERESTART_RESTARTBLOCK
-		    && regs->ARM_pc == restart_addr)
-			set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+		return 0;
 	}
 
 	restore_saved_sigmask();
+	if (unlikely(restart))
+		regs->ARM_pc = continue_addr;
+	return restart;
 }
 
-asmlinkage void
-do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+asmlinkage int
+do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
-	if (thread_flags & _TIF_SIGPENDING)
-		do_signal(regs, syscall);
-
-	if (thread_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
-		tracehook_notify_resume(regs);
-	}
+	do {
+		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+			schedule();
+		} else {
+			if (unlikely(!user_mode(regs)))
+				return 0;
+			local_irq_enable();
+			if (thread_flags & _TIF_SIGPENDING) {
+				int restart = do_signal(regs, syscall);
+				if (unlikely(restart)) {
+					/*
+					 * Restart without handlers.
+					 * Deal with it without leaving
+					 * the kernel space.
+					 */
+					return restart;
+				}
+				syscall = 0;
+			} else {
+				clear_thread_flag(TIF_NOTIFY_RESUME);
+				tracehook_notify_resume(regs);
+			}
+		}
+		local_irq_disable();
+		thread_flags = current_thread_info()->flags;
+	} while (thread_flags & _TIF_WORK_MASK);
+	return 0;
 }
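
The integer that do_signal() now returns encodes the restart decision for
work_pending: 0 means no restart, a positive value re-runs the same syscall
via local_restart, and a negative value makes the entry code substitute
__NR_restart_syscall (the "movlt scno, ..." in entry-common.S above). The
switch fall-through produces exactly those values; a stand-alone rendering
using the kernel's errno numbers (512/513/514/516):

	#include <stdio.h>

	static int restart_code(long retval)
	{
		int restart = 0;

		switch (retval) {
		case -516:		/* -ERESTART_RESTARTBLOCK */
			restart -= 2;	/* fall through */
		case -514:		/* -ERESTARTNOHAND */
		case -512:		/* -ERESTARTSYS */
		case -513:		/* -ERESTARTNOINTR */
			restart++;
		}
		return restart;
	}

	int main(void)
	{
		/* prints "-1 1": restart_syscall vs plain restart */
		printf("%d %d\n", restart_code(-516), restart_code(-514));
		return 0;
	}
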
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2c7217d971db..ebd8ad274d76 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -179,7 +179,7 @@ void __ref cpu_die(void)
 	mb();
 
 	/* Tell __cpu_die() that this CPU is now safe to dispose of */
-	complete(&cpu_died);
+	RCU_NONIDLE(complete(&cpu_died));
 
 	/*
 	 * actual CPU shutdown procedure is at least platform (if not
@@ -563,7 +563,8 @@ void smp_send_stop(void)
 
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
-	smp_cross_call(&mask, IPI_CPU_STOP);
+	if (!cpumask_empty(&mask))
+		smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 8200deaa14f6..26c12c6440fc 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -17,11 +17,190 @@ | |||
17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
18 | #include <linux/node.h> | 18 | #include <linux/node.h> |
19 | #include <linux/nodemask.h> | 19 | #include <linux/nodemask.h> |
20 | #include <linux/of.h> | ||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/slab.h> | ||
21 | 23 | ||
22 | #include <asm/cputype.h> | 24 | #include <asm/cputype.h> |
23 | #include <asm/topology.h> | 25 | #include <asm/topology.h> |
24 | 26 | ||
27 | /* | ||
28 | * cpu power scale management | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * cpu power table | ||
33 | * This per cpu data structure describes the relative capacity of each core. | ||
34 | * On a heteregenous system, cores don't have the same computation capacity | ||
35 | * and we reflect that difference in the cpu_power field so the scheduler can | ||
36 | * take this difference into account during load balance. A per cpu structure | ||
37 | * is preferred because each CPU updates its own cpu_power field during the | ||
38 | * load balance except for idle cores. One idle core is selected to run the | ||
39 | * rebalance_domains for all idle cores and the cpu_power can be updated | ||
40 | * during this sequence. | ||
41 | */ | ||
42 | static DEFINE_PER_CPU(unsigned long, cpu_scale); | ||
43 | |||
44 | unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) | ||
45 | { | ||
46 | return per_cpu(cpu_scale, cpu); | ||
47 | } | ||
48 | |||
49 | static void set_power_scale(unsigned int cpu, unsigned long power) | ||
50 | { | ||
51 | per_cpu(cpu_scale, cpu) = power; | ||
52 | } | ||
53 | |||
54 | #ifdef CONFIG_OF | ||
55 | struct cpu_efficiency { | ||
56 | const char *compatible; | ||
57 | unsigned long efficiency; | ||
58 | }; | ||
59 | |||
60 | /* | ||
61 | * Table of relative efficiency of each processors | ||
62 | * The efficiency value must fit in 20bit and the final | ||
63 | * cpu_scale value must be in the range | ||
64 | * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2 | ||
65 | * in order to return at most 1 when DIV_ROUND_CLOSEST | ||
66 | * is used to compute the capacity of a CPU. | ||
67 | * Processors that are not defined in the table, | ||
68 | * use the default SCHED_POWER_SCALE value for cpu_scale. | ||
69 | */ | ||
70 | struct cpu_efficiency table_efficiency[] = { | ||
71 | {"arm,cortex-a15", 3891}, | ||
72 | {"arm,cortex-a7", 2048}, | ||
73 | {NULL, }, | ||
74 | }; | ||
75 | |||
76 | struct cpu_capacity { | ||
77 | unsigned long hwid; | ||
78 | unsigned long capacity; | ||
79 | }; | ||
80 | |||
81 | struct cpu_capacity *cpu_capacity; | ||
82 | |||
83 | unsigned long middle_capacity = 1; | ||
84 | |||
85 | /* | ||
86 | * Iterate all CPUs' descriptor in DT and compute the efficiency | ||
87 | * (as per table_efficiency). Also calculate a middle efficiency | ||
88 | * as close as possible to (max{eff_i} - min{eff_i}) / 2 | ||
89 | * This is later used to scale the cpu_power field such that an | ||
90 | * 'average' CPU is of middle power. Also see the comments near | ||
91 | * table_efficiency[] and update_cpu_power(). | ||
92 | */ | ||
93 | static void __init parse_dt_topology(void) | ||
94 | { | ||
95 | struct cpu_efficiency *cpu_eff; | ||
96 | struct device_node *cn = NULL; | ||
97 | unsigned long min_capacity = (unsigned long)(-1); | ||
98 | unsigned long max_capacity = 0; | ||
99 | unsigned long capacity = 0; | ||
100 | int alloc_size, cpu = 0; | ||
101 | |||
102 | alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity); | ||
103 | cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT); | ||
104 | |||
105 | while ((cn = of_find_node_by_type(cn, "cpu"))) { | ||
106 | const u32 *rate, *reg; | ||
107 | int len; | ||
108 | |||
109 | if (cpu >= num_possible_cpus()) | ||
110 | break; | ||
111 | |||
112 | for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) | ||
113 | if (of_device_is_compatible(cn, cpu_eff->compatible)) | ||
114 | break; | ||
115 | |||
116 | if (cpu_eff->compatible == NULL) | ||
117 | continue; | ||
118 | |||
119 | rate = of_get_property(cn, "clock-frequency", &len); | ||
120 | if (!rate || len != 4) { | ||
121 | pr_err("%s missing clock-frequency property\n", | ||
122 | cn->full_name); | ||
123 | continue; | ||
124 | } | ||
125 | |||
126 | reg = of_get_property(cn, "reg", &len); | ||
127 | if (!reg || len != 4) { | ||
128 | pr_err("%s missing reg property\n", cn->full_name); | ||
129 | continue; | ||
130 | } | ||
131 | |||
132 | capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; | ||
133 | |||
134 | /* Save min capacity of the system */ | ||
135 | if (capacity < min_capacity) | ||
136 | min_capacity = capacity; | ||
137 | |||
138 | /* Save max capacity of the system */ | ||
139 | if (capacity > max_capacity) | ||
140 | max_capacity = capacity; | ||
141 | |||
142 | cpu_capacity[cpu].capacity = capacity; | ||
143 | cpu_capacity[cpu++].hwid = be32_to_cpup(reg); | ||
144 | } | ||
145 | |||
146 | if (cpu < num_possible_cpus()) | ||
147 | cpu_capacity[cpu].hwid = (unsigned long)(-1); | ||
148 | |||
149 | /* If the min and max capacities are equal, we bypass the update of the | ||
150 | * cpu_scale because all CPUs have the same capacity. Otherwise, we | ||
151 | * compute a middle_capacity factor that will ensure that the capacity | ||
152 | * of an 'average' CPU of the system will be as close as possible to | ||
153 | * SCHED_POWER_SCALE, which is the default value, but with the | ||
154 | * constraint explained near table_efficiency[]. | ||
155 | */ | ||
156 | if (min_capacity == max_capacity) | ||
157 | cpu_capacity[0].hwid = (unsigned long)(-1); | ||
158 | else if (4*max_capacity < (3*(max_capacity + min_capacity))) | ||
159 | middle_capacity = (min_capacity + max_capacity) | ||
160 | >> (SCHED_POWER_SHIFT+1); | ||
161 | else | ||
162 | middle_capacity = ((max_capacity / 3) | ||
163 | >> (SCHED_POWER_SHIFT-1)) + 1; | ||
164 | |||
165 | } | ||
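Continuing the hypothetical example from above, assume one Cortex-A15 and one Cortex-A7, both clocked at 1 GHz (values assumed for illustration):

	/* max_capacity = 953 * 3891 = 3708123   (A15)
	 * min_capacity = 953 * 2048 = 1951744   (A7)
	 * 4*max = 14832492 < 3*(max + min) = 16979601, so:
	 * middle_capacity = (min + max) >> (SCHED_POWER_SHIFT + 1)
	 *                 = 5659867 >> 11 = 2763
	 */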
166 | |||
167 | /* | ||
168 | * Look for a custom capacity for a CPU in the cpu_capacity table during | ||
169 | * boot. The update of all CPUs is in O(n^2) for a heterogeneous system but | ||
170 | * the function returns immediately for a homogeneous (SMP) system. | ||
171 | */ | ||
172 | void update_cpu_power(unsigned int cpu, unsigned long hwid) | ||
173 | { | ||
174 | unsigned int idx = 0; | ||
175 | |||
176 | /* look for the cpu's hwid in the cpu capacity table */ | ||
177 | for (idx = 0; idx < num_possible_cpus(); idx++) { | ||
178 | if (cpu_capacity[idx].hwid == hwid) | ||
179 | break; | ||
180 | |||
181 | if (cpu_capacity[idx].hwid == -1) | ||
182 | return; | ||
183 | } | ||
184 | |||
185 | if (idx == num_possible_cpus()) | ||
186 | return; | ||
187 | |||
188 | set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity); | ||
189 | |||
190 | printk(KERN_INFO "CPU%u: update cpu_power %lu\n", | ||
191 | cpu, arch_scale_freq_power(NULL, cpu)); | ||
192 | } | ||
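With the same hypothetical numbers, the per-cpu scales that update_cpu_power() ends up setting straddle the SCHED_POWER_SCALE default of 1024, as intended:

	/* A15: 3708123 / 2763 = 1342  (above the 1024 'average')
	 * A7:  1951744 / 2763 =  706  (below it)
	 * both satisfy 0 < cpu_scale < 3*SCHED_POWER_SCALE/2 = 1536
	 */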
193 | |||
194 | #else | ||
195 | static inline void parse_dt_topology(void) {} | ||
196 | static inline void update_cpu_power(unsigned int cpuid, unsigned long mpidr) {} | ||
197 | #endif | ||
198 | |||
199 | |||
200 | /* | ||
201 | * cpu topology management | ||
202 | */ | ||
203 | |||
25 | #define MPIDR_SMP_BITMASK (0x3 << 30) | 204 | #define MPIDR_SMP_BITMASK (0x3 << 30) |
26 | #define MPIDR_SMP_VALUE (0x2 << 30) | 205 | #define MPIDR_SMP_VALUE (0x2 << 30) |
27 | 206 | ||
@@ -31,6 +210,7 @@ | |||
31 | * These masks reflect the current use of the affinity levels. | 210 | * These masks reflect the current use of the affinity levels. |
32 | * The affinity level can be up to 16 bits according to the ARM ARM | 211 | * The affinity level can be up to 16 bits according to the ARM ARM |
33 | */ | 212 | */ |
213 | #define MPIDR_HWID_BITMASK 0xFFFFFF | ||
34 | 214 | ||
35 | #define MPIDR_LEVEL0_MASK 0x3 | 215 | #define MPIDR_LEVEL0_MASK 0x3 |
36 | #define MPIDR_LEVEL0_SHIFT 0 | 216 | #define MPIDR_LEVEL0_SHIFT 0 |
@@ -41,6 +221,9 @@ | |||
41 | #define MPIDR_LEVEL2_MASK 0xFF | 221 | #define MPIDR_LEVEL2_MASK 0xFF |
42 | #define MPIDR_LEVEL2_SHIFT 16 | 222 | #define MPIDR_LEVEL2_SHIFT 16 |
43 | 223 | ||
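For illustration, here is how these masks decompose a hypothetical MPIDR value; the example value and variable names are assumed, not taken from the patch:

	unsigned int mpidr = 0x80020103;			/* assumed example */
	unsigned int hwid  = mpidr & MPIDR_HWID_BITMASK;	/* 0x020103 */
	unsigned int lvl0  = (hwid >> MPIDR_LEVEL0_SHIFT)
				& MPIDR_LEVEL0_MASK;		/* 3 */
	unsigned int lvl2  = (hwid >> MPIDR_LEVEL2_SHIFT)
				& MPIDR_LEVEL2_MASK;		/* 0x02 */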
224 | /* | ||
225 | * cpu topology table | ||
226 | */ | ||
44 | struct cputopo_arm cpu_topology[NR_CPUS]; | 227 | struct cputopo_arm cpu_topology[NR_CPUS]; |
45 | 228 | ||
46 | const struct cpumask *cpu_coregroup_mask(int cpu) | 229 | const struct cpumask *cpu_coregroup_mask(int cpu) |
@@ -48,6 +231,32 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
48 | return &cpu_topology[cpu].core_sibling; | 231 | return &cpu_topology[cpu].core_sibling; |
49 | } | 232 | } |
50 | 233 | ||
234 | void update_siblings_masks(unsigned int cpuid) | ||
235 | { | ||
236 | struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; | ||
237 | int cpu; | ||
238 | |||
239 | /* update core and thread sibling masks */ | ||
240 | for_each_possible_cpu(cpu) { | ||
241 | cpu_topo = &cpu_topology[cpu]; | ||
242 | |||
243 | if (cpuid_topo->socket_id != cpu_topo->socket_id) | ||
244 | continue; | ||
245 | |||
246 | cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); | ||
247 | if (cpu != cpuid) | ||
248 | cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); | ||
249 | |||
250 | if (cpuid_topo->core_id != cpu_topo->core_id) | ||
251 | continue; | ||
252 | |||
253 | cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); | ||
254 | if (cpu != cpuid) | ||
255 | cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); | ||
256 | } | ||
257 | smp_wmb(); | ||
258 | } | ||
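As a quick illustration of the invariant update_siblings_masks() maintains, consider a hypothetical two-cluster system: CPU0-1 on socket 0, CPU2-3 on socket 1, one thread per core (all values assumed):

	/* after all four CPUs have booted:
	 *   core_sibling(CPU0) = core_sibling(CPU1) = { CPU0, CPU1 }
	 *   core_sibling(CPU2) = core_sibling(CPU3) = { CPU2, CPU3 }
	 *   thread_sibling(CPUn) = { CPUn }   (distinct core_ids)
	 * the masks stay symmetric because each pass sets both directions.
	 */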
259 | |||
51 | /* | 260 | /* |
52 | * store_cpu_topology is called at boot when only one cpu is running | 261 | * store_cpu_topology is called at boot when only one cpu is running |
53 | * and, when several cpus have booted, with the mutex cpu_hotplug.lock held, | 262 | * and, when several cpus have booted, with the mutex cpu_hotplug.lock held, |
@@ -57,7 +266,6 @@ void store_cpu_topology(unsigned int cpuid) | |||
57 | { | 266 | { |
58 | struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; | 267 | struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; |
59 | unsigned int mpidr; | 268 | unsigned int mpidr; |
60 | unsigned int cpu; | ||
61 | 269 | ||
62 | /* If the cpu topology has been already set, just return */ | 270 | /* If the cpu topology has been already set, just return */ |
63 | if (cpuid_topo->core_id != -1) | 271 | if (cpuid_topo->core_id != -1) |
@@ -99,26 +307,9 @@ void store_cpu_topology(unsigned int cpuid) | |||
99 | cpuid_topo->socket_id = -1; | 307 | cpuid_topo->socket_id = -1; |
100 | } | 308 | } |
101 | 309 | ||
102 | /* update core and thread sibling masks */ | 310 | update_siblings_masks(cpuid); |
103 | for_each_possible_cpu(cpu) { | 311 | |
104 | struct cputopo_arm *cpu_topo = &cpu_topology[cpu]; | 312 | update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK); |
105 | |||
106 | if (cpuid_topo->socket_id == cpu_topo->socket_id) { | ||
107 | cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); | ||
108 | if (cpu != cpuid) | ||
109 | cpumask_set_cpu(cpu, | ||
110 | &cpuid_topo->core_sibling); | ||
111 | |||
112 | if (cpuid_topo->core_id == cpu_topo->core_id) { | ||
113 | cpumask_set_cpu(cpuid, | ||
114 | &cpu_topo->thread_sibling); | ||
115 | if (cpu != cpuid) | ||
116 | cpumask_set_cpu(cpu, | ||
117 | &cpuid_topo->thread_sibling); | ||
118 | } | ||
119 | } | ||
120 | } | ||
121 | smp_wmb(); | ||
122 | 313 | ||
123 | printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", | 314 | printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", |
124 | cpuid, cpu_topology[cpuid].thread_id, | 315 | cpuid, cpu_topology[cpuid].thread_id, |
@@ -130,11 +321,11 @@ void store_cpu_topology(unsigned int cpuid) | |||
130 | * init_cpu_topology is called at boot when only one cpu is running | 321 | * init_cpu_topology is called at boot when only one cpu is running |
131 | * which prevents simultaneous write access to the cpu_topology array | 322 | * which prevents simultaneous write access to the cpu_topology array |
132 | */ | 323 | */ |
133 | void init_cpu_topology(void) | 324 | void __init init_cpu_topology(void) |
134 | { | 325 | { |
135 | unsigned int cpu; | 326 | unsigned int cpu; |
136 | 327 | ||
137 | /* init core mask */ | 328 | /* init core mask and power */ |
138 | for_each_possible_cpu(cpu) { | 329 | for_each_possible_cpu(cpu) { |
139 | struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); | 330 | struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); |
140 | 331 | ||
@@ -143,6 +334,10 @@ void init_cpu_topology(void) | |||
143 | cpu_topo->socket_id = -1; | 334 | cpu_topo->socket_id = -1; |
144 | cpumask_clear(&cpu_topo->core_sibling); | 335 | cpumask_clear(&cpu_topo->core_sibling); |
145 | cpumask_clear(&cpu_topo->thread_sibling); | 336 | cpumask_clear(&cpu_topo->thread_sibling); |
337 | |||
338 | set_power_scale(cpu, SCHED_POWER_SCALE); | ||
146 | } | 339 | } |
147 | smp_wmb(); | 340 | smp_wmb(); |
341 | |||
342 | parse_dt_topology(); | ||
148 | } | 343 | } |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 4928d89758f4..f7945218b8c6 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -233,9 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) | |||
233 | #define S_ISA " ARM" | 233 | #define S_ISA " ARM" |
234 | #endif | 234 | #endif |
235 | 235 | ||
236 | static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) | 236 | static int __die(const char *str, int err, struct pt_regs *regs) |
237 | { | 237 | { |
238 | struct task_struct *tsk = thread->task; | 238 | struct task_struct *tsk = current; |
239 | static int die_counter; | 239 | static int die_counter; |
240 | int ret; | 240 | int ret; |
241 | 241 | ||
@@ -245,12 +245,12 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt | |||
245 | /* trap and error numbers are mostly meaningless on ARM */ | 245 | /* trap and error numbers are mostly meaningless on ARM */ |
246 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); | 246 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); |
247 | if (ret == NOTIFY_STOP) | 247 | if (ret == NOTIFY_STOP) |
248 | return ret; | 248 | return 1; |
249 | 249 | ||
250 | print_modules(); | 250 | print_modules(); |
251 | __show_regs(regs); | 251 | __show_regs(regs); |
252 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", | 252 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", |
253 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); | 253 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); |
254 | 254 | ||
255 | if (!user_mode(regs) || in_interrupt()) { | 255 | if (!user_mode(regs) || in_interrupt()) { |
256 | dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, | 256 | dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, |
@@ -259,45 +259,77 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt | |||
259 | dump_instr(KERN_EMERG, regs); | 259 | dump_instr(KERN_EMERG, regs); |
260 | } | 260 | } |
261 | 261 | ||
262 | return ret; | 262 | return 0; |
263 | } | 263 | } |
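The stack-limit argument change above looks behaviour-preserving: end_of_stack() in this era's include/linux/sched.h reduces, if I read it correctly, to the address the old code computed by hand from the thread_info pointer:

	/* paraphrased from include/linux/sched.h (illustrative) */
	static inline unsigned long *end_of_stack(struct task_struct *p)
	{
		return (unsigned long *)(task_thread_info(p) + 1);
	}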
264 | 264 | ||
265 | static DEFINE_RAW_SPINLOCK(die_lock); | 265 | static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
266 | static int die_owner = -1; | ||
267 | static unsigned int die_nest_count; | ||
266 | 268 | ||
267 | /* | 269 | static unsigned long oops_begin(void) |
268 | * This function is protected against re-entrancy. | ||
269 | */ | ||
270 | void die(const char *str, struct pt_regs *regs, int err) | ||
271 | { | 270 | { |
272 | struct thread_info *thread = current_thread_info(); | 271 | int cpu; |
273 | int ret; | 272 | unsigned long flags; |
274 | enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; | ||
275 | 273 | ||
276 | oops_enter(); | 274 | oops_enter(); |
277 | 275 | ||
278 | raw_spin_lock_irq(&die_lock); | 276 | /* racy, but better than risking deadlock. */ |
277 | raw_local_irq_save(flags); | ||
278 | cpu = smp_processor_id(); | ||
279 | if (!arch_spin_trylock(&die_lock)) { | ||
280 | if (cpu == die_owner) | ||
281 | /* nested oops. should stop eventually */; | ||
282 | else | ||
283 | arch_spin_lock(&die_lock); | ||
284 | } | ||
285 | die_nest_count++; | ||
286 | die_owner = cpu; | ||
279 | console_verbose(); | 287 | console_verbose(); |
280 | bust_spinlocks(1); | 288 | bust_spinlocks(1); |
281 | if (!user_mode(regs)) | 289 | return flags; |
282 | bug_type = report_bug(regs->ARM_pc, regs); | 290 | } |
283 | if (bug_type != BUG_TRAP_TYPE_NONE) | ||
284 | str = "Oops - BUG"; | ||
285 | ret = __die(str, err, thread, regs); | ||
286 | 291 | ||
287 | if (regs && kexec_should_crash(thread->task)) | 292 | static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) |
293 | { | ||
294 | if (regs && kexec_should_crash(current)) | ||
288 | crash_kexec(regs); | 295 | crash_kexec(regs); |
289 | 296 | ||
290 | bust_spinlocks(0); | 297 | bust_spinlocks(0); |
298 | die_owner = -1; | ||
291 | add_taint(TAINT_DIE); | 299 | add_taint(TAINT_DIE); |
292 | raw_spin_unlock_irq(&die_lock); | 300 | die_nest_count--; |
301 | if (!die_nest_count) | ||
302 | /* Nest count reaches zero, release the lock. */ | ||
303 | arch_spin_unlock(&die_lock); | ||
304 | raw_local_irq_restore(flags); | ||
293 | oops_exit(); | 305 | oops_exit(); |
294 | 306 | ||
295 | if (in_interrupt()) | 307 | if (in_interrupt()) |
296 | panic("Fatal exception in interrupt"); | 308 | panic("Fatal exception in interrupt"); |
297 | if (panic_on_oops) | 309 | if (panic_on_oops) |
298 | panic("Fatal exception"); | 310 | panic("Fatal exception"); |
299 | if (ret != NOTIFY_STOP) | 311 | if (signr) |
300 | do_exit(SIGSEGV); | 312 | do_exit(signr); |
313 | } | ||
314 | |||
315 | /* | ||
316 | * This function is protected against re-entrancy. | ||
317 | */ | ||
318 | void die(const char *str, struct pt_regs *regs, int err) | ||
319 | { | ||
320 | enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; | ||
321 | unsigned long flags = oops_begin(); | ||
322 | int sig = SIGSEGV; | ||
323 | |||
324 | if (!user_mode(regs)) | ||
325 | bug_type = report_bug(regs->ARM_pc, regs); | ||
326 | if (bug_type != BUG_TRAP_TYPE_NONE) | ||
327 | str = "Oops - BUG"; | ||
328 | |||
329 | if (__die(str, err, regs)) | ||
330 | sig = 0; | ||
331 | |||
332 | oops_end(flags, regs, sig); | ||
301 | } | 333 | } |
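A stand-alone model of the nesting protocol introduced above may help. This is an illustrative userspace sketch using pthread spinlocks as a stand-in for die_lock; all names are invented and it assumes pthread_spin_init(&lock, 0) has run:

	#include <pthread.h>

	static pthread_spinlock_t lock;
	static int owner = -1, nest_count;

	static void model_oops_begin(int cpu)
	{
		/* racy owner read, by design, like the kernel's comment says */
		if (pthread_spin_trylock(&lock) != 0 && cpu != owner)
			pthread_spin_lock(&lock);  /* another CPU: wait */
		/* same CPU re-entering (nested oops) falls through: it
		 * already holds the lock from the outer invocation */
		owner = cpu;
		nest_count++;
	}

	static void model_oops_end(void)
	{
		owner = -1;
		if (--nest_count == 0)
			pthread_spin_unlock(&lock);	/* outermost exit */
	}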
302 | 334 | ||
303 | void arm_notify_die(const char *str, struct pt_regs *regs, | 335 | void arm_notify_die(const char *str, struct pt_regs *regs, |
@@ -370,18 +402,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr) | |||
370 | 402 | ||
371 | asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | 403 | asmlinkage void __exception do_undefinstr(struct pt_regs *regs) |
372 | { | 404 | { |
373 | unsigned int correction = thumb_mode(regs) ? 2 : 4; | ||
374 | unsigned int instr; | 405 | unsigned int instr; |
375 | siginfo_t info; | 406 | siginfo_t info; |
376 | void __user *pc; | 407 | void __user *pc; |
377 | 408 | ||
378 | /* | ||
379 | * According to the ARM ARM, PC is 2 or 4 bytes ahead, | ||
380 | * depending whether we're in Thumb mode or not. | ||
381 | * Correct this offset. | ||
382 | */ | ||
383 | regs->ARM_pc -= correction; | ||
384 | |||
385 | pc = (void __user *)instruction_pointer(regs); | 409 | pc = (void __user *)instruction_pointer(regs); |
386 | 410 | ||
387 | if (processor_mode(regs) == SVC_MODE) { | 411 | if (processor_mode(regs) == SVC_MODE) { |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 43a31fb06318..36ff15bbfdd4 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -183,7 +183,9 @@ SECTIONS | |||
183 | } | 183 | } |
184 | #endif | 184 | #endif |
185 | 185 | ||
186 | #ifdef CONFIG_SMP | ||
186 | PERCPU_SECTION(L1_CACHE_BYTES) | 187 | PERCPU_SECTION(L1_CACHE_BYTES) |
188 | #endif | ||
187 | 189 | ||
188 | #ifdef CONFIG_XIP_KERNEL | 190 | #ifdef CONFIG_XIP_KERNEL |
189 | __data_loc = ALIGN(4); /* location in binary */ | 191 | __data_loc = ALIGN(4); /* location in binary */ |