author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 18:51:29 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 18:51:29 -0500
commit		f47671e2d861a2093179cd64dda22016664b2015 (patch)
tree		f77cb8e7d875f442e2cf0bdc8fbe478ec8ff8181 /arch/arm/kernel
parent		8ceafbfa91ffbdbb2afaea5c24ccb519ffb8b587 (diff)
parent		42cbe8271ca6562b4ad4b2e6a9895084b16eef5e (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM updates from Russell King:
 "Included in this series are:

   1. BE8 (modern big endian) changes for ARM from Ben Dooks
   2. big.Little support from Nicolas Pitre and Dave Martin
   3. support for LPAE systems with all system memory above 4GB
   4. Perf updates from Will Deacon
   5. Additional prefetching and other performance improvements from Will
   6. Neon-optimised AES implementation from Ard
   7. A number of smaller fixes scattered around the place

  There is a rather horrid merge conflict in tools/perf - I was never
  notified of the conflict because it originally occurred between Will's
  tree and other stuff.  Consequently I have a resolution which Will
  forwarded me, which I'll forward on immediately after sending this
  mail.

  The other notable thing is I'm expecting some build breakage in the
  crypto stuff on ARM only with Ard's AES patches.  These were merged
  into a stable git branch which others had already pulled, so there's
  little I can do about this.  The problem is caused because these
  patches have a dependency on some code in the crypto git tree - I
  tried requesting a branch I can pull to resolve these, and all I got
  each time from the crypto people was "we'll revert our patches then",
  which would only make things worse since I still don't have the
  dependent patches.  I've no idea what's going on there or how to
  resolve that, and since I can't split these patches from the rest of
  this pull request, I'm rather stuck with pushing this as-is or
  reverting Ard's patches.

  Since it should "come out in the wash" I've left them in - the only
  build problems they seem to cause at the moment are with randconfigs,
  and it's a new feature anyway.  However, if by -rc1 the dependencies
  aren't in, I think it'd be best to revert Ard's patches"

I resolved the perf conflict roughly as per the patch sent by Russell,
but there may be some differences.  Any errors are likely mine.  Let's
see how the crypto issues work out..

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (110 commits)
  ARM: 7868/1: arm/arm64: remove atomic_clear_mask() in "include/asm/atomic.h"
  ARM: 7867/1: include: asm: use 'int' instead of 'unsigned long' for 'oldval' in atomic_cmpxchg().
  ARM: 7866/1: include: asm: use 'long long' instead of 'u64' within atomic.h
  ARM: 7871/1: amba: Extend number of IRQS
  ARM: 7887/1: Don't smp_cross_call() on UP devices in arch_irq_work_raise()
  ARM: 7872/1: Support arch_irq_work_raise() via self IPIs
  ARM: 7880/1: Clear the IT state independent of the Thumb-2 mode
  ARM: 7878/1: nommu: Implement dummy early_paging_init()
  ARM: 7876/1: clear Thumb-2 IT state on exception handling
  ARM: 7874/2: bL_switcher: Remove cpu_hotplug_driver_{lock,unlock}()
  ARM: footbridge: fix build warnings for netwinder
  ARM: 7873/1: vfp: clear vfp_current_hw_state for dying cpu
  ARM: fix misplaced arch_virt_to_idmap()
  ARM: 7848/1: mcpm: Implement cpu_kill() to synchronise on powerdown
  ARM: 7847/1: mcpm: Factor out logical-to-physical CPU translation
  ARM: 7869/1: remove unused XSCALE_PMU Kconfig param
  ARM: 7864/1: Handle 64-bit memory in case of 32-bit phys_addr_t
  ARM: 7863/1: Let arm_add_memory() always use 64-bit arguments
  ARM: 7862/1: pcpu: replace __get_cpu_var_uses
  ARM: 7861/1: cacheflush: consolidate single-CPU ARMv7 cache disabling code
  ...
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile           |  4
-rw-r--r--  arch/arm/kernel/armksyms.c         |  1
-rw-r--r--  arch/arm/kernel/entry-armv.S       |  6
-rw-r--r--  arch/arm/kernel/entry-common.S     |  4
-rw-r--r--  arch/arm/kernel/head.S             | 82
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c    | 14
-rw-r--r--  arch/arm/kernel/kprobes.c          |  8
-rw-r--r--  arch/arm/kernel/module.c           | 57
-rw-r--r--  arch/arm/kernel/perf_event.c       |  3
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c   |  2
-rw-r--r--  arch/arm/kernel/perf_regs.c        | 30
-rw-r--r--  arch/arm/kernel/setup.c            | 28
-rw-r--r--  arch/arm/kernel/signal.c           | 38
-rw-r--r--  arch/arm/kernel/sigreturn_codes.S  | 80
-rw-r--r--  arch/arm/kernel/sleep.S            | 27
-rw-r--r--  arch/arm/kernel/smp.c              | 42
-rw-r--r--  arch/arm/kernel/smp_scu.c          | 14
-rw-r--r--  arch/arm/kernel/smp_tlb.c          | 36
-rw-r--r--  arch/arm/kernel/smp_twd.c          | 24
-rw-r--r--  arch/arm/kernel/suspend.c          |  8
-rw-r--r--  arch/arm/kernel/traps.c            | 24
21 files changed, 389 insertions, 143 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5140df5f23aa..a30fc9be9e9e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 obj-y		:= elf.o entry-common.o irq.o opcodes.o \
 		   process.o ptrace.o return_address.o \
-		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+		   setup.o signal.o sigreturn_codes.o \
+		   stacktrace.o sys_arm.o time.o traps.o
 
 obj-$(CONFIG_ATAGS)		+= atags_parse.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags_proc.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
+obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 60d3b738d420..1f031ddd0667 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
 
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 EXPORT_SYMBOL(__pv_phys_offset);
+EXPORT_SYMBOL(__pv_offset);
 #endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9cbe70c8b0ef..b3fb8c9e1ff2 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,6 +192,7 @@ __dabt_svc:
 	svc_entry
 	mov	r2, sp
 	dabt_helper
+ THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
@@ -416,9 +417,8 @@ __und_usr:
 	bne	__und_usr_thumb
 	sub	r4, r2, #4			@ ARM instr at LR - 4
 1:	ldrt	r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r0, r0				@ little endian instruction
-#endif
+ ARM_BE8(rev	r0, r0)				@ little endian instruction
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc6bd9683ba4..a2dcafdf1bc8 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -393,9 +393,7 @@ ENTRY(vector_swi)
 #else
  USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
 #endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r10, r10			@ little endian instruction
-#endif
+ ARM_BE8(rev	r10, r10)			@ little endian instruction
 
 #elif defined(CONFIG_AEABI)
 
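
These two hunks, and several below, replace open-coded #ifdef CONFIG_CPU_ENDIAN_BE8 blocks with the ARM_BE8() assembler helper from the BE8 series. A sketch of the helper as it presumably appears in arch/arm/include/asm/assembler.h (illustrative, not quoted from the header): it expands its argument only on big-endian builds, so the rev fix-ups above vanish entirely from little-endian kernels.

#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code	/* BE8: keep the byte-swap */
#else
#define ARM_BE8(code...)		/* LE: compiles to nothing */
#endif
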
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 476de57dcef2..7801866e626a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
 
 	__HEAD
 ENTRY(stext)
+ ARM_BE8(setend	be )			@ ensure we are in BE8 mode
 
  THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
  THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
@@ -352,6 +353,9 @@ ENTRY(secondary_startup)
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
+
+ ARM_BE8(setend	be)				@ ensure we are in BE8 mode
+
 #ifdef CONFIG_ARM_VIRT_EXT
 	bl	__hyp_stub_install_secondary
 #endif
@@ -555,6 +559,14 @@ ENTRY(fixup_smp)
 	ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(fixup_smp)
 
+#ifdef __ARMEB__
+#define LOW_OFFSET	0x4
+#define HIGH_OFFSET	0x0
+#else
+#define LOW_OFFSET	0x0
+#define HIGH_OFFSET	0x4
+#endif
+
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 
 /* __fixup_pv_table - patch the stub instructions with the delta between
560/* __fixup_pv_table - patch the stub instructions with the delta between 572/* __fixup_pv_table - patch the stub instructions with the delta between
@@ -565,17 +577,20 @@ ENDPROC(fixup_smp)
 	__HEAD
 __fixup_pv_table:
 	adr	r0, 1f
-	ldmia	r0, {r3-r5, r7}
-	sub	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
+	ldmia	r0, {r3-r7}
+	mvn	ip, #0
+	subs	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
 	add	r4, r4, r3	@ adjust table start address
 	add	r5, r5, r3	@ adjust table end address
-	add	r7, r7, r3	@ adjust __pv_phys_offset address
-	str	r8, [r7]	@ save computed PHYS_OFFSET to __pv_phys_offset
+	add	r6, r6, r3	@ adjust __pv_phys_offset address
+	add	r7, r7, r3	@ adjust __pv_offset address
+	str	r8, [r6, #LOW_OFFSET]	@ save computed PHYS_OFFSET to __pv_phys_offset
+	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
 	mov	r6, r3, lsr #24	@ constant for add/sub instructions
 	teq	r3, r6, lsl #24 @ must be 16MiB aligned
 THUMB(	it	ne		@ cross section branch )
 	bne	__error
-	str	r6, [r7, #4]	@ save to __pv_offset
+	str	r3, [r7, #LOW_OFFSET]	@ save to __pv_offset low bits
 	b	__fixup_a_pv_table
 ENDPROC(__fixup_pv_table)
 
581 596
@@ -584,10 +599,19 @@ ENDPROC(__fixup_pv_table)
 	.long	__pv_table_begin
 	.long	__pv_table_end
 2:	.long	__pv_phys_offset
+	.long	__pv_offset
 
 	.text
 __fixup_a_pv_table:
+	adr	r0, 3f
+	ldr	r6, [r0]
+	add	r6, r6, r3
+	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
+	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
+	mov	r6, r6, lsr #24
+	cmn	r0, #1
 #ifdef CONFIG_THUMB2_KERNEL
+	moveq	r0, #0x200000	@ set bit 21, mov to mvn instruction
 	lsls	r6, #24
 	beq	2f
 	clz	r7, r6
@@ -601,18 +625,42 @@ __fixup_a_pv_table:
 	b	2f
 1:	add     r7, r3
 	ldrh	ip, [r7, #2]
-	and	ip, 0x8f00
-	orr	ip, r6	@ mask in offset bits 31-24
+ARM_BE8(rev16	ip, ip)
+	tst	ip, #0x4000
+	and	ip, #0x8f00
+	orrne	ip, r6	@ mask in offset bits 31-24
+	orreq	ip, r0	@ mask in offset bits 7-0
+ARM_BE8(rev16	ip, ip)
 	strh	ip, [r7, #2]
+	bne	2f
+	ldrh	ip, [r7]
+ARM_BE8(rev16	ip, ip)
+	bic	ip, #0x20
+	orr	ip, ip, r0, lsr #16
+ARM_BE8(rev16	ip, ip)
+	strh	ip, [r7]
 2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
 	bcc	1b
 	bx	lr
 #else
+	moveq	r0, #0x400000	@ set bit 22, mov to mvn instruction
 	b	2f
 1:	ldr	ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	@ in BE8, we load data in BE, but instructions still in LE
+	bic	ip, ip, #0xff000000
+	tst	ip, #0x000f0000	@ check the rotation field
+	orrne	ip, ip, r6, lsl #24	@ mask in offset bits 31-24
+	biceq	ip, ip, #0x00004000	@ clear bit 22
+	orreq	ip, ip, r0, lsl #24	@ mask in offset bits 7-0
+#else
 	bic	ip, ip, #0x000000ff
-	orr	ip, ip, r6	@ mask in offset bits 31-24
+	tst	ip, #0xf00	@ check the rotation field
+	orrne	ip, ip, r6	@ mask in offset bits 31-24
+	biceq	ip, ip, #0x400000	@ clear bit 22
+	orreq	ip, ip, r0	@ mask in offset bits 7-0
+#endif
 	str	ip, [r7, r3]
 2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
@@ -621,28 +669,30 @@ __fixup_a_pv_table:
 #endif
 ENDPROC(__fixup_a_pv_table)
 
+	.align
+3:	.long __pv_offset
+
 ENTRY(fixup_pv_table)
 	stmfd	sp!, {r4 - r7, lr}
-	ldr	r2, 2f			@ get address of __pv_phys_offset
 	mov	r3, #0			@ no offset
 	mov	r4, r0			@ r0 = table start
 	add	r5, r0, r1		@ r1 = table size
-	ldr	r6, [r2, #4]		@ get __pv_offset
 	bl	__fixup_a_pv_table
 	ldmfd	sp!, {r4 - r7, pc}
 ENDPROC(fixup_pv_table)
 
-	.align
-2:	.long	__pv_phys_offset
-
 	.data
 	.globl	__pv_phys_offset
 	.type	__pv_phys_offset, %object
 __pv_phys_offset:
-	.long	0
-	.size	__pv_phys_offset, . - __pv_phys_offset
+	.quad	0
+	.size	__pv_phys_offset, . -__pv_phys_offset
+
+	.globl	__pv_offset
+	.type	__pv_offset, %object
 __pv_offset:
-	.long	0
+	.quad	0
+	.size	__pv_offset, . -__pv_offset
 #endif
 
 #include "head-common.S"
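
The head.S rework widens __pv_phys_offset and __pv_offset to .quad so LPAE systems with all memory above 4GB can express a phys/virt delta that does not fit in 32 bits. The subs/strcc pair seeds the high word of __pv_offset with ~0 when PHYS_OFFSET - PAGE_OFFSET borrows, i.e. when the delta is negative. As a C model of the resulting translation (model_virt_to_phys() is illustrative; the real kernel patches the constants into the instruction stream instead of loading them):

#include <linux/types.h>

extern u64 __pv_phys_offset;	/* runtime PHYS_OFFSET, now a .quad */
extern u64 __pv_offset;		/* PHYS_OFFSET - PAGE_OFFSET, 64-bit two's complement */

static inline u64 model_virt_to_phys(unsigned long vaddr)
{
	/* A "negative" delta carries an all-ones high word, so this
	 * 64-bit add wraps to the correct physical address. */
	return (u64)vaddr + __pv_offset;
}
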
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7b95de601357..3d446605cbf8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		/* Breakpoint */
 		ctrl_base = ARM_BASE_BCR;
 		val_base = ARM_BASE_BVR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		ctrl_base = ARM_BASE_WCR;
 		val_base = ARM_BASE_WVR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
 
356 356
@@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = ARM_BASE_BCR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = ARM_BASE_WCR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
 
@@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);
 
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
 
-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);
 
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);
 
 	/* The exception entry code places the amended lr in the PC. */
 	addr = regs->ARM_pc;
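
All five hunks here are the same mechanical conversion: __get_cpu_var() returned this CPU's instance as an lvalue, which then had to be cast to be usable as a slot array, while this_cpu_ptr() hands back a pointer to it directly. A minimal sketch of the pattern (slots_demo is a stand-in, not the driver's variable):

#include <linux/percpu.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(struct perf_event *, slots_demo[16]);

static struct perf_event **current_cpu_slots(void)
{
	/* Was: (struct perf_event **)__get_cpu_var(slots_demo); */
	return this_cpu_ptr(slots_demo);
}
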
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f34003f..a7b621ece23d 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
 static void __kprobes set_current_kprobe(struct kprobe *p)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }
 
 static void __kprobes
@@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			continue;
 
 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
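
Writes take the same treatment: assignment to the __get_cpu_var() lvalue becomes an explicit __this_cpu_write(). A sketch with a stand-in variable (demo_kprobe is hypothetical, mirroring current_kprobe):

#include <linux/kprobes.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct kprobe *, demo_kprobe);

static void set_demo_kprobe(struct kprobe *p)
{
	/* Was: __get_cpu_var(demo_kprobe) = p; */
	__this_cpu_write(demo_kprobe, p);
}
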
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index c9dfff3b8008..45e478157278 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
 #include <asm/sections.h>
 #include <asm/smp_plat.h>
 #include <asm/unwind.h>
+#include <asm/opcodes.h>
 
 #ifdef CONFIG_XIP_KERNEL
 /*
@@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	Elf32_Sym *sym;
 	const char *symname;
 	s32 offset;
+	u32 tmp;
 #ifdef CONFIG_THUMB2_KERNEL
 	u32 upper, lower, sign, j1, j2;
 #endif
@@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 		case R_ARM_PC24:
 		case R_ARM_CALL:
 		case R_ARM_JUMP24:
-			offset = (*(u32 *)loc & 0x00ffffff) << 2;
+			offset = __mem_to_opcode_arm(*(u32 *)loc);
+			offset = (offset & 0x00ffffff) << 2;
 			if (offset & 0x02000000)
 				offset -= 0x04000000;
 
@@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			}
 
 			offset >>= 2;
+			offset &= 0x00ffffff;
 
-			*(u32 *)loc &= 0xff000000;
-			*(u32 *)loc |= offset & 0x00ffffff;
+			*(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+			*(u32 *)loc |= __opcode_to_mem_arm(offset);
 			break;
 
 		case R_ARM_V4BX:
@@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			 * other bits to re-code instruction as
 			 * MOV PC,Rm.
 			 */
-			*(u32 *)loc &= 0xf000000f;
-			*(u32 *)loc |= 0x01a0f000;
+			*(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+			*(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
 			break;
 
 		case R_ARM_PREL31:
@@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 
 		case R_ARM_MOVW_ABS_NC:
 		case R_ARM_MOVT_ABS:
-			offset = *(u32 *)loc;
+			offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
 			offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
 			offset = (offset ^ 0x8000) - 0x8000;
 
@@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
 				offset >>= 16;
 
-			*(u32 *)loc &= 0xfff0f000;
-			*(u32 *)loc |= ((offset & 0xf000) << 4) |
-				       (offset & 0x0fff);
+			tmp &= 0xfff0f000;
+			tmp |= ((offset & 0xf000) << 4) |
+			       (offset & 0x0fff);
+
+			*(u32 *)loc = __opcode_to_mem_arm(tmp);
 			break;
 
 #ifdef CONFIG_THUMB2_KERNEL
 		case R_ARM_THM_CALL:
 		case R_ARM_THM_JUMP24:
-			upper = *(u16 *)loc;
-			lower = *(u16 *)(loc + 2);
+			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
 			/*
 			 * 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			sign = (offset >> 24) & 1;
 			j1 = sign ^ (~(offset >> 23) & 1);
 			j2 = sign ^ (~(offset >> 22) & 1);
-			*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
-					    ((offset >> 12) & 0x03ff));
-			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
-						  (j1 << 13) | (j2 << 11) |
-						  ((offset >> 1) & 0x07ff));
+			upper = (u16)((upper & 0xf800) | (sign << 10) |
+				      ((offset >> 12) & 0x03ff));
+			lower = (u16)((lower & 0xd000) |
+				      (j1 << 13) | (j2 << 11) |
+				      ((offset >> 1) & 0x07ff));
+
+			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
+			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
 			break;
 
 		case R_ARM_THM_MOVW_ABS_NC:
 		case R_ARM_THM_MOVT_ABS:
-			upper = *(u16 *)loc;
-			lower = *(u16 *)(loc + 2);
+			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
 			/*
 			 * MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
 				offset >>= 16;
 
-			*(u16 *)loc = (u16)((upper & 0xfbf0) |
-					    ((offset & 0xf000) >> 12) |
-					    ((offset & 0x0800) >> 1));
-			*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
-						  ((offset & 0x0700) << 4) |
-						  (offset & 0x00ff));
+			upper = (u16)((upper & 0xfbf0) |
+				      ((offset & 0xf000) >> 12) |
+				      ((offset & 0x0800) >> 1));
+			lower = (u16)((lower & 0x8f00) |
+				      ((offset & 0x0700) << 4) |
+				      (offset & 0x00ff));
+			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
+			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
 			break;
 #endif
 
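
Every relocation case now round-trips instruction words through the asm/opcodes.h converters: on a BE8 kernel, data loads are big-endian while instructions stay little-endian in memory, so the word must be swapped into canonical opcode order before the bit surgery and swapped back before the store (on LE kernels the helpers are no-ops). A hedged sketch of the round-trip; patch_imm24() is illustrative, not a kernel function:

#include <asm/opcodes.h>

static void patch_imm24(u32 *loc, s32 byte_offset)
{
	u32 insn = __mem_to_opcode_arm(*loc);	 /* memory order -> canonical opcode */

	insn &= 0xff000000;			 /* keep cond + opcode bits */
	insn |= (byte_offset >> 2) & 0x00ffffff; /* word offset into imm24 */

	*loc = __opcode_to_mem_arm(insn);	 /* canonical opcode -> memory order */
}
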
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e186ee1e63f6..bc3f2efa0d86 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events,
 			  struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu *leader_pmu = event->group_leader->pmu;
 
 	if (is_software_event(event))
 		return 1;
 
-	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d6147b2001f..d85055cd24ba 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 
 static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }
 
 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
new file mode 100644
index 000000000000..6e4379c67cbc
--- /dev/null
+++ b/arch/arm/kernel/perf_regs.c
@@ -0,0 +1,30 @@
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+	if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
+		return 0;
+
+	return regs->uregs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+	if (!mask || mask & REG_RESERVED)
+		return -EINVAL;
+
+	return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+	return PERF_SAMPLE_REGS_ABI_32;
+}
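
This new file is what backs PERF_SAMPLE_REGS_USER on ARM. A minimal consumer sketch from the userspace side; the 16 in the mask mirrors PERF_REG_ARM_MAX (r0..pc) and must clear perf_reg_validate() above:

#include <linux/perf_event.h>
#include <string.h>

static void want_arm_regs(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->sample_type |= PERF_SAMPLE_REGS_USER;
	attr->sample_regs_user = (1ULL << 16) - 1;	/* all of r0..pc */
}
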
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 5d65438685d8..6a1b8a81b1ae 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup);
 #endif
 
 extern void paging_init(const struct machine_desc *desc);
+extern void early_paging_init(const struct machine_desc *,
+			      struct proc_info_list *);
 extern void sanity_check_meminfo(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
@@ -599,6 +601,8 @@ static void __init setup_processor(void)
 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
+	erratum_a15_798181_init();
+
 	feat_v6_fixup();
 
 	cacheid_init();
@@ -619,9 +623,10 @@ void __init dump_machine_table(void)
 		/* can't use cpu_relax() here as it may require MMU setup */;
 }
 
-int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+int __init arm_add_memory(u64 start, u64 size)
 {
 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+	u64 aligned_start;
 
 	if (meminfo.nr_banks >= NR_BANKS) {
 		printk(KERN_CRIT "NR_BANKS too low, "
@@ -634,10 +639,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	 * Size is appropriately rounded down, start is rounded up.
 	 */
 	size -= start & ~PAGE_MASK;
-	bank->start = PAGE_ALIGN(start);
+	aligned_start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_ARM_LPAE
-	if (bank->start + size < bank->start) {
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+	if (aligned_start > ULONG_MAX) {
+		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+		       "32-bit physical address space\n", (long long)start);
+		return -EINVAL;
+	}
+
+	if (aligned_start + size > ULONG_MAX) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 		       "32-bit physical address space\n", (long long)start);
 		/*
@@ -645,10 +656,11 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
 	 * This means we lose a page after masking.
 	 */
-		size = ULONG_MAX - bank->start;
+		size = ULONG_MAX - aligned_start;
 	}
 #endif
 
+	bank->start = aligned_start;
 	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
 
 	/*
@@ -669,8 +681,8 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
-	phys_addr_t size;
-	phys_addr_t start;
+	u64 size;
+	u64 start;
 	char *endp;
 
 	/*
@@ -878,6 +890,8 @@ void __init setup_arch(char **cmdline_p)
 	parse_early_param();
 
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
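
arm_add_memory() now takes u64 arguments and does every range check in 64-bit arithmetic before anything is narrowed to phys_addr_t, so banks that start or end above 4GB are rejected or truncated instead of silently wrapping. The essence of the check, as a standalone sketch (clamp_bank_32bit() is illustrative):

#include <linux/types.h>

#define PHYS_LIMIT_32 0xffffffffULL	/* ULONG_MAX on a 32-bit kernel */

static int clamp_bank_32bit(u64 aligned_start, u64 *size)
{
	if (aligned_start > PHYS_LIMIT_32)
		return -1;				/* entirely above 4GB: ignore */
	if (aligned_start + *size > PHYS_LIMIT_32)
		*size = PHYS_LIMIT_32 - aligned_start;	/* truncate to fit */
	return 0;
}
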
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ab3304225272..04d63880037f 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -21,29 +21,7 @@
 #include <asm/unistd.h>
 #include <asm/vfp.h>
 
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-
-/*
- * With EABI, the syscall number has to be loaded into r7.
- */
-#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-/*
- * For Thumb syscalls, we pass the syscall number via r7.  We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-static const unsigned long sigreturn_codes[7] = {
-	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
-	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
-};
+extern const unsigned long sigreturn_codes[7];
 
 static unsigned long signal_return_offset;
 
@@ -375,12 +353,18 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	 */
 	thumb = handler & 1;
 
-	if (thumb) {
-		cpsr |= PSR_T_BIT;
 #if __LINUX_ARM_ARCH__ >= 7
-		/* clear the If-Then Thumb-2 execution state */
-		cpsr &= ~PSR_IT_MASK;
+	/*
+	 * Clear the If-Then Thumb-2 execution state.  The ARM spec
+	 * requires this to be all 000s in ARM mode.  Snapdragon
+	 * S4/Krait misbehaves on a Thumb=>ARM signal transition
+	 * without this.
+	 */
+	cpsr &= ~PSR_IT_MASK;
 #endif
+
+	if (thumb) {
+		cpsr |= PSR_T_BIT;
 	} else
 		cpsr &= ~PSR_T_BIT;
 	}
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
new file mode 100644
index 000000000000..3c5d0f2170fd
--- /dev/null
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -0,0 +1,80 @@
+/*
+ * sigreturn_codes.S - code snippets for sigreturn syscalls
+ *
+ * Created by: Victor Kamensky, 2013-08-13
+ * Copyright: (C) 2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/unistd.h>
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ * With EABI, the syscall number has to be loaded into r7.  As a result,
+ * the ARM syscall sequence snippet has a move and an svc in .arm encoding.
+ *
+ * For Thumb syscalls, we pass the syscall number via r7.  We therefore
+ * need two 16-bit instructions in .thumb encoding.
+ *
+ * Please note the sigreturn_codes snippets are not executed in place.
+ * Instead they are just copied by the kernel into appropriate places.
+ * Code inside of arch/arm/kernel/signal.c is very sensitive to the
+ * layout of these code snippets.
+ */
+
+#if __LINUX_ARM_ARCH__ <= 4
+	/*
+	 * Note we manually set minimally required arch that supports
+	 * required thumb opcodes for early arch versions.  It is OK
+	 * for this file to be used in combination with other
+	 * lower arch variants, since these code snippets are only
+	 * used as input data.
+	 */
+	.arch armv4t
+#endif
+
+	.section .rodata
+	.global sigreturn_codes
+	.type	sigreturn_codes, #object
+
+	.arm
+
+sigreturn_codes:
+
+	/* ARM sigreturn syscall code snippet */
+	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+	/* Thumb sigreturn syscall code snippet */
+	.thumb
+	movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+	swi	#0
+
+	/* ARM sigreturn_rt syscall code snippet */
+	.arm
+	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+	/* Thumb sigreturn_rt syscall code snippet */
+	.thumb
+	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+	swi	#0
+
+	/*
+	 * Note on additional space: the setup_return algorithm in
+	 * signal.c uses a two-word copy regardless of whether it is
+	 * the thumb case or not, so we need an additional word after
+	 * the real last entry.
+	 */
+	.arm
+	.space	4
+
+	.size	sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index db1536b8b30b..b907d9b790ab 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -55,6 +55,7 @@
  * specific registers and some other data for resume.
  *  r0 = suspend function arg0
  *  r1 = suspend function
+ *  r2 = MPIDR value the resuming CPU will use
  */
 ENTRY(__cpu_suspend)
 	stmfd	sp!, {r4 - r11, lr}
@@ -67,23 +68,18 @@ ENTRY(__cpu_suspend)
 	mov	r5, sp			@ current virtual SP
 	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
 	sub	sp, sp, r4		@ allocate CPU state on stack
-	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
-	add	r0, sp, #8		@ save pointer to save block
-	mov	r1, r4			@ size of save block
-	mov	r2, r5			@ virtual SP
 	ldr	r3, =sleep_save_sp
+	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
 	ldr	r3, [r3, #SLEEP_SAVE_SP_VIRT]
-	ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
+	ALT_SMP(ldr r0, =mpidr_hash)
 	ALT_UP_B(1f)
-	ldr	r8, =mpidr_hash
-	/*
-	 * This ldmia relies on the memory layout of the mpidr_hash
-	 * struct mpidr_hash.
-	 */
-	ldmia	r8, {r4-r7}	@ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
-	compute_mpidr_hash	lr, r5, r6, r7, r9, r4
-	add	r3, r3, lr, lsl #2
-1:
+	/* This ldmia relies on the memory layout of the mpidr_hash struct */
+	ldmia	r0, {r1, r6-r8}	@ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
+	compute_mpidr_hash	r0, r6, r7, r8, r2, r1
+	add	r3, r3, r0, lsl #2
+1:	mov	r2, r5			@ virtual SP
+	mov	r1, r4			@ size of save block
+	add	r0, sp, #8		@ pointer to save block
 	bl	__cpu_suspend_save
 	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
@@ -130,6 +126,7 @@ ENDPROC(cpu_resume_after_mmu)
 	.data
 	.align
 ENTRY(cpu_resume)
+ARM_BE8(setend be)			@ ensure we are in BE mode
 	mov	r1, #0
 	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
 	ALT_UP_B(1f)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 72024ea8a3a6..dc894ab3622b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
+#include <linux/irq_work.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -66,6 +67,8 @@ enum ipi_msg_type {
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
+	IPI_COMPLETION,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -80,7 +83,7 @@ void __init smp_set_ops(struct smp_operations *ops)
 
 static unsigned long get_arch_pgd(pgd_t *pgd)
 {
-	phys_addr_t pgdir = virt_to_phys(pgd);
+	phys_addr_t pgdir = virt_to_idmap(pgd);
 	BUG_ON(pgdir & ARCH_PGD_MASK);
 	return pgdir >> ARCH_PGD_SHIFT;
 }
@@ -448,6 +451,14 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	if (is_smp())
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static const char *ipi_types[NR_IPI] = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -456,6 +467,8 @@ static const char *ipi_types[NR_IPI] = {
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_IRQ_WORK, "IRQ work interrupts"),
+	S(IPI_COMPLETION, "completion interrupts"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -515,6 +528,19 @@ static void ipi_cpu_stop(unsigned int cpu)
 		cpu_relax();
 }
 
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+	per_cpu(cpu_completion, cpu) = completion;
+	return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+	complete(per_cpu(cpu_completion, cpu));
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -565,6 +591,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		irq_exit();
 		break;
 
+#ifdef CONFIG_IRQ_WORK
+	case IPI_IRQ_WORK:
+		irq_enter();
+		irq_work_run();
+		irq_exit();
+		break;
+#endif
+
+	case IPI_COMPLETION:
+		irq_enter();
+		ipi_complete(cpu);
+		irq_exit();
+		break;
+
 	default:
 		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
 		       cpu, ipinr);
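
IPI_COMPLETION lets a remote CPU complete() a waiter with nothing more than an SGI; the bL switcher elsewhere in this series is the intended user. A hypothetical caller, assuming a prototype for register_ipi_completion() is visible and that platform code (e.g. the GIC's SGI interface) delivers the returned IPI number back to this CPU:

#include <linux/completion.h>
#include <linux/smp.h>

static void wait_for_cpu_signal(void)
{
	struct completion done;
	int ipi;

	init_completion(&done);
	ipi = register_ipi_completion(&done, smp_processor_id());
	/* ... arrange for the remote CPU to raise 'ipi' at us; the
	 * transport is platform-specific and elided here ... */
	wait_for_completion(&done);
}
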
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 5bc1a63284e3..1aafa0d785eb 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -28,7 +28,7 @@
  */
 unsigned int __init scu_get_core_count(void __iomem *scu_base)
 {
-	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
 	return (ncores & 0x03) + 1;
 }
 
@@ -42,19 +42,19 @@ void scu_enable(void __iomem *scu_base)
 #ifdef CONFIG_ARM_ERRATA_764369
 	/* Cortex-A9 only */
 	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
-		scu_ctrl = __raw_readl(scu_base + 0x30);
+		scu_ctrl = readl_relaxed(scu_base + 0x30);
 		if (!(scu_ctrl & 1))
-			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+			writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
 	}
 #endif
 
-	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
 	/* already enabled? */
 	if (scu_ctrl & 1)
 		return;
 
 	scu_ctrl |= 1;
-	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+	writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
 
 	/*
 	 * Ensure that the data accessed by CPU0 before the SCU was
@@ -80,9 +80,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
 
-	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
 	val |= mode;
-	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+	writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
 
 	return 0;
 }
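
The __raw_*() to *_relaxed() conversion in this file (and smp_twd.c below) is part of the BE8 enablement: the raw accessors are native-endian, while the relaxed ones fold in the little-endian conversion these memory-mapped peripherals expect, without the barriers of plain readl()/writel(). Roughly, for the SCU config read above (0x04 is the SCU_CONFIG offset on the A9 SCU):

#include <linux/io.h>

static u32 read_scu_config(void __iomem *scu_base)
{
	/* __raw_readl() would return raw bus bytes: wrong on a BE8 kernel.
	 * readl_relaxed() performs the le32_to_cpu() conversion but skips
	 * the DMA-ordering barrier that plain readl() would add. */
	return readl_relaxed(scu_base + 0x04);
}
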
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 83ccca303df8..95d063620b76 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored)
 	local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+	unsigned int midr = read_cpuid_id();
+	unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+	/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+	    (revidr & 0x210) == 0x210) {
+		return;
+	}
+	if (revidr & 0x10)
+		erratum_a15_798181_handler = erratum_a15_798181_partial;
+	else
+		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
 	dmb();
@@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void)
 	if (!erratum_a15_798181())
 		return;
 
-	dummy_flush_tlb_a15_erratum();
 	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }
 
@@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 	if (!erratum_a15_798181())
 		return;
 
-	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
 	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
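
With erratum_a15_798181_init() installing one of the two handlers at boot, the erratum_a15_798181() predicate that still gates both broadcast paths presumably collapses to an indirect call through the pointer; a sketch consistent with this hunk, not quoted from the header it actually lives in:

static inline bool erratum_a15_798181(void)
{
	/* A NULL handler means this CPU is unaffected (or the erratum
	 * config is off), so the IPI broadcast is skipped entirely. */
	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
		     erratum_a15_798181_handler))
		return erratum_a15_798181_handler();
	return false;
}
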
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2985c9f0905d..6591e26fc13f 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode,
 	case CLOCK_EVT_MODE_PERIODIC:
 		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
 			| TWD_TIMER_CONTROL_PERIODIC;
-		__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+		writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
 			twd_base + TWD_TIMER_LOAD);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
@@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode,
 		ctrl = 0;
 	}
 
-	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
 }
 
 static int twd_set_next_event(unsigned long evt,
 			struct clock_event_device *unused)
 {
-	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+	unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
 
 	ctrl |= TWD_TIMER_CONTROL_ENABLE;
 
-	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
-	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
+	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
 
 	return 0;
 }
@@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt,
  */
 static int twd_timer_ack(void)
 {
-	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
-		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+	if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
+		writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
 		return 1;
 	}
 
@@ -211,15 +211,15 @@ static void twd_calibrate_rate(void)
 	waitjiffies += 5;
 
 	/* enable, no interrupt or reload */
-	__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
 
 	/* maximum value */
-	__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+	writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
 
 	while (get_jiffies_64() < waitjiffies)
 		udelay(10);
 
-	count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+	count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
 
 	twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
@@ -277,7 +277,7 @@ static void twd_timer_setup(void)
 	 * bother with the below.
 	 */
 	if (per_cpu(percpu_setup_called, cpu)) {
-		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+		writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
 		clockevents_register_device(clk);
 		enable_percpu_irq(clk->irq, 0);
 		return;
@@ -290,7 +290,7 @@ static void twd_timer_setup(void)
 	 * The following is done once per CPU the first time .setup() is
 	 * called.
 	 */
-	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
 
 	clk->name = "local_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 41cf3cbf756d..2835d35234ca 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -10,7 +10,7 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
 extern void cpu_resume_mmu(void);
 
 #ifdef CONFIG_MMU
@@ -21,6 +21,7 @@ extern void cpu_resume_mmu(void);
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
 	struct mm_struct *mm = current->active_mm;
+	u32 __mpidr = cpu_logical_map(smp_processor_id());
 	int ret;
 
 	if (!idmap_pgd)
@@ -32,7 +33,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 * resume (indicated by a zero return code), we need to switch
 	 * back to the correct page tables.
 	 */
-	ret = __cpu_suspend(arg, fn);
+	ret = __cpu_suspend(arg, fn, __mpidr);
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
 		local_flush_bp_all();
@@ -44,7 +45,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 #else
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-	return __cpu_suspend(arg, fn);
+	u32 __mpidr = cpu_logical_map(smp_processor_id());
+	return __cpu_suspend(arg, fn, __mpidr);
 }
 #define	idmap_pgd	NULL
 #endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 8fcda140358d..6125f259b7b5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,6 +34,7 @@
 #include <asm/unwind.h>
 #include <asm/tls.h>
 #include <asm/system_misc.h>
+#include <asm/opcodes.h>
 
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
 
@@ -341,15 +342,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
 int is_valid_bugaddr(unsigned long pc)
 {
 #ifdef CONFIG_THUMB2_KERNEL
-	unsigned short bkpt;
+	u16 bkpt;
+	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
 #else
-	unsigned long bkpt;
+	u32 bkpt;
+	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
 #endif
 
 	if (probe_kernel_address((unsigned *)pc, bkpt))
 		return 0;
 
-	return bkpt == BUG_INSTR_VALUE;
+	return bkpt == insn;
 }
 
 #endif
@@ -402,25 +405,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	if (processor_mode(regs) == SVC_MODE) {
 #ifdef CONFIG_THUMB2_KERNEL
 		if (thumb_mode(regs)) {
-			instr = ((u16 *)pc)[0];
+			instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
 			if (is_wide_instruction(instr)) {
-				instr <<= 16;
-				instr |= ((u16 *)pc)[1];
+				u16 inst2;
+				inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+				instr = __opcode_thumb32_compose(instr, inst2);
 			}
 		} else
 #endif
-			instr = *(u32 *) pc;
+			instr = __mem_to_opcode_arm(*(u32 *) pc);
 	} else if (thumb_mode(regs)) {
 		if (get_user(instr, (u16 __user *)pc))
 			goto die_sig;
+		instr = __mem_to_opcode_thumb16(instr);
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
 			if (get_user(instr2, (u16 __user *)pc+1))
 				goto die_sig;
-			instr <<= 16;
-			instr |= instr2;
+			instr2 = __mem_to_opcode_thumb16(instr2);
+			instr = __opcode_thumb32_compose(instr, instr2);
 		}
 	} else if (get_user(instr, (u32 __user *)pc)) {
+		instr = __mem_to_opcode_arm(instr);
 		goto die_sig;
 	}
 