| author | Sage Weil <sage@inktank.com> | 2013-08-15 14:11:45 -0400 |
|---|---|---|
| committer | Sage Weil <sage@inktank.com> | 2013-08-15 14:11:45 -0400 |
| commit | ee3e542fec6e69bc9fb668698889a37d93950ddf (patch) | |
| tree | e74ee766a4764769ef1d3d45d266b4dea64101d3 /arch/arm/kernel | |
| parent | fe2a801b50c0bb8039d627e5ae1fec249d10ff39 (diff) | |
| parent | f1d6e17f540af37bb1891480143669ba7636c4cf (diff) | |
Merge remote-tracking branch 'linus/master' into testing
Diffstat (limited to 'arch/arm/kernel')
33 files changed, 1105 insertions, 502 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..86d10dd47dc4 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -15,14 +15,20 @@ CFLAGS_REMOVE_return_address.o = -pg | |||
15 | 15 | ||
16 | # Object file lists. | 16 | # Object file lists. |
17 | 17 | ||
18 | obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ | 18 | obj-y := elf.o entry-common.o irq.o opcodes.o \ |
19 | process.o ptrace.o return_address.o sched_clock.o \ | 19 | process.o ptrace.o return_address.o \ |
20 | setup.o signal.o stacktrace.o sys_arm.o time.o traps.o | 20 | setup.o signal.o stacktrace.o sys_arm.o time.o traps.o |
21 | 21 | ||
22 | obj-$(CONFIG_ATAGS) += atags_parse.o | 22 | obj-$(CONFIG_ATAGS) += atags_parse.o |
23 | obj-$(CONFIG_ATAGS_PROC) += atags_proc.o | 23 | obj-$(CONFIG_ATAGS_PROC) += atags_proc.o |
24 | obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o | 24 | obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o |
25 | 25 | ||
26 | ifeq ($(CONFIG_CPU_V7M),y) | ||
27 | obj-y += entry-v7m.o | ||
28 | else | ||
29 | obj-y += entry-armv.o | ||
30 | endif | ||
31 | |||
26 | obj-$(CONFIG_OC_ETM) += etm.o | 32 | obj-$(CONFIG_OC_ETM) += etm.o |
27 | obj-$(CONFIG_CPU_IDLE) += cpuidle.o | 33 | obj-$(CONFIG_CPU_IDLE) += cpuidle.o |
28 | obj-$(CONFIG_ISA_DMA_API) += dma.o | 34 | obj-$(CONFIG_ISA_DMA_API) += dma.o |
@@ -32,7 +38,10 @@ obj-$(CONFIG_ARTHUR) += arthur.o | |||
32 | obj-$(CONFIG_ISA_DMA) += dma-isa.o | 38 | obj-$(CONFIG_ISA_DMA) += dma-isa.o |
33 | obj-$(CONFIG_PCI) += bios32.o isa.o | 39 | obj-$(CONFIG_PCI) += bios32.o isa.o |
34 | obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o | 40 | obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o |
35 | obj-$(CONFIG_SMP) += smp.o smp_tlb.o | 41 | obj-$(CONFIG_SMP) += smp.o |
42 | ifdef CONFIG_MMU | ||
43 | obj-$(CONFIG_SMP) += smp_tlb.o | ||
44 | endif | ||
36 | obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o | 45 | obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o |
37 | obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o | 46 | obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o |
38 | obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o | 47 | obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o |
@@ -82,6 +91,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o | |||
82 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 91 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
83 | 92 | ||
84 | obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o | 93 | obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o |
85 | obj-$(CONFIG_ARM_PSCI) += psci.o | 94 | ifeq ($(CONFIG_ARM_PSCI),y) |
95 | obj-y += psci.o | ||
96 | obj-$(CONFIG_SMP) += psci_smp.o | ||
97 | endif | ||
86 | 98 | ||
87 | extra-y := $(head-y) vmlinux.lds | 99 | extra-y := $(head-y) vmlinux.lds |
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 59dcdced6e30..221f07b11ccb 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -11,9 +11,9 @@ | |||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/sched_clock.h> | ||
14 | 15 | ||
15 | #include <asm/delay.h> | 16 | #include <asm/delay.h> |
16 | #include <asm/sched_clock.h> | ||
17 | 17 | ||
18 | #include <clocksource/arm_arch_timer.h> | 18 | #include <clocksource/arm_arch_timer.h> |
19 | 19 | ||
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index ee68cce6b48e..ded041711beb 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/thread_info.h> | 23 | #include <asm/thread_info.h> |
24 | #include <asm/memory.h> | 24 | #include <asm/memory.h> |
25 | #include <asm/procinfo.h> | 25 | #include <asm/procinfo.h> |
26 | #include <asm/suspend.h> | ||
26 | #include <asm/hardware/cache-l2x0.h> | 27 | #include <asm/hardware/cache-l2x0.h> |
27 | #include <linux/kbuild.h> | 28 | #include <linux/kbuild.h> |
28 | 29 | ||
@@ -145,6 +146,11 @@ int main(void) | |||
145 | #ifdef MULTI_CACHE | 146 | #ifdef MULTI_CACHE |
146 | DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); | 147 | DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); |
147 | #endif | 148 | #endif |
149 | #ifdef CONFIG_ARM_CPU_SUSPEND | ||
150 | DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); | ||
151 | DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); | ||
152 | DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); | ||
153 | #endif | ||
148 | BLANK(); | 154 | BLANK(); |
149 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); | 155 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); |
150 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); | 156 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); |
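The new SLEEP_SAVE_SP_* entries follow the usual asm-offsets pattern: this C file is compiled to assembly, and the DEFINE() markers are scraped by kbuild's sed rule into the generated asm/asm-offsets.h, so assembly (here the cpu_suspend path in sleep.S) can use the struct offsets as plain immediates. A sketch of the mechanism, paraphrased from memory of include/linux/kbuild.h:

	/* emits a "->SYM $value comment" marker line into the compiler's
	 * asm output; the build turns each marker into "#define SYM value" */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* so the hunk above yields, roughly:
	 *   #define SLEEP_SAVE_SP_PHYS  <offsetof(struct sleep_save_sp, save_ptr_stash_phys)>
	 * evaluated to a constant at build time */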
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index b2ed73c45489..261fcc826169 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -445,7 +445,8 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) | |||
445 | return 0; | 445 | return 0; |
446 | } | 446 | } |
447 | 447 | ||
448 | static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) | 448 | static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, |
449 | struct list_head *head) | ||
449 | { | 450 | { |
450 | struct pci_sys_data *sys = NULL; | 451 | struct pci_sys_data *sys = NULL; |
451 | int ret; | 452 | int ret; |
@@ -480,7 +481,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) | |||
480 | if (hw->scan) | 481 | if (hw->scan) |
481 | sys->bus = hw->scan(nr, sys); | 482 | sys->bus = hw->scan(nr, sys); |
482 | else | 483 | else |
483 | sys->bus = pci_scan_root_bus(NULL, sys->busnr, | 484 | sys->bus = pci_scan_root_bus(parent, sys->busnr, |
484 | hw->ops, sys, &sys->resources); | 485 | hw->ops, sys, &sys->resources); |
485 | 486 | ||
486 | if (!sys->bus) | 487 | if (!sys->bus) |
@@ -497,7 +498,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) | |||
497 | } | 498 | } |
498 | } | 499 | } |
499 | 500 | ||
500 | void pci_common_init(struct hw_pci *hw) | 501 | void pci_common_init_dev(struct device *parent, struct hw_pci *hw) |
501 | { | 502 | { |
502 | struct pci_sys_data *sys; | 503 | struct pci_sys_data *sys; |
503 | LIST_HEAD(head); | 504 | LIST_HEAD(head); |
@@ -505,7 +506,7 @@ void pci_common_init(struct hw_pci *hw) | |||
505 | pci_add_flags(PCI_REASSIGN_ALL_RSRC); | 506 | pci_add_flags(PCI_REASSIGN_ALL_RSRC); |
506 | if (hw->preinit) | 507 | if (hw->preinit) |
507 | hw->preinit(); | 508 | hw->preinit(); |
508 | pcibios_init_hw(hw, &head); | 509 | pcibios_init_hw(parent, hw, &head); |
509 | if (hw->postinit) | 510 | if (hw->postinit) |
510 | hw->postinit(); | 511 | hw->postinit(); |
511 | 512 | ||
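pci_common_init_dev() is a new variant of pci_common_init() that threads a parent struct device down to pci_scan_root_bus() instead of the old hard-coded NULL, so the root bus gets a proper parent for sysfs and DT lookups. A hypothetical host-controller probe using the new entry point (all foo_* names invented for illustration):

	#include <linux/platform_device.h>
	#include <asm/mach/pci.h>

	static struct hw_pci foo_pci __initdata = {
		.nr_controllers = 1,
		.ops            = &foo_pci_ops,     /* assumed pci_ops for the bridge */
		.setup          = foo_pci_setup,
		.map_irq        = foo_pci_map_irq,
	};

	static int __init foo_pci_probe(struct platform_device *pdev)
	{
		/* parent the root bus under the probing device, not NULL */
		pci_common_init_dev(&pdev->dev, &foo_pci);
		return 0;
	}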
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 582b405befc5..d40d0ef389db 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -685,15 +685,16 @@ ENTRY(__switch_to) | |||
685 | UNWIND(.fnstart ) | 685 | UNWIND(.fnstart ) |
686 | UNWIND(.cantunwind ) | 686 | UNWIND(.cantunwind ) |
687 | add ip, r1, #TI_CPU_SAVE | 687 | add ip, r1, #TI_CPU_SAVE |
688 | ldr r3, [r2, #TI_TP_VALUE] | ||
689 | ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack | 688 | ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack |
690 | THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack | 689 | THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack |
691 | THUMB( str sp, [ip], #4 ) | 690 | THUMB( str sp, [ip], #4 ) |
692 | THUMB( str lr, [ip], #4 ) | 691 | THUMB( str lr, [ip], #4 ) |
692 | ldr r4, [r2, #TI_TP_VALUE] | ||
693 | ldr r5, [r2, #TI_TP_VALUE + 4] | ||
693 | #ifdef CONFIG_CPU_USE_DOMAINS | 694 | #ifdef CONFIG_CPU_USE_DOMAINS |
694 | ldr r6, [r2, #TI_CPU_DOMAIN] | 695 | ldr r6, [r2, #TI_CPU_DOMAIN] |
695 | #endif | 696 | #endif |
696 | set_tls r3, r4, r5 | 697 | switch_tls r1, r4, r5, r3, r7 |
697 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | 698 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) |
698 | ldr r7, [r2, #TI_TASK] | 699 | ldr r7, [r2, #TI_TASK] |
699 | ldr r8, =__stack_chk_guard | 700 | ldr r8, =__stack_chk_guard |
@@ -741,6 +742,18 @@ ENDPROC(__switch_to) | |||
741 | #endif | 742 | #endif |
742 | .endm | 743 | .endm |
743 | 744 | ||
745 | .macro kuser_pad, sym, size | ||
746 | .if (. - \sym) & 3 | ||
747 | .rept 4 - (. - \sym) & 3 | ||
748 | .byte 0 | ||
749 | .endr | ||
750 | .endif | ||
751 | .rept (\size - (. - \sym)) / 4 | ||
752 | .word 0xe7fddef1 | ||
753 | .endr | ||
754 | .endm | ||
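A worked expansion of the new kuser_pad macro, assuming 0xe7fddef1 decodes as an ARM undefined instruction used here as a poison value: the padding is filled with trapping words rather than zeros, so a stray branch into the gap faults immediately instead of sliding through.

	@ sketch: kuser_pad __kuser_memory_barrier, 32 with an 8-byte body
	@   -> 0 alignment bytes (. - sym is already a multiple of 4)
	@   -> (32 - 8) / 4 = 6 words of 0xe7fddef1 (undefined instruction)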
755 | |||
756 | #ifdef CONFIG_KUSER_HELPERS | ||
744 | .align 5 | 757 | .align 5 |
745 | .globl __kuser_helper_start | 758 | .globl __kuser_helper_start |
746 | __kuser_helper_start: | 759 | __kuser_helper_start: |
@@ -831,18 +844,13 @@ kuser_cmpxchg64_fixup: | |||
831 | #error "incoherent kernel configuration" | 844 | #error "incoherent kernel configuration" |
832 | #endif | 845 | #endif |
833 | 846 | ||
834 | /* pad to next slot */ | 847 | kuser_pad __kuser_cmpxchg64, 64 |
835 | .rept (16 - (. - __kuser_cmpxchg64)/4) | ||
836 | .word 0 | ||
837 | .endr | ||
838 | |||
839 | .align 5 | ||
840 | 848 | ||
841 | __kuser_memory_barrier: @ 0xffff0fa0 | 849 | __kuser_memory_barrier: @ 0xffff0fa0 |
842 | smp_dmb arm | 850 | smp_dmb arm |
843 | usr_ret lr | 851 | usr_ret lr |
844 | 852 | ||
845 | .align 5 | 853 | kuser_pad __kuser_memory_barrier, 32 |
846 | 854 | ||
847 | __kuser_cmpxchg: @ 0xffff0fc0 | 855 | __kuser_cmpxchg: @ 0xffff0fc0 |
848 | 856 | ||
@@ -915,13 +923,14 @@ kuser_cmpxchg32_fixup: | |||
915 | 923 | ||
916 | #endif | 924 | #endif |
917 | 925 | ||
918 | .align 5 | 926 | kuser_pad __kuser_cmpxchg, 32 |
919 | 927 | ||
920 | __kuser_get_tls: @ 0xffff0fe0 | 928 | __kuser_get_tls: @ 0xffff0fe0 |
921 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init | 929 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init |
922 | usr_ret lr | 930 | usr_ret lr |
923 | mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code | 931 | mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code |
924 | .rep 4 | 932 | kuser_pad __kuser_get_tls, 16 |
933 | .rep 3 | ||
925 | .word 0 @ 0xffff0ff0 software TLS value, then | 934 | .word 0 @ 0xffff0ff0 software TLS value, then |
926 | .endr @ pad up to __kuser_helper_version | 935 | .endr @ pad up to __kuser_helper_version |
927 | 936 | ||
@@ -931,14 +940,16 @@ __kuser_helper_version: @ 0xffff0ffc | |||
931 | .globl __kuser_helper_end | 940 | .globl __kuser_helper_end |
932 | __kuser_helper_end: | 941 | __kuser_helper_end: |
933 | 942 | ||
943 | #endif | ||
944 | |||
934 | THUMB( .thumb ) | 945 | THUMB( .thumb ) |
935 | 946 | ||
936 | /* | 947 | /* |
937 | * Vector stubs. | 948 | * Vector stubs. |
938 | * | 949 | * |
939 | * This code is copied to 0xffff0200 so we can use branches in the | 950 | * This code is copied to 0xffff1000 so we can use branches in the |
940 | * vectors, rather than ldr's. Note that this code must not | 951 | * vectors, rather than ldr's. Note that this code must not exceed |
941 | * exceed 0x300 bytes. | 952 | * a page size. |
942 | * | 953 | * |
943 | * Common stub entry macro: | 954 | * Common stub entry macro: |
944 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC | 955 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC |
@@ -985,8 +996,17 @@ ENDPROC(vector_\name) | |||
985 | 1: | 996 | 1: |
986 | .endm | 997 | .endm |
987 | 998 | ||
988 | .globl __stubs_start | 999 | .section .stubs, "ax", %progbits |
989 | __stubs_start: | 1000 | __stubs_start: |
1001 | @ This must be the first word | ||
1002 | .word vector_swi | ||
1003 | |||
1004 | vector_rst: | ||
1005 | ARM( swi SYS_ERROR0 ) | ||
1006 | THUMB( svc #0 ) | ||
1007 | THUMB( nop ) | ||
1008 | b vector_und | ||
1009 | |||
990 | /* | 1010 | /* |
991 | * Interrupt dispatcher | 1011 | * Interrupt dispatcher |
992 | */ | 1012 | */ |
@@ -1081,6 +1101,16 @@ __stubs_start: | |||
1081 | .align 5 | 1101 | .align 5 |
1082 | 1102 | ||
1083 | /*============================================================================= | 1103 | /*============================================================================= |
1104 | * Address exception handler | ||
1105 | *----------------------------------------------------------------------------- | ||
1106 | * These aren't too critical. | ||
1107 | * (they're not supposed to happen, and won't happen in 32-bit data mode). | ||
1108 | */ | ||
1109 | |||
1110 | vector_addrexcptn: | ||
1111 | b vector_addrexcptn | ||
1112 | |||
1113 | /*============================================================================= | ||
1084 | * Undefined FIQs | 1114 | * Undefined FIQs |
1085 | *----------------------------------------------------------------------------- | 1115 | *----------------------------------------------------------------------------- |
1086 | * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC | 1116 | * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC |
@@ -1093,45 +1123,19 @@ __stubs_start: | |||
1093 | vector_fiq: | 1123 | vector_fiq: |
1094 | subs pc, lr, #4 | 1124 | subs pc, lr, #4 |
1095 | 1125 | ||
1096 | /*============================================================================= | 1126 | .globl vector_fiq_offset |
1097 | * Address exception handler | 1127 | .equ vector_fiq_offset, vector_fiq |
1098 | *----------------------------------------------------------------------------- | ||
1099 | * These aren't too critical. | ||
1100 | * (they're not supposed to happen, and won't happen in 32-bit data mode). | ||
1101 | */ | ||
1102 | |||
1103 | vector_addrexcptn: | ||
1104 | b vector_addrexcptn | ||
1105 | |||
1106 | /* | ||
1107 | * We group all the following data together to optimise | ||
1108 | * for CPUs with separate I & D caches. | ||
1109 | */ | ||
1110 | .align 5 | ||
1111 | |||
1112 | .LCvswi: | ||
1113 | .word vector_swi | ||
1114 | |||
1115 | .globl __stubs_end | ||
1116 | __stubs_end: | ||
1117 | |||
1118 | .equ stubs_offset, __vectors_start + 0x200 - __stubs_start | ||
1119 | 1128 | ||
1120 | .globl __vectors_start | 1129 | .section .vectors, "ax", %progbits |
1121 | __vectors_start: | 1130 | __vectors_start: |
1122 | ARM( swi SYS_ERROR0 ) | 1131 | W(b) vector_rst |
1123 | THUMB( svc #0 ) | 1132 | W(b) vector_und |
1124 | THUMB( nop ) | 1133 | W(ldr) pc, __vectors_start + 0x1000 |
1125 | W(b) vector_und + stubs_offset | 1134 | W(b) vector_pabt |
1126 | W(ldr) pc, .LCvswi + stubs_offset | 1135 | W(b) vector_dabt |
1127 | W(b) vector_pabt + stubs_offset | 1136 | W(b) vector_addrexcptn |
1128 | W(b) vector_dabt + stubs_offset | 1137 | W(b) vector_irq |
1129 | W(b) vector_addrexcptn + stubs_offset | 1138 | W(b) vector_fiq |
1130 | W(b) vector_irq + stubs_offset | ||
1131 | W(b) vector_fiq + stubs_offset | ||
1132 | |||
1133 | .globl __vectors_end | ||
1134 | __vectors_end: | ||
1135 | 1139 | ||
1136 | .data | 1140 | .data |
1137 | 1141 | ||
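Net effect of the relocation above: the vectors page at 0xffff0000 keeps only the eight vector entries (the new .vectors section), the stubs move to their own page at 0xffff1000 (the .stubs section), and the old stubs_offset arithmetic disappears because both sections are now linked at their final offsets. The SWI vector is the single ldr: it fetches the address of vector_swi from the first word of the stubs page, which is why that .word must come first.

	@ layout after this change (sketch):
	@ 0xffff0000: .vectors -- eight W(b)/W(ldr) entries
	@ 0xffff1000: .stubs   -- word 0 = address of vector_swi,
	@                         then vector_rst, vector_irq, ... stubs
	@ hence: W(ldr) pc, __vectors_start + 0x1000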
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a97131..94104bf69719 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -350,6 +350,9 @@ ENDPROC(ftrace_stub) | |||
350 | 350 | ||
351 | .align 5 | 351 | .align 5 |
352 | ENTRY(vector_swi) | 352 | ENTRY(vector_swi) |
353 | #ifdef CONFIG_CPU_V7M | ||
354 | v7m_exception_entry | ||
355 | #else | ||
353 | sub sp, sp, #S_FRAME_SIZE | 356 | sub sp, sp, #S_FRAME_SIZE |
354 | stmia sp, {r0 - r12} @ Calling r0 - r12 | 357 | stmia sp, {r0 - r12} @ Calling r0 - r12 |
355 | ARM( add r8, sp, #S_PC ) | 358 | ARM( add r8, sp, #S_PC ) |
@@ -360,8 +363,19 @@ ENTRY(vector_swi) | |||
360 | str lr, [sp, #S_PC] @ Save calling PC | 363 | str lr, [sp, #S_PC] @ Save calling PC |
361 | str r8, [sp, #S_PSR] @ Save CPSR | 364 | str r8, [sp, #S_PSR] @ Save CPSR |
362 | str r0, [sp, #S_OLD_R0] @ Save OLD_R0 | 365 | str r0, [sp, #S_OLD_R0] @ Save OLD_R0 |
366 | #endif | ||
363 | zero_fp | 367 | zero_fp |
364 | 368 | ||
369 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
370 | ldr ip, __cr_alignment | ||
371 | ldr ip, [ip] | ||
372 | mcr p15, 0, ip, c1, c0 @ update control register | ||
373 | #endif | ||
374 | |||
375 | enable_irq | ||
376 | ct_user_exit | ||
377 | get_thread_info tsk | ||
378 | |||
365 | /* | 379 | /* |
366 | * Get the system call number. | 380 | * Get the system call number. |
367 | */ | 381 | */ |
@@ -375,9 +389,9 @@ ENTRY(vector_swi) | |||
375 | #ifdef CONFIG_ARM_THUMB | 389 | #ifdef CONFIG_ARM_THUMB |
376 | tst r8, #PSR_T_BIT | 390 | tst r8, #PSR_T_BIT |
377 | movne r10, #0 @ no thumb OABI emulation | 391 | movne r10, #0 @ no thumb OABI emulation |
378 | ldreq r10, [lr, #-4] @ get SWI instruction | 392 | USER( ldreq r10, [lr, #-4] ) @ get SWI instruction |
379 | #else | 393 | #else |
380 | ldr r10, [lr, #-4] @ get SWI instruction | 394 | USER( ldr r10, [lr, #-4] ) @ get SWI instruction |
381 | #endif | 395 | #endif |
382 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 396 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
383 | rev r10, r10 @ little endian instruction | 397 | rev r10, r10 @ little endian instruction |
@@ -392,22 +406,13 @@ ENTRY(vector_swi) | |||
392 | /* Legacy ABI only, possibly thumb mode. */ | 406 | /* Legacy ABI only, possibly thumb mode. */ |
393 | tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs | 407 | tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs |
394 | addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in | 408 | addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in |
395 | ldreq scno, [lr, #-4] | 409 | USER( ldreq scno, [lr, #-4] ) |
396 | 410 | ||
397 | #else | 411 | #else |
398 | /* Legacy ABI only. */ | 412 | /* Legacy ABI only. */ |
399 | ldr scno, [lr, #-4] @ get SWI instruction | 413 | USER( ldr scno, [lr, #-4] ) @ get SWI instruction |
400 | #endif | 414 | #endif |
401 | 415 | ||
402 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
403 | ldr ip, __cr_alignment | ||
404 | ldr ip, [ip] | ||
405 | mcr p15, 0, ip, c1, c0 @ update control register | ||
406 | #endif | ||
407 | enable_irq | ||
408 | ct_user_exit | ||
409 | |||
410 | get_thread_info tsk | ||
411 | adr tbl, sys_call_table @ load syscall table pointer | 416 | adr tbl, sys_call_table @ load syscall table pointer |
412 | 417 | ||
413 | #if defined(CONFIG_OABI_COMPAT) | 418 | #if defined(CONFIG_OABI_COMPAT) |
@@ -442,6 +447,21 @@ local_restart: | |||
442 | eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back | 447 | eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back |
443 | bcs arm_syscall | 448 | bcs arm_syscall |
444 | b sys_ni_syscall @ not private func | 449 | b sys_ni_syscall @ not private func |
450 | |||
451 | #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) | ||
452 | /* | ||
453 | * We failed to handle a fault trying to access the page | ||
454 | * containing the swi instruction, but we're not really in a | ||
455 | * position to return -EFAULT. Instead, return back to the | ||
456 | * instruction and re-enter the user fault handling path trying | ||
457 | * to page it in. This will likely result in sending SEGV to the | ||
458 | * current task. | ||
459 | */ | ||
460 | 9001: | ||
461 | sub lr, lr, #4 | ||
462 | str lr, [sp, #S_PC] | ||
463 | b ret_fast_syscall | ||
464 | #endif | ||
445 | ENDPROC(vector_swi) | 465 | ENDPROC(vector_swi) |
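The SWI-instruction loads are now wrapped in USER() so that a fault on a paged-out vector page is recovered through the new 9001 fixup instead of oopsing. From memory of arch/arm/include/asm/assembler.h, USER() emits the instruction plus an exception-table entry pointing at the local 9001 label:

	#define USER(x...)				\
	9999:	x;					\
		.pushsection __ex_table, "a";		\
		.align	3;				\
		.long	9999b, 9001f;			\
		.popsection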
446 | 466 | ||
447 | /* | 467 | /* |
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 160f3376ba6d..de23a9beed13 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -5,6 +5,7 @@ | |||
5 | #include <asm/asm-offsets.h> | 5 | #include <asm/asm-offsets.h> |
6 | #include <asm/errno.h> | 6 | #include <asm/errno.h> |
7 | #include <asm/thread_info.h> | 7 | #include <asm/thread_info.h> |
8 | #include <asm/v7m.h> | ||
8 | 9 | ||
9 | @ Bad Abort numbers | 10 | @ Bad Abort numbers |
10 | @ ----------------- | 11 | @ ----------------- |
@@ -44,6 +45,116 @@ | |||
44 | #endif | 45 | #endif |
45 | .endm | 46 | .endm |
46 | 47 | ||
48 | #ifdef CONFIG_CPU_V7M | ||
49 | /* | ||
50 | * ARMv7-M exception entry/exit macros. | ||
51 | * | ||
52 | * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are | ||
53 | * automatically saved on the current stack (8 words, 32 bytes) before | ||
54 | * switching to the exception stack (SP_main). | ||
55 | * | ||
56 | * If an exception is taken while in user mode, SP_main is | ||
57 | * empty. Otherwise, SP_main is automatically 64-bit aligned | ||
58 | * (CCR.STKALIGN set). | ||
59 | * | ||
60 | * Linux assumes that the interrupts are disabled when entering an | ||
61 | * exception handler and it may BUG if this is not the case. Interrupts | ||
62 | * are disabled during entry and reenabled in the exit macro. | ||
63 | * | ||
64 | * v7m_exception_slow_exit is used when returning from SVC or PendSV. | ||
65 | * When returning to kernel mode, we don't return from exception. | ||
66 | */ | ||
67 | .macro v7m_exception_entry | ||
68 | @ determine the location of the registers saved by the core during | ||
69 | @ exception entry. Depending on the mode the cpu was in when the | ||
70 | @ exception happend that is either on the main or the process stack. | ||
71 | @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack | ||
72 | @ was used. | ||
73 | tst lr, #EXC_RET_STACK_MASK | ||
74 | mrsne r12, psp | ||
75 | moveq r12, sp | ||
76 | |||
77 | @ we cannot rely on r0-r3 and r12 matching the value saved in the | ||
78 | @ exception frame because of tail-chaining. So these have to be | ||
79 | @ reloaded. | ||
80 | ldmia r12!, {r0-r3} | ||
81 | |||
82 | @ Linux expects to have irqs off. Do it here before taking stack space | ||
83 | cpsid i | ||
84 | |||
85 | sub sp, #S_FRAME_SIZE-S_IP | ||
86 | stmdb sp!, {r0-r11} | ||
87 | |||
88 | @ load saved r12, lr, return address and xPSR. | ||
89 | @ r0-r7 are used for signals and never touched from now on. Clobbering | ||
90 | @ r8-r12 is OK. | ||
91 | mov r9, r12 | ||
92 | ldmia r9!, {r8, r10-r12} | ||
93 | |||
94 | @ calculate the original stack pointer value. | ||
95 | @ r9 currently points to the memory location just above the auto saved | ||
96 | @ xPSR. | ||
97 | @ The cpu might automatically 8-byte align the stack. Bit 9 | ||
98 | @ of the saved xPSR specifies if stack aligning took place. In this case | ||
99 | @ another 32-bit value is included in the stack. | ||
100 | |||
101 | tst r12, V7M_xPSR_FRAMEPTRALIGN | ||
102 | addne r9, r9, #4 | ||
103 | |||
104 | @ store saved r12 using str to have a register to hold the base for stm | ||
105 | str r8, [sp, #S_IP] | ||
106 | add r8, sp, #S_SP | ||
107 | @ store r13-r15, xPSR | ||
108 | stmia r8!, {r9-r12} | ||
109 | @ store old_r0 | ||
110 | str r0, [r8] | ||
111 | .endm | ||
112 | |||
113 | /* | ||
114 | * PENDSV and SVCALL are configured to have the same exception | ||
115 | * priorities. As a kernel thread runs at SVCALL execution priority it | ||
116 | * can never be preempted and so we will never have to return to a | ||
117 | * kernel thread here. | ||
118 | */ | ||
119 | .macro v7m_exception_slow_exit ret_r0 | ||
120 | cpsid i | ||
121 | ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK | ||
122 | |||
123 | @ read original r12, sp, lr, pc and xPSR | ||
124 | add r12, sp, #S_IP | ||
125 | ldmia r12, {r1-r5} | ||
126 | |||
127 | @ an exception frame is always 8-byte aligned. To tell the hardware if | ||
128 | @ the sp to be restored is aligned or not set bit 9 of the saved xPSR | ||
129 | @ accordingly. | ||
130 | tst r2, #4 | ||
131 | subne r2, r2, #4 | ||
132 | orrne r5, V7M_xPSR_FRAMEPTRALIGN | ||
133 | biceq r5, V7M_xPSR_FRAMEPTRALIGN | ||
134 | |||
135 | @ write basic exception frame | ||
136 | stmdb r2!, {r1, r3-r5} | ||
137 | ldmia sp, {r1, r3-r5} | ||
138 | .if \ret_r0 | ||
139 | stmdb r2!, {r0, r3-r5} | ||
140 | .else | ||
141 | stmdb r2!, {r1, r3-r5} | ||
142 | .endif | ||
143 | |||
144 | @ restore process sp | ||
145 | msr psp, r2 | ||
146 | |||
147 | @ restore original r4-r11 | ||
148 | ldmia sp!, {r0-r11} | ||
149 | |||
150 | @ restore main sp | ||
151 | add sp, sp, #S_FRAME_SIZE-S_IP | ||
152 | |||
153 | cpsie i | ||
154 | bx lr | ||
155 | .endm | ||
156 | #endif /* CONFIG_CPU_V7M */ | ||
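For reference, the eight-word frame that the ARMv7-M core pushes automatically on exception entry, which v7m_exception_entry above unpacks into a struct pt_regs, can be pictured as the following illustrative C layout (not a real kernel type):

	#include <linux/types.h>

	struct v7m_hw_frame {		/* lowest address first */
		u32 r0, r1, r2, r3;	/* reloaded via "ldmia r12!, {r0-r3}" */
		u32 r12;
		u32 lr;			/* r14 at the point of the exception */
		u32 ret_addr;		/* ReturnAddress(): resume PC */
		u32 xpsr;		/* bit 9 set if the core realigned sp by 4 */
	};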
157 | |||
47 | @ | 158 | @ |
48 | @ Store/load the USER SP and LR registers by switching to the SYS | 159 | @ Store/load the USER SP and LR registers by switching to the SYS |
49 | @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not | 160 | @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not |
@@ -165,6 +276,18 @@ | |||
165 | rfeia sp! | 276 | rfeia sp! |
166 | .endm | 277 | .endm |
167 | 278 | ||
279 | #ifdef CONFIG_CPU_V7M | ||
280 | /* | ||
281 | * Note we don't need to do clrex here as clearing the local monitor is | ||
282 | * part of each exception entry and exit sequence. | ||
283 | */ | ||
284 | .macro restore_user_regs, fast = 0, offset = 0 | ||
285 | .if \offset | ||
286 | add sp, #\offset | ||
287 | .endif | ||
288 | v7m_exception_slow_exit ret_r0 = \fast | ||
289 | .endm | ||
290 | #else /* ifdef CONFIG_CPU_V7M */ | ||
168 | .macro restore_user_regs, fast = 0, offset = 0 | 291 | .macro restore_user_regs, fast = 0, offset = 0 |
169 | clrex @ clear the exclusive monitor | 292 | clrex @ clear the exclusive monitor |
170 | mov r2, sp | 293 | mov r2, sp |
@@ -181,6 +304,7 @@ | |||
181 | add sp, sp, #S_FRAME_SIZE - S_SP | 304 | add sp, sp, #S_FRAME_SIZE - S_SP |
182 | movs pc, lr @ return & move spsr_svc into cpsr | 305 | movs pc, lr @ return & move spsr_svc into cpsr |
183 | .endm | 306 | .endm |
307 | #endif /* ifdef CONFIG_CPU_V7M / else */ | ||
184 | 308 | ||
185 | .macro get_thread_info, rd | 309 | .macro get_thread_info, rd |
186 | mov \rd, sp | 310 | mov \rd, sp |
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
new file mode 100644
index 000000000000..52b26432c9a9
--- /dev/null
+++ b/arch/arm/kernel/entry-v7m.S
@@ -0,0 +1,143 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/entry-v7m.S | ||
3 | * | ||
4 | * Copyright (C) 2008 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Low-level vector interface routines for the ARMv7-M architecture | ||
11 | */ | ||
12 | #include <asm/memory.h> | ||
13 | #include <asm/glue.h> | ||
14 | #include <asm/thread_notify.h> | ||
15 | #include <asm/v7m.h> | ||
16 | |||
17 | #include <mach/entry-macro.S> | ||
18 | |||
19 | #include "entry-header.S" | ||
20 | |||
21 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
22 | #error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation" | ||
23 | #endif | ||
24 | |||
25 | __invalid_entry: | ||
26 | v7m_exception_entry | ||
27 | adr r0, strerr | ||
28 | mrs r1, ipsr | ||
29 | mov r2, lr | ||
30 | bl printk | ||
31 | mov r0, sp | ||
32 | bl show_regs | ||
33 | 1: b 1b | ||
34 | ENDPROC(__invalid_entry) | ||
35 | |||
36 | strerr: .asciz "\nUnhandled exception: IPSR = %08lx LR = %08lx\n" | ||
37 | |||
38 | .align 2 | ||
39 | __irq_entry: | ||
40 | v7m_exception_entry | ||
41 | |||
42 | @ | ||
43 | @ Invoke the IRQ handler | ||
44 | @ | ||
45 | mrs r0, ipsr | ||
46 | ldr r1, =V7M_xPSR_EXCEPTIONNO | ||
47 | and r0, r1 | ||
48 | sub r0, #16 | ||
49 | mov r1, sp | ||
50 | stmdb sp!, {lr} | ||
51 | @ routine called with r0 = irq number, r1 = struct pt_regs * | ||
52 | bl nvic_handle_irq | ||
53 | |||
54 | pop {lr} | ||
55 | @ | ||
56 | @ Check for any pending work if returning to user | ||
57 | @ | ||
58 | ldr r1, =BASEADDR_V7M_SCB | ||
59 | ldr r0, [r1, V7M_SCB_ICSR] | ||
60 | tst r0, V7M_SCB_ICSR_RETTOBASE | ||
61 | beq 2f | ||
62 | |||
63 | get_thread_info tsk | ||
64 | ldr r2, [tsk, #TI_FLAGS] | ||
65 | tst r2, #_TIF_WORK_MASK | ||
66 | beq 2f @ no work pending | ||
67 | mov r0, #V7M_SCB_ICSR_PENDSVSET | ||
68 | str r0, [r1, V7M_SCB_ICSR] @ raise PendSV | ||
69 | |||
70 | 2: | ||
71 | @ registers r0-r3 and r12 are automatically restored on exception | ||
72 | @ return. r4-r7 were not clobbered in v7m_exception_entry so for | ||
73 | @ correctness they don't need to be restored. So only r8-r11 must be | ||
74 | @ restored here. The easiest way to do so is to restore r0-r7, too. | ||
75 | ldmia sp!, {r0-r11} | ||
76 | add sp, #S_FRAME_SIZE-S_IP | ||
77 | cpsie i | ||
78 | bx lr | ||
79 | ENDPROC(__irq_entry) | ||
80 | |||
81 | __pendsv_entry: | ||
82 | v7m_exception_entry | ||
83 | |||
84 | ldr r1, =BASEADDR_V7M_SCB | ||
85 | mov r0, #V7M_SCB_ICSR_PENDSVCLR | ||
86 | str r0, [r1, V7M_SCB_ICSR] @ clear PendSV | ||
87 | |||
88 | @ execute the pending work, including reschedule | ||
89 | get_thread_info tsk | ||
90 | mov why, #0 | ||
91 | b ret_to_user | ||
92 | ENDPROC(__pendsv_entry) | ||
93 | |||
94 | /* | ||
95 | * Register switch for ARMv7-M processors. | ||
96 | * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info | ||
97 | * previous and next are guaranteed not to be the same. | ||
98 | */ | ||
99 | ENTRY(__switch_to) | ||
100 | .fnstart | ||
101 | .cantunwind | ||
102 | add ip, r1, #TI_CPU_SAVE | ||
103 | stmia ip!, {r4 - r11} @ Store most regs on stack | ||
104 | str sp, [ip], #4 | ||
105 | str lr, [ip], #4 | ||
106 | mov r5, r0 | ||
107 | add r4, r2, #TI_CPU_SAVE | ||
108 | ldr r0, =thread_notify_head | ||
109 | mov r1, #THREAD_NOTIFY_SWITCH | ||
110 | bl atomic_notifier_call_chain | ||
111 | mov ip, r4 | ||
112 | mov r0, r5 | ||
113 | ldmia ip!, {r4 - r11} @ Load all regs saved previously | ||
114 | ldr sp, [ip] | ||
115 | ldr pc, [ip, #4]! | ||
116 | .fnend | ||
117 | ENDPROC(__switch_to) | ||
118 | |||
119 | .data | ||
120 | .align 8 | ||
121 | /* | ||
122 | * Vector table (64 words => 256 bytes natural alignment) | ||
123 | */ | ||
124 | ENTRY(vector_table) | ||
125 | .long 0 @ 0 - Reset stack pointer | ||
126 | .long __invalid_entry @ 1 - Reset | ||
127 | .long __invalid_entry @ 2 - NMI | ||
128 | .long __invalid_entry @ 3 - HardFault | ||
129 | .long __invalid_entry @ 4 - MemManage | ||
130 | .long __invalid_entry @ 5 - BusFault | ||
131 | .long __invalid_entry @ 6 - UsageFault | ||
132 | .long __invalid_entry @ 7 - Reserved | ||
133 | .long __invalid_entry @ 8 - Reserved | ||
134 | .long __invalid_entry @ 9 - Reserved | ||
135 | .long __invalid_entry @ 10 - Reserved | ||
136 | .long vector_swi @ 11 - SVCall | ||
137 | .long __invalid_entry @ 12 - Debug Monitor | ||
138 | .long __invalid_entry @ 13 - Reserved | ||
139 | .long __pendsv_entry @ 14 - PendSV | ||
140 | .long __invalid_entry @ 15 - SysTick | ||
141 | .rept 64 - 16 | ||
142 | .long __irq_entry @ 16..63 - External Interrupts | ||
143 | .endr | ||
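Note how the table and __irq_entry fit together: ARMv7-M exception numbers 0-15 are architectural, and external interrupts start at number 16, which is why __irq_entry derives the Linux IRQ number as

	irq = (xPSR & V7M_xPSR_EXCEPTIONNO) - 16;	/* the and/sub pair above */

before handing it to nvic_handle_irq().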
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@ | |||
47 | #include <asm/irq.h> | 47 | #include <asm/irq.h> |
48 | #include <asm/traps.h> | 48 | #include <asm/traps.h> |
49 | 49 | ||
50 | #define FIQ_OFFSET ({ \ | ||
51 | extern void *vector_fiq_offset; \ | ||
52 | (unsigned)&vector_fiq_offset; \ | ||
53 | }) | ||
54 | |||
50 | static unsigned long no_fiq_insn; | 55 | static unsigned long no_fiq_insn; |
51 | 56 | ||
52 | /* Default reacquire function | 57 | /* Default reacquire function |
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec) | |||
80 | void set_fiq_handler(void *start, unsigned int length) | 85 | void set_fiq_handler(void *start, unsigned int length) |
81 | { | 86 | { |
82 | #if defined(CONFIG_CPU_USE_DOMAINS) | 87 | #if defined(CONFIG_CPU_USE_DOMAINS) |
83 | memcpy((void *)0xffff001c, start, length); | 88 | void *base = (void *)0xffff0000; |
84 | #else | 89 | #else |
85 | memcpy(vectors_page + 0x1c, start, length); | 90 | void *base = vectors_page; |
86 | #endif | 91 | #endif |
87 | flush_icache_range(0xffff001c, 0xffff001c + length); | 92 | unsigned offset = FIQ_OFFSET; |
93 | |||
94 | memcpy(base + offset, start, length); | ||
95 | flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); | ||
88 | if (!vectors_high()) | 96 | if (!vectors_high()) |
89 | flush_icache_range(0x1c, 0x1c + length); | 97 | flush_icache_range(offset, offset + length); |
90 | } | 98 | } |
91 | 99 | ||
92 | int claim_fiq(struct fiq_handler *f) | 100 | int claim_fiq(struct fiq_handler *f) |
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq); | |||
144 | 152 | ||
145 | void __init init_FIQ(int start) | 153 | void __init init_FIQ(int start) |
146 | { | 154 | { |
147 | no_fiq_insn = *(unsigned long *)0xffff001c; | 155 | unsigned offset = FIQ_OFFSET; |
156 | no_fiq_insn = *(unsigned long *)(0xffff0000 + offset); | ||
148 | fiq_start = start; | 157 | fiq_start = start; |
149 | } | 158 | } |
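The FIQ_OFFSET macro leans on vector_fiq_offset being an absolute assembler symbol (.equ vector_fiq_offset, vector_fiq in the relocated stubs), not a variable: since the stubs section is linked at its final offset from the vectors base, the symbol's link-time "address" is the FIQ vector's offset within the vectors/stubs mapping. A minimal sketch of the idiom:

	extern char vector_fiq_offset[];	/* no storage; the value lives in the symbol */
	unsigned off = (unsigned)vector_fiq_offset;
	/* set_fiq_handler() then copies to base + off and flushes
	 * 0xffff0000 + off, as in the hunk above */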
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 5b391a689b47..47cd974e57ea 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -133,6 +133,9 @@ ENTRY(lookup_processor_type) | |||
133 | ldmfd sp!, {r4 - r6, r9, pc} | 133 | ldmfd sp!, {r4 - r6, r9, pc} |
134 | ENDPROC(lookup_processor_type) | 134 | ENDPROC(lookup_processor_type) |
135 | 135 | ||
136 | __FINIT | ||
137 | .text | ||
138 | |||
136 | /* | 139 | /* |
137 | * Read processor ID register (CP#15, CR0), and look up in the linker-built | 140 | * Read processor ID register (CP#15, CR0), and look up in the linker-built |
138 | * supported processor list. Note that we can't use the absolute addresses | 141 | * supported processor list. Note that we can't use the absolute addresses |
@@ -146,7 +149,6 @@ ENDPROC(lookup_processor_type) | |||
146 | * r5 = proc_info pointer in physical address space | 149 | * r5 = proc_info pointer in physical address space |
147 | * r9 = cpuid (preserved) | 150 | * r9 = cpuid (preserved) |
148 | */ | 151 | */ |
149 | __CPUINIT | ||
150 | __lookup_processor_type: | 152 | __lookup_processor_type: |
151 | adr r3, __lookup_processor_type_data | 153 | adr r3, __lookup_processor_type_data |
152 | ldmia r3, {r4 - r6} | 154 | ldmia r3, {r4 - r6} |
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6a2e09c952c7..14235ba64a90 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -17,8 +17,12 @@ | |||
17 | #include <asm/assembler.h> | 17 | #include <asm/assembler.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
20 | #include <asm/memory.h> | ||
20 | #include <asm/cp15.h> | 21 | #include <asm/cp15.h> |
21 | #include <asm/thread_info.h> | 22 | #include <asm/thread_info.h> |
23 | #include <asm/v7m.h> | ||
24 | #include <asm/mpu.h> | ||
25 | #include <asm/page.h> | ||
22 | 26 | ||
23 | /* | 27 | /* |
24 | * Kernel startup entry point. | 28 | * Kernel startup entry point. |
@@ -50,21 +54,86 @@ ENTRY(stext) | |||
50 | 54 | ||
51 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode | 55 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode |
52 | @ and irqs disabled | 56 | @ and irqs disabled |
53 | #ifndef CONFIG_CPU_CP15 | 57 | #if defined(CONFIG_CPU_CP15) |
54 | ldr r9, =CONFIG_PROCESSOR_ID | ||
55 | #else | ||
56 | mrc p15, 0, r9, c0, c0 @ get processor id | 58 | mrc p15, 0, r9, c0, c0 @ get processor id |
59 | #elif defined(CONFIG_CPU_V7M) | ||
60 | ldr r9, =BASEADDR_V7M_SCB | ||
61 | ldr r9, [r9, V7M_SCB_CPUID] | ||
62 | #else | ||
63 | ldr r9, =CONFIG_PROCESSOR_ID | ||
57 | #endif | 64 | #endif |
58 | bl __lookup_processor_type @ r5=procinfo r9=cpuid | 65 | bl __lookup_processor_type @ r5=procinfo r9=cpuid |
59 | movs r10, r5 @ invalid processor (r5=0)? | 66 | movs r10, r5 @ invalid processor (r5=0)? |
60 | beq __error_p @ yes, error 'p' | 67 | beq __error_p @ yes, error 'p' |
61 | 68 | ||
62 | adr lr, BSYM(__after_proc_init) @ return (PIC) address | 69 | #ifdef CONFIG_ARM_MPU |
70 | /* Calculate the size of a region covering just the kernel */ | ||
71 | ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET | ||
72 | ldr r6, =(_end) @ Cover whole kernel | ||
73 | sub r6, r6, r5 @ Minimum size of region to map | ||
74 | clz r6, r6 @ Region size must be 2^N... | ||
75 | rsb r6, r6, #31 @ ...so round up region size | ||
76 | lsl r6, r6, #MPU_RSR_SZ @ Put size in right field | ||
77 | orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit | ||
78 | bl __setup_mpu | ||
79 | #endif | ||
80 | ldr r13, =__mmap_switched @ address to jump to after | ||
81 | @ initialising sctlr | ||
82 | adr lr, BSYM(1f) @ return (PIC) address | ||
63 | ARM( add pc, r10, #PROCINFO_INITFUNC ) | 83 | ARM( add pc, r10, #PROCINFO_INITFUNC ) |
64 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | 84 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) |
65 | THUMB( mov pc, r12 ) | 85 | THUMB( mov pc, r12 ) |
86 | 1: b __after_proc_init | ||
66 | ENDPROC(stext) | 87 | ENDPROC(stext) |
67 | 88 | ||
89 | #ifdef CONFIG_SMP | ||
90 | .text | ||
91 | ENTRY(secondary_startup) | ||
92 | /* | ||
93 | * Common entry point for secondary CPUs. | ||
94 | * | ||
95 | * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | ||
96 | * the processor type - there is no need to check the machine type | ||
97 | * as it has already been validated by the primary processor. | ||
98 | */ | ||
99 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | ||
100 | #ifndef CONFIG_CPU_CP15 | ||
101 | ldr r9, =CONFIG_PROCESSOR_ID | ||
102 | #else | ||
103 | mrc p15, 0, r9, c0, c0 @ get processor id | ||
104 | #endif | ||
105 | bl __lookup_processor_type @ r5=procinfo r9=cpuid | ||
106 | movs r10, r5 @ invalid processor? | ||
107 | beq __error_p @ yes, error 'p' | ||
108 | |||
109 | adr r4, __secondary_data | ||
110 | ldmia r4, {r7, r12} | ||
111 | |||
112 | #ifdef CONFIG_ARM_MPU | ||
113 | /* Use MPU region info supplied by __cpu_up */ | ||
114 | ldr r6, [r7] @ get secondary_data.mpu_szr | ||
115 | bl __setup_mpu @ Initialize the MPU | ||
116 | #endif | ||
117 | |||
118 | adr lr, BSYM(__after_proc_init) @ return address | ||
119 | mov r13, r12 @ __secondary_switched address | ||
120 | ARM( add pc, r10, #PROCINFO_INITFUNC ) | ||
121 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | ||
122 | THUMB( mov pc, r12 ) | ||
123 | ENDPROC(secondary_startup) | ||
124 | |||
125 | ENTRY(__secondary_switched) | ||
126 | ldr sp, [r7, #8] @ set up the stack pointer | ||
127 | mov fp, #0 | ||
128 | b secondary_start_kernel | ||
129 | ENDPROC(__secondary_switched) | ||
130 | |||
131 | .type __secondary_data, %object | ||
132 | __secondary_data: | ||
133 | .long secondary_data | ||
134 | .long __secondary_switched | ||
135 | #endif /* CONFIG_SMP */ | ||
136 | |||
68 | /* | 137 | /* |
69 | * Set the Control Register and Read the process ID. | 138 | * Set the Control Register and Read the process ID. |
70 | */ | 139 | */ |
@@ -95,10 +164,97 @@ __after_proc_init: | |||
95 | #endif | 164 | #endif |
96 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | 165 | mcr p15, 0, r0, c1, c0, 0 @ write control reg |
97 | #endif /* CONFIG_CPU_CP15 */ | 166 | #endif /* CONFIG_CPU_CP15 */ |
98 | 167 | mov pc, r13 | |
99 | b __mmap_switched @ clear the BSS and jump | ||
100 | @ to start_kernel | ||
101 | ENDPROC(__after_proc_init) | 168 | ENDPROC(__after_proc_init) |
102 | .ltorg | 169 | .ltorg |
103 | 170 | ||
171 | #ifdef CONFIG_ARM_MPU | ||
172 | |||
173 | |||
174 | /* Set which MPU region should be programmed */ | ||
175 | .macro set_region_nr tmp, rgnr | ||
176 | mov \tmp, \rgnr @ Use static region numbers | ||
177 | mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR | ||
178 | .endm | ||
179 | |||
180 | /* Setup a single MPU region, either D or I side (D-side for unified) */ | ||
181 | .macro setup_region bar, acr, sr, side = MPU_DATA_SIDE | ||
182 | mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR | ||
183 | mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR | ||
184 | mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR | ||
185 | .endm | ||
186 | |||
187 | /* | ||
188 | * Setup the MPU and initial MPU Regions. We create the following regions: | ||
189 | * Region 0: Use this for probing the MPU details, so leave disabled. | ||
190 | * Region 1: Background region - covers the whole of RAM as strongly ordered | ||
191 | * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6 | ||
192 | * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page | ||
193 | * | ||
194 | * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION | ||
195 | */ | ||
196 | |||
197 | ENTRY(__setup_mpu) | ||
198 | |||
199 | /* Probe for v7 PMSA compliance */ | ||
200 | mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 | ||
201 | and r0, r0, #(MMFR0_PMSA) @ PMSA field | ||
202 | teq r0, #(MMFR0_PMSAv7) @ PMSA v7 | ||
203 | bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA | ||
204 | |||
205 | /* Determine whether the D/I-side memory map is unified. We set the | ||
206 | * flags here and continue to use them for the rest of this function */ | ||
207 | mrc p15, 0, r0, c0, c0, 4 @ MPUIR | ||
208 | ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU | ||
209 | beq __error_p @ Fail: ARM_MPU and no MPU | ||
210 | tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified | ||
211 | |||
212 | /* Setup second region first to free up r6 */ | ||
213 | set_region_nr r0, #MPU_RAM_REGION | ||
214 | isb | ||
215 | /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ | ||
216 | ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET | ||
217 | ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) | ||
218 | |||
219 | setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled | ||
220 | beq 1f @ Memory-map not unified | ||
221 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled | ||
222 | 1: isb | ||
223 | |||
224 | /* First/background region */ | ||
225 | set_region_nr r0, #MPU_BG_REGION | ||
226 | isb | ||
227 | /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ | ||
228 | mov r0, #0 @ BG region starts at 0x0 | ||
229 | ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) | ||
230 | mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled | ||
231 | |||
232 | setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled | ||
233 | beq 2f @ Memory-map not unified | ||
234 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled | ||
235 | 2: isb | ||
236 | |||
237 | /* Vectors region */ | ||
238 | set_region_nr r0, #MPU_VECTORS_REGION | ||
239 | isb | ||
240 | /* Shared, inaccessible to PL0, rw PL1 */ | ||
241 | mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE | ||
242 | ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL) | ||
243 | /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^(N+1) */ | ||
244 | mov r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) | ||
245 | |||
246 | setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled | ||
247 | beq 3f @ Memory-map not unified | ||
248 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled | ||
249 | 3: isb | ||
250 | |||
251 | /* Enable the MPU */ | ||
252 | mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR | ||
253 | bic r0, r0, #CR_BR @ Disable the 'default mem-map' | ||
254 | orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) | ||
255 | mcr p15, 0, r0, c1, c0, 0 @ Enable MPU | ||
256 | isb | ||
257 | mov pc,lr | ||
258 | ENDPROC(__setup_mpu) | ||
259 | #endif | ||
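A worked example of the DRSR size encoding used by __setup_mpu, assuming bits 5:1 hold SZ and the region size is 2^(SZ+1) bytes:

	@ vectors region: SZ = PAGE_SHIFT - 1 = 11  ->  2^12 = 4 KiB (one page)
	@ kernel region (size computed in stext): for a 5 MiB image,
	@   clz(0x500000) = 9; 31 - 9 = 22 -> SZ = 22 -> 2^23 = 8 MiB
	@ i.e. the clz/rsb pair rounds the covering region up to a power of two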
104 | #include "head-common.S" | 260 | #include "head-common.S" |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553fe213..2c7cc1e03473 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -156,7 +156,7 @@ ENDPROC(stext) | |||
156 | * | 156 | * |
157 | * Returns: | 157 | * Returns: |
158 | * r0, r3, r5-r7 corrupted | 158 | * r0, r3, r5-r7 corrupted |
159 | * r4 = physical page table address | 159 | * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) |
160 | */ | 160 | */ |
161 | __create_page_tables: | 161 | __create_page_tables: |
162 | pgtbl r4, r8 @ page table address | 162 | pgtbl r4, r8 @ page table address |
@@ -331,6 +331,7 @@ __create_page_tables: | |||
331 | #endif | 331 | #endif |
332 | #ifdef CONFIG_ARM_LPAE | 332 | #ifdef CONFIG_ARM_LPAE |
333 | sub r4, r4, #0x1000 @ point to the PGD table | 333 | sub r4, r4, #0x1000 @ point to the PGD table |
334 | mov r4, r4, lsr #ARCH_PGD_SHIFT | ||
334 | #endif | 335 | #endif |
335 | mov pc, lr | 336 | mov pc, lr |
336 | ENDPROC(__create_page_tables) | 337 | ENDPROC(__create_page_tables) |
@@ -342,7 +343,7 @@ __turn_mmu_on_loc: | |||
342 | .long __turn_mmu_on_end | 343 | .long __turn_mmu_on_end |
343 | 344 | ||
344 | #if defined(CONFIG_SMP) | 345 | #if defined(CONFIG_SMP) |
345 | __CPUINIT | 346 | .text |
346 | ENTRY(secondary_startup) | 347 | ENTRY(secondary_startup) |
347 | /* | 348 | /* |
348 | * Common entry point for secondary CPUs. | 349 | * Common entry point for secondary CPUs. |
@@ -408,7 +409,7 @@ __secondary_data: | |||
408 | * r0 = cp#15 control register | 409 | * r0 = cp#15 control register |
409 | * r1 = machine ID | 410 | * r1 = machine ID |
410 | * r2 = atags or dtb pointer | 411 | * r2 = atags or dtb pointer |
411 | * r4 = page table pointer | 412 | * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) |
412 | * r9 = processor ID | 413 | * r9 = processor ID |
413 | * r13 = *virtual* address to jump to upon completion | 414 | * r13 = *virtual* address to jump to upon completion |
414 | */ | 415 | */ |
@@ -427,10 +428,7 @@ __enable_mmu: | |||
427 | #ifdef CONFIG_CPU_ICACHE_DISABLE | 428 | #ifdef CONFIG_CPU_ICACHE_DISABLE |
428 | bic r0, r0, #CR_I | 429 | bic r0, r0, #CR_I |
429 | #endif | 430 | #endif |
430 | #ifdef CONFIG_ARM_LPAE | 431 | #ifndef CONFIG_ARM_LPAE |
431 | mov r5, #0 | ||
432 | mcrr p15, 0, r4, r5, c2 @ load TTBR0 | ||
433 | #else | ||
434 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | 432 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ |
435 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | 433 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ |
436 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | 434 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..7b95de601357 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1020,7 +1020,7 @@ out_mdbgen: | |||
1020 | cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); | 1020 | cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); |
1021 | } | 1021 | } |
1022 | 1022 | ||
1023 | static int __cpuinit dbg_reset_notify(struct notifier_block *self, | 1023 | static int dbg_reset_notify(struct notifier_block *self, |
1024 | unsigned long action, void *cpu) | 1024 | unsigned long action, void *cpu) |
1025 | { | 1025 | { |
1026 | if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) | 1026 | if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) |
@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self, | |||
1029 | return NOTIFY_OK; | 1029 | return NOTIFY_OK; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | static struct notifier_block __cpuinitdata dbg_reset_nb = { | 1032 | static struct notifier_block dbg_reset_nb = { |
1033 | .notifier_call = dbg_reset_notify, | 1033 | .notifier_call = dbg_reset_notify, |
1034 | }; | 1034 | }; |
1035 | 1035 | ||
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 1315c4ccfa56..797b1a6a4906 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode) | |||
56 | ldr \reg3, [\reg2] | 56 | ldr \reg3, [\reg2] |
57 | ldr \reg1, [\reg2, \reg3] | 57 | ldr \reg1, [\reg2, \reg3] |
58 | cmp \mode, \reg1 @ matches primary CPU boot mode? | 58 | cmp \mode, \reg1 @ matches primary CPU boot mode? |
59 | orrne r7, r7, #BOOT_CPU_MODE_MISMATCH | 59 | orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH |
60 | strne r7, [r5, r6] @ record what happened and give up | 60 | strne \reg1, [\reg2, \reg3] @ record what happened and give up |
61 | .endm | 61 | .endm |
62 | 62 | ||
63 | #else /* ZIMAGE */ | 63 | #else /* ZIMAGE */ |
@@ -153,6 +153,13 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE | |||
153 | mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL | 153 | mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL |
154 | orr r7, r7, #3 @ PL1PCEN | PL1PCTEN | 154 | orr r7, r7, #3 @ PL1PCEN | PL1PCTEN |
155 | mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL | 155 | mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL |
156 | mov r7, #0 | ||
157 | mcrr p15, 4, r7, r7, c14 @ CNTVOFF | ||
158 | |||
159 | @ Disable virtual timer in case it was counting | ||
160 | mrc p15, 0, r7, c14, c3, 1 @ CNTV_CTL | ||
161 | bic r7, #1 @ Clear ENABLE | ||
162 | mcr p15, 0, r7, c14, c3, 1 @ CNTV_CTL | ||
156 | 1: | 163 | 1: |
157 | #endif | 164 | #endif |
158 | 165 | ||
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 1e9be5d25e56..85c3fb6c93c2 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -288,24 +288,16 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
288 | 288 | ||
289 | if (strcmp(".ARM.exidx.init.text", secname) == 0) | 289 | if (strcmp(".ARM.exidx.init.text", secname) == 0) |
290 | maps[ARM_SEC_INIT].unw_sec = s; | 290 | maps[ARM_SEC_INIT].unw_sec = s; |
291 | else if (strcmp(".ARM.exidx.devinit.text", secname) == 0) | ||
292 | maps[ARM_SEC_DEVINIT].unw_sec = s; | ||
293 | else if (strcmp(".ARM.exidx", secname) == 0) | 291 | else if (strcmp(".ARM.exidx", secname) == 0) |
294 | maps[ARM_SEC_CORE].unw_sec = s; | 292 | maps[ARM_SEC_CORE].unw_sec = s; |
295 | else if (strcmp(".ARM.exidx.exit.text", secname) == 0) | 293 | else if (strcmp(".ARM.exidx.exit.text", secname) == 0) |
296 | maps[ARM_SEC_EXIT].unw_sec = s; | 294 | maps[ARM_SEC_EXIT].unw_sec = s; |
297 | else if (strcmp(".ARM.exidx.devexit.text", secname) == 0) | ||
298 | maps[ARM_SEC_DEVEXIT].unw_sec = s; | ||
299 | else if (strcmp(".init.text", secname) == 0) | 295 | else if (strcmp(".init.text", secname) == 0) |
300 | maps[ARM_SEC_INIT].txt_sec = s; | 296 | maps[ARM_SEC_INIT].txt_sec = s; |
301 | else if (strcmp(".devinit.text", secname) == 0) | ||
302 | maps[ARM_SEC_DEVINIT].txt_sec = s; | ||
303 | else if (strcmp(".text", secname) == 0) | 297 | else if (strcmp(".text", secname) == 0) |
304 | maps[ARM_SEC_CORE].txt_sec = s; | 298 | maps[ARM_SEC_CORE].txt_sec = s; |
305 | else if (strcmp(".exit.text", secname) == 0) | 299 | else if (strcmp(".exit.text", secname) == 0) |
306 | maps[ARM_SEC_EXIT].txt_sec = s; | 300 | maps[ARM_SEC_EXIT].txt_sec = s; |
307 | else if (strcmp(".devexit.text", secname) == 0) | ||
308 | maps[ARM_SEC_DEVEXIT].txt_sec = s; | ||
309 | } | 301 | } |
310 | 302 | ||
311 | for (i = 0; i < ARM_SEC_MAX; i++) | 303 | for (i = 0; i < ARM_SEC_MAX; i++) |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8c3094d0f7b7..21f77906602c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) | |||
53 | static int | 53 | static int |
54 | armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) | 54 | armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) |
55 | { | 55 | { |
56 | int mapping = (*event_map)[config]; | 56 | int mapping; |
57 | |||
58 | if (config >= PERF_COUNT_HW_MAX) | ||
59 | return -ENOENT; | ||
60 | |||
61 | mapping = (*event_map)[config]; | ||
57 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; | 62 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
58 | } | 63 | } |
59 | 64 | ||
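The hunk above closes an out-of-bounds read: perf_event_attr.config is a user-supplied u64, so it must be range-checked before it indexes the fixed-size event map. A minimal sketch of the fixed pattern:

	#include <linux/errno.h>
	#include <linux/perf_event.h>		/* PERF_COUNT_HW_MAX */
	#define HW_OP_UNSUPPORTED 0xFFFF	/* as in asm/pmu.h of this era */

	static int map_hw_event(const unsigned map[PERF_COUNT_HW_MAX], u64 config)
	{
		if (config >= PERF_COUNT_HW_MAX)	/* reject before indexing */
			return -ENOENT;
		return map[config] == HW_OP_UNSUPPORTED ? -ENOENT : map[config];
	}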
@@ -569,6 +574,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
569 | return; | 574 | return; |
570 | } | 575 | } |
571 | 576 | ||
577 | perf_callchain_store(entry, regs->ARM_pc); | ||
572 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; | 578 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; |
573 | 579 | ||
574 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && | 580 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && |
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..aebe0e99c153 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -157,8 +157,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
157 | * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading | 157 | * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading |
158 | * junk values out of them. | 158 | * junk values out of them. |
159 | */ | 159 | */ |
160 | static int __cpuinit cpu_pmu_notify(struct notifier_block *b, | 160 | static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, |
161 | unsigned long action, void *hcpu) | 161 | void *hcpu) |
162 | { | 162 | { |
163 | if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) | 163 | if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) |
164 | return NOTIFY_DONE; | 164 | return NOTIFY_DONE; |
@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b, | |||
171 | return NOTIFY_OK; | 171 | return NOTIFY_OK; |
172 | } | 172 | } |
173 | 173 | ||
174 | static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { | 174 | static struct notifier_block cpu_pmu_hotplug_notifier = { |
175 | .notifier_call = cpu_pmu_notify, | 175 | .notifier_call = cpu_pmu_notify, |
176 | }; | 176 | }; |
177 | 177 | ||
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 6e8931ccf13e..536c85fe72a8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/hw_breakpoint.h> | 32 | #include <linux/hw_breakpoint.h> |
33 | #include <linux/cpuidle.h> | 33 | #include <linux/cpuidle.h> |
34 | #include <linux/leds.h> | 34 | #include <linux/leds.h> |
35 | #include <linux/reboot.h> | ||
35 | 36 | ||
36 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
37 | #include <asm/idmap.h> | 38 | #include <asm/idmap.h> |
@@ -39,6 +40,7 @@ | |||
39 | #include <asm/thread_notify.h> | 40 | #include <asm/thread_notify.h> |
40 | #include <asm/stacktrace.h> | 41 | #include <asm/stacktrace.h> |
41 | #include <asm/mach/time.h> | 42 | #include <asm/mach/time.h> |
43 | #include <asm/tls.h> | ||
42 | 44 | ||
43 | #ifdef CONFIG_CC_STACKPROTECTOR | 45 | #ifdef CONFIG_CC_STACKPROTECTOR |
44 | #include <linux/stackprotector.h> | 46 | #include <linux/stackprotector.h> |
@@ -112,7 +114,7 @@ void soft_restart(unsigned long addr) | |||
112 | BUG(); | 114 | BUG(); |
113 | } | 115 | } |
114 | 116 | ||
115 | static void null_restart(char mode, const char *cmd) | 117 | static void null_restart(enum reboot_mode reboot_mode, const char *cmd) |
116 | { | 118 | { |
117 | } | 119 | } |
118 | 120 | ||
@@ -122,7 +124,7 @@ static void null_restart(char mode, const char *cmd) | |||
122 | void (*pm_power_off)(void); | 124 | void (*pm_power_off)(void); |
123 | EXPORT_SYMBOL(pm_power_off); | 125 | EXPORT_SYMBOL(pm_power_off); |
124 | 126 | ||
125 | void (*arm_pm_restart)(char str, const char *cmd) = null_restart; | 127 | void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart; |
126 | EXPORT_SYMBOL_GPL(arm_pm_restart); | 128 | EXPORT_SYMBOL_GPL(arm_pm_restart); |
127 | 129 | ||
128 | /* | 130 | /* |
@@ -174,16 +176,6 @@ void arch_cpu_idle(void) | |||
174 | default_idle(); | 176 | default_idle(); |
175 | } | 177 | } |
176 | 178 | ||
177 | static char reboot_mode = 'h'; | ||
178 | |||
179 | int __init reboot_setup(char *str) | ||
180 | { | ||
181 | reboot_mode = str[0]; | ||
182 | return 1; | ||
183 | } | ||
184 | |||
185 | __setup("reboot=", reboot_setup); | ||
186 | |||
187 | /* | 179 | /* |
188 | * Called by kexec, immediately prior to machine_kexec(). | 180 | * Called by kexec, immediately prior to machine_kexec(). |
189 | * | 181 | * |
@@ -205,6 +197,7 @@ void machine_shutdown(void) | |||
205 | */ | 197 | */ |
206 | void machine_halt(void) | 198 | void machine_halt(void) |
207 | { | 199 | { |
200 | local_irq_disable(); | ||
208 | smp_send_stop(); | 201 | smp_send_stop(); |
209 | 202 | ||
210 | local_irq_disable(); | 203 | local_irq_disable(); |
@@ -219,6 +212,7 @@ void machine_halt(void) | |||
219 | */ | 212 | */ |
220 | void machine_power_off(void) | 213 | void machine_power_off(void) |
221 | { | 214 | { |
215 | local_irq_disable(); | ||
222 | smp_send_stop(); | 216 | smp_send_stop(); |
223 | 217 | ||
224 | if (pm_power_off) | 218 | if (pm_power_off) |
@@ -238,6 +232,7 @@ void machine_power_off(void) | |||
238 | */ | 232 | */ |
239 | void machine_restart(char *cmd) | 233 | void machine_restart(char *cmd) |
240 | { | 234 | { |
235 | local_irq_disable(); | ||
241 | smp_send_stop(); | 236 | smp_send_stop(); |
242 | 237 | ||
243 | arm_pm_restart(reboot_mode, cmd); | 238 | arm_pm_restart(reboot_mode, cmd); |
@@ -374,7 +369,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
374 | clear_ptrace_hw_breakpoint(p); | 369 | clear_ptrace_hw_breakpoint(p); |
375 | 370 | ||
376 | if (clone_flags & CLONE_SETTLS) | 371 | if (clone_flags & CLONE_SETTLS) |
377 | thread->tp_value = childregs->ARM_r3; | 372 | thread->tp_value[0] = childregs->ARM_r3; |
373 | thread->tp_value[1] = get_tpuser(); | ||
378 | 374 | ||
379 | thread_notify(THREAD_NOTIFY_COPY, thread); | 375 | thread_notify(THREAD_NOTIFY_COPY, thread); |
380 | 376 | ||
@@ -433,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
433 | } | 429 | } |
434 | 430 | ||
435 | #ifdef CONFIG_MMU | 431 | #ifdef CONFIG_MMU |
432 | #ifdef CONFIG_KUSER_HELPERS | ||
436 | /* | 433 | /* |
437 | * The vectors page is always readable from user space for the | 434 | * The vectors page is always readable from user space for the |
438 | * atomic helpers and the signal restart code. Insert it into the | 435 | * atomic helpers. Insert it into the gate_vma so that it is visible |
439 | * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. | 436 | * through ptrace and /proc/<pid>/mem. |
440 | */ | 437 | */ |
441 | static struct vm_area_struct gate_vma = { | 438 | static struct vm_area_struct gate_vma = { |
442 | .vm_start = 0xffff0000, | 439 | .vm_start = 0xffff0000, |
@@ -465,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr) | |||
465 | { | 462 | { |
466 | return in_gate_area(NULL, addr); | 463 | return in_gate_area(NULL, addr); |
467 | } | 464 | } |
465 | #define is_gate_vma(vma) ((vma) == &gate_vma) ||
466 | #else | ||
467 | #define is_gate_vma(vma) 0 | ||
468 | #endif | ||
468 | 469 | ||
469 | const char *arch_vma_name(struct vm_area_struct *vma) | 470 | const char *arch_vma_name(struct vm_area_struct *vma) |
470 | { | 471 | { |
471 | return (vma == &gate_vma) ? "[vectors]" : NULL; | 472 | return is_gate_vma(vma) ? "[vectors]" : |
473 | (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? | ||
474 | "[sigpage]" : NULL; | ||
475 | } | ||
476 | |||
477 | static struct page *signal_page; | ||
478 | extern struct page *get_signal_page(void); | ||
479 | |||
480 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
481 | { | ||
482 | struct mm_struct *mm = current->mm; | ||
483 | unsigned long addr; | ||
484 | int ret; | ||
485 | |||
486 | if (!signal_page) | ||
487 | signal_page = get_signal_page(); | ||
488 | if (!signal_page) | ||
489 | return -ENOMEM; | ||
490 | |||
491 | down_write(&mm->mmap_sem); | ||
492 | addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); | ||
493 | if (IS_ERR_VALUE(addr)) { | ||
494 | ret = addr; | ||
495 | goto up_fail; | ||
496 | } | ||
497 | |||
498 | ret = install_special_mapping(mm, addr, PAGE_SIZE, | ||
499 | VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, | ||
500 | &signal_page); | ||
501 | |||
502 | if (ret == 0) | ||
503 | mm->context.sigpage = addr; | ||
504 | |||
505 | up_fail: | ||
506 | up_write(&mm->mmap_sem); | ||
507 | return ret; | ||
472 | } | 508 | } |
473 | #endif | 509 | #endif |
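With these changes a 32-bit ARM process gains a per-mm "[sigpage]" mapping, while "[vectors]" remains visible only when CONFIG_KUSER_HELPERS keeps the gate VMA. A small sketch that makes the result observable from user space by filtering /proc/self/maps; on kernels without these patches it simply prints nothing.

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;
        /* print the special mappings named by arch_vma_name() above */
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "[sigpage]") || strstr(line, "[vectors]"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}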
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c index 36531643cc2c..46931880093d 100644 --- a/arch/arm/kernel/psci.c +++ b/arch/arm/kernel/psci.c | |||
@@ -158,7 +158,7 @@ static const struct of_device_id psci_of_match[] __initconst = { | |||
158 | {}, | 158 | {}, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | static int __init psci_init(void) | 161 | void __init psci_init(void) |
162 | { | 162 | { |
163 | struct device_node *np; | 163 | struct device_node *np; |
164 | const char *method; | 164 | const char *method; |
@@ -166,7 +166,7 @@ static int __init psci_init(void) | |||
166 | 166 | ||
167 | np = of_find_matching_node(NULL, psci_of_match); | 167 | np = of_find_matching_node(NULL, psci_of_match); |
168 | if (!np) | 168 | if (!np) |
169 | return 0; | 169 | return; |
170 | 170 | ||
171 | pr_info("probing function IDs from device-tree\n"); | 171 | pr_info("probing function IDs from device-tree\n"); |
172 | 172 | ||
@@ -206,6 +206,5 @@ static int __init psci_init(void) | |||
206 | 206 | ||
207 | out_put_node: | 207 | out_put_node: |
208 | of_node_put(np); | 208 | of_node_put(np); |
209 | return 0; | 209 | return; |
210 | } | 210 | } |
211 | early_initcall(psci_init); | ||
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c new file mode 100644 index 000000000000..70ded3fb42d9 --- /dev/null +++ b/arch/arm/kernel/psci_smp.c | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2012 ARM Limited | ||
12 | * | ||
13 | * Author: Will Deacon <will.deacon@arm.com> | ||
14 | */ | ||
15 | |||
16 | #include <linux/init.h> | ||
17 | #include <linux/irqchip/arm-gic.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/of.h> | ||
20 | |||
21 | #include <asm/psci.h> | ||
22 | #include <asm/smp_plat.h> | ||
23 | |||
24 | /* | ||
25 | * psci_smp assumes that the following is true about PSCI: | ||
26 | * | ||
27 | * cpu_suspend Suspend the execution on a CPU | ||
28 | * @state we don't currently describe affinity levels, so just pass 0. | ||
29 | * @entry_point the first instruction to be executed on return | ||
30 | * returns 0 success, < 0 on failure | ||
31 | * | ||
32 | * cpu_off Power down a CPU | ||
33 | * @state we don't currently describe affinity levels, so just pass 0. | ||
34 | * no return on successful call | ||
35 | * | ||
36 | * cpu_on Power up a CPU | ||
37 | * @cpuid cpuid of target CPU, as from MPIDR | ||
38 | * @entry_point the first instruction to be executed on return | ||
39 | * returns 0 success, < 0 on failure | ||
40 | * | ||
41 | * migrate Migrate the context to a different CPU | ||
42 | * @cpuid cpuid of target CPU, as from MPIDR | ||
43 | * returns 0 success, < 0 on failure | ||
44 | * | ||
45 | */ | ||
46 | |||
47 | extern void secondary_startup(void); | ||
48 | |||
49 | static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) | ||
50 | { | ||
51 | if (psci_ops.cpu_on) | ||
52 | return psci_ops.cpu_on(cpu_logical_map(cpu), | ||
53 | __pa(secondary_startup)); | ||
54 | return -ENODEV; | ||
55 | } | ||
56 | |||
57 | #ifdef CONFIG_HOTPLUG_CPU | ||
58 | void __ref psci_cpu_die(unsigned int cpu) | ||
59 | { | ||
60 | const struct psci_power_state ps = { | ||
61 | .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, | ||
62 | }; | ||
63 | |||
64 | if (psci_ops.cpu_off) | ||
65 | psci_ops.cpu_off(ps); | ||
66 | |||
67 | /* We should never return */ | ||
68 | panic("psci: cpu %d failed to shutdown\n", cpu); | ||
69 | } | ||
70 | #endif | ||
71 | |||
72 | bool __init psci_smp_available(void) | ||
73 | { | ||
74 | /* is cpu_on available at least? */ | ||
75 | return (psci_ops.cpu_on != NULL); | ||
76 | } | ||
77 | |||
78 | struct smp_operations __initdata psci_smp_ops = { | ||
79 | .smp_boot_secondary = psci_boot_secondary, | ||
80 | #ifdef CONFIG_HOTPLUG_CPU | ||
81 | .cpu_die = psci_cpu_die, | ||
82 | #endif | ||
83 | }; | ||
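The contract spelled out in the comment block above drives the new SMP ops: each hook is usable only if the firmware advertised the corresponding function, which is why psci_boot_secondary() checks the pointer before calling. A user-space model of that pattern, with stand-in types and names rather than the kernel's real ones.

#include <errno.h>
#include <stdio.h>

struct fake_psci_ops {
        int (*cpu_on)(unsigned long cpuid, unsigned long entry);
};

static int fw_cpu_on(unsigned long cpuid, unsigned long entry)
{
        printf("firmware: powering on CPU %#lx, entry %#lx\n", cpuid, entry);
        return 0;
}

static struct fake_psci_ops psci_ops;   /* empty until "probed" */

static int boot_secondary(unsigned long cpuid, unsigned long entry)
{
        if (psci_ops.cpu_on)
                return psci_ops.cpu_on(cpuid, entry);
        return -ENODEV;                 /* same fallback as above */
}

int main(void)
{
        printf("before probe: %d\n", boot_secondary(0x100, 0x80008000));
        psci_ops.cpu_on = fw_cpu_on;    /* "device tree" probe succeeded */
        printf("after probe:  %d\n", boot_secondary(0x100, 0x80008000));
        return 0;
}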
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 03deeffd9f6d..0dd3b79b15c3 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -849,7 +849,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
849 | #endif | 849 | #endif |
850 | 850 | ||
851 | case PTRACE_GET_THREAD_AREA: | 851 | case PTRACE_GET_THREAD_AREA: |
852 | ret = put_user(task_thread_info(child)->tp_value, | 852 | ret = put_user(task_thread_info(child)->tp_value[0], |
853 | datap); | 853 | datap); |
854 | break; | 854 | break; |
855 | 855 | ||
@@ -886,20 +886,12 @@ long arch_ptrace(struct task_struct *child, long request, | |||
886 | 886 | ||
887 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 887 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
888 | case PTRACE_GETHBPREGS: | 888 | case PTRACE_GETHBPREGS: |
889 | if (ptrace_get_breakpoints(child) < 0) | ||
890 | return -ESRCH; | ||
891 | |||
892 | ret = ptrace_gethbpregs(child, addr, | 889 | ret = ptrace_gethbpregs(child, addr, |
893 | (unsigned long __user *)data); | 890 | (unsigned long __user *)data); |
894 | ptrace_put_breakpoints(child); | ||
895 | break; | 891 | break; |
896 | case PTRACE_SETHBPREGS: | 892 | case PTRACE_SETHBPREGS: |
897 | if (ptrace_get_breakpoints(child) < 0) | ||
898 | return -ESRCH; | ||
899 | |||
900 | ret = ptrace_sethbpregs(child, addr, | 893 | ret = ptrace_sethbpregs(child, addr, |
901 | (unsigned long __user *)data); | 894 | (unsigned long __user *)data); |
902 | ptrace_put_breakpoints(child); | ||
903 | break; | 895 | break; |
904 | #endif | 896 | #endif |
905 | 897 | ||
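The ptrace hunk above now serves PTRACE_GET_THREAD_AREA from tp_value[0], the user-visible TLS base. A hedged sketch of a tracer using that request; the fallback request number 22 is believed to match arch/arm/include/uapi/asm/ptrace.h but should be verified, and error handling is minimal.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 22       /* ARM request number (assumed) */
#endif

int main(void)
{
        unsigned long tls = 0;
        pid_t pid = fork();

        if (pid == 0) {                 /* child: stop and wait */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                _exit(0);
        }
        waitpid(pid, NULL, 0);          /* child is now stopped */
        if (ptrace(PTRACE_GET_THREAD_AREA, pid, NULL, &tls) == 0)
                printf("child TLS base: %#lx\n", tls);
        else
                perror("PTRACE_GET_THREAD_AREA");
        ptrace(PTRACE_KILL, pid, NULL, NULL);
        return 0;
}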
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c deleted file mode 100644 index e8edcaa0e432..000000000000 --- a/arch/arm/kernel/sched_clock.c +++ /dev/null | |||
@@ -1,217 +0,0 @@ | |||
1 | /* | ||
2 | * sched_clock.c: support for extending counters to full 64-bit ns counter | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/clocksource.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/jiffies.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/moduleparam.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/syscore_ops.h> | ||
15 | #include <linux/timer.h> | ||
16 | |||
17 | #include <asm/sched_clock.h> | ||
18 | |||
19 | struct clock_data { | ||
20 | u64 epoch_ns; | ||
21 | u32 epoch_cyc; | ||
22 | u32 epoch_cyc_copy; | ||
23 | unsigned long rate; | ||
24 | u32 mult; | ||
25 | u32 shift; | ||
26 | bool suspended; | ||
27 | bool needs_suspend; | ||
28 | }; | ||
29 | |||
30 | static void sched_clock_poll(unsigned long wrap_ticks); | ||
31 | static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0); | ||
32 | static int irqtime = -1; | ||
33 | |||
34 | core_param(irqtime, irqtime, int, 0400); | ||
35 | |||
36 | static struct clock_data cd = { | ||
37 | .mult = NSEC_PER_SEC / HZ, | ||
38 | }; | ||
39 | |||
40 | static u32 __read_mostly sched_clock_mask = 0xffffffff; | ||
41 | |||
42 | static u32 notrace jiffy_sched_clock_read(void) | ||
43 | { | ||
44 | return (u32)(jiffies - INITIAL_JIFFIES); | ||
45 | } | ||
46 | |||
47 | static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; | ||
48 | |||
49 | static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) | ||
50 | { | ||
51 | return (cyc * mult) >> shift; | ||
52 | } | ||
53 | |||
54 | static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) | ||
55 | { | ||
56 | u64 epoch_ns; | ||
57 | u32 epoch_cyc; | ||
58 | |||
59 | if (cd.suspended) | ||
60 | return cd.epoch_ns; | ||
61 | |||
62 | /* | ||
63 | * Load the epoch_cyc and epoch_ns atomically. We do this by | ||
64 | * ensuring that we always write epoch_cyc, epoch_ns and | ||
65 | * epoch_cyc_copy in strict order, and read them in strict order. | ||
66 | * If epoch_cyc and epoch_cyc_copy are not equal, then we're in | ||
67 | * the middle of an update, and we should repeat the load. | ||
68 | */ | ||
69 | do { | ||
70 | epoch_cyc = cd.epoch_cyc; | ||
71 | smp_rmb(); | ||
72 | epoch_ns = cd.epoch_ns; | ||
73 | smp_rmb(); | ||
74 | } while (epoch_cyc != cd.epoch_cyc_copy); | ||
75 | |||
76 | return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift); | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Atomically update the sched_clock epoch. | ||
81 | */ | ||
82 | static void notrace update_sched_clock(void) | ||
83 | { | ||
84 | unsigned long flags; | ||
85 | u32 cyc; | ||
86 | u64 ns; | ||
87 | |||
88 | cyc = read_sched_clock(); | ||
89 | ns = cd.epoch_ns + | ||
90 | cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, | ||
91 | cd.mult, cd.shift); | ||
92 | /* | ||
93 | * Write epoch_cyc and epoch_ns in a way that the update is | ||
94 | * detectable in cyc_to_sched_clock(). ||
95 | */ | ||
96 | raw_local_irq_save(flags); | ||
97 | cd.epoch_cyc_copy = cyc; | ||
98 | smp_wmb(); | ||
99 | cd.epoch_ns = ns; | ||
100 | smp_wmb(); | ||
101 | cd.epoch_cyc = cyc; | ||
102 | raw_local_irq_restore(flags); | ||
103 | } | ||
104 | |||
105 | static void sched_clock_poll(unsigned long wrap_ticks) | ||
106 | { | ||
107 | mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks)); | ||
108 | update_sched_clock(); | ||
109 | } | ||
110 | |||
111 | void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) | ||
112 | { | ||
113 | unsigned long r, w; | ||
114 | u64 res, wrap; | ||
115 | char r_unit; | ||
116 | |||
117 | if (cd.rate > rate) | ||
118 | return; | ||
119 | |||
120 | BUG_ON(bits > 32); | ||
121 | WARN_ON(!irqs_disabled()); | ||
122 | read_sched_clock = read; | ||
123 | sched_clock_mask = (1 << bits) - 1; | ||
124 | cd.rate = rate; | ||
125 | |||
126 | /* calculate the mult/shift to convert counter ticks to ns. */ | ||
127 | clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0); | ||
128 | |||
129 | r = rate; | ||
130 | if (r >= 4000000) { | ||
131 | r /= 1000000; | ||
132 | r_unit = 'M'; | ||
133 | } else if (r >= 1000) { | ||
134 | r /= 1000; | ||
135 | r_unit = 'k'; | ||
136 | } else | ||
137 | r_unit = ' '; | ||
138 | |||
139 | /* calculate how many ns until we wrap */ | ||
140 | wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift); | ||
141 | do_div(wrap, NSEC_PER_MSEC); | ||
142 | w = wrap; | ||
143 | |||
144 | /* calculate the ns resolution of this counter */ | ||
145 | res = cyc_to_ns(1ULL, cd.mult, cd.shift); | ||
146 | pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n", | ||
147 | bits, r, r_unit, res, w); | ||
148 | |||
149 | /* | ||
150 | * Start the timer to keep sched_clock() properly updated and | ||
151 | * set the initial epoch. ||
152 | */ | ||
153 | sched_clock_timer.data = msecs_to_jiffies(w - (w / 10)); | ||
154 | update_sched_clock(); | ||
155 | |||
156 | /* | ||
157 | * Ensure that sched_clock() starts off at 0ns | ||
158 | */ | ||
159 | cd.epoch_ns = 0; | ||
160 | |||
161 | /* Enable IRQ time accounting if we have a fast enough sched_clock */ | ||
162 | if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) | ||
163 | enable_sched_clock_irqtime(); | ||
164 | |||
165 | pr_debug("Registered %pF as sched_clock source\n", read); | ||
166 | } | ||
167 | |||
168 | static unsigned long long notrace sched_clock_32(void) | ||
169 | { | ||
170 | u32 cyc = read_sched_clock(); | ||
171 | return cyc_to_sched_clock(cyc, sched_clock_mask); | ||
172 | } | ||
173 | |||
174 | unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32; | ||
175 | |||
176 | unsigned long long notrace sched_clock(void) | ||
177 | { | ||
178 | return sched_clock_func(); | ||
179 | } | ||
180 | |||
181 | void __init sched_clock_postinit(void) | ||
182 | { | ||
183 | /* | ||
184 | * If no sched_clock function has been provided at that point, | ||
185 | * make it the final one. ||
186 | */ | ||
187 | if (read_sched_clock == jiffy_sched_clock_read) | ||
188 | setup_sched_clock(jiffy_sched_clock_read, 32, HZ); | ||
189 | |||
190 | sched_clock_poll(sched_clock_timer.data); | ||
191 | } | ||
192 | |||
193 | static int sched_clock_suspend(void) | ||
194 | { | ||
195 | sched_clock_poll(sched_clock_timer.data); | ||
196 | cd.suspended = true; | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static void sched_clock_resume(void) | ||
201 | { | ||
202 | cd.epoch_cyc = read_sched_clock(); | ||
203 | cd.epoch_cyc_copy = cd.epoch_cyc; | ||
204 | cd.suspended = false; | ||
205 | } | ||
206 | |||
207 | static struct syscore_ops sched_clock_ops = { | ||
208 | .suspend = sched_clock_suspend, | ||
209 | .resume = sched_clock_resume, | ||
210 | }; | ||
211 | |||
212 | static int __init sched_clock_syscore_init(void) | ||
213 | { | ||
214 | register_syscore_ops(&sched_clock_ops); | ||
215 | return 0; | ||
216 | } | ||
217 | device_initcall(sched_clock_syscore_init); | ||
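The file removed above (its role passes to generic code) extends a narrow free-running counter into a 64-bit nanosecond clock by keeping an epoch pair and re-reading it until epoch_cyc matches epoch_cyc_copy. A stand-alone model of that algorithm with a worked wrap-around case; plain variables stand in for the smp_rmb()/smp_wmb() pairs, so this illustrates the arithmetic, not the concurrency.

#include <stdint.h>
#include <stdio.h>

static struct {
        uint64_t epoch_ns;
        uint32_t epoch_cyc;
        uint32_t epoch_cyc_copy;
        uint32_t mult, shift;
} cd = { .mult = 1000, .shift = 0 };    /* 1 MHz counter: 1000 ns/tick */

static const uint32_t mask = 0xffffffff;        /* full 32-bit counter */

static uint64_t cyc_to_ns(uint64_t cyc)
{
        return (cyc * cd.mult) >> cd.shift;
}

static uint64_t sched_clock_model(uint32_t cyc)
{
        uint64_t epoch_ns;
        uint32_t epoch_cyc;

        do {                            /* retry across a torn update */
                epoch_cyc = cd.epoch_cyc;
                epoch_ns = cd.epoch_ns;
        } while (epoch_cyc != cd.epoch_cyc_copy);

        return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask);
}

int main(void)
{
        /* epoch taken 100 ticks before the counter wrapped ... */
        cd.epoch_cyc = cd.epoch_cyc_copy = 0xffffff9cu;
        cd.epoch_ns = 4294967196000ULL;
        /* ... so a read 50 ticks after the wrap still advances by 150000 ns */
        printf("%llu ns\n", (unsigned long long)sched_clock_model(50));
        return 0;
}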
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index b4b1d397592b..afc2489ee13b 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/cputype.h> | 37 | #include <asm/cputype.h> |
38 | #include <asm/elf.h> | 38 | #include <asm/elf.h> |
39 | #include <asm/procinfo.h> | 39 | #include <asm/procinfo.h> |
40 | #include <asm/psci.h> | ||
40 | #include <asm/sections.h> | 41 | #include <asm/sections.h> |
41 | #include <asm/setup.h> | 42 | #include <asm/setup.h> |
42 | #include <asm/smp_plat.h> | 43 | #include <asm/smp_plat.h> |
@@ -73,7 +74,7 @@ __setup("fpe=", fpe_setup); | |||
73 | 74 | ||
74 | extern void paging_init(struct machine_desc *desc); | 75 | extern void paging_init(struct machine_desc *desc); |
75 | extern void sanity_check_meminfo(void); | 76 | extern void sanity_check_meminfo(void); |
76 | extern void reboot_setup(char *str); | 77 | extern enum reboot_mode reboot_mode; |
77 | extern void setup_dma_zone(struct machine_desc *desc); | 78 | extern void setup_dma_zone(struct machine_desc *desc); |
78 | 79 | ||
79 | unsigned int processor_id; | 80 | unsigned int processor_id; |
@@ -128,7 +129,9 @@ struct stack { | |||
128 | u32 und[3]; | 129 | u32 und[3]; |
129 | } ____cacheline_aligned; | 130 | } ____cacheline_aligned; |
130 | 131 | ||
132 | #ifndef CONFIG_CPU_V7M | ||
131 | static struct stack stacks[NR_CPUS]; | 133 | static struct stack stacks[NR_CPUS]; |
134 | #endif | ||
132 | 135 | ||
133 | char elf_platform[ELF_PLATFORM_SIZE]; | 136 | char elf_platform[ELF_PLATFORM_SIZE]; |
134 | EXPORT_SYMBOL(elf_platform); | 137 | EXPORT_SYMBOL(elf_platform); |
@@ -207,7 +210,7 @@ static const char *proc_arch[] = { | |||
207 | "5TEJ", | 210 | "5TEJ", |
208 | "6TEJ", | 211 | "6TEJ", |
209 | "7", | 212 | "7", |
210 | "?(11)", | 213 | "7M", |
211 | "?(12)", | 214 | "?(12)", |
212 | "?(13)", | 215 | "?(13)", |
213 | "?(14)", | 216 | "?(14)", |
@@ -216,6 +219,12 @@ static const char *proc_arch[] = { | |||
216 | "?(17)", | 219 | "?(17)", |
217 | }; | 220 | }; |
218 | 221 | ||
222 | #ifdef CONFIG_CPU_V7M | ||
223 | static int __get_cpu_architecture(void) | ||
224 | { | ||
225 | return CPU_ARCH_ARMv7M; | ||
226 | } | ||
227 | #else | ||
219 | static int __get_cpu_architecture(void) | 228 | static int __get_cpu_architecture(void) |
220 | { | 229 | { |
221 | int cpu_arch; | 230 | int cpu_arch; |
@@ -248,6 +257,7 @@ static int __get_cpu_architecture(void) | |||
248 | 257 | ||
249 | return cpu_arch; | 258 | return cpu_arch; |
250 | } | 259 | } |
260 | #endif | ||
251 | 261 | ||
252 | int __pure cpu_architecture(void) | 262 | int __pure cpu_architecture(void) |
253 | { | 263 | { |
@@ -293,7 +303,9 @@ static void __init cacheid_init(void) | |||
293 | { | 303 | { |
294 | unsigned int arch = cpu_architecture(); | 304 | unsigned int arch = cpu_architecture(); |
295 | 305 | ||
296 | if (arch >= CPU_ARCH_ARMv6) { | 306 | if (arch == CPU_ARCH_ARMv7M) { |
307 | cacheid = 0; | ||
308 | } else if (arch >= CPU_ARCH_ARMv6) { | ||
297 | unsigned int cachetype = read_cpuid_cachetype(); | 309 | unsigned int cachetype = read_cpuid_cachetype(); |
298 | if ((cachetype & (7 << 29)) == 4 << 29) { | 310 | if ((cachetype & (7 << 29)) == 4 << 29) { |
299 | /* ARMv7 register format */ | 311 | /* ARMv7 register format */ |
@@ -355,7 +367,7 @@ void __init early_print(const char *str, ...) | |||
355 | 367 | ||
356 | static void __init cpuid_init_hwcaps(void) | 368 | static void __init cpuid_init_hwcaps(void) |
357 | { | 369 | { |
358 | unsigned int divide_instrs; | 370 | unsigned int divide_instrs, vmsa; |
359 | 371 | ||
360 | if (cpu_architecture() < CPU_ARCH_ARMv7) | 372 | if (cpu_architecture() < CPU_ARCH_ARMv7) |
361 | return; | 373 | return; |
@@ -368,6 +380,11 @@ static void __init cpuid_init_hwcaps(void) | |||
368 | case 1: | 380 | case 1: |
369 | elf_hwcap |= HWCAP_IDIVT; | 381 | elf_hwcap |= HWCAP_IDIVT; |
370 | } | 382 | } |
383 | |||
384 | /* LPAE implies atomic ldrd/strd instructions */ | ||
385 | vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; | ||
386 | if (vmsa >= 5) | ||
387 | elf_hwcap |= HWCAP_LPAE; | ||
371 | } | 388 | } |
372 | 389 | ||
373 | static void __init feat_v6_fixup(void) | 390 | static void __init feat_v6_fixup(void) |
@@ -392,6 +409,7 @@ static void __init feat_v6_fixup(void) | |||
392 | */ | 409 | */ |
393 | void notrace cpu_init(void) | 410 | void notrace cpu_init(void) |
394 | { | 411 | { |
412 | #ifndef CONFIG_CPU_V7M | ||
395 | unsigned int cpu = smp_processor_id(); | 413 | unsigned int cpu = smp_processor_id(); |
396 | struct stack *stk = &stacks[cpu]; | 414 | struct stack *stk = &stacks[cpu]; |
397 | 415 | ||
@@ -442,6 +460,7 @@ void notrace cpu_init(void) | |||
442 | "I" (offsetof(struct stack, und[0])), | 460 | "I" (offsetof(struct stack, und[0])), |
443 | PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) | 461 | PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) |
444 | : "r14"); | 462 | : "r14"); |
463 | #endif | ||
445 | } | 464 | } |
446 | 465 | ||
447 | u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; | 466 | u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; |
@@ -456,9 +475,82 @@ void __init smp_setup_processor_id(void) | |||
456 | for (i = 1; i < nr_cpu_ids; ++i) | 475 | for (i = 1; i < nr_cpu_ids; ++i) |
457 | cpu_logical_map(i) = i == cpu ? 0 : i; | 476 | cpu_logical_map(i) = i == cpu ? 0 : i; |
458 | 477 | ||
478 | /* | ||
479 | * clear __my_cpu_offset on boot CPU to avoid a hang caused ||
480 | * by using percpu variables early; for example, lockdep ||
481 | * will access a percpu variable inside lock_release ||
482 | */ | ||
483 | set_my_cpu_offset(0); | ||
484 | |||
459 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); | 485 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); |
460 | } | 486 | } |
461 | 487 | ||
488 | struct mpidr_hash mpidr_hash; | ||
489 | #ifdef CONFIG_SMP | ||
490 | /** | ||
491 | * smp_build_mpidr_hash - Pre-compute shifts required at each affinity | ||
492 | * level in order to build a linear index from an | ||
493 | * MPIDR value. Resulting algorithm is a collision | ||
494 | * free hash carried out through shifting and ORing | ||
495 | */ | ||
496 | static void __init smp_build_mpidr_hash(void) | ||
497 | { | ||
498 | u32 i, affinity; | ||
499 | u32 fs[3], bits[3], ls, mask = 0; | ||
500 | /* | ||
501 | * Pre-scan the list of MPIDRS and filter out bits that do | ||
502 | * not contribute to affinity levels, ie they never toggle. | ||
503 | */ | ||
504 | for_each_possible_cpu(i) | ||
505 | mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); | ||
506 | pr_debug("mask of set bits 0x%x\n", mask); | ||
507 | /* | ||
508 | * Find and stash the last and first bit set at all affinity levels to | ||
509 | * check how many bits are required to represent them. | ||
510 | */ | ||
511 | for (i = 0; i < 3; i++) { | ||
512 | affinity = MPIDR_AFFINITY_LEVEL(mask, i); | ||
513 | /* | ||
514 | * Find the MSB and LSB positions ||
515 | * to determine how many bits are required | ||
516 | * to express the affinity level. | ||
517 | */ | ||
518 | ls = fls(affinity); | ||
519 | fs[i] = affinity ? ffs(affinity) - 1 : 0; | ||
520 | bits[i] = ls - fs[i]; | ||
521 | } | ||
522 | /* | ||
523 | * An index can be created from the MPIDR by isolating the | ||
524 | * significant bits at each affinity level and by shifting | ||
525 | * them in order to compress the 24-bit value space to a ||
526 | * smaller set of values. This is equivalent to hashing ||
527 | * the MPIDR through shifting and ORing. It is a collision free | ||
528 | * hash though not minimal since some levels might contain a number | ||
529 | * of CPUs that is not an exact power of 2 and their bit | ||
530 | * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}. | ||
531 | */ | ||
532 | mpidr_hash.shift_aff[0] = fs[0]; | ||
533 | mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0]; | ||
534 | mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] - | ||
535 | (bits[1] + bits[0]); | ||
536 | mpidr_hash.mask = mask; | ||
537 | mpidr_hash.bits = bits[2] + bits[1] + bits[0]; | ||
538 | pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n", | ||
539 | mpidr_hash.shift_aff[0], | ||
540 | mpidr_hash.shift_aff[1], | ||
541 | mpidr_hash.shift_aff[2], | ||
542 | mpidr_hash.mask, | ||
543 | mpidr_hash.bits); | ||
544 | /* | ||
545 | * 4x is an arbitrary value used to warn on a hash table much bigger | ||
546 | * than expected on most systems. | ||
547 | */ | ||
548 | if (mpidr_hash_size() > 4 * num_possible_cpus()) | ||
549 | pr_warn("Large number of MPIDR hash buckets detected\n"); | ||
550 | sync_cache_w(&mpidr_hash); | ||
551 | } | ||
552 | #endif | ||
553 | |||
462 | static void __init setup_processor(void) | 554 | static void __init setup_processor(void) |
463 | { | 555 | { |
464 | struct proc_info_list *list; | 556 | struct proc_info_list *list; |
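The hash construction in smp_build_mpidr_hash() above is easier to follow with concrete numbers. A user-space rerun of the same arithmetic, assuming 8-bit affinity fields (MPIDR_LEVEL_BITS == 8) and a sample topology of two clusters with two CPUs each, which should compress to indices 0..3 in a 4-entry table.

#include <stdio.h>

#define LEVEL_BITS 8
#define AFF(m, l) (((m) >> ((l) * LEVEL_BITS)) & 0xff)

static unsigned shift_aff[3], hash_bits;

static unsigned fls32(unsigned x)       /* highest set bit, 1-based */
{
        unsigned r = 0;
        for (; x; x >>= 1)
                r++;
        return r;
}

static unsigned ffs32(unsigned x)       /* lowest set bit, 1-based */
{
        unsigned r = 1;
        if (!x)
                return 0;
        for (; !(x & 1); x >>= 1)
                r++;
        return r;
}

static unsigned build_hash(const unsigned *mpidr, int n)
{
        unsigned mask = 0, fs[3], bits[3];
        int i;

        for (i = 0; i < n; i++)         /* bits that ever toggle */
                mask |= mpidr[i] ^ mpidr[0];
        for (i = 0; i < 3; i++) {
                unsigned aff = AFF(mask, i);
                fs[i] = aff ? ffs32(aff) - 1 : 0;
                bits[i] = fls32(aff) - fs[i];
        }
        shift_aff[0] = fs[0];
        shift_aff[1] = LEVEL_BITS + fs[1] - bits[0];
        shift_aff[2] = 2 * LEVEL_BITS + fs[2] - (bits[1] + bits[0]);
        hash_bits = bits[0] + bits[1] + bits[2];
        return mask;
}

static unsigned hash(unsigned mpidr, unsigned mask)
{
        unsigned m = mpidr & mask;      /* as in compute_mpidr_hash */

        return ((m & 0xff) >> shift_aff[0]) |
               ((m & 0xff00) >> shift_aff[1]) |
               ((m & 0xff0000) >> shift_aff[2]);
}

int main(void)
{
        unsigned mpidr[] = { 0x000, 0x001, 0x100, 0x101 };
        unsigned mask = build_hash(mpidr, 4);
        int i;

        for (i = 0; i < 4; i++)         /* prints indices 0..3 */
                printf("MPIDR 0x%03x -> %u\n", mpidr[i], hash(mpidr[i], mask));
        printf("hash table: %u entries\n", 1u << hash_bits);
        return 0;
}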
@@ -744,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b) | |||
744 | void __init hyp_mode_check(void) | 836 | void __init hyp_mode_check(void) |
745 | { | 837 | { |
746 | #ifdef CONFIG_ARM_VIRT_EXT | 838 | #ifdef CONFIG_ARM_VIRT_EXT |
839 | sync_boot_mode(); | ||
840 | |||
747 | if (is_hyp_mode_available()) { | 841 | if (is_hyp_mode_available()) { |
748 | pr_info("CPU: All CPU(s) started in HYP mode.\n"); | 842 | pr_info("CPU: All CPU(s) started in HYP mode.\n"); |
749 | pr_info("CPU: Virtualization extensions available.\n"); | 843 | pr_info("CPU: Virtualization extensions available.\n"); |
@@ -769,8 +863,8 @@ void __init setup_arch(char **cmdline_p) | |||
769 | 863 | ||
770 | setup_dma_zone(mdesc); | 864 | setup_dma_zone(mdesc); |
771 | 865 | ||
772 | if (mdesc->restart_mode) | 866 | if (mdesc->reboot_mode != REBOOT_HARD) |
773 | reboot_setup(&mdesc->restart_mode); | 867 | reboot_mode = mdesc->reboot_mode; |
774 | 868 | ||
775 | init_mm.start_code = (unsigned long) _text; | 869 | init_mm.start_code = (unsigned long) _text; |
776 | init_mm.end_code = (unsigned long) _etext; | 870 | init_mm.end_code = (unsigned long) _etext; |
@@ -796,10 +890,17 @@ void __init setup_arch(char **cmdline_p) | |||
796 | unflatten_device_tree(); | 890 | unflatten_device_tree(); |
797 | 891 | ||
798 | arm_dt_init_cpu_maps(); | 892 | arm_dt_init_cpu_maps(); |
893 | psci_init(); | ||
799 | #ifdef CONFIG_SMP | 894 | #ifdef CONFIG_SMP |
800 | if (is_smp()) { | 895 | if (is_smp()) { |
801 | smp_set_ops(mdesc->smp); | 896 | if (!mdesc->smp_init || !mdesc->smp_init()) { |
897 | if (psci_smp_available()) | ||
898 | smp_set_ops(&psci_smp_ops); | ||
899 | else if (mdesc->smp) | ||
900 | smp_set_ops(mdesc->smp); | ||
901 | } | ||
802 | smp_init_cpus(); | 902 | smp_init_cpus(); |
903 | smp_build_mpidr_hash(); | ||
803 | } | 904 | } |
804 | #endif | 905 | #endif |
805 | 906 | ||
@@ -872,6 +973,8 @@ static const char *hwcap_str[] = { | |||
872 | "vfpv4", | 973 | "vfpv4", |
873 | "idiva", | 974 | "idiva", |
874 | "idivt", | 975 | "idivt", |
976 | "vfpd32", | ||
977 | "lpae", | ||
875 | NULL | 978 | NULL |
876 | }; | 979 | }; |
877 | 980 | ||
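The two new hwcap_str entries surface as "vfpd32" and "lpae" in /proc/cpuinfo, and the same bits reach user space through the ELF auxiliary vector, since the string array is indexed by bit number. A sketch that queries them on ARM via getauxval(); the bit positions follow the table order and are believed to match HWCAP_VFPD32 and HWCAP_LPAE in the uapi header, but verify against asm/hwcap.h.

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_VFPD32_BIT (1UL << 19)    /* assumed, see lead-in */
#define HWCAP_LPAE_BIT   (1UL << 20)

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("vfpd32: %s\n", (hwcap & HWCAP_VFPD32_BIT) ? "yes" : "no");
        printf("lpae:   %s\n", (hwcap & HWCAP_LPAE_BIT) ? "yes" : "no");
        return 0;
}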
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 296786bdbb73..ab3304225272 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/random.h> | ||
11 | #include <linux/signal.h> | 12 | #include <linux/signal.h> |
12 | #include <linux/personality.h> | 13 | #include <linux/personality.h> |
13 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
@@ -15,12 +16,11 @@ | |||
15 | 16 | ||
16 | #include <asm/elf.h> | 17 | #include <asm/elf.h> |
17 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/traps.h> | ||
18 | #include <asm/ucontext.h> | 20 | #include <asm/ucontext.h> |
19 | #include <asm/unistd.h> | 21 | #include <asm/unistd.h> |
20 | #include <asm/vfp.h> | 22 | #include <asm/vfp.h> |
21 | 23 | ||
22 | #include "signal.h" | ||
23 | |||
24 | /* | 24 | /* |
25 | * For ARM syscalls, we encode the syscall number into the instruction. | 25 | * For ARM syscalls, we encode the syscall number into the instruction. |
26 | */ | 26 | */ |
@@ -40,11 +40,13 @@ | |||
40 | #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) | 40 | #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) |
41 | #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) | 41 | #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) |
42 | 42 | ||
43 | const unsigned long sigreturn_codes[7] = { | 43 | static const unsigned long sigreturn_codes[7] = { |
44 | MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, | 44 | MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, |
45 | MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, | 45 | MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static unsigned long signal_return_offset; | ||
49 | |||
48 | #ifdef CONFIG_CRUNCH | 50 | #ifdef CONFIG_CRUNCH |
49 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) | 51 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) |
50 | { | 52 | { |
@@ -392,17 +394,28 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
392 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) | 394 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
393 | idx += 3; | 395 | idx += 3; |
394 | 396 | ||
397 | /* | ||
398 | * Put the sigreturn code on the stack no matter which return | ||
399 | * mechanism we use in order to remain ABI compliant | ||
400 | */ | ||
395 | if (__put_user(sigreturn_codes[idx], rc) || | 401 | if (__put_user(sigreturn_codes[idx], rc) || |
396 | __put_user(sigreturn_codes[idx+1], rc+1)) | 402 | __put_user(sigreturn_codes[idx+1], rc+1)) |
397 | return 1; | 403 | return 1; |
398 | 404 | ||
405 | #ifdef CONFIG_MMU | ||
399 | if (cpsr & MODE32_BIT) { | 406 | if (cpsr & MODE32_BIT) { |
407 | struct mm_struct *mm = current->mm; | ||
408 | |||
400 | /* | 409 | /* |
401 | * 32-bit code can use the new high-page | 410 | * 32-bit code can use the signal return page |
402 | * signal return code support. | 411 | * except when the MPU has protected the vectors |
412 | * page from PL0 | ||
403 | */ | 413 | */ |
404 | retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; | 414 | retcode = mm->context.sigpage + signal_return_offset + |
405 | } else { | 415 | (idx << 2) + thumb; |
416 | } else | ||
417 | #endif | ||
418 | { | ||
406 | /* | 419 | /* |
407 | * Ensure that the instruction cache sees | 420 | * Ensure that the instruction cache sees |
408 | * the return code written onto the stack. | 421 | * the return code written onto the stack. |
@@ -603,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |||
603 | } while (thread_flags & _TIF_WORK_MASK); | 616 | } while (thread_flags & _TIF_WORK_MASK); |
604 | return 0; | 617 | return 0; |
605 | } | 618 | } |
619 | |||
620 | struct page *get_signal_page(void) | ||
621 | { | ||
622 | unsigned long ptr; | ||
623 | unsigned offset; | ||
624 | struct page *page; | ||
625 | void *addr; | ||
626 | |||
627 | page = alloc_pages(GFP_KERNEL, 0); | ||
628 | |||
629 | if (!page) | ||
630 | return NULL; | ||
631 | |||
632 | addr = page_address(page); | ||
633 | |||
634 | /* Give the signal return code some randomness */ | ||
635 | offset = 0x200 + (get_random_int() & 0x7fc); | ||
636 | signal_return_offset = offset; | ||
637 | |||
638 | /* | ||
639 | * Copy signal return handlers into the vector page, and | ||
640 | * set sigreturn to be a pointer to these. | ||
641 | */ | ||
642 | memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); | ||
643 | |||
644 | ptr = (unsigned long)addr + offset; | ||
645 | flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); | ||
646 | |||
647 | return page; | ||
648 | } | ||
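The offset arithmetic in get_signal_page() is worth checking: masking with 0x7fc keeps bits 2..10, so the trampoline lands word-aligned somewhere in 0x200..0x9fc, one of 512 slots, and the seven-word sigreturn_codes array always fits inside the 4 KiB page. A quick self-contained verification:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned r, min = ~0u, max = 0;

        for (r = 0; r <= 0xffff; r++) { /* more than covers the mask */
                unsigned offset = 0x200 + (r & 0x7fc);

                assert((offset & 3) == 0);      /* word aligned */
                assert(offset + 7 * 4 <= 4096); /* codes fit in the page */
                if (offset < min)
                        min = offset;
                if (offset > max)
                        max = offset;
        }
        printf("offset range: 0x%x..0x%x\n", min, max);
        return 0;
}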
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h deleted file mode 100644 index 5ff067b7c752..000000000000 --- a/arch/arm/kernel/signal.h +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/signal.h | ||
3 | * | ||
4 | * Copyright (C) 2005-2009 Russell King. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) | ||
11 | |||
12 | extern const unsigned long sigreturn_codes[7]; | ||
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 987dcf33415c..db1536b8b30b 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S | |||
@@ -7,6 +7,49 @@ | |||
7 | .text | 7 | .text |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Implementation of MPIDR hash algorithm through shifting | ||
11 | * and OR'ing. | ||
12 | * | ||
13 | * @dst: register containing hash result | ||
14 | * @rs0: register containing affinity level 0 bit shift | ||
15 | * @rs1: register containing affinity level 1 bit shift | ||
16 | * @rs2: register containing affinity level 2 bit shift | ||
17 | * @mpidr: register containing MPIDR value | ||
18 | * @mask: register containing MPIDR mask | ||
19 | * | ||
20 | * Pseudo C-code: | ||
21 | * | ||
22 | *u32 dst; | ||
23 | * | ||
24 | *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) { | ||
25 | * u32 aff0, aff1, aff2; | ||
26 | * u32 mpidr_masked = mpidr & mask; | ||
27 | * aff0 = mpidr_masked & 0xff; | ||
28 | * aff1 = mpidr_masked & 0xff00; | ||
29 | * aff2 = mpidr_masked & 0xff0000; | ||
30 | * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2); | ||
31 | *} | ||
32 | * Input registers: rs0, rs1, rs2, mpidr, mask | ||
33 | * Output register: dst | ||
34 | * Note: input and output registers must be disjoint register sets | ||
35 | (eg: a macro instance with mpidr = r1 and dst = r1 is invalid) | ||
36 | */ | ||
37 | .macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask | ||
38 | and \mpidr, \mpidr, \mask @ mask out MPIDR bits | ||
39 | and \dst, \mpidr, #0xff @ mask=aff0 | ||
40 | ARM( mov \dst, \dst, lsr \rs0 ) @ dst=aff0>>rs0 | ||
41 | THUMB( lsr \dst, \dst, \rs0 ) | ||
42 | and \mask, \mpidr, #0xff00 @ mask = aff1 | ||
43 | ARM( orr \dst, \dst, \mask, lsr \rs1 ) @ dst|=(aff1>>rs1) | ||
44 | THUMB( lsr \mask, \mask, \rs1 ) | ||
45 | THUMB( orr \dst, \dst, \mask ) | ||
46 | and \mask, \mpidr, #0xff0000 @ mask = aff2 | ||
47 | ARM( orr \dst, \dst, \mask, lsr \rs2 ) @ dst|=(aff2>>rs2) | ||
48 | THUMB( lsr \mask, \mask, \rs2 ) | ||
49 | THUMB( orr \dst, \dst, \mask ) | ||
50 | .endm | ||
51 | |||
52 | /* | ||
10 | * Save CPU state for a suspend. This saves the CPU general purpose | 53 | * Save CPU state for a suspend. This saves the CPU general purpose |
11 | * registers, and allocates space on the kernel stack to save the CPU | 54 | * registers, and allocates space on the kernel stack to save the CPU |
12 | * specific registers and some other data for resume. | 55 | * specific registers and some other data for resume. |
@@ -29,12 +72,18 @@ ENTRY(__cpu_suspend) | |||
29 | mov r1, r4 @ size of save block | 72 | mov r1, r4 @ size of save block |
30 | mov r2, r5 @ virtual SP | 73 | mov r2, r5 @ virtual SP |
31 | ldr r3, =sleep_save_sp | 74 | ldr r3, =sleep_save_sp |
32 | #ifdef CONFIG_SMP | 75 | ldr r3, [r3, #SLEEP_SAVE_SP_VIRT] |
33 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) | 76 | ALT_SMP(mrc p15, 0, r9, c0, c0, 5) |
34 | ALT_UP(mov lr, #0) | 77 | ALT_UP_B(1f) |
35 | and lr, lr, #15 | 78 | ldr r8, =mpidr_hash |
79 | /* | ||
80 | * This ldmia relies on the memory layout of ||
81 | * struct mpidr_hash. | ||
82 | */ | ||
83 | ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts | ||
84 | compute_mpidr_hash lr, r5, r6, r7, r9, r4 | ||
36 | add r3, r3, lr, lsl #2 | 85 | add r3, r3, lr, lsl #2 |
37 | #endif | 86 | 1: |
38 | bl __cpu_suspend_save | 87 | bl __cpu_suspend_save |
39 | adr lr, BSYM(cpu_suspend_abort) | 88 | adr lr, BSYM(cpu_suspend_abort) |
40 | ldmfd sp!, {r0, pc} @ call suspend fn | 89 | ldmfd sp!, {r0, pc} @ call suspend fn |
@@ -81,15 +130,23 @@ ENDPROC(cpu_resume_after_mmu) | |||
81 | .data | 130 | .data |
82 | .align | 131 | .align |
83 | ENTRY(cpu_resume) | 132 | ENTRY(cpu_resume) |
84 | #ifdef CONFIG_SMP | 133 | mov r1, #0 |
85 | adr r0, sleep_save_sp | 134 | ALT_SMP(mrc p15, 0, r0, c0, c0, 5) |
86 | ALT_SMP(mrc p15, 0, r1, c0, c0, 5) | 135 | ALT_UP_B(1f) |
87 | ALT_UP(mov r1, #0) | 136 | adr r2, mpidr_hash_ptr |
88 | and r1, r1, #15 | 137 | ldr r3, [r2] |
89 | ldr r0, [r0, r1, lsl #2] @ stack phys addr | 138 | add r2, r2, r3 @ r2 = struct mpidr_hash phys address |
90 | #else | 139 | /* |
91 | ldr r0, sleep_save_sp @ stack phys addr | 140 | * This ldmia relies on the memory layout of |
92 | #endif | 141 | * struct mpidr_hash. |
142 | */ | ||
143 | ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts | ||
144 | compute_mpidr_hash r1, r4, r5, r6, r0, r3 | ||
145 | 1: | ||
146 | adr r0, _sleep_save_sp | ||
147 | ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] | ||
148 | ldr r0, [r0, r1, lsl #2] | ||
149 | |||
93 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off | 150 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off |
94 | @ load phys pgd, stack, resume fn | 151 | @ load phys pgd, stack, resume fn |
95 | ARM( ldmia r0!, {r1, sp, pc} ) | 152 | ARM( ldmia r0!, {r1, sp, pc} ) |
@@ -98,7 +155,11 @@ THUMB( mov sp, r2 ) | |||
98 | THUMB( bx r3 ) | 155 | THUMB( bx r3 ) |
99 | ENDPROC(cpu_resume) | 156 | ENDPROC(cpu_resume) |
100 | 157 | ||
101 | sleep_save_sp: | 158 | .align 2 |
102 | .rept CONFIG_NR_CPUS | 159 | mpidr_hash_ptr: |
103 | .long 0 @ preserve stack phys ptr here | 160 | .long mpidr_hash - . @ mpidr_hash struct offset |
104 | .endr | 161 | |
162 | .type sleep_save_sp, #object | ||
163 | ENTRY(sleep_save_sp) | ||
164 | _sleep_save_sp: | ||
165 | .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp | ||
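Both ldmia sequences above work only if struct mpidr_hash stores the mask first with the three affinity shifts immediately after it, as the register comments indicate. A compile-time sketch of that dependency; the declaration is inferred from those comments rather than copied from the kernel header.

#include <stddef.h>

typedef unsigned int u32;

struct mpidr_hash {                     /* assumed layout, see lead-in */
        u32 mask;                       /* -> r4 on suspend, r3 on resume */
        u32 shift_aff[3];               /* -> r5-r7, r4-r6 */
        u32 bits;
};

/* ldmia r8, {r4-r7} consumes 16 consecutive bytes from the base */
_Static_assert(offsetof(struct mpidr_hash, mask) == 0,
               "mask must come first");
_Static_assert(offsetof(struct mpidr_hash, shift_aff) == sizeof(u32),
               "shifts must directly follow the mask");

int main(void)
{
        return 0;
}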
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5919eb451bb9..c2b4f8f0be9a 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <asm/smp_plat.h> | 45 | #include <asm/smp_plat.h> |
46 | #include <asm/virt.h> | 46 | #include <asm/virt.h> |
47 | #include <asm/mach/arch.h> | 47 | #include <asm/mach/arch.h> |
48 | #include <asm/mpu.h> | ||
48 | 49 | ||
49 | /* | 50 | /* |
50 | * as from 2.5, kernels no longer have an init_tasks structure | 51 | * as from 2.5, kernels no longer have an init_tasks structure |
@@ -57,7 +58,7 @@ struct secondary_data secondary_data; | |||
57 | * control for which core is the next to come out of the secondary | 58 | * control for which core is the next to come out of the secondary |
58 | * boot "holding pen" | 59 | * boot "holding pen" |
59 | */ | 60 | */ |
60 | volatile int __cpuinitdata pen_release = -1; | 61 | volatile int pen_release = -1; |
61 | 62 | ||
62 | enum ipi_msg_type { | 63 | enum ipi_msg_type { |
63 | IPI_WAKEUP, | 64 | IPI_WAKEUP, |
@@ -78,7 +79,14 @@ void __init smp_set_ops(struct smp_operations *ops) | |||
78 | smp_ops = *ops; | 79 | smp_ops = *ops; |
79 | }; | 80 | }; |
80 | 81 | ||
81 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | 82 | static unsigned long get_arch_pgd(pgd_t *pgd) |
83 | { | ||
84 | phys_addr_t pgdir = virt_to_phys(pgd); | ||
85 | BUG_ON(pgdir & ARCH_PGD_MASK); | ||
86 | return pgdir >> ARCH_PGD_SHIFT; | ||
87 | } | ||
88 | |||
89 | int __cpu_up(unsigned int cpu, struct task_struct *idle) | ||
82 | { | 90 | { |
83 | int ret; | 91 | int ret; |
84 | 92 | ||
@@ -87,8 +95,14 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
87 | * its stack and the page tables. | 95 | * its stack and the page tables. |
88 | */ | 96 | */ |
89 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; | 97 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; |
90 | secondary_data.pgdir = virt_to_phys(idmap_pgd); | 98 | #ifdef CONFIG_ARM_MPU |
91 | secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); | 99 | secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr; |
100 | #endif | ||
101 | |||
102 | #ifdef CONFIG_MMU | ||
103 | secondary_data.pgdir = get_arch_pgd(idmap_pgd); | ||
104 | secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); | ||
105 | #endif | ||
92 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); | 106 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); |
93 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); | 107 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); |
94 | 108 | ||
@@ -112,9 +126,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
112 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); | 126 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); |
113 | } | 127 | } |
114 | 128 | ||
115 | secondary_data.stack = NULL; | ||
116 | secondary_data.pgdir = 0; | ||
117 | 129 | ||
130 | memset(&secondary_data, 0, sizeof(secondary_data)); | ||
118 | return ret; | 131 | return ret; |
119 | } | 132 | } |
120 | 133 | ||
@@ -125,7 +138,7 @@ void __init smp_init_cpus(void) | |||
125 | smp_ops.smp_init_cpus(); | 138 | smp_ops.smp_init_cpus(); |
126 | } | 139 | } |
127 | 140 | ||
128 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 141 | int boot_secondary(unsigned int cpu, struct task_struct *idle) |
129 | { | 142 | { |
130 | if (smp_ops.smp_boot_secondary) | 143 | if (smp_ops.smp_boot_secondary) |
131 | return smp_ops.smp_boot_secondary(cpu, idle); | 144 | return smp_ops.smp_boot_secondary(cpu, idle); |
@@ -157,7 +170,7 @@ static int platform_cpu_disable(unsigned int cpu) | |||
157 | /* | 170 | /* |
158 | * __cpu_disable runs on the processor to be shutdown. | 171 | * __cpu_disable runs on the processor to be shutdown. |
159 | */ | 172 | */ |
160 | int __cpuinit __cpu_disable(void) | 173 | int __cpu_disable(void) |
161 | { | 174 | { |
162 | unsigned int cpu = smp_processor_id(); | 175 | unsigned int cpu = smp_processor_id(); |
163 | int ret; | 176 | int ret; |
@@ -203,7 +216,7 @@ static DECLARE_COMPLETION(cpu_died); | |||
203 | * called on the thread which is asking for a CPU to be shutdown - | 216 | * called on the thread which is asking for a CPU to be shutdown - |
204 | * waits until shutdown has completed, or it is timed out. | 217 | * waits until shutdown has completed, or it is timed out. |
205 | */ | 218 | */ |
206 | void __cpuinit __cpu_die(unsigned int cpu) | 219 | void __cpu_die(unsigned int cpu) |
207 | { | 220 | { |
208 | if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { | 221 | if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { |
209 | pr_err("CPU%u: cpu didn't die\n", cpu); | 222 | pr_err("CPU%u: cpu didn't die\n", cpu); |
@@ -293,7 +306,7 @@ void __ref cpu_die(void) | |||
293 | * Called by both boot and secondaries to move global data into | 306 | * Called by both boot and secondaries to move global data into |
294 | * per-processor storage. | 307 | * per-processor storage. |
295 | */ | 308 | */ |
296 | static void __cpuinit smp_store_cpu_info(unsigned int cpuid) | 309 | static void smp_store_cpu_info(unsigned int cpuid) |
297 | { | 310 | { |
298 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); | 311 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); |
299 | 312 | ||
@@ -309,7 +322,7 @@ static void percpu_timer_setup(void); | |||
309 | * This is the secondary CPU boot entry. We're using this CPUs | 322 | * This is the secondary CPU boot entry. We're using this CPUs |
310 | * idle thread stack, but a set of temporary page tables. | 323 | * idle thread stack, but a set of temporary page tables. |
311 | */ | 324 | */ |
312 | asmlinkage void __cpuinit secondary_start_kernel(void) | 325 | asmlinkage void secondary_start_kernel(void) |
313 | { | 326 | { |
314 | struct mm_struct *mm = &init_mm; | 327 | struct mm_struct *mm = &init_mm; |
315 | unsigned int cpu; | 328 | unsigned int cpu; |
@@ -508,7 +521,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode, | |||
508 | { | 521 | { |
509 | } | 522 | } |
510 | 523 | ||
511 | static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) | 524 | static void broadcast_timer_setup(struct clock_event_device *evt) |
512 | { | 525 | { |
513 | evt->name = "dummy_timer"; | 526 | evt->name = "dummy_timer"; |
514 | evt->features = CLOCK_EVT_FEAT_ONESHOT | | 527 | evt->features = CLOCK_EVT_FEAT_ONESHOT | |
@@ -537,7 +550,7 @@ int local_timer_register(struct local_timer_ops *ops) | |||
537 | } | 550 | } |
538 | #endif | 551 | #endif |
539 | 552 | ||
540 | static void __cpuinit percpu_timer_setup(void) | 553 | static void percpu_timer_setup(void) |
541 | { | 554 | { |
542 | unsigned int cpu = smp_processor_id(); | 555 | unsigned int cpu = smp_processor_id(); |
543 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); | 556 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); |
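get_arch_pgd() above exists because, with LPAE, the page-table base can be a physical address beyond 4 GiB, while the secondary_data words handed to the boot path are 32 bits wide; alignment guarantees the low bits are zero, so the address survives a right shift. A sketch of the packing, where PGD_SHIFT and PGD_MASK are illustrative stand-ins for ARCH_PGD_SHIFT and ARCH_PGD_MASK:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PGD_SHIFT 5                     /* assume 32-byte-aligned pgd */
#define PGD_MASK  ((1u << PGD_SHIFT) - 1)

int main(void)
{
        uint64_t pgd_phys = 0x880000000ULL;     /* a 36-bit address */
        uint32_t packed;

        assert((pgd_phys & PGD_MASK) == 0);     /* the BUG_ON() above */
        packed = (uint32_t)(pgd_phys >> PGD_SHIFT);
        printf("packed %#x unpacks to %#llx\n", packed,
               (unsigned long long)packed << PGD_SHIFT);
        return 0;
}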
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 9a52a07aa40e..c2edfff573c2 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c | |||
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored) | |||
70 | local_flush_bp_all(); | 70 | local_flush_bp_all(); |
71 | } | 71 | } |
72 | 72 | ||
73 | #ifdef CONFIG_ARM_ERRATA_798181 | ||
74 | static int erratum_a15_798181(void) | ||
75 | { | ||
76 | unsigned int midr = read_cpuid_id(); | ||
77 | |||
78 | /* Cortex-A15 r0p0..r3p2 affected */ | ||
79 | if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) | ||
80 | return 0; | ||
81 | return 1; | ||
82 | } | ||
83 | #else | ||
84 | static int erratum_a15_798181(void) | ||
85 | { | ||
86 | return 0; | ||
87 | } | ||
88 | #endif | ||
89 | |||
90 | static void ipi_flush_tlb_a15_erratum(void *arg) | 73 | static void ipi_flush_tlb_a15_erratum(void *arg) |
91 | { | 74 | { |
92 | dmb(); | 75 | dmb(); |
@@ -103,7 +86,7 @@ static void broadcast_tlb_a15_erratum(void) | |||
103 | 86 | ||
104 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | 87 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) |
105 | { | 88 | { |
106 | int cpu, this_cpu; | 89 | int this_cpu; |
107 | cpumask_t mask = { CPU_BITS_NONE }; | 90 | cpumask_t mask = { CPU_BITS_NONE }; |
108 | 91 | ||
109 | if (!erratum_a15_798181()) | 92 | if (!erratum_a15_798181()) |
@@ -111,21 +94,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | |||
111 | 94 | ||
112 | dummy_flush_tlb_a15_erratum(); | 95 | dummy_flush_tlb_a15_erratum(); |
113 | this_cpu = get_cpu(); | 96 | this_cpu = get_cpu(); |
114 | for_each_online_cpu(cpu) { | 97 | a15_erratum_get_cpumask(this_cpu, mm, &mask); |
115 | if (cpu == this_cpu) | ||
116 | continue; | ||
117 | /* | ||
118 | * We only need to send an IPI if the other CPUs are running | ||
119 | * the same ASID as the one being invalidated. There is no | ||
120 | * need for locking around the active_asids check since the | ||
121 | * switch_mm() function has at least one dmb() (as required by | ||
122 | * this workaround) in case a context switch happens on | ||
123 | * another CPU after the condition below. | ||
124 | */ | ||
125 | if (atomic64_read(&mm->context.id) == | ||
126 | atomic64_read(&per_cpu(active_asids, cpu))) | ||
127 | cpumask_set_cpu(cpu, &mask); | ||
128 | } | ||
129 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); | 98 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); |
130 | put_cpu(); | 99 | put_cpu(); |
131 | } | 100 | } |
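The MIDR test deleted above (the helper moves out of this file) encodes "Cortex-A15 r0p0..r3p2" directly: the mask 0xff0ffff0 keeps the implementer, architecture and part fields while dropping the variant (rN, bits 23:20) and revision (pM, bits 3:0) nibbles, which the second comparison then bounds. A small decoder showing the same check on sample MIDR values:

#include <stdio.h>

static int is_a15_r0p0_to_r3p2(unsigned midr)
{
        if ((midr & 0xff0ffff0) != 0x410fc0f0)  /* ARM Cortex-A15? */
                return 0;
        return midr <= 0x413fc0f2;              /* at most r3p2 */
}

int main(void)
{
        /* r2p1 (affected), r3p3 (fixed), r3p2 (last affected) */
        unsigned samples[] = { 0x412fc0f1, 0x413fc0f3, 0x413fc0f2 };
        unsigned i;

        for (i = 0; i < 3; i++)
                printf("midr %#x: %saffected\n", samples[i],
                       is_a15_r0p0_to_r3p2(samples[i]) ? "" : "not ");
        return 0;
}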
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 90525d9d290b..25956204ef23 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c | |||
@@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb, | |||
120 | * changing cpu. | 120 | * changing cpu. |
121 | */ | 121 | */ |
122 | if (flags == POST_RATE_CHANGE) | 122 | if (flags == POST_RATE_CHANGE) |
123 | smp_call_function(twd_update_frequency, | 123 | on_each_cpu(twd_update_frequency, |
124 | (void *)&cnd->new_rate, 1); | 124 | (void *)&cnd->new_rate, 1); |
125 | 125 | ||
126 | return NOTIFY_OK; | 126 | return NOTIFY_OK; |
@@ -187,7 +187,7 @@ core_initcall(twd_cpufreq_init); | |||
187 | 187 | ||
188 | #endif | 188 | #endif |
189 | 189 | ||
190 | static void __cpuinit twd_calibrate_rate(void) | 190 | static void twd_calibrate_rate(void) |
191 | { | 191 | { |
192 | unsigned long count; | 192 | unsigned long count; |
193 | u64 waitjiffies; | 193 | u64 waitjiffies; |
@@ -265,7 +265,7 @@ static void twd_get_clock(struct device_node *np) | |||
265 | /* | 265 | /* |
266 | * Setup the local clock events for a CPU. | 266 | * Setup the local clock events for a CPU. |
267 | */ | 267 | */ |
268 | static int __cpuinit twd_timer_setup(struct clock_event_device *clk) | 268 | static int twd_timer_setup(struct clock_event_device *clk) |
269 | { | 269 | { |
270 | struct clock_event_device **this_cpu_clk; | 270 | struct clock_event_device **this_cpu_clk; |
271 | int cpu = smp_processor_id(); | 271 | int cpu = smp_processor_id(); |
@@ -308,7 +308,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk) | |||
308 | return 0; | 308 | return 0; |
309 | } | 309 | } |
310 | 310 | ||
311 | static struct local_timer_ops twd_lt_ops __cpuinitdata = { | 311 | static struct local_timer_ops twd_lt_ops = { |
312 | .setup = twd_timer_setup, | 312 | .setup = twd_timer_setup, |
313 | .stop = twd_timer_stop, | 313 | .stop = twd_timer_stop, |
314 | }; | 314 | }; |
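The one-line fix above matters because smp_call_function() runs the callback on every CPU except the caller, while on_each_cpu() includes the caller, and the CPU servicing the clk notifier needs its own TWD frequency updated as well. A user-space model of the two semantics, with illustrative names:

#include <stdio.h>

#define NR_CPUS 4

static void update(int cpu)
{
        printf("cpu%d updated\n", cpu);
}

static void smp_call_function_model(int self, void (*fn)(int))
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu != self)
                        fn(cpu);        /* caller is skipped */
}

static void on_each_cpu_model(int self, void (*fn)(int))
{
        smp_call_function_model(self, fn);
        fn(self);                       /* ...and the caller itself */
}

int main(void)
{
        puts("smp_call_function:");
        smp_call_function_model(0, update);
        puts("on_each_cpu:");
        on_each_cpu_model(0, update);
        return 0;
}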
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index c59c97ea8268..41cf3cbf756d 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c | |||
@@ -1,15 +1,54 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/slab.h> | ||
2 | 3 | ||
4 | #include <asm/cacheflush.h> | ||
3 | #include <asm/idmap.h> | 5 | #include <asm/idmap.h> |
4 | #include <asm/pgalloc.h> | 6 | #include <asm/pgalloc.h> |
5 | #include <asm/pgtable.h> | 7 | #include <asm/pgtable.h> |
6 | #include <asm/memory.h> | 8 | #include <asm/memory.h> |
9 | #include <asm/smp_plat.h> | ||
7 | #include <asm/suspend.h> | 10 | #include <asm/suspend.h> |
8 | #include <asm/tlbflush.h> | 11 | #include <asm/tlbflush.h> |
9 | 12 | ||
10 | extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); | 13 | extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); |
11 | extern void cpu_resume_mmu(void); | 14 | extern void cpu_resume_mmu(void); |
12 | 15 | ||
16 | #ifdef CONFIG_MMU | ||
17 | /* | ||
18 | * Hide the first two arguments to __cpu_suspend - these are an implementation | ||
19 | * detail which platform code shouldn't have to know about. | ||
20 | */ | ||
21 | int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
22 | { | ||
23 | struct mm_struct *mm = current->active_mm; | ||
24 | int ret; | ||
25 | |||
26 | if (!idmap_pgd) | ||
27 | return -EINVAL; | ||
28 | |||
29 | /* | ||
30 | * Provide a temporary page table with an identity mapping for | ||
31 | * the MMU-enable code, required for resuming. On successful | ||
32 | * resume (indicated by a zero return code), we need to switch | ||
33 | * back to the correct page tables. | ||
34 | */ | ||
35 | ret = __cpu_suspend(arg, fn); | ||
36 | if (ret == 0) { | ||
37 | cpu_switch_mm(mm->pgd, mm); | ||
38 | local_flush_bp_all(); | ||
39 | local_flush_tlb_all(); | ||
40 | } | ||
41 | |||
42 | return ret; | ||
43 | } | ||
44 | #else | ||
45 | int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
46 | { | ||
47 | return __cpu_suspend(arg, fn); | ||
48 | } | ||
49 | #define idmap_pgd NULL | ||
50 | #endif | ||
51 | |||
13 | /* | 52 | /* |
14 | * This is called by __cpu_suspend() to save the state, and do whatever | 53 | * This is called by __cpu_suspend() to save the state, and do whatever |
15 | * flushing is required to ensure that when the CPU goes to sleep we have | 54 | * flushing is required to ensure that when the CPU goes to sleep we have |
@@ -47,30 +86,19 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr) | |||
47 | virt_to_phys(save_ptr) + sizeof(*save_ptr)); | 86 | virt_to_phys(save_ptr) + sizeof(*save_ptr)); |
48 | } | 87 | } |
49 | 88 | ||
50 | /* | 89 | extern struct sleep_save_sp sleep_save_sp; |
51 | * Hide the first two arguments to __cpu_suspend - these are an implementation | ||
52 | * detail which platform code shouldn't have to know about. | ||
53 | */ | ||
54 | int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
55 | { | ||
56 | struct mm_struct *mm = current->active_mm; | ||
57 | int ret; | ||
58 | |||
59 | if (!idmap_pgd) | ||
60 | return -EINVAL; | ||
61 | 90 | ||
62 | /* | 91 | static int cpu_suspend_alloc_sp(void) |
63 | * Provide a temporary page table with an identity mapping for | 92 | { |
64 | * the MMU-enable code, required for resuming. On successful | 93 | void *ctx_ptr; |
65 | * resume (indicated by a zero return code), we need to switch | 94 | /* ctx_ptr is an array of physical addresses */ |
66 | * back to the correct page tables. | 95 | ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL); |
67 | */ | ||
68 | ret = __cpu_suspend(arg, fn); | ||
69 | if (ret == 0) { | ||
70 | cpu_switch_mm(mm->pgd, mm); | ||
71 | local_flush_bp_all(); | ||
72 | local_flush_tlb_all(); | ||
73 | } | ||
74 | 96 | ||
75 | return ret; | 97 | if (WARN_ON(!ctx_ptr)) |
98 | return -ENOMEM; | ||
99 | sleep_save_sp.save_ptr_stash = ctx_ptr; | ||
100 | sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); | ||
101 | sync_cache_w(&sleep_save_sp); | ||
102 | return 0; | ||
76 | } | 103 | } |
104 | early_initcall(cpu_suspend_alloc_sp); | ||
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index abff4e9aaee0..98aee3258398 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c | |||
@@ -24,9 +24,9 @@ | |||
24 | #include <linux/timer.h> | 24 | #include <linux/timer.h> |
25 | #include <linux/clocksource.h> | 25 | #include <linux/clocksource.h> |
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | #include <linux/sched_clock.h> | ||
27 | 28 | ||
28 | #include <asm/thread_info.h> | 29 | #include <asm/thread_info.h> |
29 | #include <asm/sched_clock.h> | ||
30 | #include <asm/stacktrace.h> | 30 | #include <asm/stacktrace.h> |
31 | #include <asm/mach/arch.h> | 31 | #include <asm/mach/arch.h> |
32 | #include <asm/mach/time.h> | 32 | #include <asm/mach/time.h> |
@@ -120,6 +120,4 @@ void __init time_init(void) | |||
120 | machine_desc->init_time(); | 120 | machine_desc->init_time(); |
121 | else | 121 | else |
122 | clocksource_of_init(); | 122 | clocksource_of_init(); |
123 | |||
124 | sched_clock_postinit(); | ||
125 | } | 123 | } |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 18b32e8e4497..ab517fcce21b 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -35,8 +35,6 @@ | |||
35 | #include <asm/tls.h> | 35 | #include <asm/tls.h> |
36 | #include <asm/system_misc.h> | 36 | #include <asm/system_misc.h> |
37 | 37 | ||
38 | #include "signal.h" | ||
39 | |||
40 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; | 38 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; |
41 | 39 | ||
42 | void *vectors_page; | 40 | void *vectors_page; |
@@ -581,7 +579,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
581 | return regs->ARM_r0; | 579 | return regs->ARM_r0; |
582 | 580 | ||
583 | case NR(set_tls): | 581 | case NR(set_tls): |
584 | thread->tp_value = regs->ARM_r0; | 582 | thread->tp_value[0] = regs->ARM_r0; |
585 | if (tls_emu) | 583 | if (tls_emu) |
586 | return 0; | 584 | return 0; |
587 | if (has_tls_reg) { | 585 | if (has_tls_reg) { |
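For reference, userspace reaches the NR(set_tls) case above through ARM's private set_tls syscall, normally issued by the C library when a thread is set up. A minimal sketch, using the raw EABI syscall number rather than anything libc exports:

        #include <unistd.h>
        #include <sys/syscall.h>

        /* ARM private syscall: __ARM_NR_BASE (0x0f0000) + 5, EABI numbering */
        #define __ARM_NR_set_tls        0x0f0005

        static void set_thread_pointer(void *tp)
        {
                /* stores tp into thread->tp_value[0]; on CPUs with a
                 * hardware TLS register it is also written to TPIDRURO */
                syscall(__ARM_NR_set_tls, tp);
        }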
@@ -699,7 +697,7 @@ static int get_tp_trap(struct pt_regs *regs, unsigned int instr) | |||
699 | int reg = (instr >> 12) & 15; | 697 | int reg = (instr >> 12) & 15; |
700 | if (reg == 15) | 698 | if (reg == 15) |
701 | return 1; | 699 | return 1; |
702 | regs->uregs[reg] = current_thread_info()->tp_value; | 700 | regs->uregs[reg] = current_thread_info()->tp_value[0]; |
703 | regs->ARM_pc += 4; | 701 | regs->ARM_pc += 4; |
704 | return 0; | 702 | return 0; |
705 | } | 703 | } |
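The trap emulation above hands userspace the same value it would get from the __kuser_get_tls helper on CPUs that do have the coprocessor register. That helper's ABI is fixed at address 0xffff0fe0 in the vector page, and the documented calling pattern from userspace is:

        /* __kuser_get_tls: fixed-address kernel-provided helper that
         * returns the TLS value in r0, whatever the underlying CPU has */
        typedef void *(__kuser_get_tls_t)(void);
        #define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)

        static void *read_thread_pointer(void)
        {
                return __kuser_get_tls();
        }

This only works while the helpers are mapped, which is exactly what the CONFIG_KUSER_HELPERS option introduced in the next hunk makes optional.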
@@ -800,47 +798,63 @@ void __init trap_init(void) | |||
800 | return; | 798 | return; |
801 | } | 799 | } |
802 | 800 | ||
803 | static void __init kuser_get_tls_init(unsigned long vectors) | 801 | #ifdef CONFIG_KUSER_HELPERS |
802 | static void __init kuser_init(void *vectors) | ||
804 | { | 803 | { |
804 | extern char __kuser_helper_start[], __kuser_helper_end[]; | ||
805 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | ||
806 | |||
807 | memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | ||
808 | |||
805 | /* | 809 | /* |
806 | * vectors + 0xfe0 = __kuser_get_tls | 810 | * vectors + 0xfe0 = __kuser_get_tls |
807 | * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 | 811 | * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 |
808 | */ | 812 | */ |
809 | if (tls_emu || has_tls_reg) | 813 | if (tls_emu || has_tls_reg) |
810 | memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); | 814 | memcpy(vectors + 0xfe0, vectors + 0xfe8, 4); |
811 | } | 815 | } |
816 | #else | ||
817 | static void __init kuser_init(void *vectors) | ||
818 | { | ||
819 | } | ||
820 | #endif | ||
812 | 821 | ||
813 | void __init early_trap_init(void *vectors_base) | 822 | void __init early_trap_init(void *vectors_base) |
814 | { | 823 | { |
824 | #ifndef CONFIG_CPU_V7M | ||
815 | unsigned long vectors = (unsigned long)vectors_base; | 825 | unsigned long vectors = (unsigned long)vectors_base; |
816 | extern char __stubs_start[], __stubs_end[]; | 826 | extern char __stubs_start[], __stubs_end[]; |
817 | extern char __vectors_start[], __vectors_end[]; | 827 | extern char __vectors_start[], __vectors_end[]; |
818 | extern char __kuser_helper_start[], __kuser_helper_end[]; | 828 | unsigned i; |
819 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | ||
820 | 829 | ||
821 | vectors_page = vectors_base; | 830 | vectors_page = vectors_base; |
822 | 831 | ||
823 | /* | 832 | /* |
833 | * Poison the vectors page with an undefined instruction. This | ||
834 | * instruction is chosen to be undefined for both ARM and Thumb | ||
835 | * ISAs. The Thumb version is an undefined instruction with a | ||
836 | * branch back to the undefined instruction. | ||
837 | */ | ||
838 | for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) | ||
839 | ((u32 *)vectors_base)[i] = 0xe7fddef1; | ||
840 | |||
841 | /* | ||
824 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) | 842 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) |
825 | * into the vector page, mapped at 0xffff0000, and ensure these | 843 | * into the vector page, mapped at 0xffff0000, and ensure these |
826 | * are visible to the instruction stream. | 844 | * are visible to the instruction stream. |
827 | */ | 845 | */ |
828 | memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); | 846 | memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); |
829 | memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); | 847 | memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); |
830 | memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); | ||
831 | 848 | ||
832 | /* | 849 | kuser_init(vectors_base); |
833 | * Do processor specific fixups for the kuser helpers | ||
834 | */ | ||
835 | kuser_get_tls_init(vectors); | ||
836 | 850 | ||
851 | flush_icache_range(vectors, vectors + PAGE_SIZE * 2); | ||
852 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); | ||
853 | #else /* ifndef CONFIG_CPU_V7M */ | ||
837 | /* | 854 | /* |
838 | * Copy signal return handlers into the vector page, and | 855 | * on V7-M there is no need to copy the vector table to a dedicated |
839 | * set sigreturn to be a pointer to these. | 856 | * memory area. The address is configurable and so a table in the kernel |
857 | * image can be used. | ||
840 | */ | 858 | */ |
841 | memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), | 859 | #endif |
842 | sigreturn_codes, sizeof(sigreturn_codes)); | ||
843 | |||
844 | flush_icache_range(vectors, vectors + PAGE_SIZE); | ||
845 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); | ||
846 | } | 860 | } |
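A note on the poison value introduced above: 0xe7fddef1 is chosen so that, however the CPU decodes the unused parts of the vector page, execution cannot fall through them. A standalone userspace illustration of the claim in the comment, with the encodings spelled out (the instruction-space reading is mine; the patch itself only states the combined ARM/Thumb-undefined property):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t poison = 0xe7fddef1;

                /* As one ARM word this sits in the permanently undefined
                 * 0xe7f...f. encoding space. Read as little-endian Thumb
                 * it is two halfwords: 0xdef1 (UDF #0xf1) followed by
                 * 0xe7fd (B . - 2), i.e. a branch straight back to the
                 * undefined instruction. */
                printf("thumb halfwords: %#06x %#06x\n",
                       poison & 0xffffu, poison >> 16);
                return 0;
        }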
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index a871b8e00fca..7bcee5c9b604 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -70,10 +70,6 @@ SECTIONS | |||
70 | ARM_EXIT_DISCARD(EXIT_TEXT) | 70 | ARM_EXIT_DISCARD(EXIT_TEXT) |
71 | ARM_EXIT_DISCARD(EXIT_DATA) | 71 | ARM_EXIT_DISCARD(EXIT_DATA) |
72 | EXIT_CALL | 72 | EXIT_CALL |
73 | #ifndef CONFIG_HOTPLUG | ||
74 | *(.ARM.exidx.devexit.text) | ||
75 | *(.ARM.extab.devexit.text) | ||
76 | #endif | ||
77 | #ifndef CONFIG_MMU | 73 | #ifndef CONFIG_MMU |
78 | *(.fixup) | 74 | *(.fixup) |
79 | *(__ex_table) | 75 | *(__ex_table) |
@@ -152,6 +148,23 @@ SECTIONS | |||
152 | . = ALIGN(PAGE_SIZE); | 148 | . = ALIGN(PAGE_SIZE); |
153 | __init_begin = .; | 149 | __init_begin = .; |
154 | #endif | 150 | #endif |
151 | /* | ||
152 | * The vectors and stubs are relocatable code, and the | ||
153 | * only thing that matters is their relative offsets | ||
154 | */ | ||
155 | __vectors_start = .; | ||
156 | .vectors 0 : AT(__vectors_start) { | ||
157 | *(.vectors) | ||
158 | } | ||
159 | . = __vectors_start + SIZEOF(.vectors); | ||
160 | __vectors_end = .; | ||
161 | |||
162 | __stubs_start = .; | ||
163 | .stubs 0x1000 : AT(__stubs_start) { | ||
164 | *(.stubs) | ||
165 | } | ||
166 | . = __stubs_start + SIZEOF(.stubs); | ||
167 | __stubs_end = .; | ||
155 | 168 | ||
156 | INIT_TEXT_SECTION(8) | 169 | INIT_TEXT_SECTION(8) |
157 | .exit.text : { | 170 | .exit.text : { |