Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/Makefile            |   1
 arch/arm/kernel/asm-offsets.c       |  25
 arch/arm/kernel/bios32.c            |   9
 arch/arm/kernel/debug.S             |   2
 arch/arm/kernel/head.S              |   5
 arch/arm/kernel/hw_breakpoint.c     |  61
 arch/arm/kernel/hyp-stub.S          |  18
 arch/arm/kernel/perf_event.c        |  16
 arch/arm/kernel/perf_event_cpu.c    |  51
 arch/arm/kernel/perf_event_v6.c     |   4
 arch/arm/kernel/perf_event_v7.c     |  18
 arch/arm/kernel/perf_event_xscale.c |   2
 arch/arm/kernel/process.c           |  13
 arch/arm/kernel/psci.c              | 211
 arch/arm/kernel/sched_clock.c       |   4
 arch/arm/kernel/smp.c               |  31
 arch/arm/kernel/smp_scu.c           |   2
 arch/arm/kernel/smp_twd.c           |  53
 arch/arm/kernel/vmlinux.lds.S       |   6
 19 files changed, 389 insertions(+), 143 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5bbec7b8183e..5f3338eacad2 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
 obj-$(CONFIG_EARLY_PRINTK)  += early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)  += hyp-stub.o
+obj-$(CONFIG_ARM_PSCI)      += psci.o
 
 extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c985b481192c..c8b3272dfed1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#ifdef CONFIG_KVM_ARM_HOST
+#include <linux/kvm_host.h>
+#endif
 #include <asm/cacheflush.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
@@ -146,5 +149,27 @@ int main(void)
   DEFINE(DMA_BIDIRECTIONAL,     DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,         DMA_TO_DEVICE);
   DEFINE(DMA_FROM_DEVICE,       DMA_FROM_DEVICE);
+#ifdef CONFIG_KVM_ARM_HOST
+  DEFINE(VCPU_KVM,              offsetof(struct kvm_vcpu, kvm));
+  DEFINE(VCPU_MIDR,             offsetof(struct kvm_vcpu, arch.midr));
+  DEFINE(VCPU_CP15,             offsetof(struct kvm_vcpu, arch.cp15));
+  DEFINE(VCPU_VFP_GUEST,        offsetof(struct kvm_vcpu, arch.vfp_guest));
+  DEFINE(VCPU_VFP_HOST,         offsetof(struct kvm_vcpu, arch.vfp_host));
+  DEFINE(VCPU_REGS,             offsetof(struct kvm_vcpu, arch.regs));
+  DEFINE(VCPU_USR_REGS,         offsetof(struct kvm_vcpu, arch.regs.usr_regs));
+  DEFINE(VCPU_SVC_REGS,         offsetof(struct kvm_vcpu, arch.regs.svc_regs));
+  DEFINE(VCPU_ABT_REGS,         offsetof(struct kvm_vcpu, arch.regs.abt_regs));
+  DEFINE(VCPU_UND_REGS,         offsetof(struct kvm_vcpu, arch.regs.und_regs));
+  DEFINE(VCPU_IRQ_REGS,         offsetof(struct kvm_vcpu, arch.regs.irq_regs));
+  DEFINE(VCPU_FIQ_REGS,         offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
+  DEFINE(VCPU_PC,               offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
+  DEFINE(VCPU_CPSR,             offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
+  DEFINE(VCPU_IRQ_LINES,        offsetof(struct kvm_vcpu, arch.irq_lines));
+  DEFINE(VCPU_HSR,              offsetof(struct kvm_vcpu, arch.hsr));
+  DEFINE(VCPU_HxFAR,            offsetof(struct kvm_vcpu, arch.hxfar));
+  DEFINE(VCPU_HPFAR,            offsetof(struct kvm_vcpu, arch.hpfar));
+  DEFINE(VCPU_HYP_PC,           offsetof(struct kvm_vcpu, arch.hyp_pc));
+  DEFINE(KVM_VTTBR,             offsetof(struct kvm, arch.vttbr));
+#endif
   return 0;
 }
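These DEFINE() entries exist so the KVM world-switch assembly can address struct kvm_vcpu fields by constant offset. For reference, DEFINE() is the standard asm-offsets trick from include/linux/kbuild.h: the compiler emits each constant as a marker line in its assembly output, and a Kbuild sed pass turns the markers into a generated header of #defines. A minimal sketch of the mechanism (the numeric offset below is invented for illustration):

    /* From include/linux/kbuild.h: emit "->SYM <value>" into the asm output. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* A marker like "->VCPU_PC 656 offsetof(struct kvm_vcpu, ...)" (offset
     * invented here) becomes "#define VCPU_PC 656" in generated/asm-offsets.h,
     * usable from .S files that cannot evaluate offsetof() themselves. */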
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 379cf3292390..a1f73b502ef0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
         return irq;
 }
 
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
         int ret;
         struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
         return 0;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
         struct pci_sys_data *sys = NULL;
         int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
                 sys->map_irq = hw->map_irq;
                 INIT_LIST_HEAD(&sys->resources);
 
+                if (hw->private_data)
+                        sys->private_data = hw->private_data[nr];
+
                 ret = hw->setup(nr, sys);
 
                 if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
         }
 }
 
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
 {
         struct pci_sys_data *sys;
         LIST_HEAD(head);
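The new hw->private_data hook gives each root bus a per-controller cookie: pcibios_init_hw() copies entry nr into sys->private_data before calling hw->setup(). Dropping __init from these functions allows them to be reached from ordinary driver probe paths, which can run after init memory is discarded. A hypothetical sketch of a platform using the hook (the my_pcie_* names and types are invented):

    /* Hypothetical platform code: two host controllers, one cookie each. */
    static struct my_pcie_port ports[2];
    static void *port_cookies[2] = { &ports[0], &ports[1] };

    static struct hw_pci my_pci __initdata = {
            .nr_controllers = 2,
            .private_data   = port_cookies, /* sys->private_data = private_data[nr] */
            .setup          = my_pcie_setup,
            .map_irq        = my_pcie_map_irq,
    };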
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 6809200c31fb..14f7c3b14632 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -100,12 +100,14 @@ ENTRY(printch)
         b       1b
 ENDPROC(printch)
 
+#ifdef CONFIG_MMU
 ENTRY(debug_ll_addr)
         addruart r2, r3, ip
         str     r2, [r0]
         str     r3, [r1]
         mov     pc, lr
 ENDPROC(debug_ll_addr)
+#endif
 
 #else
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f4668..486a15ae9011 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:
 
         /*
          * Then map boot params address in r2 if specified.
+         * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
          */
         mov     r0, r2, lsr #SECTION_SHIFT
         movs    r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
         addne   r3, r3, #PAGE_OFFSET
         addne   r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
         orrne   r6, r7, r0
+        strne   r6, [r3], #1 << PMD_ORDER
+        addne   r6, r6, #1 << SECTION_SHIFT
         strne   r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
          * as it has already been validated by the primary processor.
          */
 #ifdef CONFIG_ARM_VIRT_EXT
-        bl      __hyp_stub_install
+        bl      __hyp_stub_install_secondary
 #endif
         safe_svcmode_maskall r9
 
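The two-section mapping guards against an ATAGs/DTB blob that straddles a section boundary (1 MiB sections on the classic short-descriptor tables): a blob near the end of one section spills into the next, and a single section mapping would miss its tail. A small userspace sketch of the arithmetic, with an invented example address:

    #include <stdio.h>

    int main(void)
    {
            unsigned long dtb_start = 0x2fff00; /* example address (made up) */
            unsigned long dtb_size  = 0x400;
            unsigned long shift     = 20;       /* SECTION_SHIFT: 1 MiB sections */

            /* first and last section touched by the blob */
            unsigned long first = dtb_start >> shift;
            unsigned long last  = (dtb_start + dtb_size - 1) >> shift;

            /* prints "first=2 last=3": one section mapping would miss the tail */
            printf("first=%lu last=%lu\n", first, last);
            return 0;
    }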
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5ff2e77782b1..5eae53e7a2e1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/smp.h>
+#include <linux/cpu_pm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
@@ -35,6 +36,7 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/kdebug.h>
 #include <asm/traps.h>
+#include <asm/hardware/coresight.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -49,6 +51,9 @@ static int core_num_wrps;
 /* Debug architecture version. */
 static u8 debug_arch;
 
+/* Does debug architecture support OS Save and Restore? */
+static bool has_ossr;
+
 /* Maximum supported watchpoint length. */
 static u8 max_watchpoint_len;
 
@@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = {
         .fn     = debug_reg_trap,
 };
 
+/* Does this core support OS Save and Restore? */
+static bool core_has_os_save_restore(void)
+{
+        u32 oslsr;
+
+        switch (get_debug_arch()) {
+        case ARM_DEBUG_ARCH_V7_1:
+                return true;
+        case ARM_DEBUG_ARCH_V7_ECP14:
+                ARM_DBG_READ(c1, c1, 4, oslsr);
+                if (oslsr & ARM_OSLSR_OSLM0)
+                        return true;
+        default:
+                return false;
+        }
+}
+
 static void reset_ctrl_regs(void *unused)
 {
         int i, raw_num_brps, err = 0, cpu = smp_processor_id();
@@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused)
                 if ((val & 0x1) == 0)
                         err = -EPERM;
 
-                /*
-                 * Check whether we implement OS save and restore.
-                 */
-                ARM_DBG_READ(c1, c1, 4, val);
-                if ((val & 0x9) == 0)
+                if (!has_ossr)
                         goto clear_vcr;
                 break;
         case ARM_DEBUG_ARCH_V7_1:
@@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused)
 
         /*
          * Unconditionally clear the OS lock by writing a value
-         * other than 0xC5ACCE55 to the access register.
+         * other than CS_LAR_KEY to the access register.
          */
-        ARM_DBG_WRITE(c1, c0, 4, 0);
+        ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
         isb();
 
         /*
@@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
         .notifier_call = dbg_reset_notify,
 };
 
+#ifdef CONFIG_CPU_PM
+static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
+                             void *v)
+{
+        if (action == CPU_PM_EXIT)
+                reset_ctrl_regs(NULL);
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+        .notifier_call = dbg_cpu_pm_notify,
+};
+
+static void __init pm_init(void)
+{
+        cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+}
+#else
+static inline void pm_init(void)
+{
+}
+#endif
+
 static int __init arch_hw_breakpoint_init(void)
 {
         debug_arch = get_debug_arch();
@@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)
                 return 0;
         }
 
+        has_ossr = core_has_os_save_restore();
+
         /* Determine how many BRPs/WRPs are available. */
         core_num_brps = get_num_brps();
         core_num_wrps = get_num_wrps();
@@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void)
         hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
                         TRAP_HWBKPT, "breakpoint debug exception");
 
-        /* Register hotplug notifier. */
+        /* Register hotplug and PM notifiers. */
         register_cpu_notifier(&dbg_reset_nb);
+        pm_init();
         return 0;
 }
 arch_initcall(arch_hw_breakpoint_init);
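The CPU_PM_EXIT hook matters because the debug registers live in the core power domain and lose state across deep idle; re-running reset_ctrl_regs() on exit restores a sane configuration. The notifications come from code that brackets a power-down with cpu_pm_enter()/cpu_pm_exit(). A minimal sketch of such a caller, with the power-down hook invented for illustration:

    #include <linux/cpu_pm.h>

    extern void wfi_based_powerdown(void);  /* made-up platform hook */

    /* Hypothetical deep-idle entry: debug logic loses state across this. */
    static void my_enter_deep_idle(void)
    {
            cpu_pm_enter();         /* fires CPU_PM_ENTER notifiers */
            wfi_based_powerdown();
            cpu_pm_exit();          /* fires CPU_PM_EXIT: reset_ctrl_regs() reruns */
    }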
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 65b2417aebce..1315c4ccfa56 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
          * immediately.
          */
         compare_cpu_mode_with_primary   r4, r5, r6, r7
-        bxne    lr
+        movne   pc, lr
 
         /*
          * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
          */
 
         cmp     r4, #HYP_MODE
-        bxne    lr                      @ give up if the CPU is not in HYP mode
+        movne   pc, lr                  @ give up if the CPU is not in HYP mode
 
         /*
          * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
          * Eventually, CPU-specific code might be needed -- assume not for now
          *
          * This code relies on the "eret" instruction to synchronize the
-         * various coprocessor accesses.
+         * various coprocessor accesses. This is done when we switch to SVC
+         * (see safe_svcmode_maskall).
          */
         @ Now install the hypervisor stub:
         adr     r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB(  orr     r7, #(1 << 30) )        @ HSCTLR.TE
 1:
 #endif
 
-        bic     r7, r4, #MODE_MASK
-        orr     r7, r7, #SVC_MODE
-THUMB(  orr     r7, r7, #PSR_T_BIT )
-        msr     spsr_cxsf, r7           @ This is SPSR_hyp.
-
-        __MSR_ELR_HYP(14)               @ msr elr_hyp, lr
-        __ERET                          @ return, switching to SVC mode
-        @ The boot CPU mode is left in r4.
+        bx      lr                      @ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
         @ fall through
 ENTRY(__hyp_set_vectors)
         __HVC(0)
-        bx      lr
+        mov     pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f9e8657dd241..31e0eb353cd8 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -149,12 +149,6 @@ again:
 static void
 armpmu_read(struct perf_event *event)
 {
-        struct hw_perf_event *hwc = &event->hw;
-
-        /* Don't read disabled counters! */
-        if (hwc->idx < 0)
-                return;
-
         armpmu_event_update(event);
 }
 
@@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags)
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
 
-        WARN_ON(idx < 0);
-
         armpmu_stop(event, PERF_EF_UPDATE);
         hw_events->events[idx] = NULL;
         clear_bit(idx, hw_events->used_mask);
@@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event)
 {
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         struct hw_perf_event *hwc = &event->hw;
-        int mapping, err;
+        int mapping;
 
         mapping = armpmu->map_event(event);
 
@@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event)
                 local64_set(&hwc->period_left, hwc->sample_period);
         }
 
-        err = 0;
         if (event->group_leader != event) {
-                err = validate_group(event);
-                if (err)
+                if (validate_group(event) != 0)
                         return -EINVAL;
         }
 
-        return err;
+        return 0;
 }
 
 static int armpmu_event_init(struct perf_event *event)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 5f6620684e25..1f2740e3dbc0 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
         cpu_pmu->free_irq       = cpu_pmu_free_irq;
 
         /* Ensure the PMU has sane values out of reset. */
-        if (cpu_pmu && cpu_pmu->reset)
+        if (cpu_pmu->reset)
                 on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
 }
 
@@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
 static int probe_current_pmu(struct arm_pmu *pmu)
 {
         int cpu = get_cpu();
-        unsigned long cpuid = read_cpuid_id();
-        unsigned long implementor = (cpuid & 0xFF000000) >> 24;
-        unsigned long part_number = (cpuid & 0xFFF0);
+        unsigned long implementor = read_cpuid_implementor();
+        unsigned long part_number = read_cpuid_part_number();
         int ret = -ENODEV;
 
         pr_info("probing PMU on CPU %d\n", cpu);
 
         /* ARM Ltd CPUs. */
-        if (0x41 == implementor) {
+        if (implementor == ARM_CPU_IMP_ARM) {
                 switch (part_number) {
-                case 0xB360:    /* ARM1136 */
-                case 0xB560:    /* ARM1156 */
-                case 0xB760:    /* ARM1176 */
+                case ARM_CPU_PART_ARM1136:
+                case ARM_CPU_PART_ARM1156:
+                case ARM_CPU_PART_ARM1176:
                         ret = armv6pmu_init(pmu);
                         break;
-                case 0xB020:    /* ARM11mpcore */
+                case ARM_CPU_PART_ARM11MPCORE:
                         ret = armv6mpcore_pmu_init(pmu);
                         break;
-                case 0xC080:    /* Cortex-A8 */
+                case ARM_CPU_PART_CORTEX_A8:
                         ret = armv7_a8_pmu_init(pmu);
                         break;
-                case 0xC090:    /* Cortex-A9 */
+                case ARM_CPU_PART_CORTEX_A9:
                         ret = armv7_a9_pmu_init(pmu);
                         break;
-                case 0xC050:    /* Cortex-A5 */
+                case ARM_CPU_PART_CORTEX_A5:
                         ret = armv7_a5_pmu_init(pmu);
                         break;
-                case 0xC0F0:    /* Cortex-A15 */
+                case ARM_CPU_PART_CORTEX_A15:
                         ret = armv7_a15_pmu_init(pmu);
                         break;
-                case 0xC070:    /* Cortex-A7 */
+                case ARM_CPU_PART_CORTEX_A7:
                         ret = armv7_a7_pmu_init(pmu);
                         break;
                 }
         /* Intel CPUs [xscale]. */
-        } else if (0x69 == implementor) {
-                part_number = (cpuid >> 13) & 0x7;
-                switch (part_number) {
-                case 1:
+        } else if (implementor == ARM_CPU_IMP_INTEL) {
+                switch (xscale_cpu_arch_version()) {
+                case ARM_CPU_XSCALE_ARCH_V1:
                         ret = xscale1pmu_init(pmu);
                         break;
-                case 2:
+                case ARM_CPU_XSCALE_ARCH_V2:
                         ret = xscale2pmu_init(pmu);
                         break;
                 }
@@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
         }
 
         if (ret) {
-                pr_info("failed to register PMU devices!");
-                kfree(pmu);
-                return ret;
+                pr_info("failed to probe PMU!");
+                goto out_free;
         }
 
         cpu_pmu = pmu;
         cpu_pmu->plat_device = pdev;
         cpu_pmu_init(cpu_pmu);
-        armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+        ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
 
-        return 0;
+        if (!ret)
+                return 0;
+
+out_free:
+        pr_info("failed to register PMU devices!");
+        kfree(pmu);
+        return ret;
 }
 
 static struct platform_driver cpu_pmu_driver = {
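The new helpers replace open-coded MIDR mask/shift pairs with named decoders (the kernel versions live in <asm/cputype.h>). The fields are architectural: the implementer is MIDR[31:24] and the primary part number MIDR[15:4]. A plain-C sketch of the equivalent decode:

    #include <stdio.h>

    /* Decode the two MIDR fields used by probe_current_pmu(). */
    static unsigned long midr_implementor(unsigned long midr)
    {
            return (midr & 0xff000000) >> 24;       /* MIDR[31:24] */
    }

    static unsigned long midr_part_number(unsigned long midr)
    {
            return midr & 0xfff0;   /* MIDR[15:4], kept in place as 0xXXX0 */
    }

    int main(void)
    {
            unsigned long midr = 0x413fc090;        /* example: a Cortex-A9 MIDR */

            /* prints "imp=0x41 part=0xc090": ARM Ltd, Cortex-A9 */
            printf("imp=%#lx part=%#lx\n", midr_implementor(midr),
                   midr_part_number(midr));
            return 0;
    }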
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 041d0526a288..03664b0e8fa4 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
         },
         [C(OP_WRITE)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         [C(OP_PREFETCH)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
         },
         [C(OP_WRITE)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                [C(RESULT_MISS)]        = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         [C(OP_PREFETCH)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4fbc757d9cff..8c79a9e70b83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                 [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
         },
         [C(OP_WRITE)] = {
-                [C(RESULT_ACCESS)]      = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
-                [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         [C(OP_PREFETCH)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
         },
         [C(OP_WRITE)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         [C(OP_PREFETCH)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                 [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
         },
         [C(OP_WRITE)] = {
-                [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         /*
          * The prefetch counters don't differentiate between the I
@@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                 [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
         },
         [C(OP_WRITE)] = {
-                [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         [C(OP_PREFETCH)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                 [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
         },
         [C(OP_WRITE)] = {
-                [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-                [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+                [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         [C(OP_PREFETCH)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 2b0fe30ec12e..63990c42fac9 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
         },
         [C(OP_WRITE)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                [C(RESULT_MISS)]        = XSCALE_PERFCTR_ICACHE_MISS,
+                [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
         },
         [C(OP_PREFETCH)] = {
                 [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20aa..047d3e40e470 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -172,14 +172,9 @@ static void default_idle(void)
                 local_irq_enable();
 }
 
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL(pm_idle);
-
 /*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way.  The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -210,10 +205,10 @@ void cpu_idle(void)
                 } else if (!need_resched()) {
                         stop_critical_timings();
                         if (cpuidle_idle_call())
-                                pm_idle();
+                                default_idle();
                         start_critical_timings();
                         /*
-                         * pm_idle functions must always
+                         * default_idle functions must always
                          * return with IRQs enabled.
                          */
                         WARN_ON(irqs_disabled());
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
new file mode 100644
index 000000000000..36531643cc2c
--- /dev/null
+++ b/arch/arm/kernel/psci.c
@@ -0,0 +1,211 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <asm/compiler.h>
+#include <asm/errno.h>
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+#include <asm/psci.h>
+
+struct psci_operations psci_ops;
+
+static int (*invoke_psci_fn)(u32, u32, u32, u32);
+
+enum psci_function {
+        PSCI_FN_CPU_SUSPEND,
+        PSCI_FN_CPU_ON,
+        PSCI_FN_CPU_OFF,
+        PSCI_FN_MIGRATE,
+        PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+#define PSCI_RET_SUCCESS        0
+#define PSCI_RET_EOPNOTSUPP     -1
+#define PSCI_RET_EINVAL         -2
+#define PSCI_RET_EPERM          -3
+
+static int psci_to_linux_errno(int errno)
+{
+        switch (errno) {
+        case PSCI_RET_SUCCESS:
+                return 0;
+        case PSCI_RET_EOPNOTSUPP:
+                return -EOPNOTSUPP;
+        case PSCI_RET_EINVAL:
+                return -EINVAL;
+        case PSCI_RET_EPERM:
+                return -EPERM;
+        };
+
+        return -EINVAL;
+}
+
+#define PSCI_POWER_STATE_ID_MASK        0xffff
+#define PSCI_POWER_STATE_ID_SHIFT       0
+#define PSCI_POWER_STATE_TYPE_MASK      0x1
+#define PSCI_POWER_STATE_TYPE_SHIFT     16
+#define PSCI_POWER_STATE_AFFL_MASK      0x3
+#define PSCI_POWER_STATE_AFFL_SHIFT     24
+
+static u32 psci_power_state_pack(struct psci_power_state state)
+{
+        return ((state.id & PSCI_POWER_STATE_ID_MASK)
+                        << PSCI_POWER_STATE_ID_SHIFT)   |
+               ((state.type & PSCI_POWER_STATE_TYPE_MASK)
+                        << PSCI_POWER_STATE_TYPE_SHIFT) |
+               ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
+                        << PSCI_POWER_STATE_AFFL_SHIFT);
+}
+
+/*
+ * The following two functions are invoked via the invoke_psci_fn pointer
+ * and will not be inlined, allowing us to piggyback on the AAPCS.
+ */
+static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
+                                         u32 arg2)
+{
+        asm volatile(
+                        __asmeq("%0", "r0")
+                        __asmeq("%1", "r1")
+                        __asmeq("%2", "r2")
+                        __asmeq("%3", "r3")
+                        __HVC(0)
+                : "+r" (function_id)
+                : "r" (arg0), "r" (arg1), "r" (arg2));
+
+        return function_id;
+}
+
+static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
+                                         u32 arg2)
+{
+        asm volatile(
+                        __asmeq("%0", "r0")
+                        __asmeq("%1", "r1")
+                        __asmeq("%2", "r2")
+                        __asmeq("%3", "r3")
+                        __SMC(0)
+                : "+r" (function_id)
+                : "r" (arg0), "r" (arg1), "r" (arg2));
+
+        return function_id;
+}
+
+static int psci_cpu_suspend(struct psci_power_state state,
+                            unsigned long entry_point)
+{
+        int err;
+        u32 fn, power_state;
+
+        fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+        power_state = psci_power_state_pack(state);
+        err = invoke_psci_fn(fn, power_state, entry_point, 0);
+        return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(struct psci_power_state state)
+{
+        int err;
+        u32 fn, power_state;
+
+        fn = psci_function_id[PSCI_FN_CPU_OFF];
+        power_state = psci_power_state_pack(state);
+        err = invoke_psci_fn(fn, power_state, 0, 0);
+        return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+        int err;
+        u32 fn;
+
+        fn = psci_function_id[PSCI_FN_CPU_ON];
+        err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+        return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+        int err;
+        u32 fn;
+
+        fn = psci_function_id[PSCI_FN_MIGRATE];
+        err = invoke_psci_fn(fn, cpuid, 0, 0);
+        return psci_to_linux_errno(err);
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+        { .compatible = "arm,psci", },
+        {},
+};
+
+static int __init psci_init(void)
+{
+        struct device_node *np;
+        const char *method;
+        u32 id;
+
+        np = of_find_matching_node(NULL, psci_of_match);
+        if (!np)
+                return 0;
+
+        pr_info("probing function IDs from device-tree\n");
+
+        if (of_property_read_string(np, "method", &method)) {
+                pr_warning("missing \"method\" property\n");
+                goto out_put_node;
+        }
+
+        if (!strcmp("hvc", method)) {
+                invoke_psci_fn = __invoke_psci_fn_hvc;
+        } else if (!strcmp("smc", method)) {
+                invoke_psci_fn = __invoke_psci_fn_smc;
+        } else {
+                pr_warning("invalid \"method\" property: %s\n", method);
+                goto out_put_node;
+        }
+
+        if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+                psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+                psci_ops.cpu_suspend = psci_cpu_suspend;
+        }
+
+        if (!of_property_read_u32(np, "cpu_off", &id)) {
+                psci_function_id[PSCI_FN_CPU_OFF] = id;
+                psci_ops.cpu_off = psci_cpu_off;
+        }
+
+        if (!of_property_read_u32(np, "cpu_on", &id)) {
+                psci_function_id[PSCI_FN_CPU_ON] = id;
+                psci_ops.cpu_on = psci_cpu_on;
+        }
+
+        if (!of_property_read_u32(np, "migrate", &id)) {
+                psci_function_id[PSCI_FN_MIGRATE] = id;
+                psci_ops.migrate = psci_migrate;
+        }
+
+out_put_node:
+        of_node_put(np);
+        return 0;
+}
+early_initcall(psci_init);
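Callers are expected to test the psci_ops hooks, which stay NULL when the device tree does not advertise the corresponding function ID. A hedged sketch of how platform SMP code might bring up a secondary CPU through PSCI (identity-map handling of the entry point is elided; secondary_startup is the usual ARM entry in head.S, and the hook name is invented):

    #include <asm/memory.h>
    #include <asm/psci.h>
    #include <asm/smp_plat.h>

    extern void secondary_startup(void);

    /* Hypothetical platform hook: boot a secondary CPU through PSCI. */
    static int my_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
            if (!psci_ops.cpu_on)
                    return -ENODEV; /* DT did not provide a cpu_on ID */

            /* cpuid is the target MPIDR; the entry point must be physical */
            return psci_ops.cpu_on(cpu_logical_map(cpu),
                                   virt_to_phys(secondary_startup));
    }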
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index fc6692e2b603..bd6f56b9ec21 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
          * detectable in cyc_to_fixed_sched_clock().
          */
         raw_local_irq_save(flags);
-        cd.epoch_cyc = cyc;
+        cd.epoch_cyc_copy = cyc;
         smp_wmb();
         cd.epoch_ns = ns;
         smp_wmb();
-        cd.epoch_cyc_copy = cyc;
+        cd.epoch_cyc = cyc;
         raw_local_irq_restore(flags);
 }
 
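The swap matters because the writer must touch epoch_cyc_copy first and epoch_cyc last: the lock-free reader treats inequality of the two cycle stamps as "writer in progress" and retries. Paraphrasing the reader loop this pairs with (from cyc_to_sched_clock() in the same file):

    /* Reader (paraphrased): the epoch is only consumed once both cycle
     * stamps agree, i.e. no writer was mid-update between the two reads. */
    do {
            epoch_cyc = cd.epoch_cyc;
            smp_rmb();
            epoch_ns  = cd.epoch_ns;
            smp_rmb();
    } while (epoch_cyc != cd.epoch_cyc_copy);

With the old order (epoch_cyc written first, the copy last), a reader could observe the new value at both ends while epoch_ns was still stale, and accept a torn epoch; writing the fields in the reverse of the read order closes that window.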
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..365c8d92e2eb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
                 smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-        if (smp_ops.smp_prepare_cpus)
-                smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-        if (smp_ops.smp_secondary_init)
-                smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
         if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
         return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-        if (smp_ops.cpu_die)
-                smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
         if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
          * actual CPU shutdown procedure is at least platform (if not
          * CPU) specific.
          */
-        platform_cpu_die(cpu);
+        if (smp_ops.cpu_die)
+                smp_ops.cpu_die(cpu);
 
         /*
          * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         /*
          * Give the platform a chance to do its own initialisation.
          */
-        platform_secondary_init(cpu);
+        if (smp_ops.smp_secondary_init)
+                smp_ops.smp_secondary_init(cpu);
 
         notify_cpu_starting(cpu);
 
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                 /*
                  * Initialise the present map, which describes the set of CPUs
                  * actually populated at the present time. A platform should
-                 * re-initialize the map in platform_smp_prepare_cpus() if
-                 * present != possible (e.g. physical hotplug).
+                 * re-initialize the map in the platform's smp_prepare_cpus()
+                 * if present != possible (e.g. physical hotplug).
                  */
                 init_cpu_present(cpu_possible_mask);
 
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                  * Initialise the SCU if there are more than one CPU
                  * and let them know where to start.
                  */
-                platform_smp_prepare_cpus(max_cpus);
+                if (smp_ops.smp_prepare_cpus)
+                        smp_ops.smp_prepare_cpus(max_cpus);
         }
 }
 
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index b9f015e843d8..45eac87ed66a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
         unsigned int val;
-        int cpu = cpu_logical_map(smp_processor_id());
+        int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
         if (mode > 3 || mode == 1 || cpu > 3)
                 return -EINVAL;
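MPIDR_AFFINITY_LEVEL(mpidr, 0) extracts Aff0 (MPIDR bits [7:0]), the intra-cluster CPU index the SCU register expects; the raw logical-map value also carries cluster bits on multi-cluster parts and would fail the cpu > 3 check there. A small sketch of the extraction, mirroring the macros in <asm/cputype.h>:

    #include <stdio.h>

    #define MPIDR_LEVEL_BITS        8
    #define MPIDR_LEVEL_MASK        ((1 << MPIDR_LEVEL_BITS) - 1)
    #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
            (((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

    int main(void)
    {
            unsigned long mpidr = 0x102;    /* cluster 1 (Aff1), cpu 2 (Aff0) */

            /* prints "cpu=2": only the intra-cluster index scu_power_mode needs */
            printf("cpu=%lu\n", MPIDR_AFFINITY_LEVEL(mpidr, 0));
            return 0;
    }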
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 49f335d301ba..ae0c7bb39ae8 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,7 +31,6 @@ static void __iomem *twd_base;
 
 static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
-static bool common_setup_called;
 static DEFINE_PER_CPU(bool, percpu_setup_called);
 
 static struct clock_event_device __percpu **twd_evt;
@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
         return IRQ_NONE;
 }
 
-static struct clk *twd_get_clock(void)
+static void twd_get_clock(struct device_node *np)
 {
-        struct clk *clk;
         int err;
 
-        clk = clk_get_sys("smp_twd", NULL);
-        if (IS_ERR(clk)) {
-                pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
-                return clk;
+        if (np)
+                twd_clk = of_clk_get(np, 0);
+        else
+                twd_clk = clk_get_sys("smp_twd", NULL);
+
+        if (IS_ERR(twd_clk)) {
+                pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
+                return;
         }
 
-        err = clk_prepare_enable(clk);
+        err = clk_prepare_enable(twd_clk);
         if (err) {
                 pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
-                clk_put(clk);
-                return ERR_PTR(err);
+                clk_put(twd_clk);
+                return;
         }
 
-        return clk;
+        twd_timer_rate = clk_get_rate(twd_clk);
 }
 
 /*
@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
         }
         per_cpu(percpu_setup_called, cpu) = true;
 
-        /*
-         * This stuff only need to be done once for the entire TWD cluster
-         * during the runtime of the system.
-         */
-        if (!common_setup_called) {
-                twd_clk = twd_get_clock();
-
-                /*
-                 * We use IS_ERR_OR_NULL() here, because if the clock stubs
-                 * are active we will get a valid clk reference which is
-                 * however NULL and will return the rate 0. In that case we
-                 * need to calibrate the rate instead.
-                 */
-                if (!IS_ERR_OR_NULL(twd_clk))
-                        twd_timer_rate = clk_get_rate(twd_clk);
-                else
-                        twd_calibrate_rate();
-
-                common_setup_called = true;
-        }
+        twd_calibrate_rate();
 
         /*
          * The following is done once per CPU the first time .setup() is
@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
         .stop   = twd_timer_stop,
 };
 
-static int __init twd_local_timer_common_register(void)
+static int __init twd_local_timer_common_register(struct device_node *np)
 {
         int err;
 
@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)
         if (err)
                 goto out_irq;
 
+        twd_get_clock(np);
+
         return 0;
 
 out_irq:
@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
         if (!twd_base)
                 return -ENOMEM;
 
-        return twd_local_timer_common_register();
+        return twd_local_timer_common_register(NULL);
 }
 
 #ifdef CONFIG_OF
@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)
                 goto out;
         }
 
-        err = twd_local_timer_common_register();
+        err = twd_local_timer_common_register(np);
 
 out:
         WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
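The clock is now looked up once at registration time, via of_clk_get(np, 0) for DT probes and clk_get_sys("smp_twd", NULL) otherwise. Calling twd_calibrate_rate() unconditionally per CPU is still safe because (in this era's smp_twd.c) its calibration body is guarded by a rate check, so the expensive jiffies-based calibration is skipped whenever the clock already provided twd_timer_rate. Paraphrased:

    /* Paraphrased from twd_calibrate_rate() in this file: the jiffies-based
     * calibration only runs when the clock lookup left the rate unset. */
    if (twd_timer_rate == 0) {
            /* ... busy-wait against jiffies and derive twd_timer_rate ... */
    }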
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 11c1785bf63e..b571484e9f03 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -19,7 +19,11 @@
         ALIGN_FUNCTION();                               \
         VMLINUX_SYMBOL(__idmap_text_start) = .;         \
         *(.idmap.text)                                  \
-        VMLINUX_SYMBOL(__idmap_text_end) = .;
+        VMLINUX_SYMBOL(__idmap_text_end) = .;           \
+        ALIGN_FUNCTION();                               \
+        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;     \
+        *(.hyp.idmap.text)                              \
+        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)