author    Arnd Bergmann <arnd@arndb.de>    2012-01-09 11:16:29 -0500
committer Arnd Bergmann <arnd@arndb.de>    2012-01-09 11:16:29 -0500
commit    dcf7ec5ee62a78123057a1e286c88ca739717409 (patch)
tree      fa3f19434638a942ba66d236dde4d9aaadf8b370 /arch/arm/kernel
parent    15db3e823c3246e3bd31fe454f5c8927eb85caf2 (diff)
parent    142f2101a86ade2d6c9dfbedf82e1b5b31c8fce6 (diff)
Merge branch 'samsung/driver' into next/drivers

Conflicts:
	arch/arm/mach-mxs/include/mach/common.h

Pull in previous samsung conflict merges and do a trivial merge of an
mxs double-add conflict.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile               2
-rw-r--r--  arch/arm/kernel/entry-armv.S           7
-rw-r--r--  arch/arm/kernel/head.S                65
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c        8
-rw-r--r--  arch/arm/kernel/kprobes-test.c        66
-rw-r--r--  arch/arm/kernel/machine_kexec.c       15
-rw-r--r--  arch/arm/kernel/opcodes.c             72
-rw-r--r--  arch/arm/kernel/perf_event.c          19
-rw-r--r--  arch/arm/kernel/perf_event_v6.c       32
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      401
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c   16
-rw-r--r--  arch/arm/kernel/process.c             77
-rw-r--r--  arch/arm/kernel/sched_clock.c        118
-rw-r--r--  arch/arm/kernel/setup.c               15
-rw-r--r--  arch/arm/kernel/sleep.S                4
-rw-r--r--  arch/arm/kernel/smp.c                 36
-rw-r--r--  arch/arm/kernel/smp_twd.c             95
-rw-r--r--  arch/arm/kernel/suspend.c             18
-rw-r--r--  arch/arm/kernel/swp_emulate.c         16
-rw-r--r--  arch/arm/kernel/tcm.c                 22
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S          7
21 files changed, 648 insertions, 463 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 16eed6aebfa4..43b740d0e374 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -13,7 +13,7 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
 
-obj-y		:= elf.o entry-armv.o entry-common.o irq.o \
+obj-y		:= elf.o entry-armv.o entry-common.o irq.o opcodes.o \
 		   process.o ptrace.o return_address.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index b145f16c91bc..3a456c6c7005 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -36,12 +36,11 @@
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	ldr	r1, =handle_arch_irq
 	mov	r0, sp
-	ldr	r1, [r1]
 	adr	lr, BSYM(9997f)
-	teq	r1, #0
-	movne	pc, r1
-#endif
+	ldr	pc, [r1]
+#else
 	arch_irq_handler_default
+#endif
 9997:
 	.endm
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 08c82fd844a8..14e277d2ff91 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -39,8 +39,14 @@
 #error KERNEL_RAM_VADDR must start at 0xXXXX8000
 #endif
 
+#ifdef CONFIG_ARM_LPAE
+	/* LPAE requires an additional page for the PGD */
+#define PG_DIR_SIZE	0x5000
+#define PMD_ORDER	3
+#else
 #define PG_DIR_SIZE	0x4000
 #define PMD_ORDER	2
+#endif
 
 	.globl	swapper_pg_dir
 	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
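
For reference, the LPAE value can be read off the table-building code further down in this patch: one page is reserved for the 4-entry PGD and each PGD entry gets its own PMD table. Illustrative macros only, not part of the patch:

/* Illustrative only: how the LPAE PG_DIR_SIZE value is composed. */
#define LPAE_PGD_TABLE_SIZE	0x1000		/* one page holding 4 x 8-byte PGD entries */
#define LPAE_PMD_TABLES_SIZE	(4 * 0x1000)	/* one PMD table per PGD entry */
/* PG_DIR_SIZE == LPAE_PGD_TABLE_SIZE + LPAE_PMD_TABLES_SIZE == 0x5000 */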
@@ -164,17 +170,36 @@ __create_page_tables:
 	teq	r0, r6
 	bne	1b
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Build the PGD table (first level) to point to the PMD table. A PGD
+	 * entry is 64-bit wide.
+	 */
+	mov	r0, r4
+	add	r3, r4, #0x1000			@ first PMD table address
+	orr	r3, r3, #3			@ PGD block type
+	mov	r6, #4				@ PTRS_PER_PGD
+	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
+1:	str	r3, [r0], #4			@ set bottom PGD entry bits
+	str	r7, [r0], #4			@ set top PGD entry bits
+	add	r3, r3, #0x1000			@ next PMD table
+	subs	r6, r6, #1
+	bne	1b
+
+	add	r4, r4, #0x1000			@ point to the PMD tables
+#endif
+
 	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
 
 	/*
 	 * Create identity mapping to cater for __enable_mmu.
 	 * This identity mapping will be removed by paging_init().
 	 */
-	adr	r0, __enable_mmu_loc
+	adr	r0, __turn_mmu_on_loc
 	ldmia	r0, {r3, r5, r6}
 	sub	r0, r0, r3			@ virt->phys offset
-	add	r5, r5, r0			@ phys __enable_mmu
-	add	r6, r6, r0			@ phys __enable_mmu_end
+	add	r5, r5, r0			@ phys __turn_mmu_on
+	add	r6, r6, r0			@ phys __turn_mmu_on_end
 	mov	r5, r5, lsr #SECTION_SHIFT
 	mov	r6, r6, lsr #SECTION_SHIFT
 
@@ -219,8 +244,8 @@ __create_page_tables:
 #endif
 
 	/*
-	 * Then map boot params address in r2 or
-	 * the first 1MB of ram if boot params address is not specified.
+	 * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
+	 * of ram if boot params address is not specified.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -251,7 +276,15 @@ __create_page_tables:
 	mov	r3, r7, lsr #SECTION_SHIFT
 	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
 	orr	r3, r7, r3, lsl #SECTION_SHIFT
+#ifdef CONFIG_ARM_LPAE
+	mov	r7, #1 << (54 - 32)		@ XN
+#else
+	orr	r3, r3, #PMD_SECT_XN
+#endif
 1:	str	r3, [r0], #4
+#ifdef CONFIG_ARM_LPAE
+	str	r7, [r0], #4
+#endif
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	blo	1b
@@ -283,14 +316,17 @@ __create_page_tables:
 	str	r3, [r0]
 #endif
 #endif
+#ifdef CONFIG_ARM_LPAE
+	sub	r4, r4, #0x1000		@ point to the PGD table
+#endif
 	mov	pc, lr
 ENDPROC(__create_page_tables)
 	.ltorg
 	.align
-__enable_mmu_loc:
+__turn_mmu_on_loc:
 	.long	.
-	.long	__enable_mmu
-	.long	__enable_mmu_end
+	.long	__turn_mmu_on
+	.long	__turn_mmu_on_end
 
 #if defined(CONFIG_SMP)
 	__CPUINIT
@@ -374,12 +410,17 @@ __enable_mmu:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #CR_I
 #endif
+#ifdef CONFIG_ARM_LPAE
+	mov	r5, #0
+	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
+#else
 	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
+#endif
 	b	__turn_mmu_on
 ENDPROC(__enable_mmu)
 
@@ -398,15 +439,19 @@ ENDPROC(__enable_mmu)
  * other registers depend on the function called upon completion
  */
 	.align	5
-__turn_mmu_on:
+	.pushsection	.idmap.text, "ax"
+ENTRY(__turn_mmu_on)
 	mov	r0, r0
+	instr_sync
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
+	instr_sync
 	mov	r3, r3
 	mov	r3, r13
 	mov	pc, r3
-__enable_mmu_end:
+__turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
+	.popsection
 
 
 #ifdef CONFIG_SMP_ON_UP
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 814a52a9dc39..d6a95ef9131d 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1016,10 +1016,10 @@ static int __init arch_hw_breakpoint_init(void)
 	}
 
 	/* Register debug fault handler. */
-	hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
-			"watchpoint debug exception");
-	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
-			"breakpoint debug exception");
+	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+			TRAP_HWBKPT, "watchpoint debug exception");
+	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+			TRAP_HWBKPT, "breakpoint debug exception");
 
 	/* Register hotplug notifier. */
 	register_cpu_notifier(&dbg_reset_nb);
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index e17cdd6d90d8..1862d8f2fd44 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -202,6 +202,8 @@
 #include <linux/slab.h>
 #include <linux/kprobes.h>
 
+#include <asm/opcodes.h>
+
 #include "kprobes.h"
 #include "kprobes-test.h"
 
@@ -1050,65 +1052,9 @@ static int test_instance;
 
 static unsigned long test_check_cc(int cc, unsigned long cpsr)
 {
-	unsigned long temp;
-
-	switch (cc) {
-	case 0x0: /* eq */
-		return cpsr & PSR_Z_BIT;
-
-	case 0x1: /* ne */
-		return (~cpsr) & PSR_Z_BIT;
-
-	case 0x2: /* cs */
-		return cpsr & PSR_C_BIT;
-
-	case 0x3: /* cc */
-		return (~cpsr) & PSR_C_BIT;
-
-	case 0x4: /* mi */
-		return cpsr & PSR_N_BIT;
-
-	case 0x5: /* pl */
-		return (~cpsr) & PSR_N_BIT;
-
-	case 0x6: /* vs */
-		return cpsr & PSR_V_BIT;
-
-	case 0x7: /* vc */
-		return (~cpsr) & PSR_V_BIT;
+	int ret = arm_check_condition(cc << 28, cpsr);
 
-	case 0x8: /* hi */
-		cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
-		return cpsr & PSR_C_BIT;
-
-	case 0x9: /* ls */
-		cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
-		return (~cpsr) & PSR_C_BIT;
-
-	case 0xa: /* ge */
-		cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		return (~cpsr) & PSR_N_BIT;
-
-	case 0xb: /* lt */
-		cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		return cpsr & PSR_N_BIT;
-
-	case 0xc: /* gt */
-		temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
-		return (~temp) & PSR_N_BIT;
-
-	case 0xd: /* le */
-		temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-		temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
-		return temp & PSR_N_BIT;
-
-	case 0xe: /* al */
-	case 0xf: /* unconditional */
-		return true;
-	}
-	BUG();
-	return false;
+	return (ret != ARM_OPCODE_CONDTEST_FAIL);
 }
 
 static int is_last_scenario;
@@ -1128,7 +1074,9 @@ static unsigned long test_context_cpsr(int scenario)
 
 	if (!test_case_is_thumb) {
 		/* Testing ARM code */
-		probe_should_run = test_check_cc(current_instruction >> 28, cpsr) != 0;
+		int cc = current_instruction >> 28;
+
+		probe_should_run = test_check_cc(cc, cpsr) != 0;
 		if (scenario == 15)
 			is_last_scenario = true;
 
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e59bbd496c39..764bd456d84f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -12,12 +12,11 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/system.h>
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
 
-extern void setup_mm_for_reboot(char mode);
-
 extern unsigned long kexec_start_address;
 extern unsigned long kexec_indirection_page;
 extern unsigned long kexec_mach_type;
@@ -111,14 +110,6 @@ void machine_kexec(struct kimage *image)
 
 	if (kexec_reinit)
 		kexec_reinit();
-	local_irq_disable();
-	local_fiq_disable();
-	setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
-	flush_cache_all();
-	outer_flush_all();
-	outer_disable();
-	cpu_proc_fin();
-	outer_inv_all();
-	flush_cache_all();
-	cpu_reset(reboot_code_buffer_phys);
+
+	soft_restart(reboot_code_buffer_phys);
 }
diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c
new file mode 100644
index 000000000000..f8179c6a817f
--- /dev/null
+++ b/arch/arm/kernel/opcodes.c
@@ -0,0 +1,72 @@
+/*
+ *  linux/arch/arm/kernel/opcodes.c
+ *
+ *  A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+	0xF0F0,		/* EQ == Z set            */
+	0x0F0F,		/* NE                     */
+	0xCCCC,		/* CS == C set            */
+	0x3333,		/* CC                     */
+	0xFF00,		/* MI == N set            */
+	0x00FF,		/* PL                     */
+	0xAAAA,		/* VS == V set            */
+	0x5555,		/* VC                     */
+	0x0C0C,		/* HI == C set && Z clear */
+	0xF3F3,		/* LS == C clear || Z set */
+	0xAA55,		/* GE == (N==V)           */
+	0x55AA,		/* LT == (N!=V)           */
+	0x0A05,		/* GT == (!Z && (N==V))   */
+	0xF5FA,		/* LE == (Z || (N!=V))    */
+	0xFFFF,		/* AL always              */
+	0		/* NV                     */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ *                              opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
+{
+	u32 cc_bits  = opcode >> 28;
+	u32 psr_cond = psr >> 28;
+	unsigned int ret;
+
+	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+		if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+			ret = ARM_OPCODE_CONDTEST_PASS;
+		else
+			ret = ARM_OPCODE_CONDTEST_FAIL;
+	} else {
+		ret = ARM_OPCODE_CONDTEST_UNCOND;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
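
A short standalone sketch of how the cc_map lookup behaves, mirroring arm_check_condition() for one case (an EQ-conditional opcode evaluated against a PSR whose top nibble has only the Z flag set); the array here is a local copy made for illustration:

/* Standalone sketch of the cc_map lookup. Example: BEQ with Z set. */
#include <stdio.h>

static const unsigned short cc_map_example[16] = {
	0xF0F0 /* EQ */, 0x0F0F /* NE */, 0xCCCC /* CS */, 0x3333 /* CC */,
	0xFF00 /* MI */, 0x00FF /* PL */, 0xAAAA /* VS */, 0x5555 /* VC */,
	0x0C0C /* HI */, 0xF3F3 /* LS */, 0xAA55 /* GE */, 0x55AA /* LT */,
	0x0A05 /* GT */, 0xF5FA /* LE */, 0xFFFF /* AL */, 0x0000 /* NV */,
};

int main(void)
{
	unsigned int opcode = 0x0a000000;	/* BEQ: condition field 0x0      */
	unsigned int psr    = 0x40000000;	/* NZCV nibble = 0100, i.e. Z set */
	unsigned int cc     = opcode >> 28;	/* 0x0 selects the EQ row        */
	unsigned int nzcv   = psr >> 28;	/* 0x4 selects a bit in that row */

	/* bit 4 of 0xF0F0 is 1, so the EQ condition passes */
	printf("EQ passes: %u\n", (cc_map_example[cc] >> nzcv) & 1);
	return 0;
}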
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 88b0941ce51e..5bb91bf3d47f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -59,8 +59,7 @@ armpmu_get_pmu_id(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
 
-int
-armpmu_get_max_events(void)
+int perf_num_counters(void)
 {
 	int max_events = 0;
 
@@ -69,12 +68,6 @@ armpmu_get_max_events(void)
 
 	return max_events;
 }
-EXPORT_SYMBOL_GPL(armpmu_get_max_events);
-
-int perf_num_counters(void)
-{
-	return armpmu_get_max_events();
-}
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
 #define HW_OP_UNSUPPORTED		0xFFFF
@@ -380,6 +373,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat =
+		dev_get_platdata(&pmu_device->dev);
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
@@ -387,8 +382,11 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
 			continue;
 		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
+		if (irq >= 0) {
+			if (plat && plat->disable_irq)
+				plat->disable_irq(irq);
 			free_irq(irq, armpmu);
+		}
 	}
 
 	release_pmu(armpmu->type);
@@ -448,7 +446,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 				irq);
 			armpmu_release_hardware(armpmu);
 			return err;
-		}
+		} else if (plat && plat->enable_irq)
+			plat->enable_irq(irq);
 
 		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}
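
The reserve/release paths above call optional enable_irq/disable_irq hooks when a platform supplies them. A hedged sketch of board code providing the hooks, assuming the arm_pmu_platdata definition in <asm/pmu.h>; the board-specific names are illustrative, not from this patch:

/* Hypothetical board support sketch: the hooks are optional and are only
 * invoked around request_irq()/free_irq() of the PMU interrupt. */
#include <asm/pmu.h>

static void my_board_pmu_enable_irq(int irq)
{
	/* e.g. route or unmask the PMU interrupt in a board-level mux */
}

static void my_board_pmu_disable_irq(int irq)
{
	/* undo whatever my_board_pmu_enable_irq() did */
}

static struct arm_pmu_platdata my_board_pmu_platdata = {
	.enable_irq	= my_board_pmu_enable_irq,
	.disable_irq	= my_board_pmu_disable_irq,
};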
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index e63d8115c01b..533be9930ec2 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -65,13 +65,15 @@ enum armv6_counters {
  * accesses/misses in hardware.
  */
 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
 	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6_PERFCTR_IBUF_STALL,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -218,13 +220,15 @@ enum armv6mpcore_perf_types {
  * accesses/misses in hardware.
  */
 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6MPCORE_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6MPCORE_PERFCTR_INSTR_EXEC,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6MPCORE_PERFCTR_BR_EXEC,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
 	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6MPCORE_PERFCTR_IBUF_STALL,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1ef6d0034b85..460bbbb6b885 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -28,165 +28,87 @@ static struct arm_pmu armv7pmu;
28 * they are not available. 28 * they are not available.
29 */ 29 */
30enum armv7_perf_types { 30enum armv7_perf_types {
31 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, 31 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
32 ARMV7_PERFCTR_IFETCH_MISS = 0x01, 32 ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
33 ARMV7_PERFCTR_ITLB_MISS = 0x02, 33 ARMV7_PERFCTR_ITLB_REFILL = 0x02,
34 ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */ 34 ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
35 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */ 35 ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
36 ARMV7_PERFCTR_DTLB_REFILL = 0x05, 36 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
37 ARMV7_PERFCTR_DREAD = 0x06, 37 ARMV7_PERFCTR_MEM_READ = 0x06,
38 ARMV7_PERFCTR_DWRITE = 0x07, 38 ARMV7_PERFCTR_MEM_WRITE = 0x07,
39 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, 39 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
40 ARMV7_PERFCTR_EXC_TAKEN = 0x09, 40 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
41 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, 41 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
42 ARMV7_PERFCTR_CID_WRITE = 0x0B, 42 ARMV7_PERFCTR_CID_WRITE = 0x0B,
43 /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. 43
44 /*
45 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
44 * It counts: 46 * It counts:
45 * - all branch instructions, 47 * - all (taken) branch instructions,
46 * - instructions that explicitly write the PC, 48 * - instructions that explicitly write the PC,
47 * - exception generating instructions. 49 * - exception generating instructions.
48 */ 50 */
49 ARMV7_PERFCTR_PC_WRITE = 0x0C, 51 ARMV7_PERFCTR_PC_WRITE = 0x0C,
50 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, 52 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
51 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, 53 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
52 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, 54 ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
55 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
56 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
57 ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
53 58
54 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */ 59 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
55 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, 60 ARMV7_PERFCTR_MEM_ACCESS = 0x13,
56 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, 61 ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
57 ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12, 62 ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
58 ARMV7_PERFCTR_MEM_ACCESS = 0x13, 63 ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
59 ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14, 64 ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
60 ARMV7_PERFCTR_L1_DCACHE_WB = 0x15, 65 ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
61 ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16, 66 ARMV7_PERFCTR_BUS_ACCESS = 0x19,
62 ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17, 67 ARMV7_PERFCTR_MEM_ERROR = 0x1A,
63 ARMV7_PERFCTR_L2_DCACHE_WB = 0x18, 68 ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
64 ARMV7_PERFCTR_BUS_ACCESS = 0x19, 69 ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
65 ARMV7_PERFCTR_MEMORY_ERROR = 0x1A, 70 ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
66 ARMV7_PERFCTR_INSTR_SPEC = 0x1B, 71
67 ARMV7_PERFCTR_TTBR_WRITE = 0x1C, 72 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
68 ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
69
70 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
71}; 73};
72 74
73/* ARMv7 Cortex-A8 specific event types */ 75/* ARMv7 Cortex-A8 specific event types */
74enum armv7_a8_perf_types { 76enum armv7_a8_perf_types {
75 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, 77 ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
76 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, 78 ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
77 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, 79 ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
78 ARMV7_PERFCTR_L2_ACCESS = 0x43, 80 ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
79 ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
80 ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
81 ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
82 ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
83 ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
84 ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
85 ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
86 ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
87 ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
88 ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
89 ARMV7_PERFCTR_L2_NEON = 0x4E,
90 ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
91 ARMV7_PERFCTR_L1_INST = 0x50,
92 ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
93 ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
94 ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
95 ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
96 ARMV7_PERFCTR_OP_EXECUTED = 0x55,
97 ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
98 ARMV7_PERFCTR_CYCLES_INST = 0x57,
99 ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
100 ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
101 ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
102
103 ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
104 ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
105 ARMV7_PERFCTR_PMU_EVENTS = 0x72,
106}; 81};
107 82
108/* ARMv7 Cortex-A9 specific event types */ 83/* ARMv7 Cortex-A9 specific event types */
109enum armv7_a9_perf_types { 84enum armv7_a9_perf_types {
110 ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, 85 ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
111 ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, 86 ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
112 ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, 87 ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
113
114 ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
115 ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
116
117 ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
118 ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
119 ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
120 ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
121 ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
122 ARMV7_PERFCTR_DATA_EVICTION = 0x65,
123 ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
124 ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
125 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
126
127 ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
128
129 ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
130 ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
131 ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
132 ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
133 ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
134
135 ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
136 ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
137 ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
138 ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
139 ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
140 ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
141 ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
142
143 ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
144 ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
145
146 ARMV7_PERFCTR_ISB_INST = 0x90,
147 ARMV7_PERFCTR_DSB_INST = 0x91,
148 ARMV7_PERFCTR_DMB_INST = 0x92,
149 ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
150
151 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
152 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
153 ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
154 ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
155 ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
156 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
157}; 88};
158 89
159/* ARMv7 Cortex-A5 specific event types */ 90/* ARMv7 Cortex-A5 specific event types */
160enum armv7_a5_perf_types { 91enum armv7_a5_perf_types {
161 ARMV7_PERFCTR_IRQ_TAKEN = 0x86, 92 ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
162 ARMV7_PERFCTR_FIQ_TAKEN = 0x87, 93 ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
163
164 ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
165 ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
166 ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
167 ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
168 ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
169 ARMV7_PERFCTR_READ_ALLOC = 0xc5,
170
171 ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
172}; 94};
173 95
174/* ARMv7 Cortex-A15 specific event types */ 96/* ARMv7 Cortex-A15 specific event types */
175enum armv7_a15_perf_types { 97enum armv7_a15_perf_types {
176 ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40, 98 ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
177 ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41, 99 ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
178 ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42, 100 ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
179 ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43, 101 ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
180 102
181 ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C, 103 ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
182 ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D, 104 ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
183 105
184 ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50, 106 ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
185 ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51, 107 ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
186 ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52, 108 ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
187 ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53, 109 ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
188 110
189 ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76, 111 ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
190}; 112};
191 113
192/* 114/*
@@ -197,13 +119,15 @@ enum armv7_a15_perf_types {
197 * accesses/misses in hardware. 119 * accesses/misses in hardware.
198 */ 120 */
199static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { 121static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
200 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 122 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
201 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 123 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
202 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 124 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
203 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 125 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
204 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 126 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
205 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 127 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
206 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, 128 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
129 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
130 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
207}; 131};
208 132
209static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 133static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -217,12 +141,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
217 * combined. 141 * combined.
218 */ 142 */
219 [C(OP_READ)] = { 143 [C(OP_READ)] = {
220 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 144 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
221 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 145 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
222 }, 146 },
223 [C(OP_WRITE)] = { 147 [C(OP_WRITE)] = {
224 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 148 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
225 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 149 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
226 }, 150 },
227 [C(OP_PREFETCH)] = { 151 [C(OP_PREFETCH)] = {
228 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 152 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -231,12 +155,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
231 }, 155 },
232 [C(L1I)] = { 156 [C(L1I)] = {
233 [C(OP_READ)] = { 157 [C(OP_READ)] = {
234 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, 158 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
235 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, 159 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
236 }, 160 },
237 [C(OP_WRITE)] = { 161 [C(OP_WRITE)] = {
238 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, 162 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
239 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, 163 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
240 }, 164 },
241 [C(OP_PREFETCH)] = { 165 [C(OP_PREFETCH)] = {
242 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 166 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -245,12 +169,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
245 }, 169 },
246 [C(LL)] = { 170 [C(LL)] = {
247 [C(OP_READ)] = { 171 [C(OP_READ)] = {
248 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, 172 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
249 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, 173 [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
250 }, 174 },
251 [C(OP_WRITE)] = { 175 [C(OP_WRITE)] = {
252 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, 176 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
253 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, 177 [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
254 }, 178 },
255 [C(OP_PREFETCH)] = { 179 [C(OP_PREFETCH)] = {
256 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 180 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -274,11 +198,11 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
274 [C(ITLB)] = { 198 [C(ITLB)] = {
275 [C(OP_READ)] = { 199 [C(OP_READ)] = {
276 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 200 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
277 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 201 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
278 }, 202 },
279 [C(OP_WRITE)] = { 203 [C(OP_WRITE)] = {
280 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 204 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
281 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 205 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
282 }, 206 },
283 [C(OP_PREFETCH)] = { 207 [C(OP_PREFETCH)] = {
284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 208 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -287,14 +211,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
287 }, 211 },
288 [C(BPU)] = { 212 [C(BPU)] = {
289 [C(OP_READ)] = { 213 [C(OP_READ)] = {
290 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 214 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
291 [C(RESULT_MISS)] 215 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
292 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
293 }, 216 },
294 [C(OP_WRITE)] = { 217 [C(OP_WRITE)] = {
295 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 218 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
296 [C(RESULT_MISS)] 219 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
297 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
298 }, 220 },
299 [C(OP_PREFETCH)] = { 221 [C(OP_PREFETCH)] = {
300 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 222 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -321,14 +243,15 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
321 * Cortex-A9 HW events mapping 243 * Cortex-A9 HW events mapping
322 */ 244 */
323static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { 245static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
324 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 246 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
325 [PERF_COUNT_HW_INSTRUCTIONS] = 247 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
326 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, 248 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
327 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS, 249 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
328 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL, 250 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
329 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 251 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
330 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 252 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
331 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, 253 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
254 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
332}; 255};
333 256
334static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 257static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -342,12 +265,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
342 * combined. 265 * combined.
343 */ 266 */
344 [C(OP_READ)] = { 267 [C(OP_READ)] = {
345 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 268 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
346 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 269 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
347 }, 270 },
348 [C(OP_WRITE)] = { 271 [C(OP_WRITE)] = {
349 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, 272 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
350 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, 273 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
351 }, 274 },
352 [C(OP_PREFETCH)] = { 275 [C(OP_PREFETCH)] = {
353 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 276 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -357,11 +280,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
357 [C(L1I)] = { 280 [C(L1I)] = {
358 [C(OP_READ)] = { 281 [C(OP_READ)] = {
359 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 282 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
360 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 283 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
361 }, 284 },
362 [C(OP_WRITE)] = { 285 [C(OP_WRITE)] = {
363 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 286 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
364 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 287 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
365 }, 288 },
366 [C(OP_PREFETCH)] = { 289 [C(OP_PREFETCH)] = {
367 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 290 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -399,11 +322,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
399 [C(ITLB)] = { 322 [C(ITLB)] = {
400 [C(OP_READ)] = { 323 [C(OP_READ)] = {
401 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 324 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
402 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 325 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
403 }, 326 },
404 [C(OP_WRITE)] = { 327 [C(OP_WRITE)] = {
405 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 328 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
406 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 329 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
407 }, 330 },
408 [C(OP_PREFETCH)] = { 331 [C(OP_PREFETCH)] = {
409 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 332 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -412,14 +335,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
412 }, 335 },
413 [C(BPU)] = { 336 [C(BPU)] = {
414 [C(OP_READ)] = { 337 [C(OP_READ)] = {
415 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 338 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
416 [C(RESULT_MISS)] 339 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
417 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
418 }, 340 },
419 [C(OP_WRITE)] = { 341 [C(OP_WRITE)] = {
420 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, 342 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
421 [C(RESULT_MISS)] 343 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
422 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
423 }, 344 },
424 [C(OP_PREFETCH)] = { 345 [C(OP_PREFETCH)] = {
425 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 346 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -446,13 +367,15 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
446 * Cortex-A5 HW events mapping 367 * Cortex-A5 HW events mapping
447 */ 368 */
448static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { 369static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
449 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 370 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
450 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 371 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
451 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 372 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
452 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 373 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
453 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 374 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
454 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 375 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
455 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, 376 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
377 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
378 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
456}; 379};
457 380
458static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 381static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -460,42 +383,34 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
460 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 383 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
461 [C(L1D)] = { 384 [C(L1D)] = {
462 [C(OP_READ)] = { 385 [C(OP_READ)] = {
463 [C(RESULT_ACCESS)] 386 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
464 = ARMV7_PERFCTR_DCACHE_ACCESS, 387 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
465 [C(RESULT_MISS)]
466 = ARMV7_PERFCTR_DCACHE_REFILL,
467 }, 388 },
468 [C(OP_WRITE)] = { 389 [C(OP_WRITE)] = {
469 [C(RESULT_ACCESS)] 390 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
470 = ARMV7_PERFCTR_DCACHE_ACCESS, 391 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
471 [C(RESULT_MISS)]
472 = ARMV7_PERFCTR_DCACHE_REFILL,
473 }, 392 },
474 [C(OP_PREFETCH)] = { 393 [C(OP_PREFETCH)] = {
475 [C(RESULT_ACCESS)] 394 [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
476 = ARMV7_PERFCTR_PREFETCH_LINEFILL, 395 [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
477 [C(RESULT_MISS)]
478 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
479 }, 396 },
480 }, 397 },
481 [C(L1I)] = { 398 [C(L1I)] = {
482 [C(OP_READ)] = { 399 [C(OP_READ)] = {
483 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 400 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
484 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 401 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
485 }, 402 },
486 [C(OP_WRITE)] = { 403 [C(OP_WRITE)] = {
487 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 404 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
488 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 405 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
489 }, 406 },
490 /* 407 /*
491 * The prefetch counters don't differentiate between the I 408 * The prefetch counters don't differentiate between the I
492 * side and the D side. 409 * side and the D side.
493 */ 410 */
494 [C(OP_PREFETCH)] = { 411 [C(OP_PREFETCH)] = {
495 [C(RESULT_ACCESS)] 412 [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
496 = ARMV7_PERFCTR_PREFETCH_LINEFILL, 413 [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
497 [C(RESULT_MISS)]
498 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
499 }, 414 },
500 }, 415 },
501 [C(LL)] = { 416 [C(LL)] = {
@@ -529,11 +444,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
529 [C(ITLB)] = { 444 [C(ITLB)] = {
530 [C(OP_READ)] = { 445 [C(OP_READ)] = {
531 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 446 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
532 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 447 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
533 }, 448 },
534 [C(OP_WRITE)] = { 449 [C(OP_WRITE)] = {
535 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 450 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
536 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 451 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
537 }, 452 },
538 [C(OP_PREFETCH)] = { 453 [C(OP_PREFETCH)] = {
539 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 454 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -543,13 +458,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
543 [C(BPU)] = { 458 [C(BPU)] = {
544 [C(OP_READ)] = { 459 [C(OP_READ)] = {
545 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 460 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
546 [C(RESULT_MISS)] 461 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
547 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
548 }, 462 },
549 [C(OP_WRITE)] = { 463 [C(OP_WRITE)] = {
550 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 464 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
551 [C(RESULT_MISS)] 465 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
552 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
553 }, 466 },
554 [C(OP_PREFETCH)] = { 467 [C(OP_PREFETCH)] = {
555 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 468 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -562,13 +475,15 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
562 * Cortex-A15 HW events mapping 475 * Cortex-A15 HW events mapping
563 */ 476 */
564static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { 477static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
565 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 478 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
566 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 479 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
567 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, 480 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
568 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, 481 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
569 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE, 482 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
570 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 483 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
571 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, 484 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
485 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
486 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
572}; 487};
573 488
574static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 489static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -576,16 +491,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
576 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 491 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
577 [C(L1D)] = { 492 [C(L1D)] = {
578 [C(OP_READ)] = { 493 [C(OP_READ)] = {
579 [C(RESULT_ACCESS)] 494 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
580 = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS, 495 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
581 [C(RESULT_MISS)]
582 = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
583 }, 496 },
584 [C(OP_WRITE)] = { 497 [C(OP_WRITE)] = {
585 [C(RESULT_ACCESS)] 498 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
586 = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS, 499 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
587 [C(RESULT_MISS)]
588 = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
589 }, 500 },
590 [C(OP_PREFETCH)] = { 501 [C(OP_PREFETCH)] = {
591 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 502 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -601,11 +512,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
601 */ 512 */
602 [C(OP_READ)] = { 513 [C(OP_READ)] = {
603 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 514 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
604 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 515 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
605 }, 516 },
606 [C(OP_WRITE)] = { 517 [C(OP_WRITE)] = {
607 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 518 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
608 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, 519 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
609 }, 520 },
610 [C(OP_PREFETCH)] = { 521 [C(OP_PREFETCH)] = {
611 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 522 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -614,16 +525,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
614 }, 525 },
615 [C(LL)] = { 526 [C(LL)] = {
616 [C(OP_READ)] = { 527 [C(OP_READ)] = {
617 [C(RESULT_ACCESS)] 528 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
618 = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS, 529 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
619 [C(RESULT_MISS)]
620 = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
621 }, 530 },
622 [C(OP_WRITE)] = { 531 [C(OP_WRITE)] = {
623 [C(RESULT_ACCESS)] 532 [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
624 = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS, 533 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
625 [C(RESULT_MISS)]
626 = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
627 }, 534 },
628 [C(OP_PREFETCH)] = { 535 [C(OP_PREFETCH)] = {
629 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 536 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -633,13 +540,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
633 [C(DTLB)] = { 540 [C(DTLB)] = {
634 [C(OP_READ)] = { 541 [C(OP_READ)] = {
635 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 542 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
636 [C(RESULT_MISS)] 543 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
637 = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
638 }, 544 },
639 [C(OP_WRITE)] = { 545 [C(OP_WRITE)] = {
640 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 546 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
641 [C(RESULT_MISS)] 547 [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
642 = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
643 }, 548 },
644 [C(OP_PREFETCH)] = { 549 [C(OP_PREFETCH)] = {
645 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 550 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -649,11 +554,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
649 [C(ITLB)] = { 554 [C(ITLB)] = {
650 [C(OP_READ)] = { 555 [C(OP_READ)] = {
651 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 556 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
652 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 557 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
653 }, 558 },
654 [C(OP_WRITE)] = { 559 [C(OP_WRITE)] = {
655 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 560 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
656 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, 561 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
657 }, 562 },
658 [C(OP_PREFETCH)] = { 563 [C(OP_PREFETCH)] = {
659 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 564 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -663,13 +568,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
663 [C(BPU)] = { 568 [C(BPU)] = {
664 [C(OP_READ)] = { 569 [C(OP_READ)] = {
665 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 570 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
666 [C(RESULT_MISS)] 571 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
667 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
668 }, 572 },
669 [C(OP_WRITE)] = { 573 [C(OP_WRITE)] = {
670 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, 574 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
671 [C(RESULT_MISS)] 575 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
672 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
673 }, 576 },
674 [C(OP_PREFETCH)] = { 577 [C(OP_PREFETCH)] = {
675 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 578 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e0cca10a8411..3b99d8269829 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -48,13 +48,15 @@ enum xscale_counters {
 };
 
 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
 	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
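
With the new map entries, the generic stalled-cycles events become requestable on these PMUs through the usual perf syscall. A minimal userspace sketch, not part of the patch, showing how the newly mapped frontend-stall event is requested; the workload marker is a placeholder:

/* Minimal perf_event_open() sketch for PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
 * counting for the calling thread only. Error handling trimmed. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under test ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("frontend stall cycles: %lld\n", count);
	close(fd);
	return 0;
}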
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3d0c6fb74ae4..b29776aa6586 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,7 +57,7 @@ static const char *isa_modes[] = {
 	"ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-extern void setup_mm_for_reboot(char mode);
+extern void setup_mm_for_reboot(void);
 
 static volatile int hlt_counter;
 
@@ -92,18 +92,24 @@ static int __init hlt_setup(char *__unused)
 __setup("nohlt", nohlt_setup);
 __setup("hlt", hlt_setup);
 
-void arm_machine_restart(char mode, const char *cmd)
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+typedef void (*phys_reset_t)(unsigned long);
+
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
+
+static void __soft_restart(void *addr)
 {
-	/* Disable interrupts first */
-	local_irq_disable();
-	local_fiq_disable();
+	phys_reset_t phys_reset;
 
-	/*
-	 * Tell the mm system that we are going to reboot -
-	 * we may need it to insert some 1:1 mappings so that
-	 * soft boot works.
-	 */
-	setup_mm_for_reboot(mode);
+	/* Take out a flat memory mapping. */
+	setup_mm_for_reboot();
 
 	/* Clean and invalidate caches */
 	flush_cache_all();
@@ -114,18 +120,35 @@ void arm_machine_restart(char mode, const char *cmd)
 	/* Push out any further dirty data, and ensure cache is empty */
 	flush_cache_all();
 
-	/*
-	 * Now call the architecture specific reboot code.
-	 */
-	arch_reset(mode, cmd);
+	/* Switch to the identity mapping. */
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset((unsigned long)addr);
 
-	/*
-	 * Whoops - the architecture was unable to reboot.
-	 * Tell the user!
-	 */
-	mdelay(1000);
-	printk("Reboot failed -- System halted\n");
-	while (1);
+	/* Should never get here. */
+	BUG();
+}
+
+void soft_restart(unsigned long addr)
+{
+	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+	/* Disable interrupts first */
+	local_irq_disable();
+	local_fiq_disable();
+
+	/* Disable the L2 if we're the last man standing. */
+	if (num_online_cpus() == 1)
+		outer_disable();
+
+	/* Change to the new stack and continue with the reset. */
+	call_with_stack(__soft_restart, (void *)addr, (void *)stack);
+
+	/* Should never get here. */
+	BUG();
+}
+
+static void null_restart(char mode, const char *cmd)
+{
 }
 
 /*
@@ -134,7 +157,7 @@ void arm_machine_restart(char mode, const char *cmd)
134void (*pm_power_off)(void); 157void (*pm_power_off)(void);
135EXPORT_SYMBOL(pm_power_off); 158EXPORT_SYMBOL(pm_power_off);
136 159
137void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart; 160void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
138EXPORT_SYMBOL_GPL(arm_pm_restart); 161EXPORT_SYMBOL_GPL(arm_pm_restart);
139 162
140static void do_nothing(void *unused) 163static void do_nothing(void *unused)
@@ -253,7 +276,15 @@ void machine_power_off(void)
253void machine_restart(char *cmd) 276void machine_restart(char *cmd)
254{ 277{
255 machine_shutdown(); 278 machine_shutdown();
279
256 arm_pm_restart(reboot_mode, cmd); 280 arm_pm_restart(reboot_mode, cmd);
281
 282 /* Give the restart a 1s grace period to take effect */
283 mdelay(1000);
284
285 /* Whoops - the platform was unable to reboot. Tell the user! */
286 printk("Reboot failed -- System halted\n");
287 while (1);
257} 288}
258 289
259void __show_regs(struct pt_regs *regs) 290void __show_regs(struct pt_regs *regs)
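With arm_machine_restart() gone, a platform supplies its own handler (assigned to arm_pm_restart directly, or via the machine_desc hook wired up in the setup.c hunk below) and calls soft_restart() when it wants the generic cache-flush/identity-map path. A minimal, hypothetical handler sketch; the watchdog pointer and the choice of physical address 0 are illustrative only:

#include <linux/io.h>

/* soft_restart() is the helper introduced in the hunk above; declared
 * here so the sketch is self-contained. */
extern void soft_restart(unsigned long addr);

static void __iomem *myboard_wdt_reset;	/* hypothetical, ioremap()ed elsewhere */

static void myboard_restart(char mode, const char *cmd)
{
	if (mode == 's')
		soft_restart(0);		/* take the generic soft-reset path */
	else
		writel(1, myboard_wdt_reset);	/* hypothetical watchdog kick */
}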
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 9a46370fe9da..5416c7c12528 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -14,61 +14,153 @@
14 14
15#include <asm/sched_clock.h> 15#include <asm/sched_clock.h>
16 16
17struct clock_data {
18 u64 epoch_ns;
19 u32 epoch_cyc;
20 u32 epoch_cyc_copy;
21 u32 mult;
22 u32 shift;
23};
24
17static void sched_clock_poll(unsigned long wrap_ticks); 25static void sched_clock_poll(unsigned long wrap_ticks);
18static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0); 26static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
19static void (*sched_clock_update_fn)(void); 27
28static struct clock_data cd = {
29 .mult = NSEC_PER_SEC / HZ,
30};
31
32static u32 __read_mostly sched_clock_mask = 0xffffffff;
33
34static u32 notrace jiffy_sched_clock_read(void)
35{
36 return (u32)(jiffies - INITIAL_JIFFIES);
37}
38
39static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
40
41static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
42{
43 return (cyc * mult) >> shift;
44}
45
46static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
47{
48 u64 epoch_ns;
49 u32 epoch_cyc;
50
51 /*
52 * Load the epoch_cyc and epoch_ns atomically. We do this by
53 * ensuring that we always write epoch_cyc, epoch_ns and
54 * epoch_cyc_copy in strict order, and read them in strict order.
55 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
56 * the middle of an update, and we should repeat the load.
57 */
58 do {
59 epoch_cyc = cd.epoch_cyc;
60 smp_rmb();
61 epoch_ns = cd.epoch_ns;
62 smp_rmb();
63 } while (epoch_cyc != cd.epoch_cyc_copy);
64
65 return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
66}
67
68/*
69 * Atomically update the sched_clock epoch.
70 */
71static void notrace update_sched_clock(void)
72{
73 unsigned long flags;
74 u32 cyc;
75 u64 ns;
76
77 cyc = read_sched_clock();
78 ns = cd.epoch_ns +
79 cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
80 cd.mult, cd.shift);
81 /*
82 * Write epoch_cyc and epoch_ns in a way that the update is
 83 * detectable in cyc_to_sched_clock().
84 */
85 raw_local_irq_save(flags);
86 cd.epoch_cyc = cyc;
87 smp_wmb();
88 cd.epoch_ns = ns;
89 smp_wmb();
90 cd.epoch_cyc_copy = cyc;
91 raw_local_irq_restore(flags);
92}
20 93
21static void sched_clock_poll(unsigned long wrap_ticks) 94static void sched_clock_poll(unsigned long wrap_ticks)
22{ 95{
23 mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks)); 96 mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
24 sched_clock_update_fn(); 97 update_sched_clock();
25} 98}
26 99
27void __init init_sched_clock(struct clock_data *cd, void (*update)(void), 100void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
28 unsigned int clock_bits, unsigned long rate)
29{ 101{
30 unsigned long r, w; 102 unsigned long r, w;
31 u64 res, wrap; 103 u64 res, wrap;
32 char r_unit; 104 char r_unit;
33 105
34 sched_clock_update_fn = update; 106 BUG_ON(bits > 32);
107 WARN_ON(!irqs_disabled());
108 WARN_ON(read_sched_clock != jiffy_sched_clock_read);
109 read_sched_clock = read;
110 sched_clock_mask = (1 << bits) - 1;
35 111
36 /* calculate the mult/shift to convert counter ticks to ns. */ 112 /* calculate the mult/shift to convert counter ticks to ns. */
37 clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0); 113 clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
38 114
39 r = rate; 115 r = rate;
40 if (r >= 4000000) { 116 if (r >= 4000000) {
41 r /= 1000000; 117 r /= 1000000;
42 r_unit = 'M'; 118 r_unit = 'M';
43 } else { 119 } else if (r >= 1000) {
44 r /= 1000; 120 r /= 1000;
45 r_unit = 'k'; 121 r_unit = 'k';
46 } 122 } else
123 r_unit = ' ';
47 124
48 /* calculate how many ns until we wrap */ 125 /* calculate how many ns until we wrap */
49 wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift); 126 wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
50 do_div(wrap, NSEC_PER_MSEC); 127 do_div(wrap, NSEC_PER_MSEC);
51 w = wrap; 128 w = wrap;
52 129
53 /* calculate the ns resolution of this counter */ 130 /* calculate the ns resolution of this counter */
54 res = cyc_to_ns(1ULL, cd->mult, cd->shift); 131 res = cyc_to_ns(1ULL, cd.mult, cd.shift);
55 pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n", 132 pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
56 clock_bits, r, r_unit, res, w); 133 bits, r, r_unit, res, w);
57 134
58 /* 135 /*
59 * Start the timer to keep sched_clock() properly updated and 136 * Start the timer to keep sched_clock() properly updated and
60 * sets the initial epoch. 137 * sets the initial epoch.
61 */ 138 */
62 sched_clock_timer.data = msecs_to_jiffies(w - (w / 10)); 139 sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
63 update(); 140 update_sched_clock();
64 141
65 /* 142 /*
66 * Ensure that sched_clock() starts off at 0ns 143 * Ensure that sched_clock() starts off at 0ns
67 */ 144 */
68 cd->epoch_ns = 0; 145 cd.epoch_ns = 0;
146
147 pr_debug("Registered %pF as sched_clock source\n", read);
148}
149
150unsigned long long notrace sched_clock(void)
151{
152 u32 cyc = read_sched_clock();
153 return cyc_to_sched_clock(cyc, sched_clock_mask);
69} 154}
70 155
71void __init sched_clock_postinit(void) 156void __init sched_clock_postinit(void)
72{ 157{
158 /*
 159 * If no sched_clock function has been provided by this point,
 160 * make it the final one.
161 */
162 if (read_sched_clock == jiffy_sched_clock_read)
163 setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
164
73 sched_clock_poll(sched_clock_timer.data); 165 sched_clock_poll(sched_clock_timer.data);
74} 166}
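The new sched_clock keeps its epoch as a (cyc, ns) pair: the writer stores epoch_cyc, then epoch_ns, then epoch_cyc_copy with write barriers in between, and readers retry until epoch_cyc matches epoch_cyc_copy. A standalone sketch of that retry protocol in plain C; the names and the compiler-builtin fences are illustrative stand-ins for the kernel's struct clock_data and smp_wmb()/smp_rmb():

#include <stdint.h>

struct epoch {
	uint64_t ns;
	uint32_t cyc;
	uint32_t cyc_copy;
};

static struct epoch ep;

static void epoch_write(uint32_t cyc, uint64_t ns)
{
	ep.cyc = cyc;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* cyc before ns */
	ep.ns = ns;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* ns before cyc_copy */
	ep.cyc_copy = cyc;
}

static uint64_t epoch_read(uint32_t now_cyc, uint32_t mask,
			   uint32_t mult, uint32_t shift)
{
	uint64_t ns;
	uint32_t cyc;

	do {						/* retry while an update is in flight */
		cyc = ep.cyc;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		ns = ep.ns;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (cyc != ep.cyc_copy);

	return ns + (((uint64_t)((now_cyc - cyc) & mask) * mult) >> shift);
}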
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 8fc2c8fcbdc6..095d6611c84e 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,6 +31,7 @@
31#include <linux/memblock.h> 31#include <linux/memblock.h>
32#include <linux/bug.h> 32#include <linux/bug.h>
33#include <linux/compiler.h> 33#include <linux/compiler.h>
34#include <linux/sort.h>
34 35
35#include <asm/unified.h> 36#include <asm/unified.h>
36#include <asm/cpu.h> 37#include <asm/cpu.h>
@@ -890,6 +891,12 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
890 return mdesc; 891 return mdesc;
891} 892}
892 893
894static int __init meminfo_cmp(const void *_a, const void *_b)
895{
896 const struct membank *a = _a, *b = _b;
897 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
898 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
899}
893 900
894void __init setup_arch(char **cmdline_p) 901void __init setup_arch(char **cmdline_p)
895{ 902{
@@ -908,8 +915,8 @@ void __init setup_arch(char **cmdline_p)
908 arm_dma_zone_size = mdesc->dma_zone_size; 915 arm_dma_zone_size = mdesc->dma_zone_size;
909 } 916 }
910#endif 917#endif
911 if (mdesc->soft_reboot) 918 if (mdesc->restart_mode)
912 reboot_setup("s"); 919 reboot_setup(&mdesc->restart_mode);
913 920
914 init_mm.start_code = (unsigned long) _text; 921 init_mm.start_code = (unsigned long) _text;
915 init_mm.end_code = (unsigned long) _etext; 922 init_mm.end_code = (unsigned long) _etext;
@@ -922,12 +929,16 @@ void __init setup_arch(char **cmdline_p)
922 929
923 parse_early_param(); 930 parse_early_param();
924 931
932 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
925 sanity_check_meminfo(); 933 sanity_check_meminfo();
926 arm_memblock_init(&meminfo, mdesc); 934 arm_memblock_init(&meminfo, mdesc);
927 935
928 paging_init(mdesc); 936 paging_init(mdesc);
929 request_standard_resources(mdesc); 937 request_standard_resources(mdesc);
930 938
939 if (mdesc->restart)
940 arm_pm_restart = mdesc->restart;
941
931 unflatten_device_tree(); 942 unflatten_device_tree();
932 943
933#ifdef CONFIG_SMP 944#ifdef CONFIG_SMP
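setup_arch() now consumes two new machine_desc fields: restart_mode is fed to reboot_setup(), and restart, when present, replaces null_restart as arm_pm_restart. A sketch of the board-file side, reusing the hypothetical myboard_restart() handler from the process.c note above; the board name and the elided fields are placeholders:

#include <asm/mach/arch.h>

MACHINE_START(MYBOARD, "My Board")
	/* ... .map_io, .init_irq, .timer, .init_machine as before ... */
	.restart_mode	= 'h',			/* default to a hard reset */
	.restart	= myboard_restart,	/* handler sketched earlier */
MACHINE_END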
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 020e99c845e7..1f268bda4552 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -54,14 +54,18 @@ ENDPROC(cpu_suspend_abort)
54 * r0 = control register value 54 * r0 = control register value
55 */ 55 */
56 .align 5 56 .align 5
57 .pushsection .idmap.text,"ax"
57ENTRY(cpu_resume_mmu) 58ENTRY(cpu_resume_mmu)
58 ldr r3, =cpu_resume_after_mmu 59 ldr r3, =cpu_resume_after_mmu
60 instr_sync
59 mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc 61 mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc
60 mrc p15, 0, r0, c0, c0, 0 @ read id reg 62 mrc p15, 0, r0, c0, c0, 0 @ read id reg
63 instr_sync
61 mov r0, r0 64 mov r0, r0
62 mov r0, r0 65 mov r0, r0
63 mov pc, r3 @ jump to virtual address 66 mov pc, r3 @ jump to virtual address
64ENDPROC(cpu_resume_mmu) 67ENDPROC(cpu_resume_mmu)
68 .popsection
65cpu_resume_after_mmu: 69cpu_resume_after_mmu:
66 bl cpu_init @ restore the und/abt/irq banked regs 70 bl cpu_init @ restore the und/abt/irq banked regs
67 mov r0, #0 @ return zero on success 71 mov r0, #0 @ return zero on success
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ef5640b9e218..57db122a4f62 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
31#include <asm/cpu.h> 31#include <asm/cpu.h>
32#include <asm/cputype.h> 32#include <asm/cputype.h>
33#include <asm/exception.h> 33#include <asm/exception.h>
34#include <asm/idmap.h>
34#include <asm/topology.h> 35#include <asm/topology.h>
35#include <asm/mmu_context.h> 36#include <asm/mmu_context.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
@@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
61{ 62{
62 struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); 63 struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
63 struct task_struct *idle = ci->idle; 64 struct task_struct *idle = ci->idle;
64 pgd_t *pgd;
65 int ret; 65 int ret;
66 66
67 /* 67 /*
@@ -84,29 +84,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
84 } 84 }
85 85
86 /* 86 /*
87 * Allocate initial page tables to allow the new CPU to
88 * enable the MMU safely. This essentially means a set
89 * of our "standard" page tables, with the addition of
90 * a 1:1 mapping for the physical address of the kernel.
91 */
92 pgd = pgd_alloc(&init_mm);
93 if (!pgd)
94 return -ENOMEM;
95
96 if (PHYS_OFFSET != PAGE_OFFSET) {
97#ifndef CONFIG_HOTPLUG_CPU
98 identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
99#endif
100 identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
101 identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
102 }
103
104 /*
105 * We need to tell the secondary core where to find 87 * We need to tell the secondary core where to find
106 * its stack and the page tables. 88 * its stack and the page tables.
107 */ 89 */
108 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 90 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
109 secondary_data.pgdir = virt_to_phys(pgd); 91 secondary_data.pgdir = virt_to_phys(idmap_pgd);
110 secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); 92 secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
111 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); 93 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
112 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); 94 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
142 secondary_data.stack = NULL; 124 secondary_data.stack = NULL;
143 secondary_data.pgdir = 0; 125 secondary_data.pgdir = 0;
144 126
145 if (PHYS_OFFSET != PAGE_OFFSET) {
146#ifndef CONFIG_HOTPLUG_CPU
147 identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
148#endif
149 identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
150 identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
151 }
152
153 pgd_free(&init_mm, pgd);
154
155 return ret; 127 return ret;
156} 128}
157 129
@@ -550,6 +522,10 @@ static void ipi_cpu_stop(unsigned int cpu)
550 local_fiq_disable(); 522 local_fiq_disable();
551 local_irq_disable(); 523 local_irq_disable();
552 524
525#ifdef CONFIG_HOTPLUG_CPU
526 platform_cpu_kill(cpu);
527#endif
528
553 while (1) 529 while (1)
554 cpu_relax(); 530 cpu_relax();
555} 531}
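ipi_cpu_stop() now also calls platform_cpu_kill() on hotplug-capable kernels, so a stopped core can actually be powered off rather than left spinning. The hook itself lives in platform code; a minimal hypothetical sketch, where the power-controller call is made up:

#ifdef CONFIG_HOTPLUG_CPU
/* Hypothetical platform hook; returns nonzero on success. */
int platform_cpu_kill(unsigned int cpu)
{
	/* myplat_cut_cpu_power() stands in for the real power controller. */
	return myplat_cut_cpu_power(cpu) == 0;
}
#endif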
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index a8a6682d6b52..c8e938553d47 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -10,8 +10,11 @@
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/clk.h>
14#include <linux/cpufreq.h>
13#include <linux/delay.h> 15#include <linux/delay.h>
14#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/err.h>
15#include <linux/smp.h> 18#include <linux/smp.h>
16#include <linux/jiffies.h> 19#include <linux/jiffies.h>
17#include <linux/clockchips.h> 20#include <linux/clockchips.h>
@@ -25,6 +28,7 @@
25/* set up by the platform code */ 28/* set up by the platform code */
26void __iomem *twd_base; 29void __iomem *twd_base;
27 30
31static struct clk *twd_clk;
28static unsigned long twd_timer_rate; 32static unsigned long twd_timer_rate;
29 33
30static struct clock_event_device __percpu **twd_evt; 34static struct clock_event_device __percpu **twd_evt;
@@ -89,6 +93,52 @@ void twd_timer_stop(struct clock_event_device *clk)
89 disable_percpu_irq(clk->irq); 93 disable_percpu_irq(clk->irq);
90} 94}
91 95
96#ifdef CONFIG_CPU_FREQ
97
98/*
99 * Updates clockevent frequency when the cpu frequency changes.
100 * Called on the cpu that is changing frequency with interrupts disabled.
101 */
102static void twd_update_frequency(void *data)
103{
104 twd_timer_rate = clk_get_rate(twd_clk);
105
106 clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
107}
108
109static int twd_cpufreq_transition(struct notifier_block *nb,
110 unsigned long state, void *data)
111{
112 struct cpufreq_freqs *freqs = data;
113
114 /*
115 * The twd clock events must be reprogrammed to account for the new
116 * frequency. The timer is local to a cpu, so cross-call to the
117 * changing cpu.
118 */
119 if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
120 smp_call_function_single(freqs->cpu, twd_update_frequency,
121 NULL, 1);
122
123 return NOTIFY_OK;
124}
125
126static struct notifier_block twd_cpufreq_nb = {
127 .notifier_call = twd_cpufreq_transition,
128};
129
130static int twd_cpufreq_init(void)
131{
132 if (!IS_ERR(twd_clk))
133 return cpufreq_register_notifier(&twd_cpufreq_nb,
134 CPUFREQ_TRANSITION_NOTIFIER);
135
136 return 0;
137}
138core_initcall(twd_cpufreq_init);
139
140#endif
141
92static void __cpuinit twd_calibrate_rate(void) 142static void __cpuinit twd_calibrate_rate(void)
93{ 143{
94 unsigned long count; 144 unsigned long count;
@@ -140,6 +190,35 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
140 return IRQ_NONE; 190 return IRQ_NONE;
141} 191}
142 192
193static struct clk *twd_get_clock(void)
194{
195 struct clk *clk;
196 int err;
197
198 clk = clk_get_sys("smp_twd", NULL);
199 if (IS_ERR(clk)) {
200 pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
201 return clk;
202 }
203
204 err = clk_prepare(clk);
205 if (err) {
206 pr_err("smp_twd: clock failed to prepare: %d\n", err);
207 clk_put(clk);
208 return ERR_PTR(err);
209 }
210
211 err = clk_enable(clk);
212 if (err) {
213 pr_err("smp_twd: clock failed to enable: %d\n", err);
214 clk_unprepare(clk);
215 clk_put(clk);
216 return ERR_PTR(err);
217 }
218
219 return clk;
220}
221
143/* 222/*
144 * Setup the local clock events for a CPU. 223 * Setup the local clock events for a CPU.
145 */ 224 */
@@ -165,7 +244,13 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
165 } 244 }
166 } 245 }
167 246
168 twd_calibrate_rate(); 247 if (!twd_clk)
248 twd_clk = twd_get_clock();
249
250 if (!IS_ERR_OR_NULL(twd_clk))
251 twd_timer_rate = clk_get_rate(twd_clk);
252 else
253 twd_calibrate_rate();
169 254
170 clk->name = "local_timer"; 255 clk->name = "local_timer";
171 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 256 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -173,15 +258,11 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
173 clk->rating = 350; 258 clk->rating = 350;
174 clk->set_mode = twd_set_mode; 259 clk->set_mode = twd_set_mode;
175 clk->set_next_event = twd_set_next_event; 260 clk->set_next_event = twd_set_next_event;
176 clk->shift = 20;
177 clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
178 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
179 clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
180 261
181 this_cpu_clk = __this_cpu_ptr(twd_evt); 262 this_cpu_clk = __this_cpu_ptr(twd_evt);
182 *this_cpu_clk = clk; 263 *this_cpu_clk = clk;
183 264
184 clockevents_register_device(clk); 265 clockevents_config_and_register(clk, twd_timer_rate,
185 266 0xf, 0xffffffff);
186 enable_percpu_irq(clk->irq, 0); 267 enable_percpu_irq(clk->irq, 0);
187} 268}
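twd_timer_setup() now prefers a clock found via clk_get_sys("smp_twd", NULL) over runtime calibration, and the cpufreq notifier keeps the rate in sync afterwards. A platform only has to publish a clkdev entry under that name; a sketch, assuming my_twd_clk is the platform's existing struct clk for the TWD input and that this runs from the platform's early clock setup:

#include <linux/clkdev.h>
#include <linux/init.h>

extern struct clk my_twd_clk;		/* assumed platform TWD input clock */

static struct clk_lookup myboard_twd_lookup =
	CLKDEV_INIT("smp_twd", NULL, &my_twd_clk);

/* Called from the platform's clock init, early enough that
 * clk_get_sys("smp_twd", NULL) above can find the entry. */
void __init myboard_register_twd_clk(void)
{
	clkdev_add(&myboard_twd_lookup);
}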
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 93a22d282c16..1794cc3b0f18 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,13 +1,12 @@
1#include <linux/init.h> 1#include <linux/init.h>
2 2
3#include <asm/idmap.h>
3#include <asm/pgalloc.h> 4#include <asm/pgalloc.h>
4#include <asm/pgtable.h> 5#include <asm/pgtable.h>
5#include <asm/memory.h> 6#include <asm/memory.h>
6#include <asm/suspend.h> 7#include <asm/suspend.h>
7#include <asm/tlbflush.h> 8#include <asm/tlbflush.h>
8 9
9static pgd_t *suspend_pgd;
10
11extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); 10extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
12extern void cpu_resume_mmu(void); 11extern void cpu_resume_mmu(void);
13 12
@@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
21 *save_ptr = virt_to_phys(ptr); 20 *save_ptr = virt_to_phys(ptr);
22 21
23 /* This must correspond to the LDM in cpu_resume() assembly */ 22 /* This must correspond to the LDM in cpu_resume() assembly */
24 *ptr++ = virt_to_phys(suspend_pgd); 23 *ptr++ = virt_to_phys(idmap_pgd);
25 *ptr++ = sp; 24 *ptr++ = sp;
26 *ptr++ = virt_to_phys(cpu_do_resume); 25 *ptr++ = virt_to_phys(cpu_do_resume);
27 26
@@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
42 struct mm_struct *mm = current->active_mm; 41 struct mm_struct *mm = current->active_mm;
43 int ret; 42 int ret;
44 43
45 if (!suspend_pgd) 44 if (!idmap_pgd)
46 return -EINVAL; 45 return -EINVAL;
47 46
48 /* 47 /*
@@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
59 58
60 return ret; 59 return ret;
61} 60}
62
63static int __init cpu_suspend_init(void)
64{
65 suspend_pgd = pgd_alloc(&init_mm);
66 if (suspend_pgd) {
67 unsigned long addr = virt_to_phys(cpu_resume_mmu);
68 identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
69 }
70 return suspend_pgd ? 0 : -ENOMEM;
71}
72core_initcall(cpu_suspend_init);
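cpu_suspend() now relies on the shared idmap_pgd instead of a private page table, but its calling convention is unchanged. A minimal sketch of a platform suspend path; the finisher body and function names are hypothetical:

#include <linux/suspend.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>

/* Hypothetical finisher: runs with the 1:1 mapping live and should not
 * return if the power-down actually happens. */
static int myplat_finish_suspend(unsigned long arg)
{
	/* program the power controller here, then wait for power-off */
	cpu_do_idle();
	return 1;	/* reached only if the power-down was aborted */
}

static int myplat_pm_enter(suspend_state_t state)
{
	return cpu_suspend(0, myplat_finish_suspend);
}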
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5f452f8fde05..df745188f5de 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -25,6 +25,7 @@
25#include <linux/syscalls.h> 25#include <linux/syscalls.h>
26#include <linux/perf_event.h> 26#include <linux/perf_event.h>
27 27
28#include <asm/opcodes.h>
28#include <asm/traps.h> 29#include <asm/traps.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30 31
@@ -185,6 +186,21 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
185 186
186 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); 187 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
187 188
189 res = arm_check_condition(instr, regs->ARM_cpsr);
190 switch (res) {
191 case ARM_OPCODE_CONDTEST_PASS:
192 break;
193 case ARM_OPCODE_CONDTEST_FAIL:
194 /* Condition failed - return to next instruction */
195 regs->ARM_pc += 4;
196 return 0;
197 case ARM_OPCODE_CONDTEST_UNCOND:
198 /* If unconditional encoding - not a SWP, undef */
199 return -EFAULT;
200 default:
201 return -EINVAL;
202 }
203
188 if (current->pid != previous_pid) { 204 if (current->pid != previous_pid) {
189 pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", 205 pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
190 current->comm, (unsigned long)current->pid); 206 current->comm, (unsigned long)current->pid);
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30e302d33e0a..01ec453bb924 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -180,9 +180,9 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
180 */ 180 */
181void __init tcm_init(void) 181void __init tcm_init(void)
182{ 182{
183 u32 tcm_status = read_cpuid_tcmstatus(); 183 u32 tcm_status;
184 u8 dtcm_banks = (tcm_status >> 16) & 0x03; 184 u8 dtcm_banks;
185 u8 itcm_banks = (tcm_status & 0x03); 185 u8 itcm_banks;
186 size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; 186 size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
187 size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; 187 size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
188 char *start; 188 char *start;
@@ -191,6 +191,22 @@ void __init tcm_init(void)
191 int ret; 191 int ret;
192 int i; 192 int i;
193 193
194 /*
195 * Prior to ARMv5 there is no TCM, and trying to read the status
196 * register will hang the processor.
197 */
198 if (cpu_architecture() < CPU_ARCH_ARMv5) {
199 if (dtcm_code_sz || itcm_code_sz)
200 pr_info("CPU TCM: %u bytes of DTCM and %u bytes of "
201 "ITCM code compiled in, but no TCM present "
202 "in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz);
203 return;
204 }
205
206 tcm_status = read_cpuid_tcmstatus();
207 dtcm_banks = (tcm_status >> 16) & 0x03;
208 itcm_banks = (tcm_status & 0x03);
209
194 /* Values greater than 2 for D/ITCM banks are "reserved" */ 210 /* Values greater than 2 for D/ITCM banks are "reserved" */
195 if (dtcm_banks > 2) 211 if (dtcm_banks > 2)
196 dtcm_banks = 0; 212 dtcm_banks = 0;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 20b3041e0860..f76e75548670 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -13,6 +13,12 @@
13 *(.proc.info.init) \ 13 *(.proc.info.init) \
14 VMLINUX_SYMBOL(__proc_info_end) = .; 14 VMLINUX_SYMBOL(__proc_info_end) = .;
15 15
16#define IDMAP_TEXT \
17 ALIGN_FUNCTION(); \
18 VMLINUX_SYMBOL(__idmap_text_start) = .; \
19 *(.idmap.text) \
20 VMLINUX_SYMBOL(__idmap_text_end) = .;
21
16#ifdef CONFIG_HOTPLUG_CPU 22#ifdef CONFIG_HOTPLUG_CPU
17#define ARM_CPU_DISCARD(x) 23#define ARM_CPU_DISCARD(x)
18#define ARM_CPU_KEEP(x) x 24#define ARM_CPU_KEEP(x) x
@@ -92,6 +98,7 @@ SECTIONS
92 SCHED_TEXT 98 SCHED_TEXT
93 LOCK_TEXT 99 LOCK_TEXT
94 KPROBES_TEXT 100 KPROBES_TEXT
101 IDMAP_TEXT
95#ifdef CONFIG_MMU 102#ifdef CONFIG_MMU
96 *(.fixup) 103 *(.fixup)
97#endif 104#endif
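IDMAP_TEXT gathers everything placed in .idmap.text (for example cpu_resume_mmu above) between __idmap_text_start and __idmap_text_end, so the identity-map setup can cover exactly the code that must keep executing across an MMU switch. The section symbols are real; the helper below is only an illustrative sketch of a consumer, not the actual arch/arm/mm/idmap.c code:

#include <linux/types.h>

extern char __idmap_text_start[], __idmap_text_end[];

/* Illustrative helper: is this (virtual) code address inside the region
 * the linker script collects for identity mapping? */
static inline bool in_idmap_text(unsigned long addr)
{
	return addr >= (unsigned long)__idmap_text_start &&
	       addr <  (unsigned long)__idmap_text_end;
}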