author     Russell King <rmk+kernel@arm.linux.org.uk>   2012-12-11 05:01:53 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2012-12-11 05:01:53 -0500
commit     0fa5d3996dbda1ee9653c43d39b7ef159fb57ee7 (patch)
tree       70f0adc3b86bb1511be6607c959506f6365fc2a9 /arch/arm/kernel
parent     0b99cb73105f0527c1c4096960796b8772343a39 (diff)
parent     14318efb322e2fe1a034c69463d725209eb9d548 (diff)
Merge branch 'devel-stable' into for-linus
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/devtree.c             104
-rw-r--r--   arch/arm/kernel/hw_breakpoint.c       154
-rw-r--r--   arch/arm/kernel/perf_event.c           85
-rw-r--r--   arch/arm/kernel/perf_event_cpu.c       74
-rw-r--r--   arch/arm/kernel/perf_event_v6.c       126
-rw-r--r--   arch/arm/kernel/perf_event_v7.c       246
-rw-r--r--   arch/arm/kernel/perf_event_xscale.c   157
-rw-r--r--   arch/arm/kernel/setup.c                84
-rw-r--r--   arch/arm/kernel/smp.c                   5
-rw-r--r--   arch/arm/kernel/topology.c             42
10 files changed, 613 insertions, 464 deletions
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index bee7f9d47f02..70f1bdeb241b 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -19,8 +19,10 @@ | |||
19 | #include <linux/of_irq.h> | 19 | #include <linux/of_irq.h> |
20 | #include <linux/of_platform.h> | 20 | #include <linux/of_platform.h> |
21 | 21 | ||
22 | #include <asm/cputype.h> | ||
22 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
23 | #include <asm/page.h> | 24 | #include <asm/page.h> |
25 | #include <asm/smp_plat.h> | ||
24 | #include <asm/mach/arch.h> | 26 | #include <asm/mach/arch.h> |
25 | #include <asm/mach-types.h> | 27 | #include <asm/mach-types.h> |
26 | 28 | ||
@@ -61,6 +63,108 @@ void __init arm_dt_memblock_reserve(void) | |||
61 | } | 63 | } |
62 | } | 64 | } |
63 | 65 | ||
66 | /* | ||
67 | * arm_dt_init_cpu_maps - Function retrieves cpu nodes from the device tree | ||
68 | * and builds the cpu logical map array containing MPIDR values related to | ||
69 | * logical cpus | ||
70 | * | ||
71 | * Updates the cpu possible mask with the number of parsed cpu nodes | ||
72 | */ | ||
73 | void __init arm_dt_init_cpu_maps(void) | ||
74 | { | ||
75 | /* | ||
76 | * Temp logical map is initialized with UINT_MAX values that are | ||
77 | * considered invalid logical map entries since the logical map must | ||
78 | * contain a list of MPIDR[23:0] values where MPIDR[31:24] must | ||
79 | * read as 0. | ||
80 | */ | ||
81 | struct device_node *cpu, *cpus; | ||
82 | u32 i, j, cpuidx = 1; | ||
83 | u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; | ||
84 | |||
85 | u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX }; | ||
86 | bool bootcpu_valid = false; | ||
87 | cpus = of_find_node_by_path("/cpus"); | ||
88 | |||
89 | if (!cpus) | ||
90 | return; | ||
91 | |||
92 | for_each_child_of_node(cpus, cpu) { | ||
93 | u32 hwid; | ||
94 | |||
95 | pr_debug(" * %s...\n", cpu->full_name); | ||
96 | /* | ||
97 | * A device tree containing CPU nodes with missing "reg" | ||
98 | * properties is considered invalid to build the | ||
99 | * cpu_logical_map. | ||
100 | */ | ||
101 | if (of_property_read_u32(cpu, "reg", &hwid)) { | ||
102 | pr_debug(" * %s missing reg property\n", | ||
103 | cpu->full_name); | ||
104 | return; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * 8 MSBs must be set to 0 in the DT since the reg property | ||
109 | * defines the MPIDR[23:0]. | ||
110 | */ | ||
111 | if (hwid & ~MPIDR_HWID_BITMASK) | ||
112 | return; | ||
113 | |||
114 | /* | ||
115 | * Duplicate MPIDRs are a recipe for disaster. | ||
116 | * Scan all initialized entries and check for | ||
117 | * duplicates. If any is found just bail out. | ||
118 | * temp values were initialized to UINT_MAX | ||
119 | * to avoid matching valid MPIDR[23:0] values. | ||
120 | */ | ||
121 | for (j = 0; j < cpuidx; j++) | ||
122 | if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg " | ||
123 | "properties in the DT\n")) | ||
124 | return; | ||
125 | |||
126 | /* | ||
127 | * Build a stashed array of MPIDR values. Numbering scheme | ||
128 | * requires that if detected the boot CPU must be assigned | ||
129 | * logical id 0. Other CPUs get sequential indexes starting | ||
130 | * from 1. If a CPU node with a reg property matching the | ||
131 | * boot CPU MPIDR is detected, this is recorded so that the | ||
132 | * logical map built from DT is validated and can be used | ||
133 | * to override the map created in smp_setup_processor_id(). | ||
134 | */ | ||
135 | if (hwid == mpidr) { | ||
136 | i = 0; | ||
137 | bootcpu_valid = true; | ||
138 | } else { | ||
139 | i = cpuidx++; | ||
140 | } | ||
141 | |||
142 | if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than " | ||
143 | "max cores %u, capping them\n", | ||
144 | cpuidx, nr_cpu_ids)) { | ||
145 | cpuidx = nr_cpu_ids; | ||
146 | break; | ||
147 | } | ||
148 | |||
149 | tmp_map[i] = hwid; | ||
150 | } | ||
151 | |||
152 | if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], " | ||
153 | "fall back to default cpu_logical_map\n")) | ||
154 | return; | ||
155 | |||
156 | /* | ||
157 | * Since the boot CPU node contains proper data, and all nodes have | ||
158 | * a reg property, the DT CPU list can be considered valid and the | ||
159 | * logical map created in smp_setup_processor_id() can be overridden | ||
160 | */ | ||
161 | for (i = 0; i < cpuidx; i++) { | ||
162 | set_cpu_possible(i, true); | ||
163 | cpu_logical_map(i) = tmp_map[i]; | ||
164 | pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i)); | ||
165 | } | ||
166 | } | ||
167 | |||
64 | /** | 168 | /** |
65 | * setup_machine_fdt - Machine setup when an dtb was passed to the kernel | 169 | * setup_machine_fdt - Machine setup when an dtb was passed to the kernel |
66 | * @dt_phys: physical address of dt blob | 170 | * @dt_phys: physical address of dt blob |
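The arm_dt_init_cpu_maps() addition above parses the /cpus nodes, rejects duplicate or out-of-range reg values, and renumbers CPUs so that the booting CPU always ends up as logical CPU 0 while the others get sequential indexes from 1. A standalone model of just that numbering scheme follows; the DEMO_* names, the harness and the sample reg values are invented for illustration and only the algorithm mirrors the hunk.

/*
 * Host-side illustration of the numbering scheme used by
 * arm_dt_init_cpu_maps(); not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_NR_CPUS	4
#define MPIDR_HWID_MASK	0x00ffffffu	/* reg must fit in MPIDR[23:0] */

/* Returns the number of valid logical CPUs, or -1 if the list is rejected. */
static int build_logical_map(const uint32_t *hwid, int nr_nodes,
			     uint32_t boot_mpidr, uint32_t *map)
{
	uint32_t tmp[DEMO_NR_CPUS] = { [0 ... DEMO_NR_CPUS - 1] = UINT32_MAX };
	int i, j, cpuidx = 1, bootcpu_valid = 0;

	for (i = 0; i < nr_nodes; i++) {
		int idx;

		if (hwid[i] & ~MPIDR_HWID_MASK)
			return -1;		/* MPIDR[31:24] must read as 0 */

		for (j = 0; j < cpuidx; j++)
			if (tmp[j] == hwid[i])
				return -1;	/* duplicate reg property */

		if (hwid[i] == boot_mpidr) {
			idx = 0;		/* boot CPU is always logical 0 */
			bootcpu_valid = 1;
		} else {
			idx = cpuidx++;
		}

		if (cpuidx > DEMO_NR_CPUS) {
			cpuidx = DEMO_NR_CPUS;	/* cap at the configured maximum */
			break;
		}

		tmp[idx] = hwid[i];
	}

	if (!bootcpu_valid)
		return -1;			/* no node matched the boot CPU */

	for (i = 0; i < cpuidx; i++)
		map[i] = tmp[i];
	return cpuidx;
}

int main(void)
{
	/* sample /cpus reg values; the boot CPU has MPIDR 0x100 here */
	const uint32_t regs[] = { 0x000, 0x100, 0x001, 0x101 };
	uint32_t map[DEMO_NR_CPUS];
	int i, n = build_logical_map(regs, 4, 0x100, map);

	for (i = 0; i < n; i++)
		printf("cpu logical map %d -> 0x%x\n", i, (unsigned)map[i]);
	return 0;
}
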
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 281bf3301241..5ff2e77782b1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -52,14 +52,14 @@ static u8 debug_arch; | |||
52 | /* Maximum supported watchpoint length. */ | 52 | /* Maximum supported watchpoint length. */ |
53 | static u8 max_watchpoint_len; | 53 | static u8 max_watchpoint_len; |
54 | 54 | ||
55 | #define READ_WB_REG_CASE(OP2, M, VAL) \ | 55 | #define READ_WB_REG_CASE(OP2, M, VAL) \ |
56 | case ((OP2 << 4) + M): \ | 56 | case ((OP2 << 4) + M): \ |
57 | ARM_DBG_READ(c ## M, OP2, VAL); \ | 57 | ARM_DBG_READ(c0, c ## M, OP2, VAL); \ |
58 | break | 58 | break |
59 | 59 | ||
60 | #define WRITE_WB_REG_CASE(OP2, M, VAL) \ | 60 | #define WRITE_WB_REG_CASE(OP2, M, VAL) \ |
61 | case ((OP2 << 4) + M): \ | 61 | case ((OP2 << 4) + M): \ |
62 | ARM_DBG_WRITE(c ## M, OP2, VAL);\ | 62 | ARM_DBG_WRITE(c0, c ## M, OP2, VAL); \ |
63 | break | 63 | break |
64 | 64 | ||
65 | #define GEN_READ_WB_REG_CASES(OP2, VAL) \ | 65 | #define GEN_READ_WB_REG_CASES(OP2, VAL) \ |
@@ -136,12 +136,12 @@ static u8 get_debug_arch(void) | |||
136 | 136 | ||
137 | /* Do we implement the extended CPUID interface? */ | 137 | /* Do we implement the extended CPUID interface? */ |
138 | if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { | 138 | if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { |
139 | pr_warning("CPUID feature registers not supported. " | 139 | pr_warn_once("CPUID feature registers not supported. " |
140 | "Assuming v6 debug is present.\n"); | 140 | "Assuming v6 debug is present.\n"); |
141 | return ARM_DEBUG_ARCH_V6; | 141 | return ARM_DEBUG_ARCH_V6; |
142 | } | 142 | } |
143 | 143 | ||
144 | ARM_DBG_READ(c0, 0, didr); | 144 | ARM_DBG_READ(c0, c0, 0, didr); |
145 | return (didr >> 16) & 0xf; | 145 | return (didr >> 16) & 0xf; |
146 | } | 146 | } |
147 | 147 | ||
@@ -169,7 +169,7 @@ static int debug_exception_updates_fsr(void) | |||
169 | static int get_num_wrp_resources(void) | 169 | static int get_num_wrp_resources(void) |
170 | { | 170 | { |
171 | u32 didr; | 171 | u32 didr; |
172 | ARM_DBG_READ(c0, 0, didr); | 172 | ARM_DBG_READ(c0, c0, 0, didr); |
173 | return ((didr >> 28) & 0xf) + 1; | 173 | return ((didr >> 28) & 0xf) + 1; |
174 | } | 174 | } |
175 | 175 | ||
@@ -177,7 +177,7 @@ static int get_num_wrp_resources(void) | |||
177 | static int get_num_brp_resources(void) | 177 | static int get_num_brp_resources(void) |
178 | { | 178 | { |
179 | u32 didr; | 179 | u32 didr; |
180 | ARM_DBG_READ(c0, 0, didr); | 180 | ARM_DBG_READ(c0, c0, 0, didr); |
181 | return ((didr >> 24) & 0xf) + 1; | 181 | return ((didr >> 24) & 0xf) + 1; |
182 | } | 182 | } |
183 | 183 | ||
@@ -228,19 +228,17 @@ static int get_num_brps(void) | |||
228 | * be put into halting debug mode at any time by an external debugger | 228 | * be put into halting debug mode at any time by an external debugger |
229 | * but there is nothing we can do to prevent that. | 229 | * but there is nothing we can do to prevent that. |
230 | */ | 230 | */ |
231 | static int enable_monitor_mode(void) | 231 | static int monitor_mode_enabled(void) |
232 | { | 232 | { |
233 | u32 dscr; | 233 | u32 dscr; |
234 | int ret = 0; | 234 | ARM_DBG_READ(c0, c1, 0, dscr); |
235 | 235 | return !!(dscr & ARM_DSCR_MDBGEN); | |
236 | ARM_DBG_READ(c1, 0, dscr); | 236 | } |
237 | 237 | ||
238 | /* Ensure that halting mode is disabled. */ | 238 | static int enable_monitor_mode(void) |
239 | if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, | 239 | { |
240 | "halting debug mode enabled. Unable to access hardware resources.\n")) { | 240 | u32 dscr; |
241 | ret = -EPERM; | 241 | ARM_DBG_READ(c0, c1, 0, dscr); |
242 | goto out; | ||
243 | } | ||
244 | 242 | ||
245 | /* If monitor mode is already enabled, just return. */ | 243 | /* If monitor mode is already enabled, just return. */ |
246 | if (dscr & ARM_DSCR_MDBGEN) | 244 | if (dscr & ARM_DSCR_MDBGEN) |
@@ -250,24 +248,27 @@ static int enable_monitor_mode(void) | |||
250 | switch (get_debug_arch()) { | 248 | switch (get_debug_arch()) { |
251 | case ARM_DEBUG_ARCH_V6: | 249 | case ARM_DEBUG_ARCH_V6: |
252 | case ARM_DEBUG_ARCH_V6_1: | 250 | case ARM_DEBUG_ARCH_V6_1: |
253 | ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN)); | 251 | ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN)); |
254 | break; | 252 | break; |
255 | case ARM_DEBUG_ARCH_V7_ECP14: | 253 | case ARM_DEBUG_ARCH_V7_ECP14: |
256 | case ARM_DEBUG_ARCH_V7_1: | 254 | case ARM_DEBUG_ARCH_V7_1: |
257 | ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN)); | 255 | ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN)); |
256 | isb(); | ||
258 | break; | 257 | break; |
259 | default: | 258 | default: |
260 | ret = -ENODEV; | 259 | return -ENODEV; |
261 | goto out; | ||
262 | } | 260 | } |
263 | 261 | ||
264 | /* Check that the write made it through. */ | 262 | /* Check that the write made it through. */ |
265 | ARM_DBG_READ(c1, 0, dscr); | 263 | ARM_DBG_READ(c0, c1, 0, dscr); |
266 | if (!(dscr & ARM_DSCR_MDBGEN)) | 264 | if (!(dscr & ARM_DSCR_MDBGEN)) { |
267 | ret = -EPERM; | 265 | pr_warn_once("Failed to enable monitor mode on CPU %d.\n", |
266 | smp_processor_id()); | ||
267 | return -EPERM; | ||
268 | } | ||
268 | 269 | ||
269 | out: | 270 | out: |
270 | return ret; | 271 | return 0; |
271 | } | 272 | } |
272 | 273 | ||
273 | int hw_breakpoint_slots(int type) | 274 | int hw_breakpoint_slots(int type) |
@@ -328,14 +329,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
328 | { | 329 | { |
329 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | 330 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
330 | struct perf_event **slot, **slots; | 331 | struct perf_event **slot, **slots; |
331 | int i, max_slots, ctrl_base, val_base, ret = 0; | 332 | int i, max_slots, ctrl_base, val_base; |
332 | u32 addr, ctrl; | 333 | u32 addr, ctrl; |
333 | 334 | ||
334 | /* Ensure that we are in monitor mode and halting mode is disabled. */ | ||
335 | ret = enable_monitor_mode(); | ||
336 | if (ret) | ||
337 | goto out; | ||
338 | |||
339 | addr = info->address; | 335 | addr = info->address; |
340 | ctrl = encode_ctrl_reg(info->ctrl) | 0x1; | 336 | ctrl = encode_ctrl_reg(info->ctrl) | 0x1; |
341 | 337 | ||
@@ -362,9 +358,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
362 | } | 358 | } |
363 | } | 359 | } |
364 | 360 | ||
365 | if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) { | 361 | if (i == max_slots) { |
366 | ret = -EBUSY; | 362 | pr_warning("Can't find any breakpoint slot\n"); |
367 | goto out; | 363 | return -EBUSY; |
368 | } | 364 | } |
369 | 365 | ||
370 | /* Override the breakpoint data with the step data. */ | 366 | /* Override the breakpoint data with the step data. */ |
@@ -383,9 +379,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
383 | 379 | ||
384 | /* Setup the control register. */ | 380 | /* Setup the control register. */ |
385 | write_wb_reg(ctrl_base + i, ctrl); | 381 | write_wb_reg(ctrl_base + i, ctrl); |
386 | 382 | return 0; | |
387 | out: | ||
388 | return ret; | ||
389 | } | 383 | } |
390 | 384 | ||
391 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | 385 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) |
@@ -416,8 +410,10 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
416 | } | 410 | } |
417 | } | 411 | } |
418 | 412 | ||
419 | if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) | 413 | if (i == max_slots) { |
414 | pr_warning("Can't find any breakpoint slot\n"); | ||
420 | return; | 415 | return; |
416 | } | ||
421 | 417 | ||
422 | /* Ensure that we disable the mismatch breakpoint. */ | 418 | /* Ensure that we disable the mismatch breakpoint. */ |
423 | if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE && | 419 | if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE && |
@@ -596,6 +592,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) | |||
596 | int ret = 0; | 592 | int ret = 0; |
597 | u32 offset, alignment_mask = 0x3; | 593 | u32 offset, alignment_mask = 0x3; |
598 | 594 | ||
595 | /* Ensure that we are in monitor debug mode. */ | ||
596 | if (!monitor_mode_enabled()) | ||
597 | return -ENODEV; | ||
598 | |||
599 | /* Build the arch_hw_breakpoint. */ | 599 | /* Build the arch_hw_breakpoint. */ |
600 | ret = arch_build_bp_info(bp); | 600 | ret = arch_build_bp_info(bp); |
601 | if (ret) | 601 | if (ret) |
@@ -858,7 +858,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
858 | local_irq_enable(); | 858 | local_irq_enable(); |
859 | 859 | ||
860 | /* We only handle watchpoints and hardware breakpoints. */ | 860 | /* We only handle watchpoints and hardware breakpoints. */ |
861 | ARM_DBG_READ(c1, 0, dscr); | 861 | ARM_DBG_READ(c0, c1, 0, dscr); |
862 | 862 | ||
863 | /* Perform perf callbacks. */ | 863 | /* Perform perf callbacks. */ |
864 | switch (ARM_DSCR_MOE(dscr)) { | 864 | switch (ARM_DSCR_MOE(dscr)) { |
@@ -906,7 +906,7 @@ static struct undef_hook debug_reg_hook = { | |||
906 | static void reset_ctrl_regs(void *unused) | 906 | static void reset_ctrl_regs(void *unused) |
907 | { | 907 | { |
908 | int i, raw_num_brps, err = 0, cpu = smp_processor_id(); | 908 | int i, raw_num_brps, err = 0, cpu = smp_processor_id(); |
909 | u32 dbg_power; | 909 | u32 val; |
910 | 910 | ||
911 | /* | 911 | /* |
912 | * v7 debug contains save and restore registers so that debug state | 912 | * v7 debug contains save and restore registers so that debug state |
@@ -919,23 +919,30 @@ static void reset_ctrl_regs(void *unused) | |||
919 | switch (debug_arch) { | 919 | switch (debug_arch) { |
920 | case ARM_DEBUG_ARCH_V6: | 920 | case ARM_DEBUG_ARCH_V6: |
921 | case ARM_DEBUG_ARCH_V6_1: | 921 | case ARM_DEBUG_ARCH_V6_1: |
922 | /* ARMv6 cores just need to reset the registers. */ | 922 | /* ARMv6 cores clear the registers out of reset. */ |
923 | goto reset_regs; | 923 | goto out_mdbgen; |
924 | case ARM_DEBUG_ARCH_V7_ECP14: | 924 | case ARM_DEBUG_ARCH_V7_ECP14: |
925 | /* | 925 | /* |
926 | * Ensure sticky power-down is clear (i.e. debug logic is | 926 | * Ensure sticky power-down is clear (i.e. debug logic is |
927 | * powered up). | 927 | * powered up). |
928 | */ | 928 | */ |
929 | asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); | 929 | ARM_DBG_READ(c1, c5, 4, val); |
930 | if ((dbg_power & 0x1) == 0) | 930 | if ((val & 0x1) == 0) |
931 | err = -EPERM; | 931 | err = -EPERM; |
932 | |||
933 | /* | ||
934 | * Check whether we implement OS save and restore. | ||
935 | */ | ||
936 | ARM_DBG_READ(c1, c1, 4, val); | ||
937 | if ((val & 0x9) == 0) | ||
938 | goto clear_vcr; | ||
932 | break; | 939 | break; |
933 | case ARM_DEBUG_ARCH_V7_1: | 940 | case ARM_DEBUG_ARCH_V7_1: |
934 | /* | 941 | /* |
935 | * Ensure the OS double lock is clear. | 942 | * Ensure the OS double lock is clear. |
936 | */ | 943 | */ |
937 | asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power)); | 944 | ARM_DBG_READ(c1, c3, 4, val); |
938 | if ((dbg_power & 0x1) == 1) | 945 | if ((val & 0x1) == 1) |
939 | err = -EPERM; | 946 | err = -EPERM; |
940 | break; | 947 | break; |
941 | } | 948 | } |
@@ -947,24 +954,29 @@ static void reset_ctrl_regs(void *unused) | |||
947 | } | 954 | } |
948 | 955 | ||
949 | /* | 956 | /* |
950 | * Unconditionally clear the lock by writing a value | 957 | * Unconditionally clear the OS lock by writing a value |
951 | * other than 0xC5ACCE55 to the access register. | 958 | * other than 0xC5ACCE55 to the access register. |
952 | */ | 959 | */ |
953 | asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); | 960 | ARM_DBG_WRITE(c1, c0, 4, 0); |
954 | isb(); | 961 | isb(); |
955 | 962 | ||
956 | /* | 963 | /* |
957 | * Clear any configured vector-catch events before | 964 | * Clear any configured vector-catch events before |
958 | * enabling monitor mode. | 965 | * enabling monitor mode. |
959 | */ | 966 | */ |
960 | asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); | 967 | clear_vcr: |
968 | ARM_DBG_WRITE(c0, c7, 0, 0); | ||
961 | isb(); | 969 | isb(); |
962 | 970 | ||
963 | reset_regs: | 971 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { |
964 | if (enable_monitor_mode()) | 972 | pr_warning("CPU %d failed to disable vector catch\n", cpu); |
965 | return; | 973 | return; |
974 | } | ||
966 | 975 | ||
967 | /* We must also reset any reserved registers. */ | 976 | /* |
977 | * The control/value register pairs are UNKNOWN out of reset so | ||
978 | * clear them to avoid spurious debug events. | ||
979 | */ | ||
968 | raw_num_brps = get_num_brp_resources(); | 980 | raw_num_brps = get_num_brp_resources(); |
969 | for (i = 0; i < raw_num_brps; ++i) { | 981 | for (i = 0; i < raw_num_brps; ++i) { |
970 | write_wb_reg(ARM_BASE_BCR + i, 0UL); | 982 | write_wb_reg(ARM_BASE_BCR + i, 0UL); |
@@ -975,6 +987,19 @@ reset_regs: | |||
975 | write_wb_reg(ARM_BASE_WCR + i, 0UL); | 987 | write_wb_reg(ARM_BASE_WCR + i, 0UL); |
976 | write_wb_reg(ARM_BASE_WVR + i, 0UL); | 988 | write_wb_reg(ARM_BASE_WVR + i, 0UL); |
977 | } | 989 | } |
990 | |||
991 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { | ||
992 | pr_warning("CPU %d failed to clear debug register pairs\n", cpu); | ||
993 | return; | ||
994 | } | ||
995 | |||
996 | /* | ||
997 | * Have a crack at enabling monitor mode. We don't actually need | ||
998 | * it yet, but reporting an error early is useful if it fails. | ||
999 | */ | ||
1000 | out_mdbgen: | ||
1001 | if (enable_monitor_mode()) | ||
1002 | cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); | ||
978 | } | 1003 | } |
979 | 1004 | ||
980 | static int __cpuinit dbg_reset_notify(struct notifier_block *self, | 1005 | static int __cpuinit dbg_reset_notify(struct notifier_block *self, |
@@ -992,8 +1017,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { | |||
992 | 1017 | ||
993 | static int __init arch_hw_breakpoint_init(void) | 1018 | static int __init arch_hw_breakpoint_init(void) |
994 | { | 1019 | { |
995 | u32 dscr; | ||
996 | |||
997 | debug_arch = get_debug_arch(); | 1020 | debug_arch = get_debug_arch(); |
998 | 1021 | ||
999 | if (!debug_arch_supported()) { | 1022 | if (!debug_arch_supported()) { |
@@ -1028,17 +1051,10 @@ static int __init arch_hw_breakpoint_init(void) | |||
1028 | core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : | 1051 | core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : |
1029 | "", core_num_wrps); | 1052 | "", core_num_wrps); |
1030 | 1053 | ||
1031 | ARM_DBG_READ(c1, 0, dscr); | 1054 | /* Work out the maximum supported watchpoint length. */ |
1032 | if (dscr & ARM_DSCR_HDBGEN) { | 1055 | max_watchpoint_len = get_max_wp_len(); |
1033 | max_watchpoint_len = 4; | 1056 | pr_info("maximum watchpoint size is %u bytes.\n", |
1034 | pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n", | 1057 | max_watchpoint_len); |
1035 | max_watchpoint_len); | ||
1036 | } else { | ||
1037 | /* Work out the maximum supported watchpoint length. */ | ||
1038 | max_watchpoint_len = get_max_wp_len(); | ||
1039 | pr_info("maximum watchpoint size is %u bytes.\n", | ||
1040 | max_watchpoint_len); | ||
1041 | } | ||
1042 | 1058 | ||
1043 | /* Register debug fault handler. */ | 1059 | /* Register debug fault handler. */ |
1044 | hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, | 1060 | hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, |
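The hw_breakpoint.c changes above switch every debug register access to four-argument ARM_DBG_READ/ARM_DBG_WRITE accessors that name CRn explicitly (c0 for the original breakpoint/watchpoint registers, c1 for the power, lock and OS save/restore registers). Judging from the mrc/mcr p14 instructions they replace in this hunk, the accessors expand roughly as sketched below; the real definitions live in arch/arm/include/asm/hw_breakpoint.h, so treat this as an approximation.

/*
 * Rough sketch of the four-argument debug register accessors; the
 * authoritative macros are in arch/arm/include/asm/hw_breakpoint.h.
 */
#define ARM_DBG_READ(N, M, OP2, VAL) do {				\
	asm volatile("mrc p14, 0, %0, " #N ", " #M ", " #OP2		\
		     : "=r" (VAL));					\
} while (0)

#define ARM_DBG_WRITE(N, M, OP2, VAL) do {				\
	asm volatile("mcr p14, 0, %0, " #N ", " #M ", " #OP2		\
		     : : "r" (VAL));					\
} while (0)

/* e.g. ARM_DBG_READ(c0, c1, 0, dscr) becomes: mrc p14, 0, <Rt>, c0, c1, 0 */
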
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 53c0304b734a..f9e8657dd241 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event, | |||
86 | return -ENOENT; | 86 | return -ENOENT; |
87 | } | 87 | } |
88 | 88 | ||
89 | int | 89 | int armpmu_event_set_period(struct perf_event *event) |
90 | armpmu_event_set_period(struct perf_event *event, | ||
91 | struct hw_perf_event *hwc, | ||
92 | int idx) | ||
93 | { | 90 | { |
94 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 91 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
92 | struct hw_perf_event *hwc = &event->hw; | ||
95 | s64 left = local64_read(&hwc->period_left); | 93 | s64 left = local64_read(&hwc->period_left); |
96 | s64 period = hwc->sample_period; | 94 | s64 period = hwc->sample_period; |
97 | int ret = 0; | 95 | int ret = 0; |
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event, | |||
119 | 117 | ||
120 | local64_set(&hwc->prev_count, (u64)-left); | 118 | local64_set(&hwc->prev_count, (u64)-left); |
121 | 119 | ||
122 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); | 120 | armpmu->write_counter(event, (u64)(-left) & 0xffffffff); |
123 | 121 | ||
124 | perf_event_update_userpage(event); | 122 | perf_event_update_userpage(event); |
125 | 123 | ||
126 | return ret; | 124 | return ret; |
127 | } | 125 | } |
128 | 126 | ||
129 | u64 | 127 | u64 armpmu_event_update(struct perf_event *event) |
130 | armpmu_event_update(struct perf_event *event, | ||
131 | struct hw_perf_event *hwc, | ||
132 | int idx) | ||
133 | { | 128 | { |
134 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 129 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
130 | struct hw_perf_event *hwc = &event->hw; | ||
135 | u64 delta, prev_raw_count, new_raw_count; | 131 | u64 delta, prev_raw_count, new_raw_count; |
136 | 132 | ||
137 | again: | 133 | again: |
138 | prev_raw_count = local64_read(&hwc->prev_count); | 134 | prev_raw_count = local64_read(&hwc->prev_count); |
139 | new_raw_count = armpmu->read_counter(idx); | 135 | new_raw_count = armpmu->read_counter(event); |
140 | 136 | ||
141 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | 137 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
142 | new_raw_count) != prev_raw_count) | 138 | new_raw_count) != prev_raw_count) |
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event) | |||
159 | if (hwc->idx < 0) | 155 | if (hwc->idx < 0) |
160 | return; | 156 | return; |
161 | 157 | ||
162 | armpmu_event_update(event, hwc, hwc->idx); | 158 | armpmu_event_update(event); |
163 | } | 159 | } |
164 | 160 | ||
165 | static void | 161 | static void |
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags) | |||
173 | * PERF_EF_UPDATE, see comments in armpmu_start(). | 169 | * PERF_EF_UPDATE, see comments in armpmu_start(). |
174 | */ | 170 | */ |
175 | if (!(hwc->state & PERF_HES_STOPPED)) { | 171 | if (!(hwc->state & PERF_HES_STOPPED)) { |
176 | armpmu->disable(hwc, hwc->idx); | 172 | armpmu->disable(event); |
177 | armpmu_event_update(event, hwc, hwc->idx); | 173 | armpmu_event_update(event); |
178 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | 174 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
179 | } | 175 | } |
180 | } | 176 | } |
181 | 177 | ||
182 | static void | 178 | static void armpmu_start(struct perf_event *event, int flags) |
183 | armpmu_start(struct perf_event *event, int flags) | ||
184 | { | 179 | { |
185 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 180 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
186 | struct hw_perf_event *hwc = &event->hw; | 181 | struct hw_perf_event *hwc = &event->hw; |
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags) | |||
200 | * get an interrupt too soon or *way* too late if the overflow has | 195 | * get an interrupt too soon or *way* too late if the overflow has |
201 | * happened since disabling. | 196 | * happened since disabling. |
202 | */ | 197 | */ |
203 | armpmu_event_set_period(event, hwc, hwc->idx); | 198 | armpmu_event_set_period(event); |
204 | armpmu->enable(hwc, hwc->idx); | 199 | armpmu->enable(event); |
205 | } | 200 | } |
206 | 201 | ||
207 | static void | 202 | static void |
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
233 | perf_pmu_disable(event->pmu); | 228 | perf_pmu_disable(event->pmu); |
234 | 229 | ||
235 | /* If we don't have a space for the counter then finish early. */ | 230 | /* If we don't have a space for the counter then finish early. */ |
236 | idx = armpmu->get_event_idx(hw_events, hwc); | 231 | idx = armpmu->get_event_idx(hw_events, event); |
237 | if (idx < 0) { | 232 | if (idx < 0) { |
238 | err = idx; | 233 | err = idx; |
239 | goto out; | 234 | goto out; |
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
244 | * sure it is disabled. | 239 | * sure it is disabled. |
245 | */ | 240 | */ |
246 | event->hw.idx = idx; | 241 | event->hw.idx = idx; |
247 | armpmu->disable(hwc, idx); | 242 | armpmu->disable(event); |
248 | hw_events->events[idx] = event; | 243 | hw_events->events[idx] = event; |
249 | 244 | ||
250 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | 245 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events, | |||
264 | struct perf_event *event) | 259 | struct perf_event *event) |
265 | { | 260 | { |
266 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 261 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
267 | struct hw_perf_event fake_event = event->hw; | ||
268 | struct pmu *leader_pmu = event->group_leader->pmu; | 262 | struct pmu *leader_pmu = event->group_leader->pmu; |
269 | 263 | ||
270 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) | 264 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
271 | return 1; | 265 | return 1; |
272 | 266 | ||
273 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; | 267 | return armpmu->get_event_idx(hw_events, event) >= 0; |
274 | } | 268 | } |
275 | 269 | ||
276 | static int | 270 | static int |
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) | |||
316 | static void | 310 | static void |
317 | armpmu_release_hardware(struct arm_pmu *armpmu) | 311 | armpmu_release_hardware(struct arm_pmu *armpmu) |
318 | { | 312 | { |
319 | armpmu->free_irq(); | 313 | armpmu->free_irq(armpmu); |
320 | pm_runtime_put_sync(&armpmu->plat_device->dev); | 314 | pm_runtime_put_sync(&armpmu->plat_device->dev); |
321 | } | 315 | } |
322 | 316 | ||
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) | |||
330 | return -ENODEV; | 324 | return -ENODEV; |
331 | 325 | ||
332 | pm_runtime_get_sync(&pmu_device->dev); | 326 | pm_runtime_get_sync(&pmu_device->dev); |
333 | err = armpmu->request_irq(armpmu_dispatch_irq); | 327 | err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); |
334 | if (err) { | 328 | if (err) { |
335 | armpmu_release_hardware(armpmu); | 329 | armpmu_release_hardware(armpmu); |
336 | return err; | 330 | return err; |
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu) | |||
465 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); | 459 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); |
466 | 460 | ||
467 | if (enabled) | 461 | if (enabled) |
468 | armpmu->start(); | 462 | armpmu->start(armpmu); |
469 | } | 463 | } |
470 | 464 | ||
471 | static void armpmu_disable(struct pmu *pmu) | 465 | static void armpmu_disable(struct pmu *pmu) |
472 | { | 466 | { |
473 | struct arm_pmu *armpmu = to_arm_pmu(pmu); | 467 | struct arm_pmu *armpmu = to_arm_pmu(pmu); |
474 | armpmu->stop(); | 468 | armpmu->stop(armpmu); |
475 | } | 469 | } |
476 | 470 | ||
477 | #ifdef CONFIG_PM_RUNTIME | 471 | #ifdef CONFIG_PM_RUNTIME |
@@ -517,12 +511,13 @@ static void __init armpmu_init(struct arm_pmu *armpmu) | |||
517 | }; | 511 | }; |
518 | } | 512 | } |
519 | 513 | ||
520 | int armpmu_register(struct arm_pmu *armpmu, char *name, int type) | 514 | int armpmu_register(struct arm_pmu *armpmu, int type) |
521 | { | 515 | { |
522 | armpmu_init(armpmu); | 516 | armpmu_init(armpmu); |
517 | pm_runtime_enable(&armpmu->plat_device->dev); | ||
523 | pr_info("enabled with %s PMU driver, %d counters available\n", | 518 | pr_info("enabled with %s PMU driver, %d counters available\n", |
524 | armpmu->name, armpmu->num_events); | 519 | armpmu->name, armpmu->num_events); |
525 | return perf_pmu_register(&armpmu->pmu, name, type); | 520 | return perf_pmu_register(&armpmu->pmu, armpmu->name, type); |
526 | } | 521 | } |
527 | 522 | ||
528 | /* | 523 | /* |
@@ -576,6 +571,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
576 | { | 571 | { |
577 | struct frame_tail __user *tail; | 572 | struct frame_tail __user *tail; |
578 | 573 | ||
574 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
575 | /* We don't support guest os callchain now */ | ||
576 | return; | ||
577 | } | ||
579 | 578 | ||
580 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; | 579 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; |
581 | 580 | ||
@@ -603,9 +602,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
603 | { | 602 | { |
604 | struct stackframe fr; | 603 | struct stackframe fr; |
605 | 604 | ||
605 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
606 | /* We don't support guest os callchain now */ | ||
607 | return; | ||
608 | } | ||
609 | |||
606 | fr.fp = regs->ARM_fp; | 610 | fr.fp = regs->ARM_fp; |
607 | fr.sp = regs->ARM_sp; | 611 | fr.sp = regs->ARM_sp; |
608 | fr.lr = regs->ARM_lr; | 612 | fr.lr = regs->ARM_lr; |
609 | fr.pc = regs->ARM_pc; | 613 | fr.pc = regs->ARM_pc; |
610 | walk_stackframe(&fr, callchain_trace, entry); | 614 | walk_stackframe(&fr, callchain_trace, entry); |
611 | } | 615 | } |
616 | |||
617 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | ||
618 | { | ||
619 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) | ||
620 | return perf_guest_cbs->get_guest_ip(); | ||
621 | |||
622 | return instruction_pointer(regs); | ||
623 | } | ||
624 | |||
625 | unsigned long perf_misc_flags(struct pt_regs *regs) | ||
626 | { | ||
627 | int misc = 0; | ||
628 | |||
629 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
630 | if (perf_guest_cbs->is_user_mode()) | ||
631 | misc |= PERF_RECORD_MISC_GUEST_USER; | ||
632 | else | ||
633 | misc |= PERF_RECORD_MISC_GUEST_KERNEL; | ||
634 | } else { | ||
635 | if (user_mode(regs)) | ||
636 | misc |= PERF_RECORD_MISC_USER; | ||
637 | else | ||
638 | misc |= PERF_RECORD_MISC_KERNEL; | ||
639 | } | ||
640 | |||
641 | return misc; | ||
642 | } | ||
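The perf_event.c hunk above reworks armpmu_event_update() and armpmu_event_set_period() to take only the perf_event and derive the hw_perf_event and counter index internally. The accumulation itself still masks the raw counter delta with max_period (set to (1LLU << 32) - 1 by the CPU PMU backends), so a 32-bit counter wrap is handled transparently. A small host-side check of that arithmetic; the values and main() are invented for the example.

/* Host-side check of the wrap-safe delta masking used by the update path. */
#include <stdio.h>
#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t new, uint64_t max_period)
{
	return (new - prev) & max_period;	/* mask absorbs the 32-bit wrap */
}

int main(void)
{
	uint64_t max_period = (1ULL << 32) - 1;	/* matches the PMU backends */

	/* no wrap: 0x1000 -> 0x1800 is a delta of 2048 */
	printf("%llu\n", (unsigned long long)
	       counter_delta(0x1000, 0x1800, max_period));

	/* wrap: 0xfffffff0 -> 0x10 is still a delta of 32 */
	printf("%llu\n", (unsigned long long)
	       counter_delta(0xfffffff0ULL, 0x10ULL, max_period));
	return 0;
}
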
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d7d8d4de9d6..9a4f6307a016 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/slab.h> | ||
26 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
27 | 28 | ||
28 | #include <asm/cputype.h> | 29 | #include <asm/cputype.h> |
@@ -45,7 +46,7 @@ const char *perf_pmu_name(void) | |||
45 | if (!cpu_pmu) | 46 | if (!cpu_pmu) |
46 | return NULL; | 47 | return NULL; |
47 | 48 | ||
48 | return cpu_pmu->pmu.name; | 49 | return cpu_pmu->name; |
49 | } | 50 | } |
50 | EXPORT_SYMBOL_GPL(perf_pmu_name); | 51 | EXPORT_SYMBOL_GPL(perf_pmu_name); |
51 | 52 | ||
@@ -70,7 +71,7 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) | |||
70 | return &__get_cpu_var(cpu_hw_events); | 71 | return &__get_cpu_var(cpu_hw_events); |
71 | } | 72 | } |
72 | 73 | ||
73 | static void cpu_pmu_free_irq(void) | 74 | static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) |
74 | { | 75 | { |
75 | int i, irq, irqs; | 76 | int i, irq, irqs; |
76 | struct platform_device *pmu_device = cpu_pmu->plat_device; | 77 | struct platform_device *pmu_device = cpu_pmu->plat_device; |
@@ -86,7 +87,7 @@ static void cpu_pmu_free_irq(void) | |||
86 | } | 87 | } |
87 | } | 88 | } |
88 | 89 | ||
89 | static int cpu_pmu_request_irq(irq_handler_t handler) | 90 | static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) |
90 | { | 91 | { |
91 | int i, err, irq, irqs; | 92 | int i, err, irq, irqs; |
92 | struct platform_device *pmu_device = cpu_pmu->plat_device; | 93 | struct platform_device *pmu_device = cpu_pmu->plat_device; |
@@ -147,7 +148,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
147 | 148 | ||
148 | /* Ensure the PMU has sane values out of reset. */ | 149 | /* Ensure the PMU has sane values out of reset. */ |
149 | if (cpu_pmu && cpu_pmu->reset) | 150 | if (cpu_pmu && cpu_pmu->reset) |
150 | on_each_cpu(cpu_pmu->reset, NULL, 1); | 151 | on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); |
151 | } | 152 | } |
152 | 153 | ||
153 | /* | 154 | /* |
@@ -163,7 +164,9 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b, | |||
163 | return NOTIFY_DONE; | 164 | return NOTIFY_DONE; |
164 | 165 | ||
165 | if (cpu_pmu && cpu_pmu->reset) | 166 | if (cpu_pmu && cpu_pmu->reset) |
166 | cpu_pmu->reset(NULL); | 167 | cpu_pmu->reset(cpu_pmu); |
168 | else | ||
169 | return NOTIFY_DONE; | ||
167 | 170 | ||
168 | return NOTIFY_OK; | 171 | return NOTIFY_OK; |
169 | } | 172 | } |
@@ -195,13 +198,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { | |||
195 | /* | 198 | /* |
196 | * CPU PMU identification and probing. | 199 | * CPU PMU identification and probing. |
197 | */ | 200 | */ |
198 | static struct arm_pmu *__devinit probe_current_pmu(void) | 201 | static int __devinit probe_current_pmu(struct arm_pmu *pmu) |
199 | { | 202 | { |
200 | struct arm_pmu *pmu = NULL; | ||
201 | int cpu = get_cpu(); | 203 | int cpu = get_cpu(); |
202 | unsigned long cpuid = read_cpuid_id(); | 204 | unsigned long cpuid = read_cpuid_id(); |
203 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; | 205 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; |
204 | unsigned long part_number = (cpuid & 0xFFF0); | 206 | unsigned long part_number = (cpuid & 0xFFF0); |
207 | int ret = -ENODEV; | ||
205 | 208 | ||
206 | pr_info("probing PMU on CPU %d\n", cpu); | 209 | pr_info("probing PMU on CPU %d\n", cpu); |
207 | 210 | ||
@@ -211,25 +214,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void) | |||
211 | case 0xB360: /* ARM1136 */ | 214 | case 0xB360: /* ARM1136 */ |
212 | case 0xB560: /* ARM1156 */ | 215 | case 0xB560: /* ARM1156 */ |
213 | case 0xB760: /* ARM1176 */ | 216 | case 0xB760: /* ARM1176 */ |
214 | pmu = armv6pmu_init(); | 217 | ret = armv6pmu_init(pmu); |
215 | break; | 218 | break; |
216 | case 0xB020: /* ARM11mpcore */ | 219 | case 0xB020: /* ARM11mpcore */ |
217 | pmu = armv6mpcore_pmu_init(); | 220 | ret = armv6mpcore_pmu_init(pmu); |
218 | break; | 221 | break; |
219 | case 0xC080: /* Cortex-A8 */ | 222 | case 0xC080: /* Cortex-A8 */ |
220 | pmu = armv7_a8_pmu_init(); | 223 | ret = armv7_a8_pmu_init(pmu); |
221 | break; | 224 | break; |
222 | case 0xC090: /* Cortex-A9 */ | 225 | case 0xC090: /* Cortex-A9 */ |
223 | pmu = armv7_a9_pmu_init(); | 226 | ret = armv7_a9_pmu_init(pmu); |
224 | break; | 227 | break; |
225 | case 0xC050: /* Cortex-A5 */ | 228 | case 0xC050: /* Cortex-A5 */ |
226 | pmu = armv7_a5_pmu_init(); | 229 | ret = armv7_a5_pmu_init(pmu); |
227 | break; | 230 | break; |
228 | case 0xC0F0: /* Cortex-A15 */ | 231 | case 0xC0F0: /* Cortex-A15 */ |
229 | pmu = armv7_a15_pmu_init(); | 232 | ret = armv7_a15_pmu_init(pmu); |
230 | break; | 233 | break; |
231 | case 0xC070: /* Cortex-A7 */ | 234 | case 0xC070: /* Cortex-A7 */ |
232 | pmu = armv7_a7_pmu_init(); | 235 | ret = armv7_a7_pmu_init(pmu); |
233 | break; | 236 | break; |
234 | } | 237 | } |
235 | /* Intel CPUs [xscale]. */ | 238 | /* Intel CPUs [xscale]. */ |
@@ -237,43 +240,54 @@ static struct arm_pmu *__devinit probe_current_pmu(void) | |||
237 | part_number = (cpuid >> 13) & 0x7; | 240 | part_number = (cpuid >> 13) & 0x7; |
238 | switch (part_number) { | 241 | switch (part_number) { |
239 | case 1: | 242 | case 1: |
240 | pmu = xscale1pmu_init(); | 243 | ret = xscale1pmu_init(pmu); |
241 | break; | 244 | break; |
242 | case 2: | 245 | case 2: |
243 | pmu = xscale2pmu_init(); | 246 | ret = xscale2pmu_init(pmu); |
244 | break; | 247 | break; |
245 | } | 248 | } |
246 | } | 249 | } |
247 | 250 | ||
248 | put_cpu(); | 251 | put_cpu(); |
249 | return pmu; | 252 | return ret; |
250 | } | 253 | } |
251 | 254 | ||
252 | static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) | 255 | static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) |
253 | { | 256 | { |
254 | const struct of_device_id *of_id; | 257 | const struct of_device_id *of_id; |
255 | struct arm_pmu *(*init_fn)(void); | 258 | int (*init_fn)(struct arm_pmu *); |
256 | struct device_node *node = pdev->dev.of_node; | 259 | struct device_node *node = pdev->dev.of_node; |
260 | struct arm_pmu *pmu; | ||
261 | int ret = -ENODEV; | ||
257 | 262 | ||
258 | if (cpu_pmu) { | 263 | if (cpu_pmu) { |
259 | pr_info("attempt to register multiple PMU devices!"); | 264 | pr_info("attempt to register multiple PMU devices!"); |
260 | return -ENOSPC; | 265 | return -ENOSPC; |
261 | } | 266 | } |
262 | 267 | ||
268 | pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); | ||
269 | if (!pmu) { | ||
270 | pr_info("failed to allocate PMU device!"); | ||
271 | return -ENOMEM; | ||
272 | } | ||
273 | |||
263 | if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { | 274 | if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { |
264 | init_fn = of_id->data; | 275 | init_fn = of_id->data; |
265 | cpu_pmu = init_fn(); | 276 | ret = init_fn(pmu); |
266 | } else { | 277 | } else { |
267 | cpu_pmu = probe_current_pmu(); | 278 | ret = probe_current_pmu(pmu); |
268 | } | 279 | } |
269 | 280 | ||
270 | if (!cpu_pmu) | 281 | if (ret) { |
271 | return -ENODEV; | 282 | pr_info("failed to register PMU devices!"); |
283 | kfree(pmu); | ||
284 | return ret; | ||
285 | } | ||
272 | 286 | ||
287 | cpu_pmu = pmu; | ||
273 | cpu_pmu->plat_device = pdev; | 288 | cpu_pmu->plat_device = pdev; |
274 | cpu_pmu_init(cpu_pmu); | 289 | cpu_pmu_init(cpu_pmu); |
275 | register_cpu_notifier(&cpu_pmu_hotplug_notifier); | 290 | armpmu_register(cpu_pmu, PERF_TYPE_RAW); |
276 | armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); | ||
277 | 291 | ||
278 | return 0; | 292 | return 0; |
279 | } | 293 | } |
@@ -290,6 +304,16 @@ static struct platform_driver cpu_pmu_driver = { | |||
290 | 304 | ||
291 | static int __init register_pmu_driver(void) | 305 | static int __init register_pmu_driver(void) |
292 | { | 306 | { |
293 | return platform_driver_register(&cpu_pmu_driver); | 307 | int err; |
308 | |||
309 | err = register_cpu_notifier(&cpu_pmu_hotplug_notifier); | ||
310 | if (err) | ||
311 | return err; | ||
312 | |||
313 | err = platform_driver_register(&cpu_pmu_driver); | ||
314 | if (err) | ||
315 | unregister_cpu_notifier(&cpu_pmu_hotplug_notifier); | ||
316 | |||
317 | return err; | ||
294 | } | 318 | } |
295 | device_initcall(register_pmu_driver); | 319 | device_initcall(register_pmu_driver); |
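probe_current_pmu() above selects a PMU implementation from the CPUID/MIDR: the implementor comes from bits [31:24] and, for ARM parts, the part number from cpuid & 0xFFF0. The snippet below simply decodes one example value the same way; 0x410fc0f0 (a Cortex-A15 MIDR) is chosen purely for illustration.

/* Decode one example MIDR the way probe_current_pmu() does. */
#include <stdio.h>

int main(void)
{
	unsigned long cpuid = 0x410fc0f0;
	unsigned long implementor = (cpuid & 0xff000000) >> 24;	/* 0x41 = ARM */
	unsigned long part_number = (cpuid & 0xfff0);			/* 0xc0f0 = Cortex-A15 */

	printf("implementor 0x%lx, part 0x%lx\n", implementor, part_number);
	return 0;
}
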
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6ccc07971745..f3e22ff8b6a2 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr, | |||
401 | return ret; | 401 | return ret; |
402 | } | 402 | } |
403 | 403 | ||
404 | static inline u32 | 404 | static inline u32 armv6pmu_read_counter(struct perf_event *event) |
405 | armv6pmu_read_counter(int counter) | ||
406 | { | 405 | { |
406 | struct hw_perf_event *hwc = &event->hw; | ||
407 | int counter = hwc->idx; | ||
407 | unsigned long value = 0; | 408 | unsigned long value = 0; |
408 | 409 | ||
409 | if (ARMV6_CYCLE_COUNTER == counter) | 410 | if (ARMV6_CYCLE_COUNTER == counter) |
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter) | |||
418 | return value; | 419 | return value; |
419 | } | 420 | } |
420 | 421 | ||
421 | static inline void | 422 | static inline void armv6pmu_write_counter(struct perf_event *event, u32 value) |
422 | armv6pmu_write_counter(int counter, | ||
423 | u32 value) | ||
424 | { | 423 | { |
424 | struct hw_perf_event *hwc = &event->hw; | ||
425 | int counter = hwc->idx; | ||
426 | |||
425 | if (ARMV6_CYCLE_COUNTER == counter) | 427 | if (ARMV6_CYCLE_COUNTER == counter) |
426 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); | 428 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); |
427 | else if (ARMV6_COUNTER0 == counter) | 429 | else if (ARMV6_COUNTER0 == counter) |
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter, | |||
432 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | 434 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); |
433 | } | 435 | } |
434 | 436 | ||
435 | static void | 437 | static void armv6pmu_enable_event(struct perf_event *event) |
436 | armv6pmu_enable_event(struct hw_perf_event *hwc, | ||
437 | int idx) | ||
438 | { | 438 | { |
439 | unsigned long val, mask, evt, flags; | 439 | unsigned long val, mask, evt, flags; |
440 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
441 | struct hw_perf_event *hwc = &event->hw; | ||
440 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 442 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
443 | int idx = hwc->idx; | ||
441 | 444 | ||
442 | if (ARMV6_CYCLE_COUNTER == idx) { | 445 | if (ARMV6_CYCLE_COUNTER == idx) { |
443 | mask = 0; | 446 | mask = 0; |
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num, | |||
473 | { | 476 | { |
474 | unsigned long pmcr = armv6_pmcr_read(); | 477 | unsigned long pmcr = armv6_pmcr_read(); |
475 | struct perf_sample_data data; | 478 | struct perf_sample_data data; |
476 | struct pmu_hw_events *cpuc; | 479 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
480 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
477 | struct pt_regs *regs; | 481 | struct pt_regs *regs; |
478 | int idx; | 482 | int idx; |
479 | 483 | ||
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num, | |||
489 | */ | 493 | */ |
490 | armv6_pmcr_write(pmcr); | 494 | armv6_pmcr_write(pmcr); |
491 | 495 | ||
492 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
493 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 496 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
494 | struct perf_event *event = cpuc->events[idx]; | 497 | struct perf_event *event = cpuc->events[idx]; |
495 | struct hw_perf_event *hwc; | 498 | struct hw_perf_event *hwc; |
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num, | |||
506 | continue; | 509 | continue; |
507 | 510 | ||
508 | hwc = &event->hw; | 511 | hwc = &event->hw; |
509 | armpmu_event_update(event, hwc, idx); | 512 | armpmu_event_update(event); |
510 | perf_sample_data_init(&data, 0, hwc->last_period); | 513 | perf_sample_data_init(&data, 0, hwc->last_period); |
511 | if (!armpmu_event_set_period(event, hwc, idx)) | 514 | if (!armpmu_event_set_period(event)) |
512 | continue; | 515 | continue; |
513 | 516 | ||
514 | if (perf_event_overflow(event, &data, regs)) | 517 | if (perf_event_overflow(event, &data, regs)) |
515 | cpu_pmu->disable(hwc, idx); | 518 | cpu_pmu->disable(event); |
516 | } | 519 | } |
517 | 520 | ||
518 | /* | 521 | /* |
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num, | |||
527 | return IRQ_HANDLED; | 530 | return IRQ_HANDLED; |
528 | } | 531 | } |
529 | 532 | ||
530 | static void | 533 | static void armv6pmu_start(struct arm_pmu *cpu_pmu) |
531 | armv6pmu_start(void) | ||
532 | { | 534 | { |
533 | unsigned long flags, val; | 535 | unsigned long flags, val; |
534 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 536 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -540,8 +542,7 @@ armv6pmu_start(void) | |||
540 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 542 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
541 | } | 543 | } |
542 | 544 | ||
543 | static void | 545 | static void armv6pmu_stop(struct arm_pmu *cpu_pmu) |
544 | armv6pmu_stop(void) | ||
545 | { | 546 | { |
546 | unsigned long flags, val; | 547 | unsigned long flags, val; |
547 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 548 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -555,10 +556,11 @@ armv6pmu_stop(void) | |||
555 | 556 | ||
556 | static int | 557 | static int |
557 | armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, | 558 | armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, |
558 | struct hw_perf_event *event) | 559 | struct perf_event *event) |
559 | { | 560 | { |
561 | struct hw_perf_event *hwc = &event->hw; | ||
560 | /* Always place a cycle counter into the cycle counter. */ | 562 | /* Always place a cycle counter into the cycle counter. */ |
561 | if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { | 563 | if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) { |
562 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) | 564 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) |
563 | return -EAGAIN; | 565 | return -EAGAIN; |
564 | 566 | ||
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, | |||
579 | } | 581 | } |
580 | } | 582 | } |
581 | 583 | ||
582 | static void | 584 | static void armv6pmu_disable_event(struct perf_event *event) |
583 | armv6pmu_disable_event(struct hw_perf_event *hwc, | ||
584 | int idx) | ||
585 | { | 585 | { |
586 | unsigned long val, mask, evt, flags; | 586 | unsigned long val, mask, evt, flags; |
587 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
588 | struct hw_perf_event *hwc = &event->hw; | ||
587 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 589 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
590 | int idx = hwc->idx; | ||
588 | 591 | ||
589 | if (ARMV6_CYCLE_COUNTER == idx) { | 592 | if (ARMV6_CYCLE_COUNTER == idx) { |
590 | mask = ARMV6_PMCR_CCOUNT_IEN; | 593 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, | |||
613 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 616 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
614 | } | 617 | } |
615 | 618 | ||
616 | static void | 619 | static void armv6mpcore_pmu_disable_event(struct perf_event *event) |
617 | armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | ||
618 | int idx) | ||
619 | { | 620 | { |
620 | unsigned long val, mask, flags, evt = 0; | 621 | unsigned long val, mask, flags, evt = 0; |
622 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
623 | struct hw_perf_event *hwc = &event->hw; | ||
621 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 624 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
625 | int idx = hwc->idx; | ||
622 | 626 | ||
623 | if (ARMV6_CYCLE_COUNTER == idx) { | 627 | if (ARMV6_CYCLE_COUNTER == idx) { |
624 | mask = ARMV6_PMCR_CCOUNT_IEN; | 628 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event) | |||
649 | &armv6_perf_cache_map, 0xFF); | 653 | &armv6_perf_cache_map, 0xFF); |
650 | } | 654 | } |
651 | 655 | ||
652 | static struct arm_pmu armv6pmu = { | 656 | static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu) |
653 | .name = "v6", | ||
654 | .handle_irq = armv6pmu_handle_irq, | ||
655 | .enable = armv6pmu_enable_event, | ||
656 | .disable = armv6pmu_disable_event, | ||
657 | .read_counter = armv6pmu_read_counter, | ||
658 | .write_counter = armv6pmu_write_counter, | ||
659 | .get_event_idx = armv6pmu_get_event_idx, | ||
660 | .start = armv6pmu_start, | ||
661 | .stop = armv6pmu_stop, | ||
662 | .map_event = armv6_map_event, | ||
663 | .num_events = 3, | ||
664 | .max_period = (1LLU << 32) - 1, | ||
665 | }; | ||
666 | |||
667 | static struct arm_pmu *__devinit armv6pmu_init(void) | ||
668 | { | 657 | { |
669 | return &armv6pmu; | 658 | cpu_pmu->name = "v6"; |
659 | cpu_pmu->handle_irq = armv6pmu_handle_irq; | ||
660 | cpu_pmu->enable = armv6pmu_enable_event; | ||
661 | cpu_pmu->disable = armv6pmu_disable_event; | ||
662 | cpu_pmu->read_counter = armv6pmu_read_counter; | ||
663 | cpu_pmu->write_counter = armv6pmu_write_counter; | ||
664 | cpu_pmu->get_event_idx = armv6pmu_get_event_idx; | ||
665 | cpu_pmu->start = armv6pmu_start; | ||
666 | cpu_pmu->stop = armv6pmu_stop; | ||
667 | cpu_pmu->map_event = armv6_map_event; | ||
668 | cpu_pmu->num_events = 3; | ||
669 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
670 | |||
671 | return 0; | ||
670 | } | 672 | } |
671 | 673 | ||
672 | /* | 674 | /* |
@@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event) | |||
683 | &armv6mpcore_perf_cache_map, 0xFF); | 685 | &armv6mpcore_perf_cache_map, 0xFF); |
684 | } | 686 | } |
685 | 687 | ||
686 | static struct arm_pmu armv6mpcore_pmu = { | 688 | static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) |
687 | .name = "v6mpcore", | ||
688 | .handle_irq = armv6pmu_handle_irq, | ||
689 | .enable = armv6pmu_enable_event, | ||
690 | .disable = armv6mpcore_pmu_disable_event, | ||
691 | .read_counter = armv6pmu_read_counter, | ||
692 | .write_counter = armv6pmu_write_counter, | ||
693 | .get_event_idx = armv6pmu_get_event_idx, | ||
694 | .start = armv6pmu_start, | ||
695 | .stop = armv6pmu_stop, | ||
696 | .map_event = armv6mpcore_map_event, | ||
697 | .num_events = 3, | ||
698 | .max_period = (1LLU << 32) - 1, | ||
699 | }; | ||
700 | |||
701 | static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) | ||
702 | { | 689 | { |
703 | return &armv6mpcore_pmu; | 690 | cpu_pmu->name = "v6mpcore"; |
691 | cpu_pmu->handle_irq = armv6pmu_handle_irq; | ||
692 | cpu_pmu->enable = armv6pmu_enable_event; | ||
693 | cpu_pmu->disable = armv6mpcore_pmu_disable_event; | ||
694 | cpu_pmu->read_counter = armv6pmu_read_counter; | ||
695 | cpu_pmu->write_counter = armv6pmu_write_counter; | ||
696 | cpu_pmu->get_event_idx = armv6pmu_get_event_idx; | ||
697 | cpu_pmu->start = armv6pmu_start; | ||
698 | cpu_pmu->stop = armv6pmu_stop; | ||
699 | cpu_pmu->map_event = armv6mpcore_map_event; | ||
700 | cpu_pmu->num_events = 3; | ||
701 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
702 | |||
703 | return 0; | ||
704 | } | 704 | } |
705 | #else | 705 | #else |
706 | static struct arm_pmu *__devinit armv6pmu_init(void) | 706 | static int armv6pmu_init(struct arm_pmu *cpu_pmu) |
707 | { | 707 | { |
708 | return NULL; | 708 | return -ENODEV; |
709 | } | 709 | } |
710 | 710 | ||
711 | static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) | 711 | static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) |
712 | { | 712 | { |
713 | return NULL; | 713 | return -ENODEV; |
714 | } | 714 | } |
715 | #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ | 715 | #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ |
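perf_event_v6.c above drops the file-scope armv6pmu/armv6mpcore_pmu structs: the probing code now allocates a single struct arm_pmu and each xxx_pmu_init() only fills it in, returning 0 or -ENODEV. A minimal sketch of that caller-owns-the-storage pattern is shown below; the struct and names are invented for the example and are not the kernel's.

/* Minimal sketch of the init-function pattern adopted by the PMU backends. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct demo_pmu {
	const char *name;
	int num_events;
	int (*start)(struct demo_pmu *);
};

static int demo_start(struct demo_pmu *pmu) { return 0; }

static int demo_pmu_init(struct demo_pmu *pmu)
{
	if (!pmu)
		return -ENODEV;
	pmu->name = "demo";
	pmu->num_events = 3;
	pmu->start = demo_start;
	return 0;
}

static struct demo_pmu *demo_probe(void)
{
	/* mirrors cpu_pmu_device_probe(): allocate, then let init fill it in */
	struct demo_pmu *pmu = calloc(1, sizeof(*pmu));

	if (pmu && demo_pmu_init(pmu)) {
		free(pmu);
		return NULL;
	}
	return pmu;
}

int main(void)
{
	struct demo_pmu *pmu = demo_probe();

	if (pmu)
		printf("registered %s with %d counters\n",
		       pmu->name, pmu->num_events);
	free(pmu);
	return 0;
}
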
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index bd4b090ebcfd..7d0cce85d17e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -18,8 +18,6 @@ | |||
18 | 18 | ||
19 | #ifdef CONFIG_CPU_V7 | 19 | #ifdef CONFIG_CPU_V7 |
20 | 20 | ||
21 | static struct arm_pmu armv7pmu; | ||
22 | |||
23 | /* | 21 | /* |
24 | * Common ARMv7 event types | 22 | * Common ARMv7 event types |
25 | * | 23 | * |
@@ -738,7 +736,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
738 | */ | 736 | */ |
739 | #define ARMV7_IDX_CYCLE_COUNTER 0 | 737 | #define ARMV7_IDX_CYCLE_COUNTER 0 |
740 | #define ARMV7_IDX_COUNTER0 1 | 738 | #define ARMV7_IDX_COUNTER0 1 |
741 | #define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | 739 | #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ |
740 | (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | ||
742 | 741 | ||
743 | #define ARMV7_MAX_COUNTERS 32 | 742 | #define ARMV7_MAX_COUNTERS 32 |
744 | #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) | 743 | #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) |
@@ -804,49 +803,34 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc) | |||
804 | return pmnc & ARMV7_OVERFLOWED_MASK; | 803 | return pmnc & ARMV7_OVERFLOWED_MASK; |
805 | } | 804 | } |
806 | 805 | ||
807 | static inline int armv7_pmnc_counter_valid(int idx) | 806 | static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) |
808 | { | 807 | { |
809 | return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST; | 808 | return idx >= ARMV7_IDX_CYCLE_COUNTER && |
809 | idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); | ||
810 | } | 810 | } |
811 | 811 | ||
812 | static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) | 812 | static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) |
813 | { | 813 | { |
814 | int ret = 0; | 814 | return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); |
815 | u32 counter; | ||
816 | |||
817 | if (!armv7_pmnc_counter_valid(idx)) { | ||
818 | pr_err("CPU%u checking wrong counter %d overflow status\n", | ||
819 | smp_processor_id(), idx); | ||
820 | } else { | ||
821 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
822 | ret = pmnc & BIT(counter); | ||
823 | } | ||
824 | |||
825 | return ret; | ||
826 | } | 815 | } |
827 | 816 | ||
828 | static inline int armv7_pmnc_select_counter(int idx) | 817 | static inline int armv7_pmnc_select_counter(int idx) |
829 | { | 818 | { |
830 | u32 counter; | 819 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
831 | |||
832 | if (!armv7_pmnc_counter_valid(idx)) { | ||
833 | pr_err("CPU%u selecting wrong PMNC counter %d\n", | ||
834 | smp_processor_id(), idx); | ||
835 | return -EINVAL; | ||
836 | } | ||
837 | |||
838 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
839 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); | 820 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); |
840 | isb(); | 821 | isb(); |
841 | 822 | ||
842 | return idx; | 823 | return idx; |
843 | } | 824 | } |
844 | 825 | ||
845 | static inline u32 armv7pmu_read_counter(int idx) | 826 | static inline u32 armv7pmu_read_counter(struct perf_event *event) |
846 | { | 827 | { |
828 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
829 | struct hw_perf_event *hwc = &event->hw; | ||
830 | int idx = hwc->idx; | ||
847 | u32 value = 0; | 831 | u32 value = 0; |
848 | 832 | ||
849 | if (!armv7_pmnc_counter_valid(idx)) | 833 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) |
850 | pr_err("CPU%u reading wrong counter %d\n", | 834 | pr_err("CPU%u reading wrong counter %d\n", |
851 | smp_processor_id(), idx); | 835 | smp_processor_id(), idx); |
852 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) | 836 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) |
@@ -857,9 +841,13 @@ static inline u32 armv7pmu_read_counter(int idx) | |||
857 | return value; | 841 | return value; |
858 | } | 842 | } |
859 | 843 | ||
860 | static inline void armv7pmu_write_counter(int idx, u32 value) | 844 | static inline void armv7pmu_write_counter(struct perf_event *event, u32 value) |
861 | { | 845 | { |
862 | if (!armv7_pmnc_counter_valid(idx)) | 846 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
847 | struct hw_perf_event *hwc = &event->hw; | ||
848 | int idx = hwc->idx; | ||
849 | |||
850 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) | ||
863 | pr_err("CPU%u writing wrong counter %d\n", | 851 | pr_err("CPU%u writing wrong counter %d\n", |
864 | smp_processor_id(), idx); | 852 | smp_processor_id(), idx); |
865 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) | 853 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) |
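armv7pmu_read_counter() and armv7pmu_write_counter() now take the perf_event itself and recover both the owning PMU (to_arm_pmu(event->pmu)) and the counter index (event->hw.idx) from it, so the accessors no longer depend on a global descriptor. to_arm_pmu() is presumably a container_of()-style accessor; a standalone model of that shape:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { const char *name; };		/* stand-in for the generic perf pmu */

struct arm_pmu {
	struct pmu pmu;				/* embedded generic descriptor */
	int num_events;
};

#define to_arm_pmu(p) container_of(p, struct arm_pmu, pmu)

int main(void)
{
	struct arm_pmu cpu_pmu = { .pmu = { .name = "demo" }, .num_events = 5 };
	struct pmu *generic = &cpu_pmu.pmu;	/* what event->pmu would point at */

	printf("%s has %d counters\n",
	       to_arm_pmu(generic)->pmu.name, to_arm_pmu(generic)->num_events);
	return 0;
}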
@@ -878,60 +866,28 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val) | |||
878 | 866 | ||
879 | static inline int armv7_pmnc_enable_counter(int idx) | 867 | static inline int armv7_pmnc_enable_counter(int idx) |
880 | { | 868 | { |
881 | u32 counter; | 869 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
882 | |||
883 | if (!armv7_pmnc_counter_valid(idx)) { | ||
884 | pr_err("CPU%u enabling wrong PMNC counter %d\n", | ||
885 | smp_processor_id(), idx); | ||
886 | return -EINVAL; | ||
887 | } | ||
888 | |||
889 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
890 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); | 870 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); |
891 | return idx; | 871 | return idx; |
892 | } | 872 | } |
893 | 873 | ||
894 | static inline int armv7_pmnc_disable_counter(int idx) | 874 | static inline int armv7_pmnc_disable_counter(int idx) |
895 | { | 875 | { |
896 | u32 counter; | 876 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
897 | |||
898 | if (!armv7_pmnc_counter_valid(idx)) { | ||
899 | pr_err("CPU%u disabling wrong PMNC counter %d\n", | ||
900 | smp_processor_id(), idx); | ||
901 | return -EINVAL; | ||
902 | } | ||
903 | |||
904 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
905 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); | 877 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); |
906 | return idx; | 878 | return idx; |
907 | } | 879 | } |
908 | 880 | ||
909 | static inline int armv7_pmnc_enable_intens(int idx) | 881 | static inline int armv7_pmnc_enable_intens(int idx) |
910 | { | 882 | { |
911 | u32 counter; | 883 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
912 | |||
913 | if (!armv7_pmnc_counter_valid(idx)) { | ||
914 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", | ||
915 | smp_processor_id(), idx); | ||
916 | return -EINVAL; | ||
917 | } | ||
918 | |||
919 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
920 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); | 884 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); |
921 | return idx; | 885 | return idx; |
922 | } | 886 | } |
923 | 887 | ||
924 | static inline int armv7_pmnc_disable_intens(int idx) | 888 | static inline int armv7_pmnc_disable_intens(int idx) |
925 | { | 889 | { |
926 | u32 counter; | 890 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
927 | |||
928 | if (!armv7_pmnc_counter_valid(idx)) { | ||
929 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", | ||
930 | smp_processor_id(), idx); | ||
931 | return -EINVAL; | ||
932 | } | ||
933 | |||
934 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
935 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); | 891 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); |
936 | isb(); | 892 | isb(); |
937 | /* Clear the overflow flag in case an interrupt is pending. */ | 893 | /* Clear the overflow flag in case an interrupt is pending. */ |
@@ -956,7 +912,7 @@ static inline u32 armv7_pmnc_getreset_flags(void) | |||
956 | } | 912 | } |
957 | 913 | ||
958 | #ifdef DEBUG | 914 | #ifdef DEBUG |
959 | static void armv7_pmnc_dump_regs(void) | 915 | static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) |
960 | { | 916 | { |
961 | u32 val; | 917 | u32 val; |
962 | unsigned int cnt; | 918 | unsigned int cnt; |
@@ -981,7 +937,8 @@ static void armv7_pmnc_dump_regs(void) | |||
981 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | 937 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); |
982 | printk(KERN_INFO "CCNT =0x%08x\n", val); | 938 | printk(KERN_INFO "CCNT =0x%08x\n", val); |
983 | 939 | ||
984 | for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) { | 940 | for (cnt = ARMV7_IDX_COUNTER0; |
941 | cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { | ||
985 | armv7_pmnc_select_counter(cnt); | 942 | armv7_pmnc_select_counter(cnt); |
986 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | 943 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); |
987 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | 944 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", |
@@ -993,10 +950,19 @@ static void armv7_pmnc_dump_regs(void) | |||
993 | } | 950 | } |
994 | #endif | 951 | #endif |
995 | 952 | ||
996 | static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | 953 | static void armv7pmu_enable_event(struct perf_event *event) |
997 | { | 954 | { |
998 | unsigned long flags; | 955 | unsigned long flags; |
956 | struct hw_perf_event *hwc = &event->hw; | ||
957 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
999 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 958 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
959 | int idx = hwc->idx; | ||
960 | |||
961 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { | ||
962 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", | ||
963 | smp_processor_id(), idx); | ||
964 | return; | ||
965 | } | ||
1000 | 966 | ||
1001 | /* | 967 | /* |
1002 | * Enable counter and interrupt, and set the counter to count | 968 | * Enable counter and interrupt, and set the counter to count |
@@ -1014,7 +980,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
1014 | * We only need to set the event for the cycle counter if we | 980 | * We only need to set the event for the cycle counter if we |
1015 | * have the ability to perform event filtering. | 981 | * have the ability to perform event filtering. |
1016 | */ | 982 | */ |
1017 | if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) | 983 | if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) |
1018 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | 984 | armv7_pmnc_write_evtsel(idx, hwc->config_base); |
1019 | 985 | ||
1020 | /* | 986 | /* |
@@ -1030,10 +996,19 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
1030 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 996 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1031 | } | 997 | } |
1032 | 998 | ||
1033 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | 999 | static void armv7pmu_disable_event(struct perf_event *event) |
1034 | { | 1000 | { |
1035 | unsigned long flags; | 1001 | unsigned long flags; |
1002 | struct hw_perf_event *hwc = &event->hw; | ||
1003 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
1036 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1004 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
1005 | int idx = hwc->idx; | ||
1006 | |||
1007 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { | ||
1008 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", | ||
1009 | smp_processor_id(), idx); | ||
1010 | return; | ||
1011 | } | ||
1037 | 1012 | ||
1038 | /* | 1013 | /* |
1039 | * Disable counter and interrupt | 1014 | * Disable counter and interrupt |
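With the per-helper checks gone, counter-index validation happens exactly once, at the top of armv7pmu_enable_event() and armv7pmu_disable_event(), before any coprocessor register is touched; an out-of-range index now logs an error and returns early rather than being re-checked in every low-level routine.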
@@ -1057,7 +1032,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1057 | { | 1032 | { |
1058 | u32 pmnc; | 1033 | u32 pmnc; |
1059 | struct perf_sample_data data; | 1034 | struct perf_sample_data data; |
1060 | struct pmu_hw_events *cpuc; | 1035 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
1036 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
1061 | struct pt_regs *regs; | 1037 | struct pt_regs *regs; |
1062 | int idx; | 1038 | int idx; |
1063 | 1039 | ||
@@ -1077,7 +1053,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1077 | */ | 1053 | */ |
1078 | regs = get_irq_regs(); | 1054 | regs = get_irq_regs(); |
1079 | 1055 | ||
1080 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
1081 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 1056 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
1082 | struct perf_event *event = cpuc->events[idx]; | 1057 | struct perf_event *event = cpuc->events[idx]; |
1083 | struct hw_perf_event *hwc; | 1058 | struct hw_perf_event *hwc; |
@@ -1094,13 +1069,13 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1094 | continue; | 1069 | continue; |
1095 | 1070 | ||
1096 | hwc = &event->hw; | 1071 | hwc = &event->hw; |
1097 | armpmu_event_update(event, hwc, idx); | 1072 | armpmu_event_update(event); |
1098 | perf_sample_data_init(&data, 0, hwc->last_period); | 1073 | perf_sample_data_init(&data, 0, hwc->last_period); |
1099 | if (!armpmu_event_set_period(event, hwc, idx)) | 1074 | if (!armpmu_event_set_period(event)) |
1100 | continue; | 1075 | continue; |
1101 | 1076 | ||
1102 | if (perf_event_overflow(event, &data, regs)) | 1077 | if (perf_event_overflow(event, &data, regs)) |
1103 | cpu_pmu->disable(hwc, idx); | 1078 | cpu_pmu->disable(event); |
1104 | } | 1079 | } |
1105 | 1080 | ||
1106 | /* | 1081 | /* |
@@ -1115,7 +1090,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1115 | return IRQ_HANDLED; | 1090 | return IRQ_HANDLED; |
1116 | } | 1091 | } |
1117 | 1092 | ||
1118 | static void armv7pmu_start(void) | 1093 | static void armv7pmu_start(struct arm_pmu *cpu_pmu) |
1119 | { | 1094 | { |
1120 | unsigned long flags; | 1095 | unsigned long flags; |
1121 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1096 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -1126,7 +1101,7 @@ static void armv7pmu_start(void) | |||
1126 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 1101 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1127 | } | 1102 | } |
1128 | 1103 | ||
1129 | static void armv7pmu_stop(void) | 1104 | static void armv7pmu_stop(struct arm_pmu *cpu_pmu) |
1130 | { | 1105 | { |
1131 | unsigned long flags; | 1106 | unsigned long flags; |
1132 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1107 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -1138,10 +1113,12 @@ static void armv7pmu_stop(void) | |||
1138 | } | 1113 | } |
1139 | 1114 | ||
1140 | static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, | 1115 | static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, |
1141 | struct hw_perf_event *event) | 1116 | struct perf_event *event) |
1142 | { | 1117 | { |
1143 | int idx; | 1118 | int idx; |
1144 | unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT; | 1119 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
1120 | struct hw_perf_event *hwc = &event->hw; | ||
1121 | unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT; | ||
1145 | 1122 | ||
1146 | /* Always place a cycle counter into the cycle counter. */ | 1123 | /* Always place a cycle counter into the cycle counter. */ |
1147 | if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { | 1124 | if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { |
@@ -1192,11 +1169,14 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event, | |||
1192 | 1169 | ||
1193 | static void armv7pmu_reset(void *info) | 1170 | static void armv7pmu_reset(void *info) |
1194 | { | 1171 | { |
1172 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; | ||
1195 | u32 idx, nb_cnt = cpu_pmu->num_events; | 1173 | u32 idx, nb_cnt = cpu_pmu->num_events; |
1196 | 1174 | ||
1197 | /* The counter and interrupt enable registers are unknown at reset. */ | 1175 | /* The counter and interrupt enable registers are unknown at reset. */ |
1198 | for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) | 1176 | for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { |
1199 | armv7pmu_disable_event(NULL, idx); | 1177 | armv7_pmnc_disable_counter(idx); |
1178 | armv7_pmnc_disable_intens(idx); | ||
1179 | } | ||
1200 | 1180 | ||
1201 | /* Initialize & Reset PMNC: C and P bits */ | 1181 | /* Initialize & Reset PMNC: C and P bits */ |
1202 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | 1182 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); |
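armv7pmu_reset() can no longer call armv7pmu_disable_event(NULL, idx), because the disable hook now expects a real perf_event from which to derive the PMU and index; the reset path therefore drives armv7_pmnc_disable_counter() and armv7_pmnc_disable_intens() directly for each counter.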
@@ -1232,17 +1212,18 @@ static int armv7_a7_map_event(struct perf_event *event) | |||
1232 | &armv7_a7_perf_cache_map, 0xFF); | 1212 | &armv7_a7_perf_cache_map, 0xFF); |
1233 | } | 1213 | } |
1234 | 1214 | ||
1235 | static struct arm_pmu armv7pmu = { | 1215 | static void armv7pmu_init(struct arm_pmu *cpu_pmu) |
1236 | .handle_irq = armv7pmu_handle_irq, | 1216 | { |
1237 | .enable = armv7pmu_enable_event, | 1217 | cpu_pmu->handle_irq = armv7pmu_handle_irq; |
1238 | .disable = armv7pmu_disable_event, | 1218 | cpu_pmu->enable = armv7pmu_enable_event; |
1239 | .read_counter = armv7pmu_read_counter, | 1219 | cpu_pmu->disable = armv7pmu_disable_event; |
1240 | .write_counter = armv7pmu_write_counter, | 1220 | cpu_pmu->read_counter = armv7pmu_read_counter; |
1241 | .get_event_idx = armv7pmu_get_event_idx, | 1221 | cpu_pmu->write_counter = armv7pmu_write_counter; |
1242 | .start = armv7pmu_start, | 1222 | cpu_pmu->get_event_idx = armv7pmu_get_event_idx; |
1243 | .stop = armv7pmu_stop, | 1223 | cpu_pmu->start = armv7pmu_start; |
1244 | .reset = armv7pmu_reset, | 1224 | cpu_pmu->stop = armv7pmu_stop; |
1245 | .max_period = (1LLU << 32) - 1, | 1225 | cpu_pmu->reset = armv7pmu_reset; |
1226 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
1246 | }; | 1227 | }; |
1247 | 1228 | ||
1248 | static u32 __devinit armv7_read_num_pmnc_events(void) | 1229 | static u32 __devinit armv7_read_num_pmnc_events(void) |
@@ -1256,70 +1237,75 @@ static u32 __devinit armv7_read_num_pmnc_events(void) | |||
1256 | return nb_cnt + 1; | 1237 | return nb_cnt + 1; |
1257 | } | 1238 | } |
1258 | 1239 | ||
1259 | static struct arm_pmu *__devinit armv7_a8_pmu_init(void) | 1240 | static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) |
1260 | { | 1241 | { |
1261 | armv7pmu.name = "ARMv7 Cortex-A8"; | 1242 | armv7pmu_init(cpu_pmu); |
1262 | armv7pmu.map_event = armv7_a8_map_event; | 1243 | cpu_pmu->name = "ARMv7 Cortex-A8"; |
1263 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1244 | cpu_pmu->map_event = armv7_a8_map_event; |
1264 | return &armv7pmu; | 1245 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1246 | return 0; | ||
1265 | } | 1247 | } |
1266 | 1248 | ||
1267 | static struct arm_pmu *__devinit armv7_a9_pmu_init(void) | 1249 | static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) |
1268 | { | 1250 | { |
1269 | armv7pmu.name = "ARMv7 Cortex-A9"; | 1251 | armv7pmu_init(cpu_pmu); |
1270 | armv7pmu.map_event = armv7_a9_map_event; | 1252 | cpu_pmu->name = "ARMv7 Cortex-A9"; |
1271 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1253 | cpu_pmu->map_event = armv7_a9_map_event; |
1272 | return &armv7pmu; | 1254 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1255 | return 0; | ||
1273 | } | 1256 | } |
1274 | 1257 | ||
1275 | static struct arm_pmu *__devinit armv7_a5_pmu_init(void) | 1258 | static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) |
1276 | { | 1259 | { |
1277 | armv7pmu.name = "ARMv7 Cortex-A5"; | 1260 | armv7pmu_init(cpu_pmu); |
1278 | armv7pmu.map_event = armv7_a5_map_event; | 1261 | cpu_pmu->name = "ARMv7 Cortex-A5"; |
1279 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1262 | cpu_pmu->map_event = armv7_a5_map_event; |
1280 | return &armv7pmu; | 1263 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1264 | return 0; | ||
1281 | } | 1265 | } |
1282 | 1266 | ||
1283 | static struct arm_pmu *__devinit armv7_a15_pmu_init(void) | 1267 | static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) |
1284 | { | 1268 | { |
1285 | armv7pmu.name = "ARMv7 Cortex-A15"; | 1269 | armv7pmu_init(cpu_pmu); |
1286 | armv7pmu.map_event = armv7_a15_map_event; | 1270 | cpu_pmu->name = "ARMv7 Cortex-A15"; |
1287 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1271 | cpu_pmu->map_event = armv7_a15_map_event; |
1288 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; | 1272 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1289 | return &armv7pmu; | 1273 | cpu_pmu->set_event_filter = armv7pmu_set_event_filter; |
1274 | return 0; | ||
1290 | } | 1275 | } |
1291 | 1276 | ||
1292 | static struct arm_pmu *__devinit armv7_a7_pmu_init(void) | 1277 | static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) |
1293 | { | 1278 | { |
1294 | armv7pmu.name = "ARMv7 Cortex-A7"; | 1279 | armv7pmu_init(cpu_pmu); |
1295 | armv7pmu.map_event = armv7_a7_map_event; | 1280 | cpu_pmu->name = "ARMv7 Cortex-A7"; |
1296 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1281 | cpu_pmu->map_event = armv7_a7_map_event; |
1297 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; | 1282 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1298 | return &armv7pmu; | 1283 | cpu_pmu->set_event_filter = armv7pmu_set_event_filter; |
1284 | return 0; | ||
1299 | } | 1285 | } |
1300 | #else | 1286 | #else |
1301 | static struct arm_pmu *__devinit armv7_a8_pmu_init(void) | 1287 | static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) |
1302 | { | 1288 | { |
1303 | return NULL; | 1289 | return -ENODEV; |
1304 | } | 1290 | } |
1305 | 1291 | ||
1306 | static struct arm_pmu *__devinit armv7_a9_pmu_init(void) | 1292 | static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) |
1307 | { | 1293 | { |
1308 | return NULL; | 1294 | return -ENODEV; |
1309 | } | 1295 | } |
1310 | 1296 | ||
1311 | static struct arm_pmu *__devinit armv7_a5_pmu_init(void) | 1297 | static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) |
1312 | { | 1298 | { |
1313 | return NULL; | 1299 | return -ENODEV; |
1314 | } | 1300 | } |
1315 | 1301 | ||
1316 | static struct arm_pmu *__devinit armv7_a15_pmu_init(void) | 1302 | static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) |
1317 | { | 1303 | { |
1318 | return NULL; | 1304 | return -ENODEV; |
1319 | } | 1305 | } |
1320 | 1306 | ||
1321 | static struct arm_pmu *__devinit armv7_a7_pmu_init(void) | 1307 | static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) |
1322 | { | 1308 | { |
1323 | return NULL; | 1309 | return -ENODEV; |
1324 | } | 1310 | } |
1325 | #endif /* CONFIG_CPU_V7 */ | 1311 | #endif /* CONFIG_CPU_V7 */ |
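The largest change in this file is structural: the single static struct arm_pmu armv7pmu initializer becomes armv7pmu_init(), which fills in a caller-owned descriptor, and the per-core wrappers (A5/A7/A8/A9/A15) now return 0 or -ENODEV instead of a pointer. Each probed PMU can then own an independently allocated struct arm_pmu rather than every CPU sharing one global; the xscale1/xscale2 conversion below follows the same shape. A standalone sketch of the pattern (names and counts are illustrative):

#include <stdio.h>
#include <string.h>

struct arm_pmu {
	const char *name;
	int num_events;
	void (*start)(struct arm_pmu *pmu);
};

static void demo_start(struct arm_pmu *pmu)
{
	printf("starting %s with %d counters\n", pmu->name, pmu->num_events);
}

/* Common init: shared callbacks and defaults. */
static void demo_pmu_init(struct arm_pmu *pmu)
{
	memset(pmu, 0, sizeof(*pmu));
	pmu->start = demo_start;
}

/* Per-variant init: only the name and counter count differ, mirroring
 * the armv7_a8/a9/a5/a15/a7_pmu_init() wrappers above. */
static int demo_a9_pmu_init(struct arm_pmu *pmu)
{
	demo_pmu_init(pmu);
	pmu->name = "demo Cortex-A9";
	pmu->num_events = 5;		/* hypothetical: CCNT + 4 event counters */
	return 0;
}

int main(void)
{
	struct arm_pmu pmu;

	if (demo_a9_pmu_init(&pmu) == 0)
		pmu.start(&pmu);
	return 0;
}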
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 426e19f380a2..0c8265e53d5f 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c | |||
@@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
224 | { | 224 | { |
225 | unsigned long pmnc; | 225 | unsigned long pmnc; |
226 | struct perf_sample_data data; | 226 | struct perf_sample_data data; |
227 | struct pmu_hw_events *cpuc; | 227 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
228 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
228 | struct pt_regs *regs; | 229 | struct pt_regs *regs; |
229 | int idx; | 230 | int idx; |
230 | 231 | ||
@@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
248 | 249 | ||
249 | regs = get_irq_regs(); | 250 | regs = get_irq_regs(); |
250 | 251 | ||
251 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
252 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 252 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
253 | struct perf_event *event = cpuc->events[idx]; | 253 | struct perf_event *event = cpuc->events[idx]; |
254 | struct hw_perf_event *hwc; | 254 | struct hw_perf_event *hwc; |
@@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
260 | continue; | 260 | continue; |
261 | 261 | ||
262 | hwc = &event->hw; | 262 | hwc = &event->hw; |
263 | armpmu_event_update(event, hwc, idx); | 263 | armpmu_event_update(event); |
264 | perf_sample_data_init(&data, 0, hwc->last_period); | 264 | perf_sample_data_init(&data, 0, hwc->last_period); |
265 | if (!armpmu_event_set_period(event, hwc, idx)) | 265 | if (!armpmu_event_set_period(event)) |
266 | continue; | 266 | continue; |
267 | 267 | ||
268 | if (perf_event_overflow(event, &data, regs)) | 268 | if (perf_event_overflow(event, &data, regs)) |
269 | cpu_pmu->disable(hwc, idx); | 269 | cpu_pmu->disable(event); |
270 | } | 270 | } |
271 | 271 | ||
272 | irq_work_run(); | 272 | irq_work_run(); |
@@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
280 | return IRQ_HANDLED; | 280 | return IRQ_HANDLED; |
281 | } | 281 | } |
282 | 282 | ||
283 | static void | 283 | static void xscale1pmu_enable_event(struct perf_event *event) |
284 | xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
285 | { | 284 | { |
286 | unsigned long val, mask, evt, flags; | 285 | unsigned long val, mask, evt, flags; |
286 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
287 | struct hw_perf_event *hwc = &event->hw; | ||
287 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 288 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
289 | int idx = hwc->idx; | ||
288 | 290 | ||
289 | switch (idx) { | 291 | switch (idx) { |
290 | case XSCALE_CYCLE_COUNTER: | 292 | case XSCALE_CYCLE_COUNTER: |
@@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
314 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 316 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
315 | } | 317 | } |
316 | 318 | ||
317 | static void | 319 | static void xscale1pmu_disable_event(struct perf_event *event) |
318 | xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
319 | { | 320 | { |
320 | unsigned long val, mask, evt, flags; | 321 | unsigned long val, mask, evt, flags; |
322 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
323 | struct hw_perf_event *hwc = &event->hw; | ||
321 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 324 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
325 | int idx = hwc->idx; | ||
322 | 326 | ||
323 | switch (idx) { | 327 | switch (idx) { |
324 | case XSCALE_CYCLE_COUNTER: | 328 | case XSCALE_CYCLE_COUNTER: |
@@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
348 | 352 | ||
349 | static int | 353 | static int |
350 | xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, | 354 | xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, |
351 | struct hw_perf_event *event) | 355 | struct perf_event *event) |
352 | { | 356 | { |
353 | if (XSCALE_PERFCTR_CCNT == event->config_base) { | 357 | struct hw_perf_event *hwc = &event->hw; |
358 | if (XSCALE_PERFCTR_CCNT == hwc->config_base) { | ||
354 | if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) | 359 | if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) |
355 | return -EAGAIN; | 360 | return -EAGAIN; |
356 | 361 | ||
@@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, | |||
366 | } | 371 | } |
367 | } | 372 | } |
368 | 373 | ||
369 | static void | 374 | static void xscale1pmu_start(struct arm_pmu *cpu_pmu) |
370 | xscale1pmu_start(void) | ||
371 | { | 375 | { |
372 | unsigned long flags, val; | 376 | unsigned long flags, val; |
373 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 377 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -379,8 +383,7 @@ xscale1pmu_start(void) | |||
379 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 383 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
380 | } | 384 | } |
381 | 385 | ||
382 | static void | 386 | static void xscale1pmu_stop(struct arm_pmu *cpu_pmu) |
383 | xscale1pmu_stop(void) | ||
384 | { | 387 | { |
385 | unsigned long flags, val; | 388 | unsigned long flags, val; |
386 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 389 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -392,9 +395,10 @@ xscale1pmu_stop(void) | |||
392 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 395 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
393 | } | 396 | } |
394 | 397 | ||
395 | static inline u32 | 398 | static inline u32 xscale1pmu_read_counter(struct perf_event *event) |
396 | xscale1pmu_read_counter(int counter) | ||
397 | { | 399 | { |
400 | struct hw_perf_event *hwc = &event->hw; | ||
401 | int counter = hwc->idx; | ||
398 | u32 val = 0; | 402 | u32 val = 0; |
399 | 403 | ||
400 | switch (counter) { | 404 | switch (counter) { |
@@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter) | |||
412 | return val; | 416 | return val; |
413 | } | 417 | } |
414 | 418 | ||
415 | static inline void | 419 | static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val) |
416 | xscale1pmu_write_counter(int counter, u32 val) | ||
417 | { | 420 | { |
421 | struct hw_perf_event *hwc = &event->hw; | ||
422 | int counter = hwc->idx; | ||
423 | |||
418 | switch (counter) { | 424 | switch (counter) { |
419 | case XSCALE_CYCLE_COUNTER: | 425 | case XSCALE_CYCLE_COUNTER: |
420 | asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); | 426 | asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); |
@@ -434,24 +440,22 @@ static int xscale_map_event(struct perf_event *event) | |||
434 | &xscale_perf_cache_map, 0xFF); | 440 | &xscale_perf_cache_map, 0xFF); |
435 | } | 441 | } |
436 | 442 | ||
437 | static struct arm_pmu xscale1pmu = { | 443 | static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu) |
438 | .name = "xscale1", | ||
439 | .handle_irq = xscale1pmu_handle_irq, | ||
440 | .enable = xscale1pmu_enable_event, | ||
441 | .disable = xscale1pmu_disable_event, | ||
442 | .read_counter = xscale1pmu_read_counter, | ||
443 | .write_counter = xscale1pmu_write_counter, | ||
444 | .get_event_idx = xscale1pmu_get_event_idx, | ||
445 | .start = xscale1pmu_start, | ||
446 | .stop = xscale1pmu_stop, | ||
447 | .map_event = xscale_map_event, | ||
448 | .num_events = 3, | ||
449 | .max_period = (1LLU << 32) - 1, | ||
450 | }; | ||
451 | |||
452 | static struct arm_pmu *__devinit xscale1pmu_init(void) | ||
453 | { | 444 | { |
454 | return &xscale1pmu; | 445 | cpu_pmu->name = "xscale1"; |
446 | cpu_pmu->handle_irq = xscale1pmu_handle_irq; | ||
447 | cpu_pmu->enable = xscale1pmu_enable_event; | ||
448 | cpu_pmu->disable = xscale1pmu_disable_event; | ||
449 | cpu_pmu->read_counter = xscale1pmu_read_counter; | ||
450 | cpu_pmu->write_counter = xscale1pmu_write_counter; | ||
451 | cpu_pmu->get_event_idx = xscale1pmu_get_event_idx; | ||
452 | cpu_pmu->start = xscale1pmu_start; | ||
453 | cpu_pmu->stop = xscale1pmu_stop; | ||
454 | cpu_pmu->map_event = xscale_map_event; | ||
455 | cpu_pmu->num_events = 3; | ||
456 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
457 | |||
458 | return 0; | ||
455 | } | 459 | } |
456 | 460 | ||
457 | #define XSCALE2_OVERFLOWED_MASK 0x01f | 461 | #define XSCALE2_OVERFLOWED_MASK 0x01f |
@@ -567,7 +571,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
567 | { | 571 | { |
568 | unsigned long pmnc, of_flags; | 572 | unsigned long pmnc, of_flags; |
569 | struct perf_sample_data data; | 573 | struct perf_sample_data data; |
570 | struct pmu_hw_events *cpuc; | 574 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
575 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
571 | struct pt_regs *regs; | 576 | struct pt_regs *regs; |
572 | int idx; | 577 | int idx; |
573 | 578 | ||
@@ -585,7 +590,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
585 | 590 | ||
586 | regs = get_irq_regs(); | 591 | regs = get_irq_regs(); |
587 | 592 | ||
588 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
589 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 593 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
590 | struct perf_event *event = cpuc->events[idx]; | 594 | struct perf_event *event = cpuc->events[idx]; |
591 | struct hw_perf_event *hwc; | 595 | struct hw_perf_event *hwc; |
@@ -597,13 +601,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
597 | continue; | 601 | continue; |
598 | 602 | ||
599 | hwc = &event->hw; | 603 | hwc = &event->hw; |
600 | armpmu_event_update(event, hwc, idx); | 604 | armpmu_event_update(event); |
601 | perf_sample_data_init(&data, 0, hwc->last_period); | 605 | perf_sample_data_init(&data, 0, hwc->last_period); |
602 | if (!armpmu_event_set_period(event, hwc, idx)) | 606 | if (!armpmu_event_set_period(event)) |
603 | continue; | 607 | continue; |
604 | 608 | ||
605 | if (perf_event_overflow(event, &data, regs)) | 609 | if (perf_event_overflow(event, &data, regs)) |
606 | cpu_pmu->disable(hwc, idx); | 610 | cpu_pmu->disable(event); |
607 | } | 611 | } |
608 | 612 | ||
609 | irq_work_run(); | 613 | irq_work_run(); |
@@ -617,11 +621,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
617 | return IRQ_HANDLED; | 621 | return IRQ_HANDLED; |
618 | } | 622 | } |
619 | 623 | ||
620 | static void | 624 | static void xscale2pmu_enable_event(struct perf_event *event) |
621 | xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
622 | { | 625 | { |
623 | unsigned long flags, ien, evtsel; | 626 | unsigned long flags, ien, evtsel; |
627 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
628 | struct hw_perf_event *hwc = &event->hw; | ||
624 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 629 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
630 | int idx = hwc->idx; | ||
625 | 631 | ||
626 | ien = xscale2pmu_read_int_enable(); | 632 | ien = xscale2pmu_read_int_enable(); |
627 | evtsel = xscale2pmu_read_event_select(); | 633 | evtsel = xscale2pmu_read_event_select(); |
@@ -661,11 +667,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
661 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 667 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
662 | } | 668 | } |
663 | 669 | ||
664 | static void | 670 | static void xscale2pmu_disable_event(struct perf_event *event) |
665 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
666 | { | 671 | { |
667 | unsigned long flags, ien, evtsel, of_flags; | 672 | unsigned long flags, ien, evtsel, of_flags; |
673 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
674 | struct hw_perf_event *hwc = &event->hw; | ||
668 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 675 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
676 | int idx = hwc->idx; | ||
669 | 677 | ||
670 | ien = xscale2pmu_read_int_enable(); | 678 | ien = xscale2pmu_read_int_enable(); |
671 | evtsel = xscale2pmu_read_event_select(); | 679 | evtsel = xscale2pmu_read_event_select(); |
@@ -713,7 +721,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
713 | 721 | ||
714 | static int | 722 | static int |
715 | xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, | 723 | xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, |
716 | struct hw_perf_event *event) | 724 | struct perf_event *event) |
717 | { | 725 | { |
718 | int idx = xscale1pmu_get_event_idx(cpuc, event); | 726 | int idx = xscale1pmu_get_event_idx(cpuc, event); |
719 | if (idx >= 0) | 727 | if (idx >= 0) |
@@ -727,8 +735,7 @@ out: | |||
727 | return idx; | 735 | return idx; |
728 | } | 736 | } |
729 | 737 | ||
730 | static void | 738 | static void xscale2pmu_start(struct arm_pmu *cpu_pmu) |
731 | xscale2pmu_start(void) | ||
732 | { | 739 | { |
733 | unsigned long flags, val; | 740 | unsigned long flags, val; |
734 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 741 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -740,8 +747,7 @@ xscale2pmu_start(void) | |||
740 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 747 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
741 | } | 748 | } |
742 | 749 | ||
743 | static void | 750 | static void xscale2pmu_stop(struct arm_pmu *cpu_pmu) |
744 | xscale2pmu_stop(void) | ||
745 | { | 751 | { |
746 | unsigned long flags, val; | 752 | unsigned long flags, val; |
747 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 753 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -753,9 +759,10 @@ xscale2pmu_stop(void) | |||
753 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 759 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
754 | } | 760 | } |
755 | 761 | ||
756 | static inline u32 | 762 | static inline u32 xscale2pmu_read_counter(struct perf_event *event) |
757 | xscale2pmu_read_counter(int counter) | ||
758 | { | 763 | { |
764 | struct hw_perf_event *hwc = &event->hw; | ||
765 | int counter = hwc->idx; | ||
759 | u32 val = 0; | 766 | u32 val = 0; |
760 | 767 | ||
761 | switch (counter) { | 768 | switch (counter) { |
@@ -779,9 +786,11 @@ xscale2pmu_read_counter(int counter) | |||
779 | return val; | 786 | return val; |
780 | } | 787 | } |
781 | 788 | ||
782 | static inline void | 789 | static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val) |
783 | xscale2pmu_write_counter(int counter, u32 val) | ||
784 | { | 790 | { |
791 | struct hw_perf_event *hwc = &event->hw; | ||
792 | int counter = hwc->idx; | ||
793 | |||
785 | switch (counter) { | 794 | switch (counter) { |
786 | case XSCALE_CYCLE_COUNTER: | 795 | case XSCALE_CYCLE_COUNTER: |
787 | asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); | 796 | asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); |
@@ -801,33 +810,31 @@ xscale2pmu_write_counter(int counter, u32 val) | |||
801 | } | 810 | } |
802 | } | 811 | } |
803 | 812 | ||
804 | static struct arm_pmu xscale2pmu = { | 813 | static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu) |
805 | .name = "xscale2", | ||
806 | .handle_irq = xscale2pmu_handle_irq, | ||
807 | .enable = xscale2pmu_enable_event, | ||
808 | .disable = xscale2pmu_disable_event, | ||
809 | .read_counter = xscale2pmu_read_counter, | ||
810 | .write_counter = xscale2pmu_write_counter, | ||
811 | .get_event_idx = xscale2pmu_get_event_idx, | ||
812 | .start = xscale2pmu_start, | ||
813 | .stop = xscale2pmu_stop, | ||
814 | .map_event = xscale_map_event, | ||
815 | .num_events = 5, | ||
816 | .max_period = (1LLU << 32) - 1, | ||
817 | }; | ||
818 | |||
819 | static struct arm_pmu *__devinit xscale2pmu_init(void) | ||
820 | { | 814 | { |
821 | return &xscale2pmu; | 815 | cpu_pmu->name = "xscale2"; |
816 | cpu_pmu->handle_irq = xscale2pmu_handle_irq; | ||
817 | cpu_pmu->enable = xscale2pmu_enable_event; | ||
818 | cpu_pmu->disable = xscale2pmu_disable_event; | ||
819 | cpu_pmu->read_counter = xscale2pmu_read_counter; | ||
820 | cpu_pmu->write_counter = xscale2pmu_write_counter; | ||
821 | cpu_pmu->get_event_idx = xscale2pmu_get_event_idx; | ||
822 | cpu_pmu->start = xscale2pmu_start; | ||
823 | cpu_pmu->stop = xscale2pmu_stop; | ||
824 | cpu_pmu->map_event = xscale_map_event; | ||
825 | cpu_pmu->num_events = 5; | ||
826 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
827 | |||
828 | return 0; | ||
822 | } | 829 | } |
823 | #else | 830 | #else |
824 | static struct arm_pmu *__devinit xscale1pmu_init(void) | 831 | static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu) |
825 | { | 832 | { |
826 | return NULL; | 833 | return -ENODEV; |
827 | } | 834 | } |
828 | 835 | ||
829 | static struct arm_pmu *__devinit xscale2pmu_init(void) | 836 | static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu) |
830 | { | 837 | { |
831 | return NULL; | 838 | return -ENODEV; |
832 | } | 839 | } |
833 | #endif /* CONFIG_CPU_XSCALE */ | 840 | #endif /* CONFIG_CPU_XSCALE */ |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index da1d1aa20ad9..9a89bf4aefe1 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -383,6 +383,12 @@ void cpu_init(void) | |||
383 | BUG(); | 383 | BUG(); |
384 | } | 384 | } |
385 | 385 | ||
386 | /* | ||
387 | * This only works on resume and secondary cores. For booting on the | ||
388 | * boot cpu, smp_prepare_boot_cpu is called after percpu area setup. | ||
389 | */ | ||
390 | set_my_cpu_offset(per_cpu_offset(cpu)); | ||
391 | |||
386 | cpu_proc_init(); | 392 | cpu_proc_init(); |
387 | 393 | ||
388 | /* | 394 | /* |
@@ -426,13 +432,14 @@ int __cpu_logical_map[NR_CPUS]; | |||
426 | void __init smp_setup_processor_id(void) | 432 | void __init smp_setup_processor_id(void) |
427 | { | 433 | { |
428 | int i; | 434 | int i; |
429 | u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0; | 435 | u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; |
436 | u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
430 | 437 | ||
431 | cpu_logical_map(0) = cpu; | 438 | cpu_logical_map(0) = cpu; |
432 | for (i = 1; i < NR_CPUS; ++i) | 439 | for (i = 1; i < nr_cpu_ids; ++i) |
433 | cpu_logical_map(i) = i == cpu ? 0 : i; | 440 | cpu_logical_map(i) = i == cpu ? 0 : i; |
434 | 441 | ||
435 | printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu); | 442 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); |
436 | } | 443 | } |
437 | 444 | ||
438 | static void __init setup_processor(void) | 445 | static void __init setup_processor(void) |
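smp_setup_processor_id() now masks the MPIDR with the full MPIDR_HWID_BITMASK, takes the logical index from affinity level 0, bounds the loop by nr_cpu_ids rather than NR_CPUS, and prints the whole hardware id. MPIDR_AFFINITY_LEVEL() presumably extracts one 8-bit affinity field per level (Aff0 in bits [7:0], Aff1 in [15:8], Aff2 in [23:16]); a standalone model:

#include <stdio.h>

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1u << MPIDR_LEVEL_BITS) - 1)
#define AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

int main(void)
{
	unsigned int mpidr = 0x80000102u;	/* hypothetical: cluster 1, core 2 */

	printf("Aff0=%u Aff1=%u Aff2=%u\n",
	       AFFINITY_LEVEL(mpidr, 0),
	       AFFINITY_LEVEL(mpidr, 1),
	       AFFINITY_LEVEL(mpidr, 2));
	return 0;
}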
@@ -758,6 +765,7 @@ void __init setup_arch(char **cmdline_p) | |||
758 | 765 | ||
759 | unflatten_device_tree(); | 766 | unflatten_device_tree(); |
760 | 767 | ||
768 | arm_dt_init_cpu_maps(); | ||
761 | #ifdef CONFIG_SMP | 769 | #ifdef CONFIG_SMP |
762 | if (is_smp()) { | 770 | if (is_smp()) { |
763 | smp_set_ops(mdesc->smp); | 771 | smp_set_ops(mdesc->smp); |
@@ -841,12 +849,9 @@ static const char *hwcap_str[] = { | |||
841 | 849 | ||
842 | static int c_show(struct seq_file *m, void *v) | 850 | static int c_show(struct seq_file *m, void *v) |
843 | { | 851 | { |
844 | int i; | 852 | int i, j; |
853 | u32 cpuid; | ||
845 | 854 | ||
846 | seq_printf(m, "Processor\t: %s rev %d (%s)\n", | ||
847 | cpu_name, read_cpuid_id() & 15, elf_platform); | ||
848 | |||
849 | #if defined(CONFIG_SMP) | ||
850 | for_each_online_cpu(i) { | 855 | for_each_online_cpu(i) { |
851 | /* | 856 | /* |
852 | * glibc reads /proc/cpuinfo to determine the number of | 857 | * glibc reads /proc/cpuinfo to determine the number of |
@@ -854,45 +859,48 @@ static int c_show(struct seq_file *m, void *v) | |||
854 | * "processor". Give glibc what it expects. | 859 | * "processor". Give glibc what it expects. |
855 | */ | 860 | */ |
856 | seq_printf(m, "processor\t: %d\n", i); | 861 | seq_printf(m, "processor\t: %d\n", i); |
857 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", | 862 | cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id(); |
863 | seq_printf(m, "model name\t: %s rev %d (%s)\n", | ||
864 | cpu_name, cpuid & 15, elf_platform); | ||
865 | |||
866 | #if defined(CONFIG_SMP) | ||
867 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | ||
858 | per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), | 868 | per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), |
859 | (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); | 869 | (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); |
860 | } | 870 | #else |
861 | #else /* CONFIG_SMP */ | 871 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", |
862 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | 872 | loops_per_jiffy / (500000/HZ), |
863 | loops_per_jiffy / (500000/HZ), | 873 | (loops_per_jiffy / (5000/HZ)) % 100); |
864 | (loops_per_jiffy / (5000/HZ)) % 100); | ||
865 | #endif | 874 | #endif |
875 | /* dump out the processor features */ | ||
876 | seq_puts(m, "Features\t: "); | ||
866 | 877 | ||
867 | /* dump out the processor features */ | 878 | for (j = 0; hwcap_str[j]; j++) |
868 | seq_puts(m, "Features\t: "); | 879 | if (elf_hwcap & (1 << j)) |
869 | 880 | seq_printf(m, "%s ", hwcap_str[j]); | |
870 | for (i = 0; hwcap_str[i]; i++) | ||
871 | if (elf_hwcap & (1 << i)) | ||
872 | seq_printf(m, "%s ", hwcap_str[i]); | ||
873 | 881 | ||
874 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); | 882 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24); |
875 | seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]); | 883 | seq_printf(m, "CPU architecture: %s\n", |
884 | proc_arch[cpu_architecture()]); | ||
876 | 885 | ||
877 | if ((read_cpuid_id() & 0x0008f000) == 0x00000000) { | 886 | if ((cpuid & 0x0008f000) == 0x00000000) { |
878 | /* pre-ARM7 */ | 887 | /* pre-ARM7 */ |
879 | seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4); | 888 | seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4); |
880 | } else { | ||
881 | if ((read_cpuid_id() & 0x0008f000) == 0x00007000) { | ||
882 | /* ARM7 */ | ||
883 | seq_printf(m, "CPU variant\t: 0x%02x\n", | ||
884 | (read_cpuid_id() >> 16) & 127); | ||
885 | } else { | 889 | } else { |
886 | /* post-ARM7 */ | 890 | if ((cpuid & 0x0008f000) == 0x00007000) { |
887 | seq_printf(m, "CPU variant\t: 0x%x\n", | 891 | /* ARM7 */ |
888 | (read_cpuid_id() >> 20) & 15); | 892 | seq_printf(m, "CPU variant\t: 0x%02x\n", |
893 | (cpuid >> 16) & 127); | ||
894 | } else { | ||
895 | /* post-ARM7 */ | ||
896 | seq_printf(m, "CPU variant\t: 0x%x\n", | ||
897 | (cpuid >> 20) & 15); | ||
898 | } | ||
899 | seq_printf(m, "CPU part\t: 0x%03x\n", | ||
900 | (cpuid >> 4) & 0xfff); | ||
889 | } | 901 | } |
890 | seq_printf(m, "CPU part\t: 0x%03x\n", | 902 | seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15); |
891 | (read_cpuid_id() >> 4) & 0xfff); | ||
892 | } | 903 | } |
893 | seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); | ||
894 | |||
895 | seq_puts(m, "\n"); | ||
896 | 904 | ||
897 | seq_printf(m, "Hardware\t: %s\n", machine_name); | 905 | seq_printf(m, "Hardware\t: %s\n", machine_name); |
898 | seq_printf(m, "Revision\t: %04x\n", system_rev); | 906 | seq_printf(m, "Revision\t: %04x\n", system_rev); |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 57f537731979..84f4cbf652e5 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -281,6 +281,7 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid) | |||
281 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); | 281 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); |
282 | 282 | ||
283 | cpu_info->loops_per_jiffy = loops_per_jiffy; | 283 | cpu_info->loops_per_jiffy = loops_per_jiffy; |
284 | cpu_info->cpuid = read_cpuid_id(); | ||
284 | 285 | ||
285 | store_cpu_topology(cpuid); | 286 | store_cpu_topology(cpuid); |
286 | } | 287 | } |
@@ -313,9 +314,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void) | |||
313 | current->active_mm = mm; | 314 | current->active_mm = mm; |
314 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | 315 | cpumask_set_cpu(cpu, mm_cpumask(mm)); |
315 | 316 | ||
317 | cpu_init(); | ||
318 | |||
316 | printk("CPU%u: Booted secondary processor\n", cpu); | 319 | printk("CPU%u: Booted secondary processor\n", cpu); |
317 | 320 | ||
318 | cpu_init(); | ||
319 | preempt_disable(); | 321 | preempt_disable(); |
320 | trace_hardirqs_off(); | 322 | trace_hardirqs_off(); |
321 | 323 | ||
@@ -371,6 +373,7 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
371 | 373 | ||
372 | void __init smp_prepare_boot_cpu(void) | 374 | void __init smp_prepare_boot_cpu(void) |
373 | { | 375 | { |
376 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); | ||
374 | } | 377 | } |
375 | 378 | ||
376 | void __init smp_prepare_cpus(unsigned int max_cpus) | 379 | void __init smp_prepare_cpus(unsigned int max_cpus) |
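The per-CPU offset is now seeded in two places: smp_prepare_boot_cpu() covers the boot CPU once the percpu areas exist, while the set_my_cpu_offset() call added to cpu_init() in setup.c covers secondary cores and the resume path, as its comment notes. Moving cpu_init() ahead of the "Booted secondary processor" printk in secondary_start_kernel() presumably guarantees the offset is valid before any per-CPU data is dereferenced on the incoming core.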
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 26c12c6440fc..79282ebcd939 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -196,32 +196,7 @@ static inline void parse_dt_topology(void) {} | |||
196 | static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} | 196 | static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} |
197 | #endif | 197 | #endif |
198 | 198 | ||
199 | 199 | /* | |
200 | /* | ||
201 | * cpu topology management | ||
202 | */ | ||
203 | |||
204 | #define MPIDR_SMP_BITMASK (0x3 << 30) | ||
205 | #define MPIDR_SMP_VALUE (0x2 << 30) | ||
206 | |||
207 | #define MPIDR_MT_BITMASK (0x1 << 24) | ||
208 | |||
209 | /* | ||
210 | * These masks reflect the current use of the affinity levels. | ||
211 | * The affinity level can be up to 16 bits according to ARM ARM | ||
212 | */ | ||
213 | #define MPIDR_HWID_BITMASK 0xFFFFFF | ||
214 | |||
215 | #define MPIDR_LEVEL0_MASK 0x3 | ||
216 | #define MPIDR_LEVEL0_SHIFT 0 | ||
217 | |||
218 | #define MPIDR_LEVEL1_MASK 0xF | ||
219 | #define MPIDR_LEVEL1_SHIFT 8 | ||
220 | |||
221 | #define MPIDR_LEVEL2_MASK 0xFF | ||
222 | #define MPIDR_LEVEL2_SHIFT 16 | ||
223 | |||
224 | /* | ||
225 | * cpu topology table | 200 | * cpu topology table |
226 | */ | 201 | */ |
227 | struct cputopo_arm cpu_topology[NR_CPUS]; | 202 | struct cputopo_arm cpu_topology[NR_CPUS]; |
@@ -282,19 +257,14 @@ void store_cpu_topology(unsigned int cpuid) | |||
282 | 257 | ||
283 | if (mpidr & MPIDR_MT_BITMASK) { | 258 | if (mpidr & MPIDR_MT_BITMASK) { |
284 | /* core performance interdependency */ | 259 | /* core performance interdependency */ |
285 | cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT) | 260 | cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
286 | & MPIDR_LEVEL0_MASK; | 261 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
287 | cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT) | 262 | cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); |
288 | & MPIDR_LEVEL1_MASK; | ||
289 | cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT) | ||
290 | & MPIDR_LEVEL2_MASK; | ||
291 | } else { | 263 | } else { |
292 | /* largely independent cores */ | 264 | /* largely independent cores */ |
293 | cpuid_topo->thread_id = -1; | 265 | cpuid_topo->thread_id = -1; |
294 | cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT) | 266 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
295 | & MPIDR_LEVEL0_MASK; | 267 | cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
296 | cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT) | ||
297 | & MPIDR_LEVEL1_MASK; | ||
298 | } | 268 | } |
299 | } else { | 269 | } else { |
300 | /* | 270 | /* |