Diffstat (limited to 'arch/arm/kernel')

 -rw-r--r--  arch/arm/kernel/bios32.c     |   3
 -rw-r--r--  arch/arm/kernel/dma.c        |  36
 -rw-r--r--  arch/arm/kernel/perf_event.c | 928
 -rw-r--r--  arch/arm/kernel/pmu.c        | 127
 -rw-r--r--  arch/arm/kernel/smp.c        |   2
 -rw-r--r--  arch/arm/kernel/time.c       |  70

 6 files changed, 1021 insertions(+), 145 deletions(-)
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index bd397e0b663e..c6273a3bfc25 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -527,6 +527,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
 	if (!sys)
 		panic("PCI: unable to allocate sys data!");
 
+#ifdef CONFIG_PCI_DOMAINS
+	sys->domain  = hw->domain;
+#endif
 	sys->hw      = hw;
 	sys->busnr   = busnr;
 	sys->swizzle = hw->swizzle;
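
With CONFIG_PCI_DOMAINS enabled, a platform can now place each PCI controller in its own domain by filling the hw_pci field that pcibios_init_hw() copies above. A board-side sketch; the mymach_* hooks are hypothetical and only .domain is new in this patch:

#include <linux/pci.h>
#include <asm/mach/pci.h>

/* Hypothetical per-board PCI hooks; declared elsewhere in the port. */
extern int mymach_pci_setup(int nr, struct pci_sys_data *sys);
extern struct pci_bus *mymach_pci_scan(int nr, struct pci_sys_data *sys);
extern int mymach_map_irq(struct pci_dev *dev, u8 slot, u8 pin);

static struct hw_pci mymach_pci __initdata = {
	.domain		= 0,	/* new: copied into pci_sys_data by pcibios_init_hw() */
	.nr_controllers	= 1,
	.swizzle	= pci_std_swizzle,
	.map_irq	= mymach_map_irq,
	.setup		= mymach_pci_setup,
	.scan		= mymach_pci_scan,
};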
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 7d5b9fb01e71..2c4a185f92cd 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -16,6 +16,8 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
 
 #include <asm/dma.h>
 
@@ -264,3 +266,37 @@ int get_dma_residue(unsigned int chan)
 	return ret;
 }
 EXPORT_SYMBOL(get_dma_residue);
+
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
+		dma_t *dma = dma_channel(i);
+		if (dma && dma->lock)
+			seq_printf(m, "%2d: %s\n", i, dma->device_id);
+	}
+	return 0;
+}
+
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+	.open		= proc_dma_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+	proc_create("dma", 0, NULL, &proc_dma_operations);
+	return 0;
+}
+
+__initcall(proc_dma_init);
+#endif
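
The new CONFIG_PROC_FS block gives ARM a read-only /proc/dma that lists claimed channels, one "%2d: %s" pair per line as printed by proc_dma_show(). A minimal userspace check, assuming nothing beyond standard C:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/dma", "r");	/* created by proc_dma_init() above */

	if (!f) {
		perror("/proc/dma");
		return 1;
	}
	while (fgets(line, sizeof(line), f))	/* e.g. " 2: floppy" */
		fputs(line, stdout);
	fclose(f);
	return 0;
}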
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 9e70f2053f9a..c45768614c8a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -16,7 +16,9 @@
 
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/perf_event.h>
+#include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 
@@ -26,7 +28,7 @@
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>
 
-static const struct pmu_irqs *pmu_irqs;
+static struct platform_device *pmu_device;
 
 /*
  * Hardware lock to serialize accesses to PMU registers. Needed for the
@@ -67,8 +69,18 @@ struct cpu_hw_events {
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
+/* PMU names. */
+static const char *arm_pmu_names[] = {
+	[ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
+	[ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
+	[ARM_PERF_PMU_ID_V6]	  = "v6",
+	[ARM_PERF_PMU_ID_V6MP]	  = "v6mpcore",
+	[ARM_PERF_PMU_ID_CA8]	  = "ARMv7 Cortex-A8",
+	[ARM_PERF_PMU_ID_CA9]	  = "ARMv7 Cortex-A9",
+};
+
 struct arm_pmu {
-	char		*name;
+	enum arm_perf_pmu_ids id;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct hw_perf_event *evt, int idx);
 	void		(*disable)(struct hw_perf_event *evt, int idx);
@@ -87,6 +99,30 @@ struct arm_pmu {
 /* Set at runtime when we know what CPU type we are. */
 static const struct arm_pmu *armpmu;
 
+enum arm_perf_pmu_ids
+armpmu_get_pmu_id(void)
+{
+	int id = -ENODEV;
+
+	if (armpmu != NULL)
+		id = armpmu->id;
+
+	return id;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+
+int
+armpmu_get_max_events(void)
+{
+	int max_events = 0;
+
+	if (armpmu != NULL)
+		max_events = armpmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_max_events);
+
 #define HW_OP_UNSUPPORTED		0xFFFF
 
 #define C(_x) \
@@ -314,38 +350,44 @@ validate_group(struct perf_event *event)
 static int
 armpmu_reserve_hardware(void)
 {
-	int i;
-	int err;
+	int i, err = -ENODEV, irq;
 
-	pmu_irqs = reserve_pmu();
-	if (IS_ERR(pmu_irqs)) {
+	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
+	if (IS_ERR(pmu_device)) {
 		pr_warning("unable to reserve pmu\n");
-		return PTR_ERR(pmu_irqs);
+		return PTR_ERR(pmu_device);
 	}
 
-	init_pmu();
+	init_pmu(ARM_PMU_DEVICE_CPU);
 
-	if (pmu_irqs->num_irqs < 1) {
+	if (pmu_device->num_resources < 1) {
 		pr_err("no irqs for PMUs defined\n");
 		return -ENODEV;
 	}
 
-	for (i = 0; i < pmu_irqs->num_irqs; ++i) {
-		err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
+	for (i = 0; i < pmu_device->num_resources; ++i) {
+		irq = platform_get_irq(pmu_device, i);
+		if (irq < 0)
+			continue;
+
+		err = request_irq(irq, armpmu->handle_irq,
 				  IRQF_DISABLED | IRQF_NOBALANCING,
 				  "armpmu", NULL);
 		if (err) {
-			pr_warning("unable to request IRQ%d for ARM "
-				   "perf counters\n", pmu_irqs->irqs[i]);
+			pr_warning("unable to request IRQ%d for ARM perf "
+				   "counters\n", irq);
 			break;
 		}
 	}
 
 	if (err) {
-		for (i = i - 1; i >= 0; --i)
-			free_irq(pmu_irqs->irqs[i], NULL);
-		release_pmu(pmu_irqs);
-		pmu_irqs = NULL;
+		for (i = i - 1; i >= 0; --i) {
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, NULL);
+		}
+		release_pmu(pmu_device);
+		pmu_device = NULL;
 	}
 
 	return err;
@@ -354,14 +396,17 @@ armpmu_reserve_hardware(void)
 static void
 armpmu_release_hardware(void)
 {
-	int i;
+	int i, irq;
 
-	for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
-		free_irq(pmu_irqs->irqs[i], NULL);
+	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, NULL);
+	}
 	armpmu->stop();
 
-	release_pmu(pmu_irqs);
-	pmu_irqs = NULL;
+	release_pmu(pmu_device);
+	pmu_device = NULL;
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
@@ -1144,7 +1189,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 }
 
 static const struct arm_pmu armv6pmu = {
-	.name			= "v6",
+	.id			= ARM_PERF_PMU_ID_V6,
 	.handle_irq		= armv6pmu_handle_irq,
 	.enable			= armv6pmu_enable_event,
 	.disable		= armv6pmu_disable_event,
@@ -1167,7 +1212,7 @@ static const struct arm_pmu armv6pmu = {
  * reset the period and enable the interrupt reporting.
  */
 static const struct arm_pmu armv6mpcore_pmu = {
-	.name			= "v6mpcore",
+	.id			= ARM_PERF_PMU_ID_V6MP,
 	.handle_irq		= armv6pmu_handle_irq,
 	.enable			= armv6pmu_enable_event,
 	.disable		= armv6mpcore_pmu_disable_event,
@@ -1197,10 +1242,6 @@ static const struct arm_pmu armv6mpcore_pmu = {
  * counter and all 4 performance counters together can be reset separately.
  */
 
-#define ARMV7_PMU_CORTEX_A8_NAME	"ARMv7 Cortex-A8"
-
-#define ARMV7_PMU_CORTEX_A9_NAME	"ARMv7 Cortex-A9"
-
 /* Common ARMv7 event types */
 enum armv7_perf_types {
 	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
@@ -2079,6 +2120,803 @@ static u32 __init armv7_reset_read_pmnc(void)
 	return nb_cnt + 1;
 }
 
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Based on xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ *	- xscale1pmu: 2 event counters and a cycle counter
+ *	- xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+enum xscale_perf_types {
+	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
+	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
+	XSCALE_PERFCTR_DATA_STALL		= 0x02,
+	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
+	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
+	XSCALE_PERFCTR_BRANCH			= 0x05,
+	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
+	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
+	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
+	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
+	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
+	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
+	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
+	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
+	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
+	XSCALE_PERFCTR_BCU_FULL			= 0x11,
+	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
+	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
+	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
+	XSCALE_PERFCTR_RMW			= 0x16,
+	/* XSCALE_PERFCTR_CCNT is not hardware defined */
+	XSCALE_PERFCTR_CCNT			= 0xFE,
+	XSCALE_PERFCTR_UNUSED			= 0xFF,
+};
+
+enum xscale_counters {
+	XSCALE_CYCLE_COUNTER	= 1,
+	XSCALE_COUNTER0,
+	XSCALE_COUNTER1,
+	XSCALE_COUNTER2,
+	XSCALE_COUNTER3,
+};
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]	    = XSCALE_PERFCTR_CCNT,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    = XSCALE_PERFCTR_INSTRUCTION,
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = XSCALE_PERFCTR_BRANCH_MISS,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					   [PERF_COUNT_HW_CACHE_OP_MAX]
+					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+#define XSCALE_PMU_ENABLE	0x001
+#define XSCALE_PMN_RESET	0x002
+#define XSCALE_CCNT_RESET	0x004
+#define XSCALE_PMU_RESET	(CCNT_RESET | PMN_RESET)
+#define XSCALE_PMU_CNT64	0x008
+
+static inline int
+xscalepmu_event_map(int config)
+{
+	int mapping = xscale_perf_map[config];
+	if (HW_OP_UNSUPPORTED == mapping)
+		mapping = -EOPNOTSUPP;
+	return mapping;
+}
+
+static u64
+xscalepmu_raw_event(u64 config)
+{
+	return config & 0xff;
+}
+
+#define XSCALE1_OVERFLOWED_MASK	0x700
+#define XSCALE1_CCOUNT_OVERFLOW	0x400
+#define XSCALE1_COUNT0_OVERFLOW	0x100
+#define XSCALE1_COUNT1_OVERFLOW	0x200
+#define XSCALE1_CCOUNT_INT_EN	0x040
+#define XSCALE1_COUNT0_INT_EN	0x010
+#define XSCALE1_COUNT1_INT_EN	0x020
+#define XSCALE1_COUNT0_EVT_SHFT	12
+#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT	20
+#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
+
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+	return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+	/* upper 4bits and 7, 11 are write-as-0 */
+	val &= 0xffff77f;
+	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+					enum xscale_counters counter)
+{
+	int ret = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+		break;
+	case XSCALE_COUNTER0:
+		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+		break;
+	case XSCALE_COUNTER1:
+		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+	}
+
+	return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long pmnc;
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	/*
+	 * NOTE: there's an A stepping erratum that states if an overflow
+	 *       bit already exists and another occurs, the previous
+	 *       Overflow bit gets cleared. There's no workaround.
+	 *       Fixed in B stepping or later.
+	 */
+	pmnc = xscale1pmu_read_pmnc();
+
+	/*
+	 * Write the value back to clear the overflow flags. Overflow
+	 * flags remain in pmnc for use below. We also disable the PMU
+	 * while we process the interrupt.
+	 */
+	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+		return IRQ_NONE;
+
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, 0, &data, regs))
+			armpmu->disable(hwc, idx);
+	}
+
+	perf_event_do_pending();
+
+	/*
+	 * Re-enable the PMU.
+	 */
+	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+	xscale1pmu_write_pmnc(pmnc);
+
+	return IRQ_HANDLED;
+}
+
+static void
+xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long val, mask, evt, flags;
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		mask = 0;
+		evt = XSCALE1_CCOUNT_INT_EN;
+		break;
+	case XSCALE_COUNTER0:
+		mask = XSCALE1_COUNT0_EVT_MASK;
+		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
+			XSCALE1_COUNT0_INT_EN;
+		break;
+	case XSCALE_COUNTER1:
+		mask = XSCALE1_COUNT1_EVT_MASK;
+		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
+			XSCALE1_COUNT1_INT_EN;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val &= ~mask;
+	val |= evt;
+	xscale1pmu_write_pmnc(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long val, mask, evt, flags;
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		mask = XSCALE1_CCOUNT_INT_EN;
+		evt = 0;
+		break;
+	case XSCALE_COUNTER0:
+		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
+		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER1:
+		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
+		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val &= ~mask;
+	val |= evt;
+	xscale1pmu_write_pmnc(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+			struct hw_perf_event *event)
+{
+	if (XSCALE_PERFCTR_CCNT == event->config_base) {
+		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
+			return -EAGAIN;
+
+		return XSCALE_CYCLE_COUNTER;
+	} else {
+		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
+			return XSCALE_COUNTER1;
+		}
+
+		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
+			return XSCALE_COUNTER0;
+		}
+
+		return -EAGAIN;
+	}
+}
+
+static void
+xscale1pmu_start(void)
+{
+	unsigned long flags, val;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val |= XSCALE_PMU_ENABLE;
+	xscale1pmu_write_pmnc(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_stop(void)
+{
+	unsigned long flags, val;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val &= ~XSCALE_PMU_ENABLE;
+	xscale1pmu_write_pmnc(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale1pmu_read_counter(int counter)
+{
+	u32 val = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
+		break;
+	}
+
+	return val;
+}
+
+static inline void
+xscale1pmu_write_counter(int counter, u32 val)
+{
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
+		break;
+	}
+}
+
+static const struct arm_pmu xscale1pmu = {
+	.id		= ARM_PERF_PMU_ID_XSCALE1,
+	.handle_irq	= xscale1pmu_handle_irq,
+	.enable		= xscale1pmu_enable_event,
+	.disable	= xscale1pmu_disable_event,
+	.event_map	= xscalepmu_event_map,
+	.raw_event	= xscalepmu_raw_event,
+	.read_counter	= xscale1pmu_read_counter,
+	.write_counter	= xscale1pmu_write_counter,
+	.get_event_idx	= xscale1pmu_get_event_idx,
+	.start		= xscale1pmu_start,
+	.stop		= xscale1pmu_stop,
+	.num_events	= 3,
+	.max_period	= (1LLU << 32) - 1,
+};
+
+#define XSCALE2_OVERFLOWED_MASK	0x01f
+#define XSCALE2_CCOUNT_OVERFLOW	0x001
+#define XSCALE2_COUNT0_OVERFLOW	0x002
+#define XSCALE2_COUNT1_OVERFLOW	0x004
+#define XSCALE2_COUNT2_OVERFLOW	0x008
+#define XSCALE2_COUNT3_OVERFLOW	0x010
+#define XSCALE2_CCOUNT_INT_EN	0x001
+#define XSCALE2_COUNT0_INT_EN	0x002
+#define XSCALE2_COUNT1_INT_EN	0x004
+#define XSCALE2_COUNT2_INT_EN	0x008
+#define XSCALE2_COUNT3_INT_EN	0x010
+#define XSCALE2_COUNT0_EVT_SHFT	0
+#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
+#define XSCALE2_COUNT1_EVT_SHFT	8
+#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
+#define XSCALE2_COUNT2_EVT_SHFT	16
+#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
+#define XSCALE2_COUNT3_EVT_SHFT	24
+#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
+
+static inline u32
+xscale2pmu_read_pmnc(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
+	/* bits 1-2 and 4-23 are read-unpredictable */
+	return val & 0xff000009;
+}
+
+static inline void
+xscale2pmu_write_pmnc(u32 val)
+{
+	/* bits 4-23 are write-as-0, 24-31 are write ignored */
+	val &= 0xf;
+	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_overflow_flags(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
+	return val;
+}
+
+static inline void
+xscale2pmu_write_overflow_flags(u32 val)
+{
+	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_event_select(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
+	return val;
+}
+
+static inline void
+xscale2pmu_write_event_select(u32 val)
+{
+	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
+}
+
+static inline u32
+xscale2pmu_read_int_enable(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
+	return val;
+}
+
+static void
+xscale2pmu_write_int_enable(u32 val)
+{
+	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline int
+xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
+					enum xscale_counters counter)
+{
+	int ret = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
+		break;
+	case XSCALE_COUNTER0:
+		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
+		break;
+	case XSCALE_COUNTER1:
+		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
+		break;
+	case XSCALE_COUNTER2:
+		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
+		break;
+	case XSCALE_COUNTER3:
+		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+	}
+
+	return ret;
+}
+
+static irqreturn_t
+xscale2pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long pmnc, of_flags;
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	/* Disable the PMU. */
+	pmnc = xscale2pmu_read_pmnc();
+	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+	/* Check the overflow flag register. */
+	of_flags = xscale2pmu_read_overflow_flags();
+	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+		return IRQ_NONE;
+
+	/* Clear the overflow bits. */
+	xscale2pmu_write_overflow_flags(of_flags);
+
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, 0, &data, regs))
+			armpmu->disable(hwc, idx);
+	}
+
+	perf_event_do_pending();
+
+	/*
+	 * Re-enable the PMU.
+	 */
+	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+	xscale2pmu_write_pmnc(pmnc);
+
+	return IRQ_HANDLED;
+}
+
+static void
+xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags, ien, evtsel;
+
+	ien = xscale2pmu_read_int_enable();
+	evtsel = xscale2pmu_read_event_select();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		ien |= XSCALE2_CCOUNT_INT_EN;
+		break;
+	case XSCALE_COUNTER0:
+		ien |= XSCALE2_COUNT0_INT_EN;
+		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER1:
+		ien |= XSCALE2_COUNT1_INT_EN;
+		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER2:
+		ien |= XSCALE2_COUNT2_INT_EN;
+		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER3:
+		ien |= XSCALE2_COUNT3_INT_EN;
+		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	xscale2pmu_write_event_select(evtsel);
+	xscale2pmu_write_int_enable(ien);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags, ien, evtsel;
+
+	ien = xscale2pmu_read_int_enable();
+	evtsel = xscale2pmu_read_event_select();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		break;
+	case XSCALE_COUNTER0:
+		ien &= ~XSCALE2_COUNT0_INT_EN;
+		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER1:
+		ien &= ~XSCALE2_COUNT1_INT_EN;
+		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER2:
+		ien &= ~XSCALE2_COUNT2_INT_EN;
+		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER3:
+		ien &= ~XSCALE2_COUNT3_INT_EN;
+		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	xscale2pmu_write_event_select(evtsel);
+	xscale2pmu_write_int_enable(ien);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+			struct hw_perf_event *event)
+{
+	int idx = xscale1pmu_get_event_idx(cpuc, event);
+	if (idx >= 0)
+		goto out;
+
+	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
+		idx = XSCALE_COUNTER3;
+	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
+		idx = XSCALE_COUNTER2;
+out:
+	return idx;
+}
+
+static void
+xscale2pmu_start(void)
+{
+	unsigned long flags, val;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
+	val |= XSCALE_PMU_ENABLE;
+	xscale2pmu_write_pmnc(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_stop(void)
+{
+	unsigned long flags, val;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = xscale2pmu_read_pmnc();
+	val &= ~XSCALE_PMU_ENABLE;
+	xscale2pmu_write_pmnc(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale2pmu_read_counter(int counter)
+{
+	u32 val = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER2:
+		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER3:
+		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
+		break;
+	}
+
+	return val;
+}
+
+static inline void
+xscale2pmu_write_counter(int counter, u32 val)
+{
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER2:
+		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER3:
+		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
+		break;
+	}
+}
+
+static const struct arm_pmu xscale2pmu = {
+	.id		= ARM_PERF_PMU_ID_XSCALE2,
+	.handle_irq	= xscale2pmu_handle_irq,
+	.enable		= xscale2pmu_enable_event,
+	.disable	= xscale2pmu_disable_event,
+	.event_map	= xscalepmu_event_map,
+	.raw_event	= xscalepmu_raw_event,
+	.read_counter	= xscale2pmu_read_counter,
+	.write_counter	= xscale2pmu_write_counter,
+	.get_event_idx	= xscale2pmu_get_event_idx,
+	.start		= xscale2pmu_start,
+	.stop		= xscale2pmu_stop,
+	.num_events	= 5,
+	.max_period	= (1LLU << 32) - 1,
+};
+
 static int __init
 init_hw_perf_events(void)
 {
@@ -2086,7 +2924,7 @@ init_hw_perf_events(void)
 	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
 	unsigned long part_number = (cpuid & 0xFFF0);
 
-	/* We only support ARM CPUs implemented by ARM at the moment. */
+	/* ARM Ltd CPUs. */
 	if (0x41 == implementor) {
 		switch (part_number) {
 		case 0xB360: /* ARM1136 */
@@ -2105,7 +2943,7 @@ init_hw_perf_events(void)
 			perf_max_events	= armv6mpcore_pmu.num_events;
 			break;
 		case 0xC080: /* Cortex-A8 */
-			armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
+			armv7pmu.id = ARM_PERF_PMU_ID_CA8;
 			memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
 				sizeof(armv7_a8_perf_cache_map));
 			armv7pmu.event_map = armv7_a8_pmu_event_map;
@@ -2117,7 +2955,7 @@ init_hw_perf_events(void)
 			perf_max_events = armv7pmu.num_events;
 			break;
 		case 0xC090: /* Cortex-A9 */
-			armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
+			armv7pmu.id = ARM_PERF_PMU_ID_CA9;
 			memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
 				sizeof(armv7_a9_perf_cache_map));
 			armv7pmu.event_map = armv7_a9_pmu_event_map;
@@ -2128,15 +2966,33 @@ init_hw_perf_events(void)
 			armv7pmu.num_events = armv7_reset_read_pmnc();
 			perf_max_events = armv7pmu.num_events;
 			break;
-		default:
-			pr_info("no hardware support available\n");
-			perf_max_events = -1;
 		}
+	/* Intel CPUs [xscale]. */
+	} else if (0x69 == implementor) {
+		part_number = (cpuid >> 13) & 0x7;
+		switch (part_number) {
+		case 1:
+			armpmu = &xscale1pmu;
+			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+			       sizeof(xscale_perf_cache_map));
+			perf_max_events	= xscale1pmu.num_events;
+			break;
+		case 2:
+			armpmu = &xscale2pmu;
+			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+			       sizeof(xscale_perf_cache_map));
+			perf_max_events	= xscale2pmu.num_events;
+			break;
+		}
 	}
 
-	if (armpmu)
+	if (armpmu) {
 		pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
+			arm_pmu_names[armpmu->id], armpmu->num_events);
+	} else {
+		pr_info("no hardware support available\n");
+		perf_max_events = -1;
+	}
 
 	return 0;
 }
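
The newly exported armpmu_get_pmu_id() and armpmu_get_max_events() let other in-kernel code (OProfile is the obvious consumer) discover the active PMU without reaching into armpmu directly. A sketch of a caller, assuming the declarations are picked up via <asm/perf_event.h> as in mainline of this era:

#include <linux/kernel.h>
#include <asm/perf_event.h>	/* assumed home of the two helpers */

static void report_pmu(void)
{
	int id = armpmu_get_pmu_id();	/* negative (-ENODEV) if no PMU probed */

	if (id < 0)
		pr_info("no ARM PMU available\n");
	else
		pr_info("PMU id %d with %d counters\n",
			id, armpmu_get_max_events());
}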
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index a124312e343f..b8af96ea62e6 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -2,6 +2,7 @@
  * linux/arch/arm/kernel/pmu.c
  *
  * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ * Copyright (C) 2010 ARM Ltd, Will Deacon
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -9,65 +10,78 @@
  *
  */
 
+#define pr_fmt(fmt) "PMU: " fmt
+
 #include <linux/cpumask.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 
 #include <asm/pmu.h>
 
-/*
- * Define the IRQs for the system. We could use something like a platform
- * device but that seems fairly heavyweight for this. Also, the performance
- * counters can't be removed or hotplugged.
- *
- * Ordering is important: init_pmu() will use the ordering to set the affinity
- * to the corresponding core. e.g. the first interrupt will go to cpu 0, the
- * second goes to cpu 1 etc.
- */
-static const int irqs[] = {
-#if defined(CONFIG_ARCH_OMAP2)
-	3,
-#elif defined(CONFIG_ARCH_BCMRING)
-	IRQ_PMUIRQ,
-#elif defined(CONFIG_MACH_REALVIEW_EB)
-	IRQ_EB11MP_PMU_CPU0,
-	IRQ_EB11MP_PMU_CPU1,
-	IRQ_EB11MP_PMU_CPU2,
-	IRQ_EB11MP_PMU_CPU3,
-#elif defined(CONFIG_ARCH_OMAP3)
-	INT_34XX_BENCH_MPU_EMUL,
-#elif defined(CONFIG_ARCH_IOP32X)
-	IRQ_IOP32X_CORE_PMU,
-#elif defined(CONFIG_ARCH_IOP33X)
-	IRQ_IOP33X_CORE_PMU,
-#elif defined(CONFIG_ARCH_PXA)
-	IRQ_PMU,
-#endif
-};
+static volatile long pmu_lock;
+
+static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
+
+static int __devinit pmu_device_probe(struct platform_device *pdev)
+{
+
+	if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+		pr_warning("received registration request for unknown "
+				"device %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	if (pmu_devices[pdev->id])
+		pr_warning("registering new PMU device type %d overwrites "
+				"previous registration!\n", pdev->id);
+	else
+		pr_info("registered new PMU device of type %d\n",
+				pdev->id);
 
-static const struct pmu_irqs pmu_irqs = {
-	.irqs	    = irqs,
-	.num_irqs   = ARRAY_SIZE(irqs),
+	pmu_devices[pdev->id] = pdev;
+	return 0;
+}
+
+static struct platform_driver pmu_driver = {
+	.driver		= {
+		.name	= "arm-pmu",
+	},
+	.probe		= pmu_device_probe,
 };
 
-static volatile long pmu_lock;
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&pmu_driver);
+}
+device_initcall(register_pmu_driver);
 
-const struct pmu_irqs *
-reserve_pmu(void)
+struct platform_device *
+reserve_pmu(enum arm_pmu_type device)
 {
-	return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
-		&pmu_irqs;
+	struct platform_device *pdev;
+
+	if (test_and_set_bit_lock(device, &pmu_lock)) {
+		pdev = ERR_PTR(-EBUSY);
+	} else if (pmu_devices[device] == NULL) {
+		clear_bit_unlock(device, &pmu_lock);
+		pdev = ERR_PTR(-ENODEV);
+	} else {
+		pdev = pmu_devices[device];
+	}
+
+	return pdev;
 }
 EXPORT_SYMBOL_GPL(reserve_pmu);
 
 int
-release_pmu(const struct pmu_irqs *irqs)
+release_pmu(struct platform_device *pdev)
 {
-	if (WARN_ON(irqs != &pmu_irqs))
+	if (WARN_ON(pdev != pmu_devices[pdev->id]))
 		return -EINVAL;
-	clear_bit_unlock(0, &pmu_lock);
+	clear_bit_unlock(pdev->id, &pmu_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(release_pmu);
@@ -87,17 +101,42 @@ set_irq_affinity(int irq,
 #endif
 }
 
-int
-init_pmu(void)
+static int
+init_cpu_pmu(void)
 {
 	int i, err = 0;
+	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
+
+	if (!pdev) {
+		err = -ENODEV;
+		goto out;
+	}
 
-	for (i = 0; i < pmu_irqs.num_irqs; ++i) {
-		err = set_irq_affinity(pmu_irqs.irqs[i], i);
+	for (i = 0; i < pdev->num_resources; ++i) {
+		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}
 
+out:
+	return err;
+}
+
+int
+init_pmu(enum arm_pmu_type device)
+{
+	int err = 0;
+
+	switch (device) {
+	case ARM_PMU_DEVICE_CPU:
+		err = init_cpu_pmu();
+		break;
+	default:
+		pr_warning("attempt to initialise unknown device %d\n",
+			   device);
+		err = -EINVAL;
+	}
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(init_pmu);
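
For pmu_device_probe() to run, a machine port must now register a platform device named "arm-pmu" whose id selects the PMU type and whose resources list one IRQ per CPU, in the order init_cpu_pmu() binds affinities. A board-file sketch; the IRQ number is made up:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

static struct resource pmu_resources[] = {
	{	/* hypothetical PMU IRQ for CPU 0 */
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device pmu_device = {
	.name		= "arm-pmu",		/* matches pmu_driver.driver.name */
	.id		= ARM_PMU_DEVICE_CPU,
	.resource	= pmu_resources,
	.num_resources	= ARRAY_SIZE(pmu_resources),
};

static int __init board_pmu_init(void)
{
	return platform_device_register(&pmu_device);
}
arch_initcall(board_pmu_init);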
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index a01194e583ff..b8c3d0f689d9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -168,7 +168,7 @@ int __cpu_disable(void)
 	struct task_struct *p;
 	int ret;
 
-	ret = mach_cpu_disable(cpu);
+	ret = platform_cpu_disable(cpu);
 	if (ret)
 		return ret;
 
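
platform_cpu_disable() is the per-platform hotplug hook now called here. A minimal sketch, under the common assumption that CPU 0 must keep servicing interrupts that cannot be migrated (prototype and placement follow the platform's hotplug code):

int platform_cpu_disable(unsigned int cpu)
{
	/* assume CPU 0 owns interrupts that cannot be moved to another core */
	return cpu == 0 ? -EPERM : 0;
}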
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 28753805d2d1..38c261f9951c 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -72,12 +72,15 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-#ifndef CONFIG_GENERIC_TIME
-static unsigned long dummy_gettimeoffset(void)
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+u32 arch_gettimeoffset(void)
 {
+	if (system_timer->offset != NULL)
+		return system_timer->offset() * 1000;
+
 	return 0;
 }
-#endif
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
 
 #ifdef CONFIG_LEDS_TIMER
 static inline void do_leds(void)
@@ -93,63 +96,6 @@ static inline void do_leds(void)
 #define	do_leds()
 #endif
 
-#ifndef CONFIG_GENERIC_TIME
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long flags;
-	unsigned long seq;
-	unsigned long usec, sec;
-
-	do {
-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-		usec = system_timer->offset();
-		sec = xtime.tv_sec;
-		usec += xtime.tv_nsec / 1000;
-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
-	/* usec may have gone up a lot: be safe */
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * done, and then undo it!
-	 */
-	nsec -= system_timer->offset() * NSEC_PER_USEC;
-
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-	return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-#endif /* !CONFIG_GENERIC_TIME */
 
 #ifndef CONFIG_GENERIC_CLOCKEVENTS
 /*
@@ -214,10 +160,6 @@ device_initcall(timer_init_sysfs);
 
 void __init time_init(void)
 {
-#ifndef CONFIG_GENERIC_TIME
-	if (system_timer->offset == NULL)
-		system_timer->offset = dummy_gettimeoffset;
-#endif
 	system_timer->init();
 }
 
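
With the !CONFIG_GENERIC_TIME paths removed, a machine without a clocksource selects CONFIG_ARCH_USES_GETTIMEOFFSET and keeps supplying ->offset() in microseconds; arch_gettimeoffset() above scales that by 1000 into nanoseconds for the generic timekeeping code. A sketch with hypothetical mymach_* names:

#include <asm/mach/time.h>

extern unsigned long mymach_read_timer_us(void);	/* hypothetical counter read */

static void __init mymach_timer_init(void)
{
	/* program the hardware timer for HZ interrupts (details omitted) */
}

/* Microseconds since the last timer tick; consumed by arch_gettimeoffset(). */
static unsigned long mymach_gettimeoffset(void)
{
	return mymach_read_timer_us();
}

struct sys_timer mymach_timer = {
	.init	= mymach_timer_init,
	.offset	= mymach_gettimeoffset,
};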