Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                                  |   1
-rw-r--r--  arch/arm/include/asm/hw_irq.h                     |   2
-rw-r--r--  arch/arm/include/asm/irqflags.h                   | 145
-rw-r--r--  arch/arm/include/asm/perf_event.h                 |  12
-rw-r--r--  arch/arm/kernel/irq.c                             |  10
-rw-r--r--  arch/arm/kernel/perf_event.c                      | 212
-rw-r--r--  arch/arm/mach-bcmring/dma.c                       |   4
-rw-r--r--  arch/arm/mach-bcmring/irq.c                       |   6
-rw-r--r--  arch/arm/mach-iop13xx/msi.c                       |   8
-rw-r--r--  arch/arm/mach-lpc32xx/phy3250.c                   |   7
-rw-r--r--  arch/arm/mach-u300/dummyspichip.c                 |   5
-rw-r--r--  arch/arm/mach-u300/spi.c                          |  10
-rw-r--r--  arch/arm/mach-ux500/board-mop500.c                |   8
-rw-r--r--  arch/arm/oprofile/Makefile                        |   4
-rw-r--r--  arch/arm/oprofile/common.c                        | 311
-rw-r--r--  arch/arm/plat-samsung/include/plat/s3c64xx-spi.h  |   3
16 files changed, 217 insertions(+), 531 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 19792a9192b..3849887157e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -25,6 +25,7 @@ config ARM
         select HAVE_KERNEL_GZIP
         select HAVE_KERNEL_LZO
         select HAVE_KERNEL_LZMA
+        select HAVE_IRQ_WORK
         select HAVE_PERF_EVENTS
         select PERF_USE_VMALLOC
         select HAVE_REGS_AND_STACK_ACCESS_API
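Note: the new HAVE_IRQ_WORK select backs the perf_event.c changes further down, where perf_event_do_pending() becomes irq_work_run(): PMU overflow work is now deferred through the generic irq_work layer. A minimal sketch of that pattern, assuming the irq_work API as introduced alongside this series (pmu_overflow_work and overflow_bottom_half are made-up names for illustration):

#include <linux/irq_work.h>

static struct irq_work pmu_overflow_work;

/* Deferred handler: runs when irq_work_run() drains the per-CPU queue. */
static void overflow_bottom_half(struct irq_work *work)
{
        /* process sample data outside the hard-IRQ fast path */
}

static void pmu_overflow_irq(void)      /* called from the PMU IRQ handler */
{
        irq_work_queue(&pmu_overflow_work);
        /*
         * ARM has no self-IPI, so the PMU interrupt handler drains the
         * queue itself: that is the irq_work_run() call added to the
         * armv6/armv7/xscale handlers below.
         */
        irq_work_run();
}

static int __init pmu_overflow_setup(void)
{
        init_irq_work(&pmu_overflow_work, overflow_bottom_half);
        return 0;
}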
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index 90831f6f5f5..5586b7c8ef6 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags);
 #define IRQF_PROBE     (1 << 1)
 #define IRQF_NOAUTOEN  (1 << 2)
 
+#define ARCH_IRQ_INIT_FLAGS    (IRQ_NOREQUEST | IRQ_NOPROBE)
+
 #endif
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 6d09974e664..1e6cca55c75 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -10,66 +10,85 @@
  */
 #if __LINUX_ARM_ARCH__ >= 6
 
-#define raw_local_irq_save(x) \
-        ({ \
-        __asm__ __volatile__( \
-        "mrs %0, cpsr @ local_irq_save\n" \
-        "cpsid i" \
-        : "=r" (x) : : "memory", "cc"); \
-        })
+static inline unsigned long arch_local_irq_save(void)
+{
+        unsigned long flags;
+
+        asm volatile(
+                "       mrs %0, cpsr    @ arch_local_irq_save\n"
+                "       cpsid i"
+                : "=r" (flags) : : "memory", "cc");
+        return flags;
+}
+
+static inline void arch_local_irq_enable(void)
+{
+        asm volatile(
+                "       cpsie i         @ arch_local_irq_enable"
+                :
+                :
+                : "memory", "cc");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+        asm volatile(
+                "       cpsid i         @ arch_local_irq_disable"
+                :
+                :
+                : "memory", "cc");
+}
 
-#define raw_local_irq_enable()  __asm__("cpsie i @ __sti" : : : "memory", "cc")
-#define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
 #define local_fiq_enable()  __asm__("cpsie f @ __stf" : : : "memory", "cc")
 #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
-
 #else
 
 /*
  * Save the current interrupt enable state & disable IRQs
  */
-#define raw_local_irq_save(x) \
-        ({ \
-                unsigned long temp; \
-                (void) (&temp == &x); \
-        __asm__ __volatile__( \
-        "mrs %0, cpsr @ local_irq_save\n" \
-"       orr %1, %0, #128\n" \
-"       msr cpsr_c, %1" \
-        : "=r" (x), "=r" (temp) \
-        : \
-        : "memory", "cc"); \
-        })
+static inline unsigned long arch_local_irq_save(void)
+{
+        unsigned long flags, temp;
+
+        asm volatile(
+                "       mrs %0, cpsr    @ arch_local_irq_save\n"
+                "       orr %1, %0, #128\n"
+                "       msr cpsr_c, %1"
+                : "=r" (flags), "=r" (temp)
+                :
+                : "memory", "cc");
+        return flags;
+}
 
 /*
  * Enable IRQs
  */
-#define raw_local_irq_enable() \
-        ({ \
-                unsigned long temp; \
-        __asm__ __volatile__( \
-        "mrs %0, cpsr @ local_irq_enable\n" \
-"       bic %0, %0, #128\n" \
-"       msr cpsr_c, %0" \
-        : "=r" (temp) \
-        : \
-        : "memory", "cc"); \
-        })
+static inline void arch_local_irq_enable(void)
+{
+        unsigned long temp;
+        asm volatile(
+                "       mrs %0, cpsr    @ arch_local_irq_enable\n"
+                "       bic %0, %0, #128\n"
+                "       msr cpsr_c, %0"
+                : "=r" (temp)
+                :
+                : "memory", "cc");
+}
 
 /*
  * Disable IRQs
  */
-#define raw_local_irq_disable() \
-        ({ \
-                unsigned long temp; \
-        __asm__ __volatile__( \
-        "mrs %0, cpsr @ local_irq_disable\n" \
-"       orr %0, %0, #128\n" \
-"       msr cpsr_c, %0" \
-        : "=r" (temp) \
-        : \
-        : "memory", "cc"); \
-        })
+static inline void arch_local_irq_disable(void)
+{
+        unsigned long temp;
+        asm volatile(
+                "       mrs %0, cpsr    @ arch_local_irq_disable\n"
+                "       orr %0, %0, #128\n"
+                "       msr cpsr_c, %0"
+                : "=r" (temp)
+                :
+                : "memory", "cc");
+}
 
 /*
  * Enable FIQs
@@ -106,27 +125,31 @@
 /*
  * Save the current interrupt enable state.
  */
-#define raw_local_save_flags(x) \
-        ({ \
-        __asm__ __volatile__( \
-        "mrs %0, cpsr @ local_save_flags" \
-        : "=r" (x) : : "memory", "cc"); \
-        })
+static inline unsigned long arch_local_save_flags(void)
+{
+        unsigned long flags;
+        asm volatile(
+                "       mrs %0, cpsr    @ local_save_flags"
+                : "=r" (flags) : : "memory", "cc");
+        return flags;
+}
 
 /*
  * restore saved IRQ & FIQ state
  */
-#define raw_local_irq_restore(x) \
-        __asm__ __volatile__( \
-        "msr cpsr_c, %0 @ local_irq_restore\n" \
-        : \
-        : "r" (x) \
-        : "memory", "cc")
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+        asm volatile(
+                "       msr cpsr_c, %0  @ local_irq_restore"
+                :
+                : "r" (flags)
+                : "memory", "cc");
+}
 
-#define raw_irqs_disabled_flags(flags) \
-({ \
-        (int)((flags) & PSR_I_BIT); \
-})
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+        return flags & PSR_I_BIT;
+}
 
 #endif
 #endif
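Note: after this conversion the architecture no longer defines the raw_* macros at all; it only supplies the arch_* primitives above, and the generic <linux/irqflags.h> builds the raw_*/local_irq_* layer on top of them. Roughly how the generic side consumes these (a sketch of the common header of that era, not ARM code from this diff):

#define raw_local_irq_save(flags)                       \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = arch_local_irq_save();          \
        } while (0)

#define raw_local_irq_restore(flags)                    \
        do {                                            \
                typecheck(unsigned long, flags);        \
                arch_local_irq_restore(flags);          \
        } while (0)

static inline int raw_irqs_disabled(void)
{
        return arch_irqs_disabled_flags(arch_local_save_flags());
}

The returned-flags style also retires the old (void)(&temp == &x) trick: type checking now falls out of the function signatures.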
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index b5799a3b711..c4aa4e8c6af 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,18 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c0d5c3b3a76..36ad3be4692 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -154,14 +154,6 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 
 void __init init_IRQ(void)
 {
-        struct irq_desc *desc;
-        int irq;
-
-        for (irq = 0; irq < nr_irqs; irq++) {
-                desc = irq_to_desc_alloc_node(irq, 0);
-                desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-        }
-
         init_arch_irq();
 }
 
@@ -169,7 +161,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
         nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
-        return 0;
+        return nr_irqs;
 }
 #endif
 
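Note: the deleted loop in init_IRQ() and the new ARCH_IRQ_INIT_FLAGS definition in hw_irq.h above are two halves of the same change: rather than ARM walking every descriptor to set IRQ_NOREQUEST | IRQ_NOPROBE, the generic irq code applies an architecture-supplied flag mask when it creates each descriptor, and arch_probe_nr_irqs() now reports the descriptor count back to it. Conceptually, on the generic side (a simplified sketch of the kernel/irq convention of that era, not code from this diff):

#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS    0
#endif
#define IRQ_DEFAULT_INIT_FLAGS  (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)

/* every newly allocated irq_desc is seeded with the default flags */
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc)
{
        desc->status = IRQ_DEFAULT_INIT_FLAGS;
        /* ... */
}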
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ecbb0288e5d..49643b1467e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
 
+int perf_num_counters(void)
+{
+        return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 #define HW_OP_UNSUPPORTED              0xFFFF
 
 #define C(_x) \
@@ -221,46 +227,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
-        int idx = hwc->idx;
-
-        WARN_ON(idx < 0);
-
-        clear_bit(idx, cpuc->active_mask);
-        armpmu->disable(hwc, idx);
-
-        barrier();
 
-        armpmu_event_update(event, hwc, idx);
-        cpuc->events[idx] = NULL;
-        clear_bit(idx, cpuc->used_mask);
+        /* Don't read disabled counters! */
+        if (hwc->idx < 0)
+                return;
 
-        perf_event_update_userpage(event);
+        armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
         struct hw_perf_event *hwc = &event->hw;
 
-        /* Don't read disabled counters! */
-        if (hwc->idx < 0)
+        if (!armpmu)
                 return;
 
-        armpmu_event_update(event, hwc, hwc->idx);
+        /*
+         * ARM pmu always has to update the counter, so ignore
+         * PERF_EF_UPDATE, see comments in armpmu_start().
+         */
+        if (!(hwc->state & PERF_HES_STOPPED)) {
+                armpmu->disable(hwc, hwc->idx);
+                barrier(); /* why? */
+                armpmu_event_update(event, hwc, hwc->idx);
+                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+        }
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
         struct hw_perf_event *hwc = &event->hw;
 
+        if (!armpmu)
+                return;
+
+        /*
+         * ARM pmu always has to reprogram the period, so ignore
+         * PERF_EF_RELOAD, see the comment below.
+         */
+        if (flags & PERF_EF_RELOAD)
+                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+        hwc->state = 0;
         /*
          * Set the period again. Some counters can't be stopped, so when we
-         * were throttled we simply disabled the IRQ source and the counter
+         * were stopped we simply disabled the IRQ source and the counter
          * may have been left counting. If we don't do this step then we may
          * get an interrupt too soon or *way* too late if the overflow has
          * happened since disabling.
@@ -269,14 +285,33 @@ armpmu_unthrottle(struct perf_event *event)
         armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct hw_perf_event *hwc = &event->hw;
+        int idx = hwc->idx;
+
+        WARN_ON(idx < 0);
+
+        clear_bit(idx, cpuc->active_mask);
+        armpmu_stop(event, PERF_EF_UPDATE);
+        cpuc->events[idx] = NULL;
+        clear_bit(idx, cpuc->used_mask);
+
+        perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
         int idx;
         int err = 0;
 
+        perf_pmu_disable(event->pmu);
+
         /* If we don't have a space for the counter then finish early. */
         idx = armpmu->get_event_idx(cpuc, hwc);
         if (idx < 0) {
@@ -293,25 +328,19 @@ armpmu_enable(struct perf_event *event)
         cpuc->events[idx] = event;
         set_bit(idx, cpuc->active_mask);
 
-        /* Set the period for the event. */
-        armpmu_event_set_period(event, hwc, idx);
-
-        /* Enable the event. */
-        armpmu->enable(hwc, idx);
+        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+        if (flags & PERF_EF_START)
+                armpmu_start(event, PERF_EF_RELOAD);
 
         /* Propagate our changes to the userspace mapping. */
         perf_event_update_userpage(event);
 
 out:
+        perf_pmu_enable(event->pmu);
         return err;
 }
 
-static struct pmu pmu = {
-        .enable     = armpmu_enable,
-        .disable    = armpmu_disable,
-        .unthrottle = armpmu_unthrottle,
-        .read       = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -491,20 +520,29 @@ __hw_perf_event_init(struct perf_event *event)
         return err;
 }
 
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
         int err = 0;
 
+        switch (event->attr.type) {
+        case PERF_TYPE_RAW:
+        case PERF_TYPE_HARDWARE:
+        case PERF_TYPE_HW_CACHE:
+                break;
+
+        default:
+                return -ENOENT;
+        }
+
         if (!armpmu)
-                return ERR_PTR(-ENODEV);
+                return -ENODEV;
 
         event->destroy = hw_perf_event_destroy;
 
         if (!atomic_inc_not_zero(&active_events)) {
-                if (atomic_read(&active_events) > perf_max_events) {
+                if (atomic_read(&active_events) > armpmu->num_events) {
                         atomic_dec(&active_events);
-                        return ERR_PTR(-ENOSPC);
+                        return -ENOSPC;
                 }
 
                 mutex_lock(&pmu_reserve_mutex);
@@ -518,17 +556,16 @@ hw_perf_event_init(struct perf_event *event)
         }
 
         if (err)
-                return ERR_PTR(err);
+                return err;
 
         err = __hw_perf_event_init(event);
         if (err)
                 hw_perf_event_destroy(event);
 
-        return err ? ERR_PTR(err) : &pmu;
+        return err;
 }
 
-void
-hw_perf_enable(void)
+static void armpmu_enable(struct pmu *pmu)
 {
         /* Enable all of the perf events on hardware. */
         int idx;
@@ -549,13 +586,23 @@ hw_perf_enable(void)
         armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_disable(struct pmu *pmu)
 {
         if (armpmu)
                 armpmu->stop();
 }
 
+static struct pmu pmu = {
+        .pmu_enable     = armpmu_enable,
+        .pmu_disable    = armpmu_disable,
+        .event_init     = armpmu_event_init,
+        .add            = armpmu_add,
+        .del            = armpmu_del,
+        .start          = armpmu_start,
+        .stop           = armpmu_stop,
+        .read           = armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
@@ -1045,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num,
          * platforms that can have the PMU interrupts raised as an NMI, this
          * will not work.
          */
-        perf_event_do_pending();
+        irq_work_run();
 
         return IRQ_HANDLED;
 }
@@ -2021,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
          * platforms that can have the PMU interrupts raised as an NMI, this
          * will not work.
          */
-        perf_event_do_pending();
+        irq_work_run();
 
         return IRQ_HANDLED;
 }
@@ -2389,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                 armpmu->disable(hwc, idx);
         }
 
-        perf_event_do_pending();
+        irq_work_run();
 
         /*
          * Re-enable the PMU.
@@ -2716,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                 armpmu->disable(hwc, idx);
         }
 
-        perf_event_do_pending();
+        irq_work_run();
 
         /*
          * Re-enable the PMU.
@@ -2933,14 +2980,12 @@ init_hw_perf_events(void)
                 armpmu = &armv6pmu;
                 memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
                        sizeof(armv6_perf_cache_map));
-                perf_max_events = armv6pmu.num_events;
                 break;
         case 0xB020:    /* ARM11mpcore */
                 armpmu = &armv6mpcore_pmu;
                 memcpy(armpmu_perf_cache_map,
                        armv6mpcore_perf_cache_map,
                        sizeof(armv6mpcore_perf_cache_map));
-                perf_max_events = armv6mpcore_pmu.num_events;
                 break;
         case 0xC080:    /* Cortex-A8 */
                 armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2952,7 +2997,6 @@ init_hw_perf_events(void)
                 /* Reset PMNC and read the nb of CNTx counters
                     supported */
                 armv7pmu.num_events = armv7_reset_read_pmnc();
-                perf_max_events = armv7pmu.num_events;
                 break;
         case 0xC090:    /* Cortex-A9 */
                 armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -2964,7 +3008,6 @@ init_hw_perf_events(void)
                 /* Reset PMNC and read the nb of CNTx counters
                     supported */
                 armv7pmu.num_events = armv7_reset_read_pmnc();
-                perf_max_events = armv7pmu.num_events;
                 break;
         }
         /* Intel CPUs [xscale]. */
@@ -2975,13 +3018,11 @@ init_hw_perf_events(void)
                         armpmu = &xscale1pmu;
                         memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                sizeof(xscale_perf_cache_map));
-                        perf_max_events = xscale1pmu.num_events;
                         break;
                 case 2:
                         armpmu = &xscale2pmu;
                         memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                sizeof(xscale_perf_cache_map));
-                        perf_max_events = xscale2pmu.num_events;
                         break;
                 }
         }
@@ -2991,9 +3032,10 @@ init_hw_perf_events(void)
                         arm_pmu_names[armpmu->id], armpmu->num_events);
         } else {
                 pr_info("no hardware support available\n");
-                perf_max_events = -1;
         }
 
+        perf_pmu_register(&pmu);
+
         return 0;
 }
 arch_initcall(init_hw_perf_events);
@@ -3001,13 +3043,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-                u64 ip)
-{
-        if (entry->nr < PERF_MAX_STACK_DEPTH)
-                entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3039,7 +3074,7 @@ user_backtrace(struct frame_tail *tail,
         if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                 return NULL;
 
-        callchain_store(entry, buftail.lr);
+        perf_callchain_store(entry, buftail.lr);
 
         /*
          * Frame pointers should strictly progress back up the stack
@@ -3051,16 +3086,11 @@ user_backtrace(struct frame_tail *tail,
         return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-                    struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
         struct frame_tail *tail;
 
-        callchain_store(entry, PERF_CONTEXT_USER);
-
-        if (!user_mode(regs))
-                regs = task_pt_regs(current);
 
         tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3078,56 +3108,18 @@ callchain_trace(struct stackframe *fr,
                 void *data)
 {
         struct perf_callchain_entry *entry = data;
-        callchain_store(entry, fr->pc);
+        perf_callchain_store(entry, fr->pc);
         return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                      struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
         struct stackframe fr;
 
-        callchain_store(entry, PERF_CONTEXT_KERNEL);
         fr.fp = regs->ARM_fp;
         fr.sp = regs->ARM_sp;
         fr.lr = regs->ARM_lr;
         fr.pc = regs->ARM_pc;
         walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                  struct perf_callchain_entry *entry)
-{
-        int is_user;
-
-        if (!regs)
-                return;
-
-        is_user = user_mode(regs);
-
-        if (!current || !current->pid)
-                return;
-
-        if (is_user && current->state != TASK_RUNNING)
-                return;
-
-        if (!is_user)
-                perf_callchain_kernel(regs, entry);
-
-        if (current->mm)
-                perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-        struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-        entry->nr = 0;
-        perf_do_callchain(regs, entry);
-        return entry;
-}
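Note: the overall shape of the perf_event.c conversion above: the old enable/disable/unthrottle trio becomes the add/del/start/stop quartet of the new struct pmu, with the PERF_HES_STOPPED and PERF_HES_UPTODATE bits tracking whether a counter is halted and whether event->count is current. From the core's side the callbacks nest roughly like this (an illustrative sketch of the caller, not code from this patch):

/* Illustrative only: how the perf core drives the new callbacks. */
static void example_event_lifecycle(struct pmu *pmu, struct perf_event *event)
{
        pmu->pmu_disable(pmu);             /* batch hardware (re)programming  */
        pmu->add(event, PERF_EF_START);    /* claim a counter and start it    */
        pmu->pmu_enable(pmu);              /* PMU counts from here            */

        pmu->read(event);                  /* fold hw count into event->count */

        pmu->stop(event, PERF_EF_UPDATE);  /* halt and update the count       */
        pmu->start(event, PERF_EF_RELOAD); /* reprogram the period and resume */

        pmu->del(event, 0);                /* release the counter; calls stop */
}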
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 29c0a911df2..77eb35c89cd 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -691,7 +691,7 @@ int dma_init(void)
 
         memset(&gDMA, 0, sizeof(gDMA));
 
-        init_MUTEX_LOCKED(&gDMA.lock);
+        sema_init(&gDMA.lock, 0);
         init_waitqueue_head(&gDMA.freeChannelQ);
 
         /* Initialize the Hardware */
@@ -1574,7 +1574,7 @@ int dma_init_mem_map(DMA_MemMap_t *memMap)
 {
         memset(memMap, 0, sizeof(*memMap));
 
-        init_MUTEX(&memMap->lock);
+        sema_init(&memMap->lock, 1);
 
         return 0;
 }
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c
index dc1c4939b0c..e3152631eb3 100644
--- a/arch/arm/mach-bcmring/irq.c
+++ b/arch/arm/mach-bcmring/irq.c
@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq)
 }
 
 static struct irq_chip bcmring_irq0_chip = {
-        .typename = "ARM-INTC0",
+        .name = "ARM-INTC0",
         .ack = bcmring_mask_irq0,
         .mask = bcmring_mask_irq0,      /* mask a specific interrupt, blocking its delivery. */
         .unmask = bcmring_unmask_irq0,  /* unmaks an interrupt */
 };
 
 static struct irq_chip bcmring_irq1_chip = {
-        .typename = "ARM-INTC1",
+        .name = "ARM-INTC1",
         .ack = bcmring_mask_irq1,
         .mask = bcmring_mask_irq1,
         .unmask = bcmring_unmask_irq1,
 };
 
 static struct irq_chip bcmring_irq2_chip = {
-        .typename = "ARM-SINTC",
+        .name = "ARM-SINTC",
         .ack = bcmring_mask_irq2,
         .mask = bcmring_mask_irq2,
         .unmask = bcmring_unmask_irq2,
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index f34b0ed8063..7149fcc16c8 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq)
 static struct irq_chip iop13xx_msi_chip = {
         .name = "PCI-MSI",
         .ack = iop13xx_msi_nop,
-        .enable = unmask_msi_irq,
-        .disable = mask_msi_irq,
-        .mask = mask_msi_irq,
-        .unmask = unmask_msi_irq,
+        .irq_enable = unmask_msi_irq,
+        .irq_disable = mask_msi_irq,
+        .irq_mask = mask_msi_irq,
+        .irq_unmask = unmask_msi_irq,
 };
 
 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index a058de85890..7993b096778 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -172,18 +172,12 @@ static void phy3250_spi_cs_set(u32 control)
 }
 
 static struct pl022_config_chip spi0_chip_info = {
-        .lbm                    = LOOPBACK_DISABLED,
         .com_mode               = INTERRUPT_TRANSFER,
         .iface                  = SSP_INTERFACE_MOTOROLA_SPI,
         .hierarchy              = SSP_MASTER,
         .slave_tx_disable       = 0,
-        .endian_tx              = SSP_TX_LSB,
-        .endian_rx              = SSP_RX_LSB,
-        .data_size              = SSP_DATA_BITS_8,
         .rx_lev_trig            = SSP_RX_4_OR_MORE_ELEM,
         .tx_lev_trig            = SSP_TX_4_OR_MORE_EMPTY_LOC,
-        .clk_phase              = SSP_CLK_FIRST_EDGE,
-        .clk_pol                = SSP_CLK_POL_IDLE_LOW,
         .ctrl_len               = SSP_BITS_8,
         .wait_state             = SSP_MWIRE_WAIT_ZERO,
         .duplex                 = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
@@ -239,6 +233,7 @@ static int __init phy3250_spi_board_register(void)
                 .max_speed_hz = 5000000,
                 .bus_num = 0,
                 .chip_select = 0,
+                .mode = SPI_MODE_0,
                 .platform_data = &eeprom,
                 .controller_data = &spi0_chip_info,
         },
diff --git a/arch/arm/mach-u300/dummyspichip.c b/arch/arm/mach-u300/dummyspichip.c
index 5f55012b7c9..03f79361259 100644
--- a/arch/arm/mach-u300/dummyspichip.c
+++ b/arch/arm/mach-u300/dummyspichip.c
@@ -46,7 +46,6 @@ static ssize_t dummy_looptest(struct device *dev,
          * struct, this is just used here to alter the behaviour of the chip
          * in order to perform tests.
          */
-        struct pl022_config_chip *chip_info = spi->controller_data;
         int status;
         u8 txbuf[14] = {0xDE, 0xAD, 0xBE, 0xEF, 0x2B, 0xAD,
                         0xCA, 0xFE, 0xBA, 0xBE, 0xB1, 0x05,
@@ -72,7 +71,7 @@ static ssize_t dummy_looptest(struct device *dev,
          * Force chip to 8 bit mode
          * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC!
          */
-        chip_info->data_size = SSP_DATA_BITS_8;
+        spi->bits_per_word = 8;
         /* You should NOT DO THIS EITHER */
         spi->master->setup(spi);
 
@@ -159,7 +158,7 @@ static ssize_t dummy_looptest(struct device *dev,
          * Force chip to 16 bit mode
          * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC!
          */
-        chip_info->data_size = SSP_DATA_BITS_16;
+        spi->bits_per_word = 16;
         /* You should NOT DO THIS EITHER */
         spi->master->setup(spi);
 
diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c
index f0e887bea30..edb2c0d255c 100644
--- a/arch/arm/mach-u300/spi.c
+++ b/arch/arm/mach-u300/spi.c
@@ -30,8 +30,6 @@ static void select_dummy_chip(u32 chipselect)
 }
 
 struct pl022_config_chip dummy_chip_info = {
-        /* Nominally this is LOOPBACK_DISABLED, but this is our dummy chip! */
-        .lbm = LOOPBACK_ENABLED,
         /*
          * available POLLING_TRANSFER and INTERRUPT_TRANSFER,
          * DMA_TRANSFER does not work
@@ -42,14 +40,8 @@ struct pl022_config_chip dummy_chip_info = {
         .hierarchy = SSP_MASTER,
         /* 0 = drive TX even as slave, 1 = do not drive TX as slave */
         .slave_tx_disable = 0,
-        /* LSB first */
-        .endian_tx = SSP_TX_LSB,
-        .endian_rx = SSP_RX_LSB,
-        .data_size = SSP_DATA_BITS_8, /* used to be 12 in some default */
         .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
         .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
-        .clk_phase = SSP_CLK_SECOND_EDGE,
-        .clk_pol = SSP_CLK_POL_IDLE_LOW,
         .ctrl_len = SSP_BITS_12,
         .wait_state = SSP_MWIRE_WAIT_ZERO,
         .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
@@ -75,7 +67,7 @@ static struct spi_board_info u300_spi_devices[] = {
                 .bus_num = 0, /* Only one bus on this chip */
                 .chip_select = 0,
                 /* Means SPI_CS_HIGH, change if e.g low CS */
-                .mode = 0,
+                .mode = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP,
         },
 #endif
 };
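Note: the pl022 cleanups in this and the neighbouring board files all follow one pattern: per-chip settings the SPI core already models move out of the driver-private pl022_config_chip and into generic spi_board_info fields. The U300 hunk above shows the mapping directly; spelled out (a sketch using only generic SPI flags from <linux/spi/spi.h>):

#include <linux/spi/spi.h>

/*
 * Old pl022-private field              Generic replacement
 *   .lbm = LOOPBACK_ENABLED            .mode |= SPI_LOOP
 *   .endian_tx/.endian_rx = *_LSB      .mode |= SPI_LSB_FIRST
 *   .clk_pol = SSP_CLK_POL_IDLE_LOW,
 *   .clk_phase = SSP_CLK_SECOND_EDGE   SPI_MODE_1 (CPOL = 0, CPHA = 1)
 *   .data_size = SSP_DATA_BITS_8       spi->bits_per_word = 8
 */
static struct spi_board_info example_spi_device = {
        .modalias    = "dummy-chip",    /* made-up name for illustration */
        .bus_num     = 0,
        .chip_select = 0,
        .mode        = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP,
};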
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 4fb23e2e2cd..fcb587f825c 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -57,19 +57,13 @@ static void ab4500_spi_cs_control(u32 command)
 }
 
 struct pl022_config_chip ab4500_chip_info = {
-        .lbm = LOOPBACK_DISABLED,
         .com_mode = INTERRUPT_TRANSFER,
         .iface = SSP_INTERFACE_MOTOROLA_SPI,
         /* we can act as master only */
         .hierarchy = SSP_MASTER,
         .slave_tx_disable = 0,
-        .endian_rx = SSP_RX_MSB,
-        .endian_tx = SSP_TX_MSB,
-        .data_size = SSP_DATA_BITS_24,
         .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
         .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
-        .clk_phase = SSP_CLK_SECOND_EDGE,
-        .clk_pol = SSP_CLK_POL_IDLE_HIGH,
         .cs_control = ab4500_spi_cs_control,
 };
 
@@ -103,7 +97,7 @@ static struct spi_board_info ab8500_spi_devices[] = {
                 .max_speed_hz = 12000000,
                 .bus_num = 0,
                 .chip_select = 0,
-                .mode = SPI_MODE_0,
+                .mode = SPI_MODE_3,
                 .irq = IRQ_DB8500_AB8500,
         },
 };
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index e666eafed15..b2215c61cdf 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
                 oprofilefs.o oprofile_stats.o \
                 timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y                             := $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 72e09eb642d..8aa974491df 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -25,139 +25,10 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_HW_PERF_EVENTS
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
-        unsigned long count;
-        unsigned long enabled;
-        unsigned long event;
-        unsigned long unit_mask;
-        unsigned long kernel;
-        unsigned long user;
-        struct perf_event_attr attr;
-};
-
-static int op_arm_enabled;
-static DEFINE_MUTEX(op_arm_mutex);
-
-static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event, int unused,
-                        struct perf_sample_data *data, struct pt_regs *regs)
+char *op_name_from_perf_id(void)
 {
-        int id;
-        u32 cpu = smp_processor_id();
-
-        for (id = 0; id < perf_num_counters; ++id)
-                if (perf_events[cpu][id] == event)
-                        break;
-
-        if (id != perf_num_counters)
-                oprofile_add_sample(regs, id);
-        else
-                pr_warning("oprofile: ignoring spurious overflow "
-                                "on cpu %u\n", cpu);
-}
-
-/*
- * Called by op_arm_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
-{
-        int i;
-        u32 size = sizeof(struct perf_event_attr);
-        struct perf_event_attr *attr;
-
-        for (i = 0; i < perf_num_counters; ++i) {
-                attr = &counter_config[i].attr;
-                memset(attr, 0, size);
-                attr->type = PERF_TYPE_RAW;
-                attr->size = size;
-                attr->config = counter_config[i].event;
-                attr->sample_period = counter_config[i].count;
-                attr->pinned = 1;
-        }
-}
-
-static int op_create_counter(int cpu, int event)
-{
-        int ret = 0;
-        struct perf_event *pevent;
-
-        if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
-                return ret;
-
-        pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-                                                  cpu, -1,
-                                                  op_overflow_handler);
-
-        if (IS_ERR(pevent)) {
-                ret = PTR_ERR(pevent);
-        } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
-                perf_event_release_kernel(pevent);
-                pr_warning("oprofile: failed to enable event %d "
-                                "on CPU %d\n", event, cpu);
-                ret = -EBUSY;
-        } else {
-                perf_events[cpu][event] = pevent;
-        }
-
-        return ret;
-}
+        enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
 
-static void op_destroy_counter(int cpu, int event)
-{
-        struct perf_event *pevent = perf_events[cpu][event];
-
-        if (pevent) {
-                perf_event_release_kernel(pevent);
-                perf_events[cpu][event] = NULL;
-        }
-}
-
-/*
- * Called by op_arm_start to create active perf events based on the
- * perviously configured attributes.
- */
-static int op_perf_start(void)
-{
-        int cpu, event, ret = 0;
-
-        for_each_online_cpu(cpu) {
-                for (event = 0; event < perf_num_counters; ++event) {
-                        ret = op_create_counter(cpu, event);
-                        if (ret)
-                                goto out;
-                }
-        }
-
-out:
-        return ret;
-}
-
-/*
- * Called by op_arm_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
-{
-        int cpu, event;
-
-        for_each_online_cpu(cpu)
-                for (event = 0; event < perf_num_counters; ++event)
-                        op_destroy_counter(cpu, event);
-}
-
-
-static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
-{
         switch (id) {
         case ARM_PERF_PMU_ID_XSCALE1:
                 return "arm/xscale1";
@@ -176,116 +47,6 @@ static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
         }
 }
 
-static int op_arm_create_files(struct super_block *sb, struct dentry *root)
-{
-        unsigned int i;
-
-        for (i = 0; i < perf_num_counters; i++) {
-                struct dentry *dir;
-                char buf[4];
-
-                snprintf(buf, sizeof buf, "%d", i);
-                dir = oprofilefs_mkdir(sb, root, buf);
-                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
-        }
-
-        return 0;
-}
-
-static int op_arm_setup(void)
-{
-        spin_lock(&oprofilefs_lock);
-        op_perf_setup();
-        spin_unlock(&oprofilefs_lock);
-        return 0;
-}
-
-static int op_arm_start(void)
-{
-        int ret = -EBUSY;
-
-        mutex_lock(&op_arm_mutex);
-        if (!op_arm_enabled) {
-                ret = 0;
-                op_perf_start();
-                op_arm_enabled = 1;
-        }
-        mutex_unlock(&op_arm_mutex);
-        return ret;
-}
-
-static void op_arm_stop(void)
-{
-        mutex_lock(&op_arm_mutex);
-        if (op_arm_enabled)
-                op_perf_stop();
-        op_arm_enabled = 0;
-        mutex_unlock(&op_arm_mutex);
-}
-
-#ifdef CONFIG_PM
-static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
-{
-        mutex_lock(&op_arm_mutex);
-        if (op_arm_enabled)
-                op_perf_stop();
-        mutex_unlock(&op_arm_mutex);
-        return 0;
-}
-
-static int op_arm_resume(struct platform_device *dev)
-{
-        mutex_lock(&op_arm_mutex);
-        if (op_arm_enabled && op_perf_start())
-                op_arm_enabled = 0;
-        mutex_unlock(&op_arm_mutex);
-        return 0;
-}
-
-static struct platform_driver oprofile_driver = {
-        .driver         = {
-                .name           = "arm-oprofile",
-        },
-        .resume         = op_arm_resume,
-        .suspend        = op_arm_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
-        int ret;
-
-        ret = platform_driver_register(&oprofile_driver);
-        if (ret)
-                goto out;
-
-        oprofile_pdev = platform_device_register_simple(
-                                oprofile_driver.driver.name, 0, NULL, 0);
-        if (IS_ERR(oprofile_pdev)) {
-                ret = PTR_ERR(oprofile_pdev);
-                platform_driver_unregister(&oprofile_driver);
-        }
-
-out:
-        return ret;
-}
-
-static void exit_driverfs(void)
-{
-        platform_device_unregister(oprofile_pdev);
-        platform_driver_unregister(&oprofile_driver);
-}
-#else
-static int __init init_driverfs(void) { return 0; }
-#define exit_driverfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int report_trace(struct stackframe *frame, void *d)
 {
         unsigned int *depth = d;
@@ -350,74 +111,14 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-        int cpu, ret = 0;
-
-        perf_num_counters = armpmu_get_max_events();
-
-        counter_config = kcalloc(perf_num_counters,
-                        sizeof(struct op_counter_config), GFP_KERNEL);
-
-        if (!counter_config) {
-                pr_info("oprofile: failed to allocate %d "
-                                "counters\n", perf_num_counters);
-                return -ENOMEM;
-        }
-
-        ret = init_driverfs();
-        if (ret) {
-                kfree(counter_config);
-                counter_config = NULL;
-                return ret;
-        }
-
-        for_each_possible_cpu(cpu) {
-                perf_events[cpu] = kcalloc(perf_num_counters,
-                                sizeof(struct perf_event *), GFP_KERNEL);
-                if (!perf_events[cpu]) {
-                        pr_info("oprofile: failed to allocate %d perf events "
-                                        "for cpu %d\n", perf_num_counters, cpu);
-                        while (--cpu >= 0)
-                                kfree(perf_events[cpu]);
-                        return -ENOMEM;
-                }
-        }
-
         ops->backtrace          = arm_backtrace;
-        ops->create_files       = op_arm_create_files;
-        ops->setup              = op_arm_setup;
-        ops->start              = op_arm_start;
-        ops->stop               = op_arm_stop;
-        ops->shutdown           = op_arm_stop;
-        ops->cpu_type           = op_name_from_perf_id(armpmu_get_pmu_id());
-
-        if (!ops->cpu_type)
-                ret = -ENODEV;
-        else
-                pr_info("oprofile: using %s\n", ops->cpu_type);
 
-        return ret;
+        return oprofile_perf_init(ops);
 }
 
-void oprofile_arch_exit(void)
+void __exit oprofile_arch_exit(void)
 {
-        int cpu, id;
-        struct perf_event *event;
-
-        if (*perf_events) {
-                for_each_possible_cpu(cpu) {
-                        for (id = 0; id < perf_num_counters; ++id) {
-                                event = perf_events[cpu][id];
-                                if (event != NULL)
-                                        perf_event_release_kernel(event);
-                        }
-                        kfree(perf_events[cpu]);
-                }
-        }
-
-        if (counter_config) {
-                kfree(counter_config);
-                exit_driverfs();
-        }
+        oprofile_perf_exit();
 }
 #else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
@@ -425,5 +126,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
         pr_info("oprofile: hardware counters not available\n");
         return -ENODEV;
 }
-void oprofile_arch_exit(void) {}
+void __exit oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index ae8e802bdca..ff1a561b326 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -32,6 +32,8 @@ struct s3c64xx_spi_csinfo {
  * struct s3c64xx_spi_info - SPI Controller defining structure
  * @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
  * @src_clk_name: Platform name of the corresponding clock.
+ * @clk_from_cmu: If the SPI clock/prescalar control block is present
+ *      by the platform's clock-management-unit and not in SPI controller.
  * @num_cs: Number of CS this controller emulates.
  * @cfg_gpio: Configure pins for this SPI controller.
  * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6
@@ -41,6 +43,7 @@ struct s3c64xx_spi_csinfo {
 struct s3c64xx_spi_info {
         int src_clk_nr;
         char *src_clk_name;
+        bool clk_from_cmu;
 
         int num_cs;
 
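Note: clk_from_cmu is the only new platform ABI in this header change. A board whose SPI prescaler sits in the clock-management unit rather than in the controller would flag it roughly like this (hypothetical board code; the clock name is made up):

static struct s3c64xx_spi_info example_spi_pdata = {
        .src_clk_nr   = 0,
        .src_clk_name = "spi-bus",      /* hypothetical clock name */
        .clk_from_cmu = true,           /* divider lives in the CMU */
        .num_cs       = 1,
};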