Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/cti.h         | 179
-rw-r--r--  arch/arm/include/asm/perf_event.h  |   3
-rw-r--r--  arch/arm/include/asm/pmu.h         |  15
-rw-r--r--  arch/arm/kernel/perf_event.c       |  19
4 files changed, 200 insertions(+), 16 deletions(-)
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 000000000000..a0ada3ea4358
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,179 @@
+#ifndef __ASMARM_CTI_H
+#define __ASMARM_CTI_H
+
+#include <asm/io.h>
+
+/* The registers' definition is from section 3.2 of
+ * Embedded Cross Trigger Revision: r0p0
+ */
+#define CTICONTROL		0x000
+#define CTISTATUS		0x004
+#define CTILOCK			0x008
+#define CTIPROTECTION		0x00C
+#define CTIINTACK		0x010
+#define CTIAPPSET		0x014
+#define CTIAPPCLEAR		0x018
+#define CTIAPPPULSE		0x01c
+#define CTIINEN			0x020
+#define CTIOUTEN		0x0A0
+#define CTITRIGINSTATUS		0x130
+#define CTITRIGOUTSTATUS	0x134
+#define CTICHINSTATUS		0x138
+#define CTICHOUTSTATUS		0x13c
+#define CTIPERIPHID0		0xFE0
+#define CTIPERIPHID1		0xFE4
+#define CTIPERIPHID2		0xFE8
+#define CTIPERIPHID3		0xFEC
+#define CTIPCELLID0		0xFF0
+#define CTIPCELLID1		0xFF4
+#define CTIPCELLID2		0xFF8
+#define CTIPCELLID3		0xFFC
+
+/* The below are from section 3.6.4 of
+ * CoreSight v1.0 Architecture Specification
+ */
+#define LOCKACCESS		0xFB0
+#define LOCKSTATUS		0xFB4
+
+/* Writing this value to LOCKACCESS unlocks the module;
+ * writing any other value locks it.
+ */
+#define LOCKCODE		0xC5ACCE55
+
+/**
+ * struct cti - cross trigger interface struct
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out_for_irq: trigger out number which will cause
+ *	@irq to be raised
+ *
+ * cti struct used to operate cti registers.
+ */
+struct cti {
+	void __iomem *base;
+	int irq;
+	int trig_out_for_irq;
+};
+
+/**
+ * cti_init - initialize the cti instance
+ * @cti: cti instance
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out: trigger out number which will cause
+ *	@irq to be raised
+ *
+ * Called by machine code to pass the board-dependent
+ * @base, @irq and @trig_out to the cti instance.
+ */
+static inline void cti_init(struct cti *cti,
+	void __iomem *base, int irq, int trig_out)
+{
+	cti->base = base;
+	cti->irq = irq;
+	cti->trig_out_for_irq = trig_out;
+}
+
+/**
+ * cti_map_trigger - use @chan to map @trig_in to @trig_out
+ * @cti: cti instance
+ * @trig_in: trigger in number
+ * @trig_out: trigger out number
+ * @chan: channel number
+ *
+ * This function maps the trigger in @trig_in to the trigger
+ * out @trig_out using the channel @chan.
+ */
+static inline void cti_map_trigger(struct cti *cti,
+	int trig_in, int trig_out, int chan)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINEN + trig_in * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIINEN + trig_in * 4);
+
+	val = __raw_readl(base + CTIOUTEN + trig_out * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIOUTEN + trig_out * 4);
+}
+
+/**
+ * cti_enable - enable the cti module
+ * @cti: cti instance
+ *
+ * Enable the cti module.
+ */
+static inline void cti_enable(struct cti *cti)
+{
+	__raw_writel(0x1, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_disable - disable the cti module
+ * @cti: cti instance
+ *
+ * Disable the cti module.
+ */
+static inline void cti_disable(struct cti *cti)
+{
+	__raw_writel(0, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_irq_ack - clear the cti irq
+ * @cti: cti instance
+ *
+ * Clear the cti irq.
+ */
+static inline void cti_irq_ack(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINTACK);
+	val |= BIT(cti->trig_out_for_irq);
+	__raw_writel(val, base + CTIINTACK);
+}
+
+/**
+ * cti_unlock - unlock cti module
+ * @cti: cti instance
+ *
+ * Unlock the cti module; otherwise any writes to the cti
+ * registers are not allowed.
+ */
+static inline void cti_unlock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (val & 1) {
+		val = LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+
+/**
+ * cti_lock - lock cti module
+ * @cti: cti instance
+ *
+ * Lock the cti module, so that any writes to the cti
+ * registers are not allowed.
+ */
+static inline void cti_lock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (!(val & 1)) {
+		val = ~LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+#endif
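
As an illustration only (not part of this patch): a board with a CTI sitting between its PMU and the interrupt controller could program it with the helpers above roughly as below. The trigger, channel and register-base values are hypothetical placeholders.

#include <linux/interrupt.h>
#include <asm/cti.h>

static struct cti pmu_cti;

/* Hypothetical wiring: the PMU overflow signal arrives on CTI
 * trigger input 1 and is routed over channel 2 to trigger output
 * 6, which is the output wired to the interrupt controller. All
 * numbers and the ioremap()ed base are placeholders.
 */
static void board_pmu_cti_setup(void __iomem *cti_base, int irq)
{
	cti_init(&pmu_cti, cti_base, irq, 6);
	cti_unlock(&pmu_cti);			/* allow register writes */
	cti_map_trigger(&pmu_cti, 1, 6, 2);	/* trig_in 1 -> trig_out 6 via chan 2 */
	cti_enable(&pmu_cti);
}

/* The routed trigger typically has to be acked in the interrupt
 * path, otherwise the trigger output stays asserted.
 */
static void board_pmu_cti_ack(void)
{
	cti_irq_ack(&pmu_cti);
}
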
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 0f8e3827a89b..99cfe3607989 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -32,7 +32,4 @@ enum arm_perf_pmu_ids {
 extern enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void);
 
-extern int
-armpmu_get_max_events(void);
-
 #endif /* __ARM_PERF_EVENT_H__ */
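
The removed declaration has a direct replacement: the generic perf hook which the perf_event.c change below now implements on ARM. A minimal migration sketch for any caller of the old helper, assuming the usual perf_num_counters() declaration in <linux/perf_event.h>:

#include <linux/perf_event.h>

/* Previously: counters = armpmu_get_max_events(); (ARM-only, now removed) */
static int query_pmu_counters(void)
{
	return perf_num_counters();	/* generic API, same value */
}
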
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0bda22c094a6..b5a5be2536c1 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -27,13 +27,22 @@ enum arm_pmu_type {
 /*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
- * @handle_irq: an optional handler which will be called from the interrupt and
- * passed the address of the low level handler, and can be used to implement
- * any platform specific handling before or after calling it.
+ * @handle_irq: an optional handler which will be called from the
+ * interrupt and passed the address of the low level handler,
+ * and can be used to implement any platform specific handling
+ * before or after calling it.
+ * @enable_irq: an optional handler which will be called after
+ * request_irq and can be used to handle some platform specific
+ * irq enablement
+ * @disable_irq: an optional handler which will be called before
+ * free_irq and can be used to handle some platform specific
+ * irq disablement
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
+	void (*enable_irq)(int irq);
+	void (*disable_irq)(int irq);
 };
 
 #ifdef CONFIG_CPU_HAS_PMU
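
A minimal sketch of how a board file might fill in the two new hooks. The names and callback bodies are hypothetical (they could, for instance, drive the CTI helpers added above); only the struct fields come from this patch.

#include <asm/pmu.h>

/* Hypothetical callbacks: (re)enable and disable whatever
 * board-level routing (e.g. a cross trigger path) sits between
 * the PMU and the interrupt controller.
 */
static void board_pmu_enable_irq(int irq)
{
	/* e.g. cti_unlock() + cti_enable() on the PMU's CTI */
}

static void board_pmu_disable_irq(int irq)
{
	/* e.g. cti_disable() on the PMU's CTI */
}

/* Passed as platform_data of the PMU platform device; the core
 * calls enable_irq after a successful request_irq and disable_irq
 * before free_irq (see the perf_event.c hunks below).
 */
static struct arm_pmu_platdata board_pmu_platdata = {
	.enable_irq	= board_pmu_enable_irq,
	.disable_irq	= board_pmu_disable_irq,
};
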
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c475379199b1..172101ac97de 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -59,8 +59,7 @@ armpmu_get_pmu_id(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
 
-int
-armpmu_get_max_events(void)
+int perf_num_counters(void)
 {
 	int max_events = 0;
 
@@ -69,12 +68,6 @@ armpmu_get_max_events(void)
 
 	return max_events;
 }
-EXPORT_SYMBOL_GPL(armpmu_get_max_events);
-
-int perf_num_counters(void)
-{
-	return armpmu_get_max_events();
-}
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
 #define HW_OP_UNSUPPORTED 0xFFFF
@@ -380,6 +373,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat =
+		dev_get_platdata(&pmu_device->dev);
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
@@ -387,8 +382,11 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
 		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
 			continue;
 		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
+		if (irq >= 0) {
+			if (plat && plat->disable_irq)
+				plat->disable_irq(irq);
 			free_irq(irq, armpmu);
+		}
 	}
 
 	release_pmu(armpmu->type);
@@ -448,7 +446,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 				irq);
 			armpmu_release_hardware(armpmu);
 			return err;
-		}
+		} else if (plat && plat->enable_irq)
+			plat->enable_irq(irq);
 
 		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}