Diffstat (limited to 'include/linux/perf/arm_pmu.h')
-rw-r--r--	include/linux/perf/arm_pmu.h	154
1 file changed, 154 insertions, 0 deletions
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 000000000000..bfa673bb822d
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/cputype.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
};

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS	32

#define HW_OP_UNSUPPORTED	0xFFFF
#define C(_x)			PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED	0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu *percpu_pmu;
};

struct arm_pmu {
	struct pmu pmu;
	cpumask_t active_irqs;
	cpumask_t supported_cpus;
	int *irq_affinity;
	char *name;
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	void (*enable)(struct perf_event *event);
	void (*disable)(struct perf_event *event);
	int (*get_event_idx)(struct pmu_hw_events *hw_events,
			     struct perf_event *event);
	void (*clear_event_idx)(struct pmu_hw_events *hw_events,
				struct perf_event *event);
	int (*set_event_filter)(struct hw_perf_event *evt,
				struct perf_event_attr *attr);
	u32 (*read_counter)(struct perf_event *event);
	void (*write_counter)(struct perf_event *event, u32 val);
	void (*start)(struct arm_pmu *);
	void (*stop)(struct arm_pmu *);
	void (*reset)(void *);
	int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void (*free_irq)(struct arm_pmu *);
	int (*map_event)(struct perf_event *event);
	int num_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
	u64 max_period;
	struct platform_device *plat_device;
	struct pmu_hw_events __percpu *hw_events;
	struct notifier_block hotplug_nb;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

int armpmu_register(struct arm_pmu *armpmu, int type);

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	int (*init)(struct arm_pmu *);
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn)	\
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);

#endif /* CONFIG_ARM_PMU */

#endif /* __ARM_PMU_H__ */
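
The map helpers above (PERF_MAP_ALL_UNSUPPORTED, PERF_CACHE_MAP_ALL_UNSUPPORTED, C()) are meant to be used by a CPU PMU driver when declaring its event tables and its ->map_event callback. Below is a minimal sketch, not part of this commit: the MY_PMU_EVT_* encodings, the 0x04 cache encoding, the 0xFF raw event mask and all my_pmu_* names are invented for illustration; only the macros and armpmu_map_event() come from this header.

/* Illustrative sketch only; encodings and names are hypothetical. */
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>

#define MY_PMU_EVT_CYCLES	0x11	/* hypothetical hardware encoding */
#define MY_PMU_EVT_INSTR	0x08	/* hypothetical hardware encoding */
#define MY_PMU_RAW_EVENT_MASK	0xFF	/* hypothetical raw event width */

/* Start from "everything unsupported", then override the known events. */
static const unsigned my_pmu_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]   = MY_PMU_EVT_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = MY_PMU_EVT_INSTR,
};

static const unsigned my_pmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = 0x04,	/* hypothetical */
};

/* ->map_event callback for the struct arm_pmu describing this PMU. */
static int my_pmu_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &my_pmu_perf_map,
				&my_pmu_perf_cache_map,
				MY_PMU_RAW_EVENT_MASK);
}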
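
Likewise, arm_pmu_device_probe() is intended to be called from a platform driver's probe routine, taking a device-tree match table and a CPUID-keyed pmu_probe_info table as fallback. A minimal sketch follows, again assuming a hypothetical driver: my_a9_pmu_init(), the "my,cpu-pmu" compatible string and the "my-cpu-pmu" driver name are invented; PMU_PROBE/ARM_PMU_PROBE, struct pmu_probe_info and arm_pmu_device_probe() come from this header, and ARM_CPU_PART_CORTEX_A9 from the arm <asm/cputype.h>.

/* Illustrative sketch only; driver names and bindings are hypothetical. */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/perf/arm_pmu.h>

static int my_a9_pmu_init(struct arm_pmu *pmu)
{
	/* Fill in pmu->name, pmu->map_event, pmu->handle_irq, ... here. */
	return 0;
}

/* CPUID-based fallback, used when no device-tree match is found. */
static const struct pmu_probe_info my_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, my_a9_pmu_init),
	{ /* sentinel */ }
};

static const struct of_device_id my_pmu_of_match[] = {
	{ .compatible = "my,cpu-pmu", .data = my_a9_pmu_init },
	{ /* sentinel */ }
};

static int my_pmu_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, my_pmu_of_match,
				    my_pmu_probe_table);
}

static struct platform_driver my_pmu_driver = {
	.driver = {
		.name		= "my-cpu-pmu",
		.of_match_table	= my_pmu_of_match,
	},
	.probe = my_pmu_probe,
};
module_platform_driver(my_pmu_driver);

MODULE_LICENSE("GPL");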