Diffstat (limited to 'arch/arm/include/asm/pmu.h'):
 arch/arm/include/asm/pmu.h | 77 ++++++++++-----------------------
 1 file changed, 24 insertions(+), 53 deletions(-)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 4432305f4a2a..a26170dce02e 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -16,69 +16,30 @@
 #include <linux/perf_event.h>
 
 /*
- * Types of PMUs that can be accessed directly and require mutual
- * exclusion between profiling tools.
- */
-enum arm_pmu_type {
-	ARM_PMU_DEVICE_CPU	= 0,
-	ARM_NUM_PMU_DEVICES,
-};
-
-/*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
  * @handle_irq: an optional handler which will be called from the
  *	interrupt and passed the address of the low level handler,
  *	and can be used to implement any platform specific handling
  *	before or after calling it.
- * @enable_irq: an optional handler which will be called after
- *	request_irq and be used to handle some platform specific
- *	irq enablement
- * @disable_irq: an optional handler which will be called before
- *	free_irq and be used to handle some platform specific
- *	irq disablement
+ * @runtime_resume: an optional handler which will be called by the
+ *	runtime PM framework following a call to pm_runtime_get().
+ *	Note that if pm_runtime_get() is called more than once in
+ *	succession this handler will only be called once.
+ * @runtime_suspend: an optional handler which will be called by the
+ *	runtime PM framework following a call to pm_runtime_put().
+ *	Note that if pm_runtime_get() is called more than once in
+ *	succession this handler will only be called following the
+ *	final call to pm_runtime_put() that actually disables the
+ *	hardware.
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
-	void (*enable_irq)(int irq);
-	void (*disable_irq)(int irq);
+	int (*runtime_resume)(struct device *dev);
+	int (*runtime_suspend)(struct device *dev);
 };
 
-#ifdef CONFIG_CPU_HAS_PMU
-
-/**
- * reserve_pmu() - reserve the hardware performance counters
- *
- * Reserve the hardware performance counters in the system for exclusive use.
- * Returns 0 on success or -EBUSY if the lock is already held.
- */
-extern int
-reserve_pmu(enum arm_pmu_type type);
-
-/**
- * release_pmu() - Relinquish control of the performance counters
- *
- * Release the performance counters and allow someone else to use them.
- */
-extern void
-release_pmu(enum arm_pmu_type type);
-
-#else /* CONFIG_CPU_HAS_PMU */
-
-#include <linux/err.h>
-
-static inline int
-reserve_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
-
-static inline void
-release_pmu(enum arm_pmu_type type) { }
-
-#endif /* CONFIG_CPU_HAS_PMU */
-
 #ifdef CONFIG_HW_PERF_EVENTS
 
 /* The events for a given PMU register set. */
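
A sketch of how a board file might fill in the reworked platdata. The board
name, the clock, and the clock handling below are hypothetical and not part
of this patch; only the two callback fields come from the header above.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <asm/pmu.h>

	static struct clk *board_pmu_clk;	/* hypothetical gated PMU clock */

	static int board_pmu_runtime_resume(struct device *dev)
	{
		/* Runs once when the PMU goes from unused to in use. */
		return clk_prepare_enable(board_pmu_clk);
	}

	static int board_pmu_runtime_suspend(struct device *dev)
	{
		/* Runs once, after the final pm_runtime_put(). */
		clk_disable_unprepare(board_pmu_clk);
		return 0;
	}

	static struct arm_pmu_platdata board_pmu_platdata = {
		.runtime_resume		= board_pmu_runtime_resume,
		.runtime_suspend	= board_pmu_runtime_suspend,
	};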
@@ -103,7 +64,6 @@ struct pmu_hw_events {
 
 struct arm_pmu {
 	struct pmu	pmu;
-	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
@@ -118,6 +78,8 @@ struct arm_pmu {
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
+	int		(*request_irq)(irq_handler_t handler);
+	void		(*free_irq)(void);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
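
The new request_irq/free_irq methods move interrupt claiming behind struct
arm_pmu, so the core no longer needs platform-specific enable/disable hooks.
A rough sketch of a core-side caller, assuming (this is not shown in the
header itself) that interrupts are claimed when the first event is created
and released with the last one:

	/* Hypothetical core-side usage of the new methods. */
	static int armpmu_reserve_hardware(struct arm_pmu *armpmu)
	{
		/* handle_irq matches irq_handler_t, so it can be passed directly. */
		return armpmu->request_irq(armpmu->handle_irq);
	}

	static void armpmu_release_hardware(struct arm_pmu *armpmu)
	{
		armpmu->free_irq();
	}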
@@ -129,7 +91,9 @@ struct arm_pmu {
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+extern const struct dev_pm_ops armpmu_dev_pm_ops;
+
+int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
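
The exported armpmu_dev_pm_ops is what routes runtime PM into the platdata
runtime_suspend/runtime_resume handlers documented above. A sketch of a
hypothetical PMU platform driver hooking it up (the driver name and probe
function are illustrative, not taken from this patch):

	#include <linux/platform_device.h>
	#include <asm/pmu.h>

	static int cpu_pmu_probe(struct platform_device *pdev);	/* hypothetical */

	static struct platform_driver cpu_pmu_driver = {
		.driver		= {
			.name	= "arm-pmu",
			.pm	= &armpmu_dev_pm_ops,
		},
		.probe		= cpu_pmu_probe,
	};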
@@ -139,6 +103,13 @@ int armpmu_event_set_period(struct perf_event *event,
 			    struct hw_perf_event *hwc,
 			    int idx);
 
+int armpmu_map_event(struct perf_event *event,
+		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		     u32 raw_event_mask);
+
 #endif /* CONFIG_HW_PERF_EVENTS */
 
 #endif /* __ARM_PMU_H__ */
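
The shared armpmu_map_event() helper lets each PMU back-end supply only its
lookup tables and raw event mask. A sketch of a back-end map_event built on
it; the event encodings and the 0xff mask are illustrative only, and a real
table would mark unsupported entries explicitly rather than leaving them zero:

	/* Hypothetical per-PMU tables; indices come from <linux/perf_event.h>. */
	static const unsigned board_perf_map[PERF_COUNT_HW_MAX] = {
		[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,
		[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,
	};

	static const unsigned board_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX];

	static int board_map_event(struct perf_event *event)
	{
		return armpmu_map_event(event, &board_perf_map,
					&board_perf_cache_map, 0xff);
	}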