author	Kevin Winchester <kjwinchester@gmail.com>	2011-08-30 19:41:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-26 06:58:00 -0400
commit	de0428a7ad4856c7b5b8a2792488ac893e6f3faa (patch)
tree	63cf492f6e6a1b11aa8d4271df50b7c71649a49d /arch
parent	ed3982cf3748b657ffb79d9d1c2e4a562661db2d (diff)
x86, perf: Clean up perf_event cpu code
The CPU support for perf events on x86 was implemented via included C files
with #ifdefs. Clean this up by creating a new header file and compiling the
vendor-specific files as needed.

Signed-off-by: Kevin Winchester <kjwinchester@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1314747665-2090-1-git-send-email-kjwinchester@gmail.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/Makefile	5
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	369
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	493
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	18
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	53
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_ds.c	79
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_lbr.c	28
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p4.c	10
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p6.c	9
9 files changed, 604 insertions(+), 460 deletions(-)
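The change follows a standard kbuild pattern: declare the symbols the vendor files need in a shared header, keep the single definition of each symbol in the core file, and let the Makefile select which vendor objects are built. A minimal sketch of that pattern, using hypothetical names (pmu.h, pmu_core.c, pmu_vendor.c, CONFIG_PMU_VENDOR) rather than the real files; the actual interface is the perf_event.h and Makefile hunks in the diff below:

	/* pmu.h - shared declarations, replacing copies hidden behind #ifdef */
	struct pmu_ops {
		void (*enable_all)(int added);
	};
	extern struct pmu_ops pmu_ops;	/* defined once, in pmu_core.c */
	int vendor_pmu_init(void);	/* implemented by the vendor file */

	/* pmu_core.c - previously #include-d the vendor .c files directly */
	#include "pmu.h"
	struct pmu_ops pmu_ops;		/* no longer static: vendor code is now a separate object */

	/* pmu_vendor.c - compiled on its own, only when its config option is set */
	#include "pmu.h"
	int vendor_pmu_init(void)
	{
		return 0;		/* vendor-specific setup would go here */
	}

	# Makefile fragment: build the vendor object instead of #include-ing its source
	obj-$(CONFIG_PMU_VENDOR) += pmu_vendor.o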
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6042981d0309..1044fd787db8 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,6 +28,11 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
 
 obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 
+ifdef CONFIG_PERF_EVENTS
+obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+endif
+
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
 
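A side benefit of building the vendor support as separate objects (standard kbuild behaviour, not something the commit itself states): each file can now be compile-tested as a single target on a configured tree, for example:

	make arch/x86/kernel/cpu/perf_event_intel.o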
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 05df6e3d8b64..8ab89112f93c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
 #include <asm/smp.h>
 #include <asm/alternative.h>
 
+#include "perf_event.h"
+
 #if 0
 #undef wrmsrl
 #define wrmsrl(msr, val) \
@@ -43,285 +45,17 @@ do { \
43} while (0) 45} while (0)
44#endif 46#endif
45 47
46/* 48struct x86_pmu x86_pmu __read_mostly;
47 * | NHM/WSM | SNB |
48 * register -------------------------------
49 * | HT | no HT | HT | no HT |
50 *-----------------------------------------
51 * offcore | core | core | cpu | core |
52 * lbr_sel | core | core | cpu | core |
53 * ld_lat | cpu | core | cpu | core |
54 *-----------------------------------------
55 *
56 * Given that there is a small number of shared regs,
57 * we can pre-allocate their slot in the per-cpu
58 * per-core reg tables.
59 */
60enum extra_reg_type {
61 EXTRA_REG_NONE = -1, /* not used */
62
63 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
64 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
65
66 EXTRA_REG_MAX /* number of entries needed */
67};
68
69struct event_constraint {
70 union {
71 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
72 u64 idxmsk64;
73 };
74 u64 code;
75 u64 cmask;
76 int weight;
77};
78
79struct amd_nb {
80 int nb_id; /* NorthBridge id */
81 int refcnt; /* reference count */
82 struct perf_event *owners[X86_PMC_IDX_MAX];
83 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
84};
85
86struct intel_percore;
87
88#define MAX_LBR_ENTRIES 16
89
90struct cpu_hw_events {
91 /*
92 * Generic x86 PMC bits
93 */
94 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
95 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
96 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
97 int enabled;
98
99 int n_events;
100 int n_added;
101 int n_txn;
102 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
103 u64 tags[X86_PMC_IDX_MAX];
104 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
105
106 unsigned int group_flag;
107
108 /*
109 * Intel DebugStore bits
110 */
111 struct debug_store *ds;
112 u64 pebs_enabled;
113
114 /*
115 * Intel LBR bits
116 */
117 int lbr_users;
118 void *lbr_context;
119 struct perf_branch_stack lbr_stack;
120 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
121
122 /*
123 * manage shared (per-core, per-cpu) registers
124 * used on Intel NHM/WSM/SNB
125 */
126 struct intel_shared_regs *shared_regs;
127
128 /*
129 * AMD specific bits
130 */
131 struct amd_nb *amd_nb;
132
133 void *kfree_on_online;
134};
135
136#define __EVENT_CONSTRAINT(c, n, m, w) {\
137 { .idxmsk64 = (n) }, \
138 .code = (c), \
139 .cmask = (m), \
140 .weight = (w), \
141}
142
143#define EVENT_CONSTRAINT(c, n, m) \
144 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
145
146/*
147 * Constraint on the Event code.
148 */
149#define INTEL_EVENT_CONSTRAINT(c, n) \
150 EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
151
152/*
153 * Constraint on the Event code + UMask + fixed-mask
154 *
155 * filter mask to validate fixed counter events.
156 * the following filters disqualify for fixed counters:
157 * - inv
158 * - edge
159 * - cnt-mask
160 * The other filters are supported by fixed counters.
161 * The any-thread option is supported starting with v3.
162 */
163#define FIXED_EVENT_CONSTRAINT(c, n) \
164 EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
165
166/*
167 * Constraint on the Event code + UMask
168 */
169#define INTEL_UEVENT_CONSTRAINT(c, n) \
170 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
171
172#define EVENT_CONSTRAINT_END \
173 EVENT_CONSTRAINT(0, 0, 0)
174
175#define for_each_event_constraint(e, c) \
176 for ((e) = (c); (e)->weight; (e)++)
177
178/*
179 * Per register state.
180 */
181struct er_account {
182 raw_spinlock_t lock; /* per-core: protect structure */
183 u64 config; /* extra MSR config */
184 u64 reg; /* extra MSR number */
185 atomic_t ref; /* reference count */
186};
187
188/*
189 * Extra registers for specific events.
190 *
191 * Some events need large masks and require external MSRs.
192 * Those extra MSRs end up being shared for all events on
193 * a PMU and sometimes between PMU of sibling HT threads.
194 * In either case, the kernel needs to handle conflicting
195 * accesses to those extra, shared, regs. The data structure
196 * to manage those registers is stored in cpu_hw_event.
197 */
198struct extra_reg {
199 unsigned int event;
200 unsigned int msr;
201 u64 config_mask;
202 u64 valid_mask;
203 int idx; /* per_xxx->regs[] reg index */
204};
205
206#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
207 .event = (e), \
208 .msr = (ms), \
209 .config_mask = (m), \
210 .valid_mask = (vm), \
211 .idx = EXTRA_REG_##i \
212 }
213
214#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
215 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
216 49
217#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) 50DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
218
219union perf_capabilities {
220 struct {
221 u64 lbr_format : 6;
222 u64 pebs_trap : 1;
223 u64 pebs_arch_reg : 1;
224 u64 pebs_format : 4;
225 u64 smm_freeze : 1;
226 };
227 u64 capabilities;
228};
229
230/*
231 * struct x86_pmu - generic x86 pmu
232 */
233struct x86_pmu {
234 /*
235 * Generic x86 PMC bits
236 */
237 const char *name;
238 int version;
239 int (*handle_irq)(struct pt_regs *);
240 void (*disable_all)(void);
241 void (*enable_all)(int added);
242 void (*enable)(struct perf_event *);
243 void (*disable)(struct perf_event *);
244 int (*hw_config)(struct perf_event *event);
245 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
246 unsigned eventsel;
247 unsigned perfctr;
248 u64 (*event_map)(int);
249 int max_events;
250 int num_counters;
251 int num_counters_fixed;
252 int cntval_bits;
253 u64 cntval_mask;
254 int apic;
255 u64 max_period;
256 struct event_constraint *
257 (*get_event_constraints)(struct cpu_hw_events *cpuc,
258 struct perf_event *event);
259
260 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
261 struct perf_event *event);
262 struct event_constraint *event_constraints;
263 void (*quirks)(void);
264 int perfctr_second_write;
265
266 int (*cpu_prepare)(int cpu);
267 void (*cpu_starting)(int cpu);
268 void (*cpu_dying)(int cpu);
269 void (*cpu_dead)(int cpu);
270
271 /*
272 * Intel Arch Perfmon v2+
273 */
274 u64 intel_ctrl;
275 union perf_capabilities intel_cap;
276
277 /*
278 * Intel DebugStore bits
279 */
280 int bts, pebs;
281 int bts_active, pebs_active;
282 int pebs_record_size;
283 void (*drain_pebs)(struct pt_regs *regs);
284 struct event_constraint *pebs_constraints;
285
286 /*
287 * Intel LBR
288 */
289 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
290 int lbr_nr; /* hardware stack size */
291
292 /*
293 * Extra registers for events
294 */
295 struct extra_reg *extra_regs;
296 unsigned int er_flags;
297};
298
299#define ERF_NO_HT_SHARING 1
300#define ERF_HAS_RSP_1 2
301
302static struct x86_pmu x86_pmu __read_mostly;
303
304static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
305 .enabled = 1, 51 .enabled = 1,
306}; 52};
307 53
308static int x86_perf_event_set_period(struct perf_event *event); 54u64 __read_mostly hw_cache_event_ids
309
310/*
311 * Generalized hw caching related hw_event table, filled
312 * in on a per model basis. A value of 0 means
313 * 'not supported', -1 means 'hw_event makes no sense on
314 * this CPU', any other value means the raw hw_event
315 * ID.
316 */
317
318#define C(x) PERF_COUNT_HW_CACHE_##x
319
320static u64 __read_mostly hw_cache_event_ids
321 [PERF_COUNT_HW_CACHE_MAX] 55 [PERF_COUNT_HW_CACHE_MAX]
322 [PERF_COUNT_HW_CACHE_OP_MAX] 56 [PERF_COUNT_HW_CACHE_OP_MAX]
323 [PERF_COUNT_HW_CACHE_RESULT_MAX]; 57 [PERF_COUNT_HW_CACHE_RESULT_MAX];
324static u64 __read_mostly hw_cache_extra_regs 58u64 __read_mostly hw_cache_extra_regs
325 [PERF_COUNT_HW_CACHE_MAX] 59 [PERF_COUNT_HW_CACHE_MAX]
326 [PERF_COUNT_HW_CACHE_OP_MAX] 60 [PERF_COUNT_HW_CACHE_OP_MAX]
327 [PERF_COUNT_HW_CACHE_RESULT_MAX]; 61 [PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -331,8 +65,7 @@ static u64 __read_mostly hw_cache_extra_regs
331 * Can only be executed on the CPU where the event is active. 65 * Can only be executed on the CPU where the event is active.
332 * Returns the delta events processed. 66 * Returns the delta events processed.
333 */ 67 */
334static u64 68u64 x86_perf_event_update(struct perf_event *event)
335x86_perf_event_update(struct perf_event *event)
336{ 69{
337 struct hw_perf_event *hwc = &event->hw; 70 struct hw_perf_event *hwc = &event->hw;
338 int shift = 64 - x86_pmu.cntval_bits; 71 int shift = 64 - x86_pmu.cntval_bits;
@@ -375,30 +108,6 @@ again:
375 return new_raw_count; 108 return new_raw_count;
376} 109}
377 110
378static inline int x86_pmu_addr_offset(int index)
379{
380 int offset;
381
382 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
383 alternative_io(ASM_NOP2,
384 "shll $1, %%eax",
385 X86_FEATURE_PERFCTR_CORE,
386 "=a" (offset),
387 "a" (index));
388
389 return offset;
390}
391
392static inline unsigned int x86_pmu_config_addr(int index)
393{
394 return x86_pmu.eventsel + x86_pmu_addr_offset(index);
395}
396
397static inline unsigned int x86_pmu_event_addr(int index)
398{
399 return x86_pmu.perfctr + x86_pmu_addr_offset(index);
400}
401
402/* 111/*
403 * Find and validate any extra registers to set up. 112 * Find and validate any extra registers to set up.
404 */ 113 */
@@ -534,9 +243,6 @@ msr_fail:
534 return false; 243 return false;
535} 244}
536 245
537static void reserve_ds_buffers(void);
538static void release_ds_buffers(void);
539
540static void hw_perf_event_destroy(struct perf_event *event) 246static void hw_perf_event_destroy(struct perf_event *event)
541{ 247{
542 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { 248 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
@@ -585,7 +291,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
585 return x86_pmu_extra_regs(val, event); 291 return x86_pmu_extra_regs(val, event);
586} 292}
587 293
588static int x86_setup_perfctr(struct perf_event *event) 294int x86_setup_perfctr(struct perf_event *event)
589{ 295{
590 struct perf_event_attr *attr = &event->attr; 296 struct perf_event_attr *attr = &event->attr;
591 struct hw_perf_event *hwc = &event->hw; 297 struct hw_perf_event *hwc = &event->hw;
@@ -649,7 +355,7 @@ static int x86_setup_perfctr(struct perf_event *event)
649 return 0; 355 return 0;
650} 356}
651 357
652static int x86_pmu_hw_config(struct perf_event *event) 358int x86_pmu_hw_config(struct perf_event *event)
653{ 359{
654 if (event->attr.precise_ip) { 360 if (event->attr.precise_ip) {
655 int precise = 0; 361 int precise = 0;
@@ -725,7 +431,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
725 return x86_pmu.hw_config(event); 431 return x86_pmu.hw_config(event);
726} 432}
727 433
728static void x86_pmu_disable_all(void) 434void x86_pmu_disable_all(void)
729{ 435{
730 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 436 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
731 int idx; 437 int idx;
@@ -760,15 +466,7 @@ static void x86_pmu_disable(struct pmu *pmu)
760 x86_pmu.disable_all(); 466 x86_pmu.disable_all();
761} 467}
762 468
763static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, 469void x86_pmu_enable_all(int added)
764 u64 enable_mask)
765{
766 if (hwc->extra_reg.reg)
767 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
768 wrmsrl(hwc->config_base, hwc->config | enable_mask);
769}
770
771static void x86_pmu_enable_all(int added)
772{ 470{
773 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 471 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
774 int idx; 472 int idx;
@@ -790,7 +488,7 @@ static inline int is_x86_event(struct perf_event *event)
790 return event->pmu == &pmu; 488 return event->pmu == &pmu;
791} 489}
792 490
793static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) 491int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
794{ 492{
795 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; 493 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
796 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 494 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -961,7 +659,6 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
961} 659}
962 660
963static void x86_pmu_start(struct perf_event *event, int flags); 661static void x86_pmu_start(struct perf_event *event, int flags);
964static void x86_pmu_stop(struct perf_event *event, int flags);
965 662
966static void x86_pmu_enable(struct pmu *pmu) 663static void x86_pmu_enable(struct pmu *pmu)
967{ 664{
@@ -1033,21 +730,13 @@ static void x86_pmu_enable(struct pmu *pmu)
1033 x86_pmu.enable_all(added); 730 x86_pmu.enable_all(added);
1034} 731}
1035 732
1036static inline void x86_pmu_disable_event(struct perf_event *event)
1037{
1038 struct hw_perf_event *hwc = &event->hw;
1039
1040 wrmsrl(hwc->config_base, hwc->config);
1041}
1042
1043static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 733static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1044 734
1045/* 735/*
1046 * Set the next IRQ period, based on the hwc->period_left value. 736 * Set the next IRQ period, based on the hwc->period_left value.
1047 * To be called with the event disabled in hw: 737 * To be called with the event disabled in hw:
1048 */ 738 */
1049static int 739int x86_perf_event_set_period(struct perf_event *event)
1050x86_perf_event_set_period(struct perf_event *event)
1051{ 740{
1052 struct hw_perf_event *hwc = &event->hw; 741 struct hw_perf_event *hwc = &event->hw;
1053 s64 left = local64_read(&hwc->period_left); 742 s64 left = local64_read(&hwc->period_left);
@@ -1107,7 +796,7 @@ x86_perf_event_set_period(struct perf_event *event)
1107 return ret; 796 return ret;
1108} 797}
1109 798
1110static void x86_pmu_enable_event(struct perf_event *event) 799void x86_pmu_enable_event(struct perf_event *event)
1111{ 800{
1112 if (__this_cpu_read(cpu_hw_events.enabled)) 801 if (__this_cpu_read(cpu_hw_events.enabled))
1113 __x86_pmu_enable_event(&event->hw, 802 __x86_pmu_enable_event(&event->hw,
@@ -1246,7 +935,7 @@ void perf_event_print_debug(void)
1246 local_irq_restore(flags); 935 local_irq_restore(flags);
1247} 936}
1248 937
1249static void x86_pmu_stop(struct perf_event *event, int flags) 938void x86_pmu_stop(struct perf_event *event, int flags)
1250{ 939{
1251 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 940 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1252 struct hw_perf_event *hwc = &event->hw; 941 struct hw_perf_event *hwc = &event->hw;
@@ -1299,7 +988,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
1299 perf_event_update_userpage(event); 988 perf_event_update_userpage(event);
1300} 989}
1301 990
1302static int x86_pmu_handle_irq(struct pt_regs *regs) 991int x86_pmu_handle_irq(struct pt_regs *regs)
1303{ 992{
1304 struct perf_sample_data data; 993 struct perf_sample_data data;
1305 struct cpu_hw_events *cpuc; 994 struct cpu_hw_events *cpuc;
@@ -1439,30 +1128,8 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 	.priority		= NMI_LOCAL_LOW_PRIOR,
 };
 
-static struct event_constraint unconstrained;
-static struct event_constraint emptyconstraint;
-
-static struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
-	struct event_constraint *c;
-
-	if (x86_pmu.event_constraints) {
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if ((event->hw.config & c->cmask) == c->code)
-				return c;
-		}
-	}
-
-	return &unconstrained;
-}
-
-#include "perf_event_amd.c"
-#include "perf_event_p6.c"
-#include "perf_event_p4.c"
-#include "perf_event_intel_lbr.c"
-#include "perf_event_intel_ds.c"
-#include "perf_event_intel.c"
+struct event_constraint emptyconstraint;
+struct event_constraint unconstrained;
 
 static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
new file mode 100644
index 000000000000..fb330b0a816e
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -0,0 +1,493 @@
1/*
2 * Performance events x86 architecture header
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
11 *
12 * For licencing details see kernel-base/COPYING
13 */
14
15#include <linux/perf_event.h>
16
17/*
18 * | NHM/WSM | SNB |
19 * register -------------------------------
20 * | HT | no HT | HT | no HT |
21 *-----------------------------------------
22 * offcore | core | core | cpu | core |
23 * lbr_sel | core | core | cpu | core |
24 * ld_lat | cpu | core | cpu | core |
25 *-----------------------------------------
26 *
27 * Given that there is a small number of shared regs,
28 * we can pre-allocate their slot in the per-cpu
29 * per-core reg tables.
30 */
31enum extra_reg_type {
32 EXTRA_REG_NONE = -1, /* not used */
33
34 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
35 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
36
37 EXTRA_REG_MAX /* number of entries needed */
38};
39
40struct event_constraint {
41 union {
42 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
43 u64 idxmsk64;
44 };
45 u64 code;
46 u64 cmask;
47 int weight;
48};
49
50struct amd_nb {
51 int nb_id; /* NorthBridge id */
52 int refcnt; /* reference count */
53 struct perf_event *owners[X86_PMC_IDX_MAX];
54 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
55};
56
57/* The maximal number of PEBS events: */
58#define MAX_PEBS_EVENTS 4
59
60/*
61 * A debug store configuration.
62 *
63 * We only support architectures that use 64bit fields.
64 */
65struct debug_store {
66 u64 bts_buffer_base;
67 u64 bts_index;
68 u64 bts_absolute_maximum;
69 u64 bts_interrupt_threshold;
70 u64 pebs_buffer_base;
71 u64 pebs_index;
72 u64 pebs_absolute_maximum;
73 u64 pebs_interrupt_threshold;
74 u64 pebs_event_reset[MAX_PEBS_EVENTS];
75};
76
77/*
78 * Per register state.
79 */
80struct er_account {
81 raw_spinlock_t lock; /* per-core: protect structure */
82 u64 config; /* extra MSR config */
83 u64 reg; /* extra MSR number */
84 atomic_t ref; /* reference count */
85};
86
87/*
88 * Per core/cpu state
89 *
90 * Used to coordinate shared registers between HT threads or
91 * among events on a single PMU.
92 */
93struct intel_shared_regs {
94 struct er_account regs[EXTRA_REG_MAX];
95 int refcnt; /* per-core: #HT threads */
96 unsigned core_id; /* per-core: core id */
97};
98
99#define MAX_LBR_ENTRIES 16
100
101struct cpu_hw_events {
102 /*
103 * Generic x86 PMC bits
104 */
105 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
106 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
107 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
108 int enabled;
109
110 int n_events;
111 int n_added;
112 int n_txn;
113 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
114 u64 tags[X86_PMC_IDX_MAX];
115 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
116
117 unsigned int group_flag;
118
119 /*
120 * Intel DebugStore bits
121 */
122 struct debug_store *ds;
123 u64 pebs_enabled;
124
125 /*
126 * Intel LBR bits
127 */
128 int lbr_users;
129 void *lbr_context;
130 struct perf_branch_stack lbr_stack;
131 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
132
133 /*
134 * manage shared (per-core, per-cpu) registers
135 * used on Intel NHM/WSM/SNB
136 */
137 struct intel_shared_regs *shared_regs;
138
139 /*
140 * AMD specific bits
141 */
142 struct amd_nb *amd_nb;
143
144 void *kfree_on_online;
145};
146
147#define __EVENT_CONSTRAINT(c, n, m, w) {\
148 { .idxmsk64 = (n) }, \
149 .code = (c), \
150 .cmask = (m), \
151 .weight = (w), \
152}
153
154#define EVENT_CONSTRAINT(c, n, m) \
155 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
156
157/*
158 * Constraint on the Event code.
159 */
160#define INTEL_EVENT_CONSTRAINT(c, n) \
161 EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
162
163/*
164 * Constraint on the Event code + UMask + fixed-mask
165 *
166 * filter mask to validate fixed counter events.
167 * the following filters disqualify for fixed counters:
168 * - inv
169 * - edge
170 * - cnt-mask
171 * The other filters are supported by fixed counters.
172 * The any-thread option is supported starting with v3.
173 */
174#define FIXED_EVENT_CONSTRAINT(c, n) \
175 EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
176
177/*
178 * Constraint on the Event code + UMask
179 */
180#define INTEL_UEVENT_CONSTRAINT(c, n) \
181 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
182
183#define EVENT_CONSTRAINT_END \
184 EVENT_CONSTRAINT(0, 0, 0)
185
186#define for_each_event_constraint(e, c) \
187 for ((e) = (c); (e)->weight; (e)++)
188
189/*
190 * Extra registers for specific events.
191 *
192 * Some events need large masks and require external MSRs.
193 * Those extra MSRs end up being shared for all events on
194 * a PMU and sometimes between PMU of sibling HT threads.
195 * In either case, the kernel needs to handle conflicting
196 * accesses to those extra, shared, regs. The data structure
197 * to manage those registers is stored in cpu_hw_event.
198 */
199struct extra_reg {
200 unsigned int event;
201 unsigned int msr;
202 u64 config_mask;
203 u64 valid_mask;
204 int idx; /* per_xxx->regs[] reg index */
205};
206
207#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
208 .event = (e), \
209 .msr = (ms), \
210 .config_mask = (m), \
211 .valid_mask = (vm), \
212 .idx = EXTRA_REG_##i \
213 }
214
215#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
216 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
217
218#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
219
220union perf_capabilities {
221 struct {
222 u64 lbr_format:6;
223 u64 pebs_trap:1;
224 u64 pebs_arch_reg:1;
225 u64 pebs_format:4;
226 u64 smm_freeze:1;
227 };
228 u64 capabilities;
229};
230
231/*
232 * struct x86_pmu - generic x86 pmu
233 */
234struct x86_pmu {
235 /*
236 * Generic x86 PMC bits
237 */
238 const char *name;
239 int version;
240 int (*handle_irq)(struct pt_regs *);
241 void (*disable_all)(void);
242 void (*enable_all)(int added);
243 void (*enable)(struct perf_event *);
244 void (*disable)(struct perf_event *);
245 int (*hw_config)(struct perf_event *event);
246 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
247 unsigned eventsel;
248 unsigned perfctr;
249 u64 (*event_map)(int);
250 int max_events;
251 int num_counters;
252 int num_counters_fixed;
253 int cntval_bits;
254 u64 cntval_mask;
255 int apic;
256 u64 max_period;
257 struct event_constraint *
258 (*get_event_constraints)(struct cpu_hw_events *cpuc,
259 struct perf_event *event);
260
261 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
262 struct perf_event *event);
263 struct event_constraint *event_constraints;
264 void (*quirks)(void);
265 int perfctr_second_write;
266
267 int (*cpu_prepare)(int cpu);
268 void (*cpu_starting)(int cpu);
269 void (*cpu_dying)(int cpu);
270 void (*cpu_dead)(int cpu);
271
272 /*
273 * Intel Arch Perfmon v2+
274 */
275 u64 intel_ctrl;
276 union perf_capabilities intel_cap;
277
278 /*
279 * Intel DebugStore bits
280 */
281 int bts, pebs;
282 int bts_active, pebs_active;
283 int pebs_record_size;
284 void (*drain_pebs)(struct pt_regs *regs);
285 struct event_constraint *pebs_constraints;
286
287 /*
288 * Intel LBR
289 */
290 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
291 int lbr_nr; /* hardware stack size */
292
293 /*
294 * Extra registers for events
295 */
296 struct extra_reg *extra_regs;
297 unsigned int er_flags;
298};
299
300#define ERF_NO_HT_SHARING 1
301#define ERF_HAS_RSP_1 2
302
303extern struct x86_pmu x86_pmu __read_mostly;
304
305DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
306
307int x86_perf_event_set_period(struct perf_event *event);
308
309/*
310 * Generalized hw caching related hw_event table, filled
311 * in on a per model basis. A value of 0 means
312 * 'not supported', -1 means 'hw_event makes no sense on
313 * this CPU', any other value means the raw hw_event
314 * ID.
315 */
316
317#define C(x) PERF_COUNT_HW_CACHE_##x
318
319extern u64 __read_mostly hw_cache_event_ids
320 [PERF_COUNT_HW_CACHE_MAX]
321 [PERF_COUNT_HW_CACHE_OP_MAX]
322 [PERF_COUNT_HW_CACHE_RESULT_MAX];
323extern u64 __read_mostly hw_cache_extra_regs
324 [PERF_COUNT_HW_CACHE_MAX]
325 [PERF_COUNT_HW_CACHE_OP_MAX]
326 [PERF_COUNT_HW_CACHE_RESULT_MAX];
327
328u64 x86_perf_event_update(struct perf_event *event);
329
330static inline int x86_pmu_addr_offset(int index)
331{
332 int offset;
333
334 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
335 alternative_io(ASM_NOP2,
336 "shll $1, %%eax",
337 X86_FEATURE_PERFCTR_CORE,
338 "=a" (offset),
339 "a" (index));
340
341 return offset;
342}
343
344static inline unsigned int x86_pmu_config_addr(int index)
345{
346 return x86_pmu.eventsel + x86_pmu_addr_offset(index);
347}
348
349static inline unsigned int x86_pmu_event_addr(int index)
350{
351 return x86_pmu.perfctr + x86_pmu_addr_offset(index);
352}
353
354int x86_setup_perfctr(struct perf_event *event);
355
356int x86_pmu_hw_config(struct perf_event *event);
357
358void x86_pmu_disable_all(void);
359
360static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
361 u64 enable_mask)
362{
363 if (hwc->extra_reg.reg)
364 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
365 wrmsrl(hwc->config_base, hwc->config | enable_mask);
366}
367
368void x86_pmu_enable_all(int added);
369
370int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
371
372void x86_pmu_stop(struct perf_event *event, int flags);
373
374static inline void x86_pmu_disable_event(struct perf_event *event)
375{
376 struct hw_perf_event *hwc = &event->hw;
377
378 wrmsrl(hwc->config_base, hwc->config);
379}
380
381void x86_pmu_enable_event(struct perf_event *event);
382
383int x86_pmu_handle_irq(struct pt_regs *regs);
384
385extern struct event_constraint emptyconstraint;
386
387extern struct event_constraint unconstrained;
388
389#ifdef CONFIG_CPU_SUP_AMD
390
391int amd_pmu_init(void);
392
393#else /* CONFIG_CPU_SUP_AMD */
394
395static inline int amd_pmu_init(void)
396{
397 return 0;
398}
399
400#endif /* CONFIG_CPU_SUP_AMD */
401
402#ifdef CONFIG_CPU_SUP_INTEL
403
404int intel_pmu_save_and_restart(struct perf_event *event);
405
406struct event_constraint *
407x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);
408
409struct intel_shared_regs *allocate_shared_regs(int cpu);
410
411int intel_pmu_init(void);
412
413void init_debug_store_on_cpu(int cpu);
414
415void fini_debug_store_on_cpu(int cpu);
416
417void release_ds_buffers(void);
418
419void reserve_ds_buffers(void);
420
421extern struct event_constraint bts_constraint;
422
423void intel_pmu_enable_bts(u64 config);
424
425void intel_pmu_disable_bts(void);
426
427int intel_pmu_drain_bts_buffer(void);
428
429extern struct event_constraint intel_core2_pebs_event_constraints[];
430
431extern struct event_constraint intel_atom_pebs_event_constraints[];
432
433extern struct event_constraint intel_nehalem_pebs_event_constraints[];
434
435extern struct event_constraint intel_westmere_pebs_event_constraints[];
436
437extern struct event_constraint intel_snb_pebs_event_constraints[];
438
439struct event_constraint *intel_pebs_constraints(struct perf_event *event);
440
441void intel_pmu_pebs_enable(struct perf_event *event);
442
443void intel_pmu_pebs_disable(struct perf_event *event);
444
445void intel_pmu_pebs_enable_all(void);
446
447void intel_pmu_pebs_disable_all(void);
448
449void intel_ds_init(void);
450
451void intel_pmu_lbr_reset(void);
452
453void intel_pmu_lbr_enable(struct perf_event *event);
454
455void intel_pmu_lbr_disable(struct perf_event *event);
456
457void intel_pmu_lbr_enable_all(void);
458
459void intel_pmu_lbr_disable_all(void);
460
461void intel_pmu_lbr_read(void);
462
463void intel_pmu_lbr_init_core(void);
464
465void intel_pmu_lbr_init_nhm(void);
466
467void intel_pmu_lbr_init_atom(void);
468
469int p4_pmu_init(void);
470
471int p6_pmu_init(void);
472
473#else /* CONFIG_CPU_SUP_INTEL */
474
475static inline void reserve_ds_buffers(void)
476{
477}
478
479static inline void release_ds_buffers(void)
480{
481}
482
483static inline int intel_pmu_init(void)
484{
485 return 0;
486}
487
488static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
489{
490 return NULL;
491}
492
493#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index ee9436c3e5d6..ed334c889265 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,9 @@
-#ifdef CONFIG_CPU_SUP_AMD
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include "perf_event.h"
 
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -573,7 +578,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
573#endif 578#endif
574}; 579};
575 580
576static __init int amd_pmu_init(void) 581__init int amd_pmu_init(void)
577{ 582{
578 /* Performance-monitoring supported from K7 and later: */ 583 /* Performance-monitoring supported from K7 and later: */
579 if (boot_cpu_data.x86 < 6) 584 if (boot_cpu_data.x86 < 6)
@@ -602,12 +607,3 @@ static __init int amd_pmu_init(void)
 
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_AMD */
-
-static int amd_pmu_init(void)
-{
-	return 0;
-}
-
-#endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3751494e70f5..61fa35750b98 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,16 +1,19 @@
-#ifdef CONFIG_CPU_SUP_INTEL
-
 /*
  * Per core/cpu state
  *
  * Used to coordinate shared registers between HT threads or
  * among events on a single PMU.
  */
-struct intel_shared_regs {
-	struct er_account regs[EXTRA_REG_MAX];
-	int refcnt;		/* per-core: #HT threads */
-	unsigned core_id;	/* per-core: core id */
-};
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"
 
 /*
  * Intel PerfMon, used on Core and later.
@@ -945,7 +948,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
945 * Save and restart an expired event. Called by NMI contexts, 948 * Save and restart an expired event. Called by NMI contexts,
946 * so it has to be careful about preempting normal event ops: 949 * so it has to be careful about preempting normal event ops:
947 */ 950 */
948static int intel_pmu_save_and_restart(struct perf_event *event) 951int intel_pmu_save_and_restart(struct perf_event *event)
949{ 952{
950 x86_perf_event_update(event); 953 x86_perf_event_update(event);
951 return x86_perf_event_set_period(event); 954 return x86_perf_event_set_period(event);
@@ -1197,6 +1200,21 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
 	return c;
 }
 
+struct event_constraint *
+x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if ((event->hw.config & c->cmask) == c->code)
+				return c;
+		}
+	}
+
+	return &unconstrained;
+}
+
 static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -1309,7 +1327,7 @@ static __initconst const struct x86_pmu core_pmu = {
1309 .event_constraints = intel_core_event_constraints, 1327 .event_constraints = intel_core_event_constraints,
1310}; 1328};
1311 1329
1312static struct intel_shared_regs *allocate_shared_regs(int cpu) 1330struct intel_shared_regs *allocate_shared_regs(int cpu)
1313{ 1331{
1314 struct intel_shared_regs *regs; 1332 struct intel_shared_regs *regs;
1315 int i; 1333 int i;
@@ -1441,7 +1459,7 @@ static void intel_clovertown_quirks(void)
1441 x86_pmu.pebs_constraints = NULL; 1459 x86_pmu.pebs_constraints = NULL;
1442} 1460}
1443 1461
1444static __init int intel_pmu_init(void) 1462__init int intel_pmu_init(void)
1445{ 1463{
1446 union cpuid10_edx edx; 1464 union cpuid10_edx edx;
1447 union cpuid10_eax eax; 1465 union cpuid10_eax eax;
@@ -1597,7 +1615,7 @@ static __init int intel_pmu_init(void)
1597 intel_pmu_lbr_init_nhm(); 1615 intel_pmu_lbr_init_nhm();
1598 1616
1599 x86_pmu.event_constraints = intel_snb_event_constraints; 1617 x86_pmu.event_constraints = intel_snb_event_constraints;
1600 x86_pmu.pebs_constraints = intel_snb_pebs_events; 1618 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
1601 x86_pmu.extra_regs = intel_snb_extra_regs; 1619 x86_pmu.extra_regs = intel_snb_extra_regs;
1602 /* all extra regs are per-cpu when HT is on */ 1620 /* all extra regs are per-cpu when HT is on */
1603 x86_pmu.er_flags |= ERF_HAS_RSP_1; 1621 x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -1628,16 +1646,3 @@ static __init int intel_pmu_init(void)
 	}
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_INTEL */
-
-static int intel_pmu_init(void)
-{
-	return 0;
-}
-
-static struct intel_shared_regs *allocate_shared_regs(int cpu)
-{
-	return NULL;
-}
-#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 1b1ef3addcfd..c0d238f49db8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -1,7 +1,10 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/slab.h>
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		4
+#include <asm/perf_event.h>
+
+#include "perf_event.h"
 
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE		24
@@ -37,24 +40,7 @@ struct pebs_record_nhm {
 	u64 status, dla, dse, lat;
 };
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-	u64	bts_buffer_base;
-	u64	bts_index;
-	u64	bts_absolute_maximum;
-	u64	bts_interrupt_threshold;
-	u64	pebs_buffer_base;
-	u64	pebs_index;
-	u64	pebs_absolute_maximum;
-	u64	pebs_interrupt_threshold;
-	u64	pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
-static void init_debug_store_on_cpu(int cpu)
+void init_debug_store_on_cpu(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
 
@@ -66,7 +52,7 @@ static void init_debug_store_on_cpu(int cpu)
66 (u32)((u64)(unsigned long)ds >> 32)); 52 (u32)((u64)(unsigned long)ds >> 32));
67} 53}
68 54
69static void fini_debug_store_on_cpu(int cpu) 55void fini_debug_store_on_cpu(int cpu)
70{ 56{
71 if (!per_cpu(cpu_hw_events, cpu).ds) 57 if (!per_cpu(cpu_hw_events, cpu).ds)
72 return; 58 return;
@@ -175,7 +161,7 @@ static void release_ds_buffer(int cpu)
175 kfree(ds); 161 kfree(ds);
176} 162}
177 163
178static void release_ds_buffers(void) 164void release_ds_buffers(void)
179{ 165{
180 int cpu; 166 int cpu;
181 167
@@ -194,7 +180,7 @@ static void release_ds_buffers(void)
194 put_online_cpus(); 180 put_online_cpus();
195} 181}
196 182
197static void reserve_ds_buffers(void) 183void reserve_ds_buffers(void)
198{ 184{
199 int bts_err = 0, pebs_err = 0; 185 int bts_err = 0, pebs_err = 0;
200 int cpu; 186 int cpu;
@@ -260,10 +246,10 @@ static void reserve_ds_buffers(void)
260 * BTS 246 * BTS
261 */ 247 */
262 248
263static struct event_constraint bts_constraint = 249struct event_constraint bts_constraint =
264 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); 250 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
265 251
266static void intel_pmu_enable_bts(u64 config) 252void intel_pmu_enable_bts(u64 config)
267{ 253{
268 unsigned long debugctlmsr; 254 unsigned long debugctlmsr;
269 255
@@ -282,7 +268,7 @@ static void intel_pmu_enable_bts(u64 config)
282 update_debugctlmsr(debugctlmsr); 268 update_debugctlmsr(debugctlmsr);
283} 269}
284 270
285static void intel_pmu_disable_bts(void) 271void intel_pmu_disable_bts(void)
286{ 272{
287 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 273 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
288 unsigned long debugctlmsr; 274 unsigned long debugctlmsr;
@@ -299,7 +285,7 @@ static void intel_pmu_disable_bts(void)
299 update_debugctlmsr(debugctlmsr); 285 update_debugctlmsr(debugctlmsr);
300} 286}
301 287
302static int intel_pmu_drain_bts_buffer(void) 288int intel_pmu_drain_bts_buffer(void)
303{ 289{
304 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 290 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
305 struct debug_store *ds = cpuc->ds; 291 struct debug_store *ds = cpuc->ds;
@@ -361,7 +347,7 @@ static int intel_pmu_drain_bts_buffer(void)
361/* 347/*
362 * PEBS 348 * PEBS
363 */ 349 */
364static struct event_constraint intel_core2_pebs_event_constraints[] = { 350struct event_constraint intel_core2_pebs_event_constraints[] = {
365 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 351 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
366 INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ 352 INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
367 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ 353 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
@@ -370,14 +356,14 @@ static struct event_constraint intel_core2_pebs_event_constraints[] = {
370 EVENT_CONSTRAINT_END 356 EVENT_CONSTRAINT_END
371}; 357};
372 358
373static struct event_constraint intel_atom_pebs_event_constraints[] = { 359struct event_constraint intel_atom_pebs_event_constraints[] = {
374 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 360 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
375 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ 361 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
376 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ 362 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
377 EVENT_CONSTRAINT_END 363 EVENT_CONSTRAINT_END
378}; 364};
379 365
380static struct event_constraint intel_nehalem_pebs_event_constraints[] = { 366struct event_constraint intel_nehalem_pebs_event_constraints[] = {
381 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 367 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
382 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 368 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
383 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 369 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -392,7 +378,7 @@ static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
392 EVENT_CONSTRAINT_END 378 EVENT_CONSTRAINT_END
393}; 379};
394 380
395static struct event_constraint intel_westmere_pebs_event_constraints[] = { 381struct event_constraint intel_westmere_pebs_event_constraints[] = {
396 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 382 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
397 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 383 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
398 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 384 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -407,7 +393,7 @@ static struct event_constraint intel_westmere_pebs_event_constraints[] = {
407 EVENT_CONSTRAINT_END 393 EVENT_CONSTRAINT_END
408}; 394};
409 395
410static struct event_constraint intel_snb_pebs_events[] = { 396struct event_constraint intel_snb_pebs_event_constraints[] = {
411 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ 397 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
412 INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ 398 INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
413 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ 399 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
@@ -428,8 +414,7 @@ static struct event_constraint intel_snb_pebs_events[] = {
428 EVENT_CONSTRAINT_END 414 EVENT_CONSTRAINT_END
429}; 415};
430 416
431static struct event_constraint * 417struct event_constraint *intel_pebs_constraints(struct perf_event *event)
432intel_pebs_constraints(struct perf_event *event)
433{ 418{
434 struct event_constraint *c; 419 struct event_constraint *c;
435 420
@@ -446,7 +431,7 @@ intel_pebs_constraints(struct perf_event *event)
446 return &emptyconstraint; 431 return &emptyconstraint;
447} 432}
448 433
449static void intel_pmu_pebs_enable(struct perf_event *event) 434void intel_pmu_pebs_enable(struct perf_event *event)
450{ 435{
451 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 436 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
452 struct hw_perf_event *hwc = &event->hw; 437 struct hw_perf_event *hwc = &event->hw;
@@ -460,7 +445,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event)
460 intel_pmu_lbr_enable(event); 445 intel_pmu_lbr_enable(event);
461} 446}
462 447
463static void intel_pmu_pebs_disable(struct perf_event *event) 448void intel_pmu_pebs_disable(struct perf_event *event)
464{ 449{
465 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 450 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
466 struct hw_perf_event *hwc = &event->hw; 451 struct hw_perf_event *hwc = &event->hw;
@@ -475,7 +460,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event)
475 intel_pmu_lbr_disable(event); 460 intel_pmu_lbr_disable(event);
476} 461}
477 462
478static void intel_pmu_pebs_enable_all(void) 463void intel_pmu_pebs_enable_all(void)
479{ 464{
480 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 465 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
481 466
@@ -483,7 +468,7 @@ static void intel_pmu_pebs_enable_all(void)
483 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 468 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
484} 469}
485 470
486static void intel_pmu_pebs_disable_all(void) 471void intel_pmu_pebs_disable_all(void)
487{ 472{
488 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 473 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
489 474
@@ -576,8 +561,6 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
576 return 0; 561 return 0;
577} 562}
578 563
579static int intel_pmu_save_and_restart(struct perf_event *event);
580
581static void __intel_pmu_pebs_event(struct perf_event *event, 564static void __intel_pmu_pebs_event(struct perf_event *event,
582 struct pt_regs *iregs, void *__pebs) 565 struct pt_regs *iregs, void *__pebs)
583{ 566{
@@ -716,7 +699,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
716 * BTS, PEBS probe and setup 699 * BTS, PEBS probe and setup
717 */ 700 */
718 701
719static void intel_ds_init(void) 702void intel_ds_init(void)
720{ 703{
721 /* 704 /*
722 * No support for 32bit formats 705 * No support for 32bit formats
@@ -749,15 +732,3 @@ static void intel_ds_init(void)
 		}
 	}
 }
-
-#else /* CONFIG_CPU_SUP_INTEL */
-
-static void reserve_ds_buffers(void)
-{
-}
-
-static void release_ds_buffers(void)
-{
-}
-
-#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d202c1bece1a..3fab3de3ce96 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -1,4 +1,10 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include <asm/perf_event.h>
+#include <asm/msr.h>
+
+#include "perf_event.h"
 
 enum {
 	LBR_FORMAT_32		= 0x00,
@@ -48,7 +54,7 @@ static void intel_pmu_lbr_reset_64(void)
48 } 54 }
49} 55}
50 56
51static void intel_pmu_lbr_reset(void) 57void intel_pmu_lbr_reset(void)
52{ 58{
53 if (!x86_pmu.lbr_nr) 59 if (!x86_pmu.lbr_nr)
54 return; 60 return;
@@ -59,7 +65,7 @@ static void intel_pmu_lbr_reset(void)
59 intel_pmu_lbr_reset_64(); 65 intel_pmu_lbr_reset_64();
60} 66}
61 67
62static void intel_pmu_lbr_enable(struct perf_event *event) 68void intel_pmu_lbr_enable(struct perf_event *event)
63{ 69{
64 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 70 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
65 71
@@ -81,7 +87,7 @@ static void intel_pmu_lbr_enable(struct perf_event *event)
81 cpuc->lbr_users++; 87 cpuc->lbr_users++;
82} 88}
83 89
84static void intel_pmu_lbr_disable(struct perf_event *event) 90void intel_pmu_lbr_disable(struct perf_event *event)
85{ 91{
86 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 92 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
87 93
@@ -95,7 +101,7 @@ static void intel_pmu_lbr_disable(struct perf_event *event)
95 __intel_pmu_lbr_disable(); 101 __intel_pmu_lbr_disable();
96} 102}
97 103
98static void intel_pmu_lbr_enable_all(void) 104void intel_pmu_lbr_enable_all(void)
99{ 105{
100 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 106 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
101 107
@@ -103,7 +109,7 @@ static void intel_pmu_lbr_enable_all(void)
103 __intel_pmu_lbr_enable(); 109 __intel_pmu_lbr_enable();
104} 110}
105 111
106static void intel_pmu_lbr_disable_all(void) 112void intel_pmu_lbr_disable_all(void)
107{ 113{
108 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 114 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
109 115
@@ -178,7 +184,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
178 cpuc->lbr_stack.nr = i; 184 cpuc->lbr_stack.nr = i;
179} 185}
180 186
181static void intel_pmu_lbr_read(void) 187void intel_pmu_lbr_read(void)
182{ 188{
183 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 189 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
184 190
@@ -191,7 +197,7 @@ static void intel_pmu_lbr_read(void)
191 intel_pmu_lbr_read_64(cpuc); 197 intel_pmu_lbr_read_64(cpuc);
192} 198}
193 199
194static void intel_pmu_lbr_init_core(void) 200void intel_pmu_lbr_init_core(void)
195{ 201{
196 x86_pmu.lbr_nr = 4; 202 x86_pmu.lbr_nr = 4;
197 x86_pmu.lbr_tos = 0x01c9; 203 x86_pmu.lbr_tos = 0x01c9;
@@ -199,7 +205,7 @@ static void intel_pmu_lbr_init_core(void)
199 x86_pmu.lbr_to = 0x60; 205 x86_pmu.lbr_to = 0x60;
200} 206}
201 207
202static void intel_pmu_lbr_init_nhm(void) 208void intel_pmu_lbr_init_nhm(void)
203{ 209{
204 x86_pmu.lbr_nr = 16; 210 x86_pmu.lbr_nr = 16;
205 x86_pmu.lbr_tos = 0x01c9; 211 x86_pmu.lbr_tos = 0x01c9;
@@ -207,12 +213,10 @@ static void intel_pmu_lbr_init_nhm(void)
 	x86_pmu.lbr_to		= 0x6c0;
 }
 
-static void intel_pmu_lbr_init_atom(void)
+void intel_pmu_lbr_init_atom(void)
 {
 	x86_pmu.lbr_nr		= 8;
 	x86_pmu.lbr_tos		= 0x01c9;
 	x86_pmu.lbr_from	= 0x40;
 	x86_pmu.lbr_to		= 0x60;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7809d2bcb209..492bf1358a7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -7,9 +7,13 @@
  * For licencing details see kernel-base/COPYING
  */
 
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
 
 #include <asm/perf_event_p4.h>
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"
 
 #define P4_CNTR_LIMIT 3
 /*
@@ -1303,7 +1307,7 @@ static __initconst const struct x86_pmu p4_pmu = {
1303 .perfctr_second_write = 1, 1307 .perfctr_second_write = 1,
1304}; 1308};
1305 1309
1306static __init int p4_pmu_init(void) 1310__init int p4_pmu_init(void)
1307{ 1311{
1308 unsigned int low, high; 1312 unsigned int low, high;
1309 1313
@@ -1326,5 +1330,3 @@ static __init int p4_pmu_init(void)
 
 	return 0;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 20c097e33860..c7181befecde 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -1,4 +1,7 @@
-#ifdef CONFIG_CPU_SUP_INTEL
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include "perf_event.h"
 
 /*
  * Not sure about some of these
@@ -114,7 +117,7 @@ static __initconst const struct x86_pmu p6_pmu = {
114 .event_constraints = p6_event_constraints, 117 .event_constraints = p6_event_constraints,
115}; 118};
116 119
117static __init int p6_pmu_init(void) 120__init int p6_pmu_init(void)
118{ 121{
119 switch (boot_cpu_data.x86_model) { 122 switch (boot_cpu_data.x86_model) {
120 case 1: 123 case 1:
@@ -138,5 +141,3 @@ static __init int p6_pmu_init(void)
 
 	return 0;
 }
-
-#endif /* CONFIG_CPU_SUP_INTEL */