Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/Makefile                        7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c            20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c                   23
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c             10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c                  442
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h                  505
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c               38
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c          294
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c            146
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c          79
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c         28
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c                10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c                 9
13 files changed, 1053 insertions(+), 558 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6042981d030..fe6eb197f84 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,10 +28,15 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
 
 obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 
+ifdef CONFIG_PERF_EVENTS
+obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+endif
+
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
 
-obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o
+obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o perf_event_amd_ibs.o
 
 quiet_cmd_mkcapflags = MKCAP   $@
       cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 0ed633c5048..6199232161c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
 
 static cpumask_var_t mce_inject_cpumask;
 
-static int mce_raise_notify(struct notifier_block *self,
-			    unsigned long val, void *data)
+static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
-	struct die_args *args = (struct die_args *)data;
 	int cpu = smp_processor_id();
 	struct mce *m = &__get_cpu_var(injectm);
-	if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
-		return NOTIFY_DONE;
+	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
+		return NMI_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
 	if (m->inject_flags & MCJ_EXCEPTION)
-		raise_exception(m, args->regs);
+		raise_exception(m, regs);
 	else if (m->status)
 		raise_poll(m);
-	return NOTIFY_STOP;
+	return NMI_HANDLED;
 }
 
-static struct notifier_block mce_raise_nb = {
-	.notifier_call = mce_raise_notify,
-	.priority = NMI_LOCAL_NORMAL_PRIOR,
-};
-
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
@@ -216,7 +209,8 @@ static int inject_init(void)
 		return -ENOMEM;
 	printk(KERN_INFO "Machine check injector initialized\n");
 	mce_chrdev_ops.write = mce_write;
-	register_die_notifier(&mce_raise_nb);
+	register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
+			"mce_notify");
 	return 0;
 }
 
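
The conversion above moves mce-inject off the generic die_notifier chain and onto the dedicated NMI handler list introduced alongside this series: handlers now take (cmd, regs) directly, return NMI_HANDLED or NMI_DONE instead of NOTIFY_STOP/NOTIFY_DONE, and are registered per NMI type (NMI_LOCAL here) under a name used for identification and later unregistration. A minimal sketch of the same registration pattern for a hypothetical module, not taken from this tree:

    #include <linux/module.h>
    #include <linux/atomic.h>
    #include <asm/nmi.h>

    static atomic_t example_pending;	/* set elsewhere before raising the NMI */

    static int example_nmi_handler(unsigned int cmd, struct pt_regs *regs)
    {
            /* Not ours: let the next handler on the NMI_LOCAL list look at it. */
            if (!atomic_xchg(&example_pending, 0))
                    return NMI_DONE;

            /* ... handle the self-raised NMI here ... */
            return NMI_HANDLED;
    }

    static int __init example_init(void)
    {
            return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0, "example");
    }

    static void __exit example_exit(void)
    {
            unregister_nmi_handler(NMI_LOCAL, "example");
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
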
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 08363b04212..fce51ad1f36 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	percpu_inc(mce_exception_count);
 
-	if (notify_die(DIE_NMI, "machine check", regs, error_code,
-			   18, SIGKILL) == NOTIFY_STOP)
-		goto out;
 	if (!banks)
 		goto out;
 
@@ -1140,6 +1137,15 @@ static void mce_start_timer(unsigned long data)
 	add_timer_on(t, smp_processor_id());
 }
 
+/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+static void mce_timer_delete_all(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		del_timer_sync(&per_cpu(mce_timer, cpu));
+}
+
 static void mce_do_trigger(struct work_struct *work)
 {
 	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
@@ -1750,7 +1756,6 @@ static struct syscore_ops mce_syscore_ops = {
 
 static void mce_cpu_restart(void *data)
 {
-	del_timer_sync(&__get_cpu_var(mce_timer));
 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
@@ -1760,16 +1765,15 @@ static void mce_cpu_restart(void *data)
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
+	mce_timer_delete_all();
 	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
 /* Toggle features for corrected errors */
-static void mce_disable_ce(void *all)
+static void mce_disable_cmci(void *data)
 {
 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
-	if (all)
-		del_timer_sync(&__get_cpu_var(mce_timer));
 	cmci_clear();
 }
 
@@ -1852,7 +1856,8 @@ static ssize_t set_ignore_ce(struct sys_device *s,
 	if (mce_ignore_ce ^ !!new) {
 		if (new) {
 			/* disable ce features */
-			on_each_cpu(mce_disable_ce, (void *)1, 1);
+			mce_timer_delete_all();
+			on_each_cpu(mce_disable_cmci, NULL, 1);
 			mce_ignore_ce = 1;
 		} else {
 			/* enable ce features */
@@ -1875,7 +1880,7 @@ static ssize_t set_cmci_disabled(struct sys_device *s,
 	if (mce_cmci_disabled ^ !!new) {
 		if (new) {
 			/* disable cmci */
-			on_each_cpu(mce_disable_ce, NULL, 1);
+			on_each_cpu(mce_disable_cmci, NULL, 1);
 			mce_cmci_disabled = 1;
 		} else {
 			/* enable cmci */
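
The new mce_timer_delete_all() exists because mce_cpu_restart() and the old mce_disable_ce() used to call del_timer_sync() from inside an on_each_cpu() IPI, i.e. from interrupt context, where del_timer_sync() can deadlock waiting on a timer callback running on the interrupted CPU. The rework deletes every per-CPU timer from process context first and only then sends the IPIs. A small sketch of that pattern in isolation (names are illustrative, not from the tree):

    #include <linux/timer.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(struct timer_list, example_timer);

    static void example_reconfigure_one(void *unused)
    {
            /* Runs in IPI (interrupt) context: no del_timer_sync() here. */
            /* ... reprogram per-CPU hardware state ... */
    }

    static void example_reconfigure_all(void)
    {
            int cpu;

            /* Process context: safe to wait for running timer callbacks. */
            for_each_online_cpu(cpu)
                    del_timer_sync(&per_cpu(example_timer, cpu));

            /* Now poke every CPU; the callback stays IPI-safe. */
            on_each_cpu(example_reconfigure_one, NULL, 1);
    }
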
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 8694ef56459..38e49bc95ff 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_SPINLOCK(cmci_discover_lock);
+static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD		1
 
@@ -85,7 +85,7 @@ static void cmci_discover(int banks, int boot)
 	int hdr = 0;
 	int i;
 
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 
@@ -116,7 +116,7 @@ static void cmci_discover(int banks, int boot)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (hdr)
 		printk(KERN_CONT "\n");
 }
@@ -150,7 +150,7 @@ void cmci_clear(void)
 
 	if (!cmci_supported(&banks))
 		return;
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
 			continue;
@@ -160,7 +160,7 @@ void cmci_clear(void)
 		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
 		__clear_bit(i, __get_cpu_var(mce_banks_owned));
 	}
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 /*
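
The spinlock-to-raw-spinlock conversion matters because cmci_discover()/cmci_clear() run with interrupts disabled on CMCI/MCE paths: on PREEMPT_RT an ordinary spinlock_t turns into a sleeping lock, which is not allowed there, while raw_spinlock_t keeps its spinning semantics everywhere. A small illustrative sketch of the idiom (names are made up):

    #include <linux/spinlock.h>

    /*
     * raw_spinlock_t spins even on PREEMPT_RT; use it when the critical
     * section can be entered with interrupts disabled and must not sleep.
     */
    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_critical_section(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* ... touch CMCI-style shared bank ownership state ... */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }
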
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index cfa62ec090e..640891014b2 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
32#include <asm/smp.h> 32#include <asm/smp.h>
33#include <asm/alternative.h> 33#include <asm/alternative.h>
34 34
35#include "perf_event.h"
36
35#if 0 37#if 0
36#undef wrmsrl 38#undef wrmsrl
37#define wrmsrl(msr, val) \ 39#define wrmsrl(msr, val) \
@@ -43,283 +45,17 @@ do { \
43} while (0) 45} while (0)
44#endif 46#endif
45 47
46/* 48struct x86_pmu x86_pmu __read_mostly;
47 * | NHM/WSM | SNB |
48 * register -------------------------------
49 * | HT | no HT | HT | no HT |
50 *-----------------------------------------
51 * offcore | core | core | cpu | core |
52 * lbr_sel | core | core | cpu | core |
53 * ld_lat | cpu | core | cpu | core |
54 *-----------------------------------------
55 *
56 * Given that there is a small number of shared regs,
57 * we can pre-allocate their slot in the per-cpu
58 * per-core reg tables.
59 */
60enum extra_reg_type {
61 EXTRA_REG_NONE = -1, /* not used */
62
63 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
64 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
65
66 EXTRA_REG_MAX /* number of entries needed */
67};
68
69struct event_constraint {
70 union {
71 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
72 u64 idxmsk64;
73 };
74 u64 code;
75 u64 cmask;
76 int weight;
77};
78
79struct amd_nb {
80 int nb_id; /* NorthBridge id */
81 int refcnt; /* reference count */
82 struct perf_event *owners[X86_PMC_IDX_MAX];
83 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
84};
85
86struct intel_percore;
87
88#define MAX_LBR_ENTRIES 16
89
90struct cpu_hw_events {
91 /*
92 * Generic x86 PMC bits
93 */
94 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
95 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
96 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
97 int enabled;
98
99 int n_events;
100 int n_added;
101 int n_txn;
102 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
103 u64 tags[X86_PMC_IDX_MAX];
104 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
105
106 unsigned int group_flag;
107
108 /*
109 * Intel DebugStore bits
110 */
111 struct debug_store *ds;
112 u64 pebs_enabled;
113
114 /*
115 * Intel LBR bits
116 */
117 int lbr_users;
118 void *lbr_context;
119 struct perf_branch_stack lbr_stack;
120 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
121
122 /*
123 * manage shared (per-core, per-cpu) registers
124 * used on Intel NHM/WSM/SNB
125 */
126 struct intel_shared_regs *shared_regs;
127
128 /*
129 * AMD specific bits
130 */
131 struct amd_nb *amd_nb;
132};
133
134#define __EVENT_CONSTRAINT(c, n, m, w) {\
135 { .idxmsk64 = (n) }, \
136 .code = (c), \
137 .cmask = (m), \
138 .weight = (w), \
139}
140
141#define EVENT_CONSTRAINT(c, n, m) \
142 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
143
144/*
145 * Constraint on the Event code.
146 */
147#define INTEL_EVENT_CONSTRAINT(c, n) \
148 EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
149
150/*
151 * Constraint on the Event code + UMask + fixed-mask
152 *
153 * filter mask to validate fixed counter events.
154 * the following filters disqualify for fixed counters:
155 * - inv
156 * - edge
157 * - cnt-mask
158 * The other filters are supported by fixed counters.
159 * The any-thread option is supported starting with v3.
160 */
161#define FIXED_EVENT_CONSTRAINT(c, n) \
162 EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
163
164/*
165 * Constraint on the Event code + UMask
166 */
167#define INTEL_UEVENT_CONSTRAINT(c, n) \
168 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
169
170#define EVENT_CONSTRAINT_END \
171 EVENT_CONSTRAINT(0, 0, 0)
172
173#define for_each_event_constraint(e, c) \
174 for ((e) = (c); (e)->weight; (e)++)
175
176/*
177 * Per register state.
178 */
179struct er_account {
180 raw_spinlock_t lock; /* per-core: protect structure */
181 u64 config; /* extra MSR config */
182 u64 reg; /* extra MSR number */
183 atomic_t ref; /* reference count */
184};
185
186/*
187 * Extra registers for specific events.
188 *
189 * Some events need large masks and require external MSRs.
190 * Those extra MSRs end up being shared for all events on
191 * a PMU and sometimes between PMU of sibling HT threads.
192 * In either case, the kernel needs to handle conflicting
193 * accesses to those extra, shared, regs. The data structure
194 * to manage those registers is stored in cpu_hw_event.
195 */
196struct extra_reg {
197 unsigned int event;
198 unsigned int msr;
199 u64 config_mask;
200 u64 valid_mask;
201 int idx; /* per_xxx->regs[] reg index */
202};
203
204#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
205 .event = (e), \
206 .msr = (ms), \
207 .config_mask = (m), \
208 .valid_mask = (vm), \
209 .idx = EXTRA_REG_##i \
210 }
211
212#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
213 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
214
215#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
216
217union perf_capabilities {
218 struct {
219 u64 lbr_format : 6;
220 u64 pebs_trap : 1;
221 u64 pebs_arch_reg : 1;
222 u64 pebs_format : 4;
223 u64 smm_freeze : 1;
224 };
225 u64 capabilities;
226};
227
228/*
229 * struct x86_pmu - generic x86 pmu
230 */
231struct x86_pmu {
232 /*
233 * Generic x86 PMC bits
234 */
235 const char *name;
236 int version;
237 int (*handle_irq)(struct pt_regs *);
238 void (*disable_all)(void);
239 void (*enable_all)(int added);
240 void (*enable)(struct perf_event *);
241 void (*disable)(struct perf_event *);
242 int (*hw_config)(struct perf_event *event);
243 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
244 unsigned eventsel;
245 unsigned perfctr;
246 u64 (*event_map)(int);
247 int max_events;
248 int num_counters;
249 int num_counters_fixed;
250 int cntval_bits;
251 u64 cntval_mask;
252 int apic;
253 u64 max_period;
254 struct event_constraint *
255 (*get_event_constraints)(struct cpu_hw_events *cpuc,
256 struct perf_event *event);
257
258 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
259 struct perf_event *event);
260 struct event_constraint *event_constraints;
261 void (*quirks)(void);
262 int perfctr_second_write;
263
264 int (*cpu_prepare)(int cpu);
265 void (*cpu_starting)(int cpu);
266 void (*cpu_dying)(int cpu);
267 void (*cpu_dead)(int cpu);
268
269 /*
270 * Intel Arch Perfmon v2+
271 */
272 u64 intel_ctrl;
273 union perf_capabilities intel_cap;
274 49
275 /* 50DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
276 * Intel DebugStore bits
277 */
278 int bts, pebs;
279 int bts_active, pebs_active;
280 int pebs_record_size;
281 void (*drain_pebs)(struct pt_regs *regs);
282 struct event_constraint *pebs_constraints;
283
284 /*
285 * Intel LBR
286 */
287 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
288 int lbr_nr; /* hardware stack size */
289
290 /*
291 * Extra registers for events
292 */
293 struct extra_reg *extra_regs;
294 unsigned int er_flags;
295};
296
297#define ERF_NO_HT_SHARING 1
298#define ERF_HAS_RSP_1 2
299
300static struct x86_pmu x86_pmu __read_mostly;
301
302static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
303 .enabled = 1, 51 .enabled = 1,
304}; 52};
305 53
306static int x86_perf_event_set_period(struct perf_event *event); 54u64 __read_mostly hw_cache_event_ids
307
308/*
309 * Generalized hw caching related hw_event table, filled
310 * in on a per model basis. A value of 0 means
311 * 'not supported', -1 means 'hw_event makes no sense on
312 * this CPU', any other value means the raw hw_event
313 * ID.
314 */
315
316#define C(x) PERF_COUNT_HW_CACHE_##x
317
318static u64 __read_mostly hw_cache_event_ids
319 [PERF_COUNT_HW_CACHE_MAX] 55 [PERF_COUNT_HW_CACHE_MAX]
320 [PERF_COUNT_HW_CACHE_OP_MAX] 56 [PERF_COUNT_HW_CACHE_OP_MAX]
321 [PERF_COUNT_HW_CACHE_RESULT_MAX]; 57 [PERF_COUNT_HW_CACHE_RESULT_MAX];
322static u64 __read_mostly hw_cache_extra_regs 58u64 __read_mostly hw_cache_extra_regs
323 [PERF_COUNT_HW_CACHE_MAX] 59 [PERF_COUNT_HW_CACHE_MAX]
324 [PERF_COUNT_HW_CACHE_OP_MAX] 60 [PERF_COUNT_HW_CACHE_OP_MAX]
325 [PERF_COUNT_HW_CACHE_RESULT_MAX]; 61 [PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -329,8 +65,7 @@ static u64 __read_mostly hw_cache_extra_regs
329 * Can only be executed on the CPU where the event is active. 65 * Can only be executed on the CPU where the event is active.
330 * Returns the delta events processed. 66 * Returns the delta events processed.
331 */ 67 */
332static u64 68u64 x86_perf_event_update(struct perf_event *event)
333x86_perf_event_update(struct perf_event *event)
334{ 69{
335 struct hw_perf_event *hwc = &event->hw; 70 struct hw_perf_event *hwc = &event->hw;
336 int shift = 64 - x86_pmu.cntval_bits; 71 int shift = 64 - x86_pmu.cntval_bits;
@@ -373,30 +108,6 @@ again:
373 return new_raw_count; 108 return new_raw_count;
374} 109}
375 110
376static inline int x86_pmu_addr_offset(int index)
377{
378 int offset;
379
380 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
381 alternative_io(ASM_NOP2,
382 "shll $1, %%eax",
383 X86_FEATURE_PERFCTR_CORE,
384 "=a" (offset),
385 "a" (index));
386
387 return offset;
388}
389
390static inline unsigned int x86_pmu_config_addr(int index)
391{
392 return x86_pmu.eventsel + x86_pmu_addr_offset(index);
393}
394
395static inline unsigned int x86_pmu_event_addr(int index)
396{
397 return x86_pmu.perfctr + x86_pmu_addr_offset(index);
398}
399
400/* 111/*
401 * Find and validate any extra registers to set up. 112 * Find and validate any extra registers to set up.
402 */ 113 */
@@ -532,9 +243,6 @@ msr_fail:
532 return false; 243 return false;
533} 244}
534 245
535static void reserve_ds_buffers(void);
536static void release_ds_buffers(void);
537
538static void hw_perf_event_destroy(struct perf_event *event) 246static void hw_perf_event_destroy(struct perf_event *event)
539{ 247{
540 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { 248 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
@@ -583,7 +291,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
583 return x86_pmu_extra_regs(val, event); 291 return x86_pmu_extra_regs(val, event);
584} 292}
585 293
586static int x86_setup_perfctr(struct perf_event *event) 294int x86_setup_perfctr(struct perf_event *event)
587{ 295{
588 struct perf_event_attr *attr = &event->attr; 296 struct perf_event_attr *attr = &event->attr;
589 struct hw_perf_event *hwc = &event->hw; 297 struct hw_perf_event *hwc = &event->hw;
@@ -647,7 +355,7 @@ static int x86_setup_perfctr(struct perf_event *event)
647 return 0; 355 return 0;
648} 356}
649 357
650static int x86_pmu_hw_config(struct perf_event *event) 358int x86_pmu_hw_config(struct perf_event *event)
651{ 359{
652 if (event->attr.precise_ip) { 360 if (event->attr.precise_ip) {
653 int precise = 0; 361 int precise = 0;
@@ -723,7 +431,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
723 return x86_pmu.hw_config(event); 431 return x86_pmu.hw_config(event);
724} 432}
725 433
726static void x86_pmu_disable_all(void) 434void x86_pmu_disable_all(void)
727{ 435{
728 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 436 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
729 int idx; 437 int idx;
@@ -758,15 +466,7 @@ static void x86_pmu_disable(struct pmu *pmu)
758 x86_pmu.disable_all(); 466 x86_pmu.disable_all();
759} 467}
760 468
761static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, 469void x86_pmu_enable_all(int added)
762 u64 enable_mask)
763{
764 if (hwc->extra_reg.reg)
765 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
766 wrmsrl(hwc->config_base, hwc->config | enable_mask);
767}
768
769static void x86_pmu_enable_all(int added)
770{ 470{
771 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 471 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
772 int idx; 472 int idx;
@@ -788,7 +488,7 @@ static inline int is_x86_event(struct perf_event *event)
788 return event->pmu == &pmu; 488 return event->pmu == &pmu;
789} 489}
790 490
791static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) 491int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
792{ 492{
793 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; 493 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
794 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 494 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -959,7 +659,6 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
959} 659}
960 660
961static void x86_pmu_start(struct perf_event *event, int flags); 661static void x86_pmu_start(struct perf_event *event, int flags);
962static void x86_pmu_stop(struct perf_event *event, int flags);
963 662
964static void x86_pmu_enable(struct pmu *pmu) 663static void x86_pmu_enable(struct pmu *pmu)
965{ 664{
@@ -1031,21 +730,13 @@ static void x86_pmu_enable(struct pmu *pmu)
1031 x86_pmu.enable_all(added); 730 x86_pmu.enable_all(added);
1032} 731}
1033 732
1034static inline void x86_pmu_disable_event(struct perf_event *event)
1035{
1036 struct hw_perf_event *hwc = &event->hw;
1037
1038 wrmsrl(hwc->config_base, hwc->config);
1039}
1040
1041static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 733static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1042 734
1043/* 735/*
1044 * Set the next IRQ period, based on the hwc->period_left value. 736 * Set the next IRQ period, based on the hwc->period_left value.
1045 * To be called with the event disabled in hw: 737 * To be called with the event disabled in hw:
1046 */ 738 */
1047static int 739int x86_perf_event_set_period(struct perf_event *event)
1048x86_perf_event_set_period(struct perf_event *event)
1049{ 740{
1050 struct hw_perf_event *hwc = &event->hw; 741 struct hw_perf_event *hwc = &event->hw;
1051 s64 left = local64_read(&hwc->period_left); 742 s64 left = local64_read(&hwc->period_left);
@@ -1105,7 +796,7 @@ x86_perf_event_set_period(struct perf_event *event)
1105 return ret; 796 return ret;
1106} 797}
1107 798
1108static void x86_pmu_enable_event(struct perf_event *event) 799void x86_pmu_enable_event(struct perf_event *event)
1109{ 800{
1110 if (__this_cpu_read(cpu_hw_events.enabled)) 801 if (__this_cpu_read(cpu_hw_events.enabled))
1111 __x86_pmu_enable_event(&event->hw, 802 __x86_pmu_enable_event(&event->hw,
@@ -1244,7 +935,7 @@ void perf_event_print_debug(void)
1244 local_irq_restore(flags); 935 local_irq_restore(flags);
1245} 936}
1246 937
1247static void x86_pmu_stop(struct perf_event *event, int flags) 938void x86_pmu_stop(struct perf_event *event, int flags)
1248{ 939{
1249 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 940 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1250 struct hw_perf_event *hwc = &event->hw; 941 struct hw_perf_event *hwc = &event->hw;
@@ -1297,7 +988,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
1297 perf_event_update_userpage(event); 988 perf_event_update_userpage(event);
1298} 989}
1299 990
1300static int x86_pmu_handle_irq(struct pt_regs *regs) 991int x86_pmu_handle_irq(struct pt_regs *regs)
1301{ 992{
1302 struct perf_sample_data data; 993 struct perf_sample_data data;
1303 struct cpu_hw_events *cpuc; 994 struct cpu_hw_events *cpuc;
@@ -1367,109 +1058,28 @@ void perf_events_lapic_init(void)
1367 apic_write(APIC_LVTPC, APIC_DM_NMI); 1058 apic_write(APIC_LVTPC, APIC_DM_NMI);
1368} 1059}
1369 1060
1370struct pmu_nmi_state {
1371 unsigned int marked;
1372 int handled;
1373};
1374
1375static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1376
1377static int __kprobes 1061static int __kprobes
1378perf_event_nmi_handler(struct notifier_block *self, 1062perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1379 unsigned long cmd, void *__args)
1380{ 1063{
1381 struct die_args *args = __args;
1382 unsigned int this_nmi;
1383 int handled;
1384
1385 if (!atomic_read(&active_events)) 1064 if (!atomic_read(&active_events))
1386 return NOTIFY_DONE; 1065 return NMI_DONE;
1387
1388 switch (cmd) {
1389 case DIE_NMI:
1390 break;
1391 case DIE_NMIUNKNOWN:
1392 this_nmi = percpu_read(irq_stat.__nmi_count);
1393 if (this_nmi != __this_cpu_read(pmu_nmi.marked))
1394 /* let the kernel handle the unknown nmi */
1395 return NOTIFY_DONE;
1396 /*
1397 * This one is a PMU back-to-back nmi. Two events
1398 * trigger 'simultaneously' raising two back-to-back
1399 * NMIs. If the first NMI handles both, the latter
1400 * will be empty and daze the CPU. So, we drop it to
1401 * avoid false-positive 'unknown nmi' messages.
1402 */
1403 return NOTIFY_STOP;
1404 default:
1405 return NOTIFY_DONE;
1406 }
1407
1408 handled = x86_pmu.handle_irq(args->regs);
1409 if (!handled)
1410 return NOTIFY_DONE;
1411
1412 this_nmi = percpu_read(irq_stat.__nmi_count);
1413 if ((handled > 1) ||
1414 /* the next nmi could be a back-to-back nmi */
1415 ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
1416 (__this_cpu_read(pmu_nmi.handled) > 1))) {
1417 /*
1418 * We could have two subsequent back-to-back nmis: The
1419 * first handles more than one counter, the 2nd
1420 * handles only one counter and the 3rd handles no
1421 * counter.
1422 *
1423 * This is the 2nd nmi because the previous was
1424 * handling more than one counter. We will mark the
1425 * next (3rd) and then drop it if unhandled.
1426 */
1427 __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
1428 __this_cpu_write(pmu_nmi.handled, handled);
1429 }
1430 1066
1431 return NOTIFY_STOP; 1067 return x86_pmu.handle_irq(regs);
1432} 1068}
1433 1069
1434static __read_mostly struct notifier_block perf_event_nmi_notifier = { 1070struct event_constraint emptyconstraint;
1435 .notifier_call = perf_event_nmi_handler, 1071struct event_constraint unconstrained;
1436 .next = NULL,
1437 .priority = NMI_LOCAL_LOW_PRIOR,
1438};
1439
1440static struct event_constraint unconstrained;
1441static struct event_constraint emptyconstraint;
1442
1443static struct event_constraint *
1444x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1445{
1446 struct event_constraint *c;
1447
1448 if (x86_pmu.event_constraints) {
1449 for_each_event_constraint(c, x86_pmu.event_constraints) {
1450 if ((event->hw.config & c->cmask) == c->code)
1451 return c;
1452 }
1453 }
1454
1455 return &unconstrained;
1456}
1457
1458#include "perf_event_amd.c"
1459#include "perf_event_p6.c"
1460#include "perf_event_p4.c"
1461#include "perf_event_intel_lbr.c"
1462#include "perf_event_intel_ds.c"
1463#include "perf_event_intel.c"
1464 1072
1465static int __cpuinit 1073static int __cpuinit
1466x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 1074x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1467{ 1075{
1468 unsigned int cpu = (long)hcpu; 1076 unsigned int cpu = (long)hcpu;
1077 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1469 int ret = NOTIFY_OK; 1078 int ret = NOTIFY_OK;
1470 1079
1471 switch (action & ~CPU_TASKS_FROZEN) { 1080 switch (action & ~CPU_TASKS_FROZEN) {
1472 case CPU_UP_PREPARE: 1081 case CPU_UP_PREPARE:
1082 cpuc->kfree_on_online = NULL;
1473 if (x86_pmu.cpu_prepare) 1083 if (x86_pmu.cpu_prepare)
1474 ret = x86_pmu.cpu_prepare(cpu); 1084 ret = x86_pmu.cpu_prepare(cpu);
1475 break; 1085 break;
@@ -1479,6 +1089,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1479 x86_pmu.cpu_starting(cpu); 1089 x86_pmu.cpu_starting(cpu);
1480 break; 1090 break;
1481 1091
1092 case CPU_ONLINE:
1093 kfree(cpuc->kfree_on_online);
1094 break;
1095
1482 case CPU_DYING: 1096 case CPU_DYING:
1483 if (x86_pmu.cpu_dying) 1097 if (x86_pmu.cpu_dying)
1484 x86_pmu.cpu_dying(cpu); 1098 x86_pmu.cpu_dying(cpu);
@@ -1557,7 +1171,7 @@ static int __init init_hw_perf_events(void)
1557 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; 1171 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
1558 1172
1559 perf_events_lapic_init(); 1173 perf_events_lapic_init();
1560 register_die_notifier(&perf_event_nmi_notifier); 1174 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1561 1175
1562 unconstrained = (struct event_constraint) 1176 unconstrained = (struct event_constraint)
1563 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 1177 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
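
Besides moving declarations into perf_event.h, two behavioural changes are visible in the core file. First, the hand-rolled back-to-back NMI bookkeeping (pmu_nmi.marked/handled) is dropped; suppression of the spurious follow-up NMI is handled by the common x86 NMI dispatch code added in the same series, so perf_event_nmi_handler() shrinks to an active-events check plus x86_pmu.handle_irq(). Second, the hotplug notifier gains a kfree_on_online slot so that CPU_STARTING callbacks, which run on the incoming CPU in a context where kfree() is not safe (it can end up scheduling on -rt), can hand a no-longer-needed structure to CPU_ONLINE for freeing in process context. A condensed sketch of that deferral pattern, mirroring the notifier above with the unrelated cases elided:

    /*
     * Sketch of the kfree_on_online deferral, condensed from the notifier
     * above and amd_pmu_cpu_starting(): CPU_STARTING stashes the pointer,
     * CPU_ONLINE frees it from process context.
     */
    static int __cpuinit example_cpu_notifier(struct notifier_block *self,
                                              unsigned long action, void *hcpu)
    {
            struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, (long)hcpu);

            switch (action & ~CPU_TASKS_FROZEN) {
            case CPU_UP_PREPARE:
                    cpuc->kfree_on_online = NULL;	/* nothing queued yet */
                    break;
            case CPU_STARTING:
                    /*
                     * Model code replacing a shared structure does
                     *	cpuc->kfree_on_online = old_ptr;
                     * here instead of kfree(old_ptr).
                     */
                    break;
            case CPU_ONLINE:
                    kfree(cpuc->kfree_on_online);	/* safe: process context */
                    break;
            }
            return NOTIFY_OK;
    }
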
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
new file mode 100644
index 00000000000..b9698d40ac4
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -0,0 +1,505 @@
1/*
2 * Performance events x86 architecture header
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
11 *
12 * For licencing details see kernel-base/COPYING
13 */
14
15#include <linux/perf_event.h>
16
17/*
18 * | NHM/WSM | SNB |
19 * register -------------------------------
20 * | HT | no HT | HT | no HT |
21 *-----------------------------------------
22 * offcore | core | core | cpu | core |
23 * lbr_sel | core | core | cpu | core |
24 * ld_lat | cpu | core | cpu | core |
25 *-----------------------------------------
26 *
27 * Given that there is a small number of shared regs,
28 * we can pre-allocate their slot in the per-cpu
29 * per-core reg tables.
30 */
31enum extra_reg_type {
32 EXTRA_REG_NONE = -1, /* not used */
33
34 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
35 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
36
37 EXTRA_REG_MAX /* number of entries needed */
38};
39
40struct event_constraint {
41 union {
42 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
43 u64 idxmsk64;
44 };
45 u64 code;
46 u64 cmask;
47 int weight;
48};
49
50struct amd_nb {
51 int nb_id; /* NorthBridge id */
52 int refcnt; /* reference count */
53 struct perf_event *owners[X86_PMC_IDX_MAX];
54 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
55};
56
57/* The maximal number of PEBS events: */
58#define MAX_PEBS_EVENTS 4
59
60/*
61 * A debug store configuration.
62 *
63 * We only support architectures that use 64bit fields.
64 */
65struct debug_store {
66 u64 bts_buffer_base;
67 u64 bts_index;
68 u64 bts_absolute_maximum;
69 u64 bts_interrupt_threshold;
70 u64 pebs_buffer_base;
71 u64 pebs_index;
72 u64 pebs_absolute_maximum;
73 u64 pebs_interrupt_threshold;
74 u64 pebs_event_reset[MAX_PEBS_EVENTS];
75};
76
77/*
78 * Per register state.
79 */
80struct er_account {
81 raw_spinlock_t lock; /* per-core: protect structure */
82 u64 config; /* extra MSR config */
83 u64 reg; /* extra MSR number */
84 atomic_t ref; /* reference count */
85};
86
87/*
88 * Per core/cpu state
89 *
90 * Used to coordinate shared registers between HT threads or
91 * among events on a single PMU.
92 */
93struct intel_shared_regs {
94 struct er_account regs[EXTRA_REG_MAX];
95 int refcnt; /* per-core: #HT threads */
96 unsigned core_id; /* per-core: core id */
97};
98
99#define MAX_LBR_ENTRIES 16
100
101struct cpu_hw_events {
102 /*
103 * Generic x86 PMC bits
104 */
105 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
106 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
107 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
108 int enabled;
109
110 int n_events;
111 int n_added;
112 int n_txn;
113 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
114 u64 tags[X86_PMC_IDX_MAX];
115 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
116
117 unsigned int group_flag;
118
119 /*
120 * Intel DebugStore bits
121 */
122 struct debug_store *ds;
123 u64 pebs_enabled;
124
125 /*
126 * Intel LBR bits
127 */
128 int lbr_users;
129 void *lbr_context;
130 struct perf_branch_stack lbr_stack;
131 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
132
133 /*
134 * Intel host/guest exclude bits
135 */
136 u64 intel_ctrl_guest_mask;
137 u64 intel_ctrl_host_mask;
138 struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
139
140 /*
141 * manage shared (per-core, per-cpu) registers
142 * used on Intel NHM/WSM/SNB
143 */
144 struct intel_shared_regs *shared_regs;
145
146 /*
147 * AMD specific bits
148 */
149 struct amd_nb *amd_nb;
150
151 void *kfree_on_online;
152};
153
154#define __EVENT_CONSTRAINT(c, n, m, w) {\
155 { .idxmsk64 = (n) }, \
156 .code = (c), \
157 .cmask = (m), \
158 .weight = (w), \
159}
160
161#define EVENT_CONSTRAINT(c, n, m) \
162 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
163
164/*
165 * Constraint on the Event code.
166 */
167#define INTEL_EVENT_CONSTRAINT(c, n) \
168 EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
169
170/*
171 * Constraint on the Event code + UMask + fixed-mask
172 *
173 * filter mask to validate fixed counter events.
174 * the following filters disqualify for fixed counters:
175 * - inv
176 * - edge
177 * - cnt-mask
178 * The other filters are supported by fixed counters.
179 * The any-thread option is supported starting with v3.
180 */
181#define FIXED_EVENT_CONSTRAINT(c, n) \
182 EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
183
184/*
185 * Constraint on the Event code + UMask
186 */
187#define INTEL_UEVENT_CONSTRAINT(c, n) \
188 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
189
190#define EVENT_CONSTRAINT_END \
191 EVENT_CONSTRAINT(0, 0, 0)
192
193#define for_each_event_constraint(e, c) \
194 for ((e) = (c); (e)->weight; (e)++)
195
196/*
197 * Extra registers for specific events.
198 *
199 * Some events need large masks and require external MSRs.
200 * Those extra MSRs end up being shared for all events on
201 * a PMU and sometimes between PMU of sibling HT threads.
202 * In either case, the kernel needs to handle conflicting
203 * accesses to those extra, shared, regs. The data structure
204 * to manage those registers is stored in cpu_hw_event.
205 */
206struct extra_reg {
207 unsigned int event;
208 unsigned int msr;
209 u64 config_mask;
210 u64 valid_mask;
211 int idx; /* per_xxx->regs[] reg index */
212};
213
214#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
215 .event = (e), \
216 .msr = (ms), \
217 .config_mask = (m), \
218 .valid_mask = (vm), \
219 .idx = EXTRA_REG_##i \
220 }
221
222#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
223 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
224
225#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
226
227union perf_capabilities {
228 struct {
229 u64 lbr_format:6;
230 u64 pebs_trap:1;
231 u64 pebs_arch_reg:1;
232 u64 pebs_format:4;
233 u64 smm_freeze:1;
234 };
235 u64 capabilities;
236};
237
238/*
239 * struct x86_pmu - generic x86 pmu
240 */
241struct x86_pmu {
242 /*
243 * Generic x86 PMC bits
244 */
245 const char *name;
246 int version;
247 int (*handle_irq)(struct pt_regs *);
248 void (*disable_all)(void);
249 void (*enable_all)(int added);
250 void (*enable)(struct perf_event *);
251 void (*disable)(struct perf_event *);
252 int (*hw_config)(struct perf_event *event);
253 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
254 unsigned eventsel;
255 unsigned perfctr;
256 u64 (*event_map)(int);
257 int max_events;
258 int num_counters;
259 int num_counters_fixed;
260 int cntval_bits;
261 u64 cntval_mask;
262 int apic;
263 u64 max_period;
264 struct event_constraint *
265 (*get_event_constraints)(struct cpu_hw_events *cpuc,
266 struct perf_event *event);
267
268 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
269 struct perf_event *event);
270 struct event_constraint *event_constraints;
271 void (*quirks)(void);
272 int perfctr_second_write;
273
274 int (*cpu_prepare)(int cpu);
275 void (*cpu_starting)(int cpu);
276 void (*cpu_dying)(int cpu);
277 void (*cpu_dead)(int cpu);
278
279 /*
280 * Intel Arch Perfmon v2+
281 */
282 u64 intel_ctrl;
283 union perf_capabilities intel_cap;
284
285 /*
286 * Intel DebugStore bits
287 */
288 int bts, pebs;
289 int bts_active, pebs_active;
290 int pebs_record_size;
291 void (*drain_pebs)(struct pt_regs *regs);
292 struct event_constraint *pebs_constraints;
293
294 /*
295 * Intel LBR
296 */
297 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
298 int lbr_nr; /* hardware stack size */
299
300 /*
301 * Extra registers for events
302 */
303 struct extra_reg *extra_regs;
304 unsigned int er_flags;
305
306 /*
307 * Intel host/guest support (KVM)
308 */
309 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
310};
311
312#define ERF_NO_HT_SHARING 1
313#define ERF_HAS_RSP_1 2
314
315extern struct x86_pmu x86_pmu __read_mostly;
316
317DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
318
319int x86_perf_event_set_period(struct perf_event *event);
320
321/*
322 * Generalized hw caching related hw_event table, filled
323 * in on a per model basis. A value of 0 means
324 * 'not supported', -1 means 'hw_event makes no sense on
325 * this CPU', any other value means the raw hw_event
326 * ID.
327 */
328
329#define C(x) PERF_COUNT_HW_CACHE_##x
330
331extern u64 __read_mostly hw_cache_event_ids
332 [PERF_COUNT_HW_CACHE_MAX]
333 [PERF_COUNT_HW_CACHE_OP_MAX]
334 [PERF_COUNT_HW_CACHE_RESULT_MAX];
335extern u64 __read_mostly hw_cache_extra_regs
336 [PERF_COUNT_HW_CACHE_MAX]
337 [PERF_COUNT_HW_CACHE_OP_MAX]
338 [PERF_COUNT_HW_CACHE_RESULT_MAX];
339
340u64 x86_perf_event_update(struct perf_event *event);
341
342static inline int x86_pmu_addr_offset(int index)
343{
344 int offset;
345
346 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
347 alternative_io(ASM_NOP2,
348 "shll $1, %%eax",
349 X86_FEATURE_PERFCTR_CORE,
350 "=a" (offset),
351 "a" (index));
352
353 return offset;
354}
355
356static inline unsigned int x86_pmu_config_addr(int index)
357{
358 return x86_pmu.eventsel + x86_pmu_addr_offset(index);
359}
360
361static inline unsigned int x86_pmu_event_addr(int index)
362{
363 return x86_pmu.perfctr + x86_pmu_addr_offset(index);
364}
365
366int x86_setup_perfctr(struct perf_event *event);
367
368int x86_pmu_hw_config(struct perf_event *event);
369
370void x86_pmu_disable_all(void);
371
372static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
373 u64 enable_mask)
374{
375 if (hwc->extra_reg.reg)
376 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
377 wrmsrl(hwc->config_base, hwc->config | enable_mask);
378}
379
380void x86_pmu_enable_all(int added);
381
382int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
383
384void x86_pmu_stop(struct perf_event *event, int flags);
385
386static inline void x86_pmu_disable_event(struct perf_event *event)
387{
388 struct hw_perf_event *hwc = &event->hw;
389
390 wrmsrl(hwc->config_base, hwc->config);
391}
392
393void x86_pmu_enable_event(struct perf_event *event);
394
395int x86_pmu_handle_irq(struct pt_regs *regs);
396
397extern struct event_constraint emptyconstraint;
398
399extern struct event_constraint unconstrained;
400
401#ifdef CONFIG_CPU_SUP_AMD
402
403int amd_pmu_init(void);
404
405#else /* CONFIG_CPU_SUP_AMD */
406
407static inline int amd_pmu_init(void)
408{
409 return 0;
410}
411
412#endif /* CONFIG_CPU_SUP_AMD */
413
414#ifdef CONFIG_CPU_SUP_INTEL
415
416int intel_pmu_save_and_restart(struct perf_event *event);
417
418struct event_constraint *
419x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);
420
421struct intel_shared_regs *allocate_shared_regs(int cpu);
422
423int intel_pmu_init(void);
424
425void init_debug_store_on_cpu(int cpu);
426
427void fini_debug_store_on_cpu(int cpu);
428
429void release_ds_buffers(void);
430
431void reserve_ds_buffers(void);
432
433extern struct event_constraint bts_constraint;
434
435void intel_pmu_enable_bts(u64 config);
436
437void intel_pmu_disable_bts(void);
438
439int intel_pmu_drain_bts_buffer(void);
440
441extern struct event_constraint intel_core2_pebs_event_constraints[];
442
443extern struct event_constraint intel_atom_pebs_event_constraints[];
444
445extern struct event_constraint intel_nehalem_pebs_event_constraints[];
446
447extern struct event_constraint intel_westmere_pebs_event_constraints[];
448
449extern struct event_constraint intel_snb_pebs_event_constraints[];
450
451struct event_constraint *intel_pebs_constraints(struct perf_event *event);
452
453void intel_pmu_pebs_enable(struct perf_event *event);
454
455void intel_pmu_pebs_disable(struct perf_event *event);
456
457void intel_pmu_pebs_enable_all(void);
458
459void intel_pmu_pebs_disable_all(void);
460
461void intel_ds_init(void);
462
463void intel_pmu_lbr_reset(void);
464
465void intel_pmu_lbr_enable(struct perf_event *event);
466
467void intel_pmu_lbr_disable(struct perf_event *event);
468
469void intel_pmu_lbr_enable_all(void);
470
471void intel_pmu_lbr_disable_all(void);
472
473void intel_pmu_lbr_read(void);
474
475void intel_pmu_lbr_init_core(void);
476
477void intel_pmu_lbr_init_nhm(void);
478
479void intel_pmu_lbr_init_atom(void);
480
481int p4_pmu_init(void);
482
483int p6_pmu_init(void);
484
485#else /* CONFIG_CPU_SUP_INTEL */
486
487static inline void reserve_ds_buffers(void)
488{
489}
490
491static inline void release_ds_buffers(void)
492{
493}
494
495static inline int intel_pmu_init(void)
496{
497 return 0;
498}
499
500static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
501{
502 return NULL;
503}
504
505#endif /* CONFIG_CPU_SUP_INTEL */
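
With the old '#include "perf_event_*.c"' chain gone, every model-specific driver becomes an ordinary translation unit that includes this header and reuses the exported x86_* helpers instead of static functions from perf_event.c. A hypothetical skeleton of such a driver, following the pattern of p6_pmu_init()/amd_pmu_init(); the model name, event-map value and counter geometry are invented for illustration:

    #include <linux/perf_event.h>
    #include <linux/types.h>
    #include <linux/init.h>

    #include "perf_event.h"

    /* Placeholder generalized-event map for an imaginary model. */
    static const u64 example_event_map[PERF_COUNT_HW_MAX] = {
            [PERF_COUNT_HW_CPU_CYCLES]	= 0x003c,	/* illustrative only */
    };

    static u64 example_pmu_event_map(int hw_event)
    {
            return example_event_map[hw_event];
    }

    static __initconst const struct x86_pmu example_pmu = {
            .name			= "example",
            .handle_irq		= x86_pmu_handle_irq,	/* shared core helpers */
            .disable_all		= x86_pmu_disable_all,
            .enable_all		= x86_pmu_enable_all,
            .enable			= x86_pmu_enable_event,
            .disable		= x86_pmu_disable_event,
            .hw_config		= x86_pmu_hw_config,
            .schedule_events	= x86_schedule_events,
            .event_map		= example_pmu_event_map,
            .max_events		= ARRAY_SIZE(example_event_map),
            .num_counters		= 2,
            .cntval_bits		= 40,
            .cntval_mask		= (1ULL << 40) - 1,
            .max_period		= (1ULL << 39) - 1,
    };

    __init int example_pmu_init(void)
    {
            x86_pmu = example_pmu;	/* core code copies model data at init */
            return 0;
    }
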
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 941caa2e449..aeefd45697a 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,4 +1,10 @@
-#ifdef CONFIG_CPU_SUP_AMD
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/apicdef.h>
+
+#include "perf_event.h"
 
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -132,6 +138,19 @@ static int amd_pmu_hw_config(struct perf_event *event)
 	if (ret)
 		return ret;
 
+	if (event->attr.exclude_host && event->attr.exclude_guest)
+		/*
+		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
+		 * and will count in both modes. We don't want to count in that
+		 * case so we emulate no-counting by setting US = OS = 0.
+		 */
+		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+				      ARCH_PERFMON_EVENTSEL_OS);
+	else if (event->attr.exclude_host)
+		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+	else if (event->attr.exclude_guest)
+		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+
 	if (event->attr.type != PERF_TYPE_RAW)
 		return 0;
 
@@ -350,7 +369,7 @@ static void amd_pmu_cpu_starting(int cpu)
 			continue;
 
 		if (nb->nb_id == nb_id) {
-			kfree(cpuc->amd_nb);
+			cpuc->kfree_on_online = cpuc->amd_nb;
 			cpuc->amd_nb = nb;
 			break;
 		}
@@ -392,7 +411,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.perfctr		= MSR_K7_PERFCTR0,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= 4,
+	.num_counters		= AMD64_NUM_COUNTERS,
 	.cntval_bits		= 48,
 	.cntval_mask		= (1ULL << 48) - 1,
 	.apic			= 1,
@@ -556,7 +575,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.perfctr		= MSR_F15H_PERF_CTR,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters		= 6,
+	.num_counters		= AMD64_NUM_COUNTERS_F15H,
 	.cntval_bits		= 48,
 	.cntval_mask		= (1ULL << 48) - 1,
 	.apic			= 1,
@@ -573,7 +592,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 #endif
 };
 
-static __init int amd_pmu_init(void)
+__init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
 	if (boot_cpu_data.x86 < 6)
@@ -602,12 +621,3 @@ static __init int amd_pmu_init(void)
 
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_AMD */
-
-static int amd_pmu_init(void)
-{
-	return 0;
-}
-
-#endif
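
The exclude_host/exclude_guest handling above maps two new perf_event_attr bits onto the AMD event-select guest-only/host-only bits; asking for both at once is emulated as "count nowhere" by clearing USR and OS, because the hardware would otherwise treat HO == GO == 1 as counting in both modes. From user space they are set like any other attr flag. A hedged user-space sketch with minimal error handling (no perf tooling assumed):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.exclude_guest = 1;	/* do not count while a guest is running */

            fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            /* ... run the workload ... */
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("cycles (host only): %lld\n", count);
            close(fd);
            return 0;
    }
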
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
new file mode 100644
index 00000000000..ab6343d2182
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -0,0 +1,294 @@
1/*
2 * Performance events - AMD IBS
3 *
4 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
5 *
6 * For licencing details see kernel-base/COPYING
7 */
8
9#include <linux/perf_event.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12
13#include <asm/apic.h>
14
15static u32 ibs_caps;
16
17#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
18
19static struct pmu perf_ibs;
20
21static int perf_ibs_init(struct perf_event *event)
22{
23 if (perf_ibs.type != event->attr.type)
24 return -ENOENT;
25 return 0;
26}
27
28static int perf_ibs_add(struct perf_event *event, int flags)
29{
30 return 0;
31}
32
33static void perf_ibs_del(struct perf_event *event, int flags)
34{
35}
36
37static struct pmu perf_ibs = {
38 .event_init= perf_ibs_init,
39 .add= perf_ibs_add,
40 .del= perf_ibs_del,
41};
42
43static __init int perf_event_ibs_init(void)
44{
45 if (!ibs_caps)
46 return -ENODEV; /* ibs not supported by the cpu */
47
48 perf_pmu_register(&perf_ibs, "ibs", -1);
49 printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
50
51 return 0;
52}
53
54#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
55
56static __init int perf_event_ibs_init(void) { return 0; }
57
58#endif
59
60/* IBS - apic initialization, for perf and oprofile */
61
62static __init u32 __get_ibs_caps(void)
63{
64 u32 caps;
65 unsigned int max_level;
66
67 if (!boot_cpu_has(X86_FEATURE_IBS))
68 return 0;
69
70 /* check IBS cpuid feature flags */
71 max_level = cpuid_eax(0x80000000);
72 if (max_level < IBS_CPUID_FEATURES)
73 return IBS_CAPS_DEFAULT;
74
75 caps = cpuid_eax(IBS_CPUID_FEATURES);
76 if (!(caps & IBS_CAPS_AVAIL))
77 /* cpuid flags not valid */
78 return IBS_CAPS_DEFAULT;
79
80 return caps;
81}
82
83u32 get_ibs_caps(void)
84{
85 return ibs_caps;
86}
87
88EXPORT_SYMBOL(get_ibs_caps);
89
90static inline int get_eilvt(int offset)
91{
92 return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
93}
94
95static inline int put_eilvt(int offset)
96{
97 return !setup_APIC_eilvt(offset, 0, 0, 1);
98}
99
100/*
101 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
102 */
103static inline int ibs_eilvt_valid(void)
104{
105 int offset;
106 u64 val;
107 int valid = 0;
108
109 preempt_disable();
110
111 rdmsrl(MSR_AMD64_IBSCTL, val);
112 offset = val & IBSCTL_LVT_OFFSET_MASK;
113
114 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
115 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
116 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
117 goto out;
118 }
119
120 if (!get_eilvt(offset)) {
121 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
122 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
123 goto out;
124 }
125
126 valid = 1;
127out:
128 preempt_enable();
129
130 return valid;
131}
132
133static int setup_ibs_ctl(int ibs_eilvt_off)
134{
135 struct pci_dev *cpu_cfg;
136 int nodes;
137 u32 value = 0;
138
139 nodes = 0;
140 cpu_cfg = NULL;
141 do {
142 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
143 PCI_DEVICE_ID_AMD_10H_NB_MISC,
144 cpu_cfg);
145 if (!cpu_cfg)
146 break;
147 ++nodes;
148 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
149 | IBSCTL_LVT_OFFSET_VALID);
150 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
151 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
152 pci_dev_put(cpu_cfg);
153 printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
154 "IBSCTL = 0x%08x\n", value);
155 return -EINVAL;
156 }
157 } while (1);
158
159 if (!nodes) {
160 printk(KERN_DEBUG "No CPU node configured for IBS\n");
161 return -ENODEV;
162 }
163
164 return 0;
165}
166
167/*
168 * This runs only on the current cpu. We try to find an LVT offset and
169 * setup the local APIC. For this we must disable preemption. On
170 * success we initialize all nodes with this offset. This updates then
171 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
172 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
173 * is using the new offset.
174 */
175static int force_ibs_eilvt_setup(void)
176{
177 int offset;
178 int ret;
179
180 preempt_disable();
181 /* find the next free available EILVT entry, skip offset 0 */
182 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
183 if (get_eilvt(offset))
184 break;
185 }
186 preempt_enable();
187
188 if (offset == APIC_EILVT_NR_MAX) {
189 printk(KERN_DEBUG "No EILVT entry available\n");
190 return -EBUSY;
191 }
192
193 ret = setup_ibs_ctl(offset);
194 if (ret)
195 goto out;
196
197 if (!ibs_eilvt_valid()) {
198 ret = -EFAULT;
199 goto out;
200 }
201
202 pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
203 pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
204
205 return 0;
206out:
207 preempt_disable();
208 put_eilvt(offset);
209 preempt_enable();
210 return ret;
211}
212
213static inline int get_ibs_lvt_offset(void)
214{
215 u64 val;
216
217 rdmsrl(MSR_AMD64_IBSCTL, val);
218 if (!(val & IBSCTL_LVT_OFFSET_VALID))
219 return -EINVAL;
220
221 return val & IBSCTL_LVT_OFFSET_MASK;
222}
223
224static void setup_APIC_ibs(void *dummy)
225{
226 int offset;
227
228 offset = get_ibs_lvt_offset();
229 if (offset < 0)
230 goto failed;
231
232 if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
233 return;
234failed:
235 pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
236 smp_processor_id());
237}
238
239static void clear_APIC_ibs(void *dummy)
240{
241 int offset;
242
243 offset = get_ibs_lvt_offset();
244 if (offset >= 0)
245 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
246}
247
248static int __cpuinit
249perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
250{
251 switch (action & ~CPU_TASKS_FROZEN) {
252 case CPU_STARTING:
253 setup_APIC_ibs(NULL);
254 break;
255 case CPU_DYING:
256 clear_APIC_ibs(NULL);
257 break;
258 default:
259 break;
260 }
261
262 return NOTIFY_OK;
263}
264
265static __init int amd_ibs_init(void)
266{
267 u32 caps;
268 int ret;
269
270 caps = __get_ibs_caps();
271 if (!caps)
272 return -ENODEV; /* ibs not supported by the cpu */
273
274 if (!ibs_eilvt_valid()) {
275 ret = force_ibs_eilvt_setup();
276 if (ret) {
277 pr_err("Failed to setup IBS, %d\n", ret);
278 return ret;
279 }
280 }
281
282 get_online_cpus();
283 ibs_caps = caps;
284 /* make ibs_caps visible to other cpus: */
285 smp_mb();
286 perf_cpu_notifier(perf_ibs_cpu_notifier);
287 smp_call_function(setup_APIC_ibs, NULL, 1);
288 put_online_cpus();
289
290 return perf_event_ibs_init();
291}
292
293/* Since we need the pci subsystem to init ibs we can't do this earlier: */
294device_initcall(amd_ibs_init);
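
perf_pmu_register(&perf_ibs, "ibs", -1) publishes IBS as a dynamically allocated PMU type; the event_init/add/del callbacks are still stubs at this point, so most of the file is about claiming the APIC extended-interrupt LVT offset (via the per-node IBS_CTL setting in northbridge config space) and exporting ibs_caps. A hedged sketch of how user space would locate such a dynamic PMU, relying on the generic event_source sysfs layout rather than anything this file adds:

    #include <stdio.h>

    /*
     * PMUs registered with perf_pmu_register() appear under
     * /sys/bus/event_source/devices/<name>/type; the value is what goes
     * into perf_event_attr.type. Sketch only; minimal error handling.
     */
    static int read_pmu_type(const char *name)
    {
            char path[128];
            FILE *f;
            int type = -1;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/type", name);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%d", &type) != 1)
                    type = -1;
            fclose(f);
            return type;
    }

    int main(void)
    {
            int type = read_pmu_type("ibs");

            if (type < 0)
                    printf("ibs PMU not registered on this kernel/CPU\n");
            else
                    printf("ibs PMU type = %d\n", type);
            return 0;
    }
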
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f88af2c2a56..e09ca20e86e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,16 +1,19 @@
1#ifdef CONFIG_CPU_SUP_INTEL
2
3/* 1/*
4 * Per core/cpu state 2 * Per core/cpu state
5 * 3 *
6 * Used to coordinate shared registers between HT threads or 4 * Used to coordinate shared registers between HT threads or
7 * among events on a single PMU. 5 * among events on a single PMU.
8 */ 6 */
9struct intel_shared_regs { 7
10 struct er_account regs[EXTRA_REG_MAX]; 8#include <linux/stddef.h>
11 int refcnt; /* per-core: #HT threads */ 9#include <linux/types.h>
12 unsigned core_id; /* per-core: core id */ 10#include <linux/init.h>
13}; 11#include <linux/slab.h>
12
13#include <asm/hardirq.h>
14#include <asm/apic.h>
15
16#include "perf_event.h"
14 17
15/* 18/*
16 * Intel PerfMon, used on Core and later. 19 * Intel PerfMon, used on Core and later.
@@ -746,7 +749,8 @@ static void intel_pmu_enable_all(int added)
746 749
747 intel_pmu_pebs_enable_all(); 750 intel_pmu_pebs_enable_all();
748 intel_pmu_lbr_enable_all(); 751 intel_pmu_lbr_enable_all();
749 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 752 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
753 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
750 754
751 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 755 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
752 struct perf_event *event = 756 struct perf_event *event =
@@ -869,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
869static void intel_pmu_disable_event(struct perf_event *event) 873static void intel_pmu_disable_event(struct perf_event *event)
870{ 874{
871 struct hw_perf_event *hwc = &event->hw; 875 struct hw_perf_event *hwc = &event->hw;
876 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
872 877
873 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { 878 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
874 intel_pmu_disable_bts(); 879 intel_pmu_disable_bts();
@@ -876,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event)
876 return; 881 return;
877 } 882 }
878 883
884 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
885 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
886
879 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 887 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
880 intel_pmu_disable_fixed(hwc); 888 intel_pmu_disable_fixed(hwc);
881 return; 889 return;
@@ -921,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
921static void intel_pmu_enable_event(struct perf_event *event) 929static void intel_pmu_enable_event(struct perf_event *event)
922{ 930{
923 struct hw_perf_event *hwc = &event->hw; 931 struct hw_perf_event *hwc = &event->hw;
932 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
924 933
925 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { 934 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
926 if (!__this_cpu_read(cpu_hw_events.enabled)) 935 if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -930,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event)
930 return; 939 return;
931 } 940 }
932 941
942 if (event->attr.exclude_host)
943 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
944 if (event->attr.exclude_guest)
945 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
946
933 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 947 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
934 intel_pmu_enable_fixed(hwc); 948 intel_pmu_enable_fixed(hwc);
935 return; 949 return;
@@ -945,7 +959,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
945 * Save and restart an expired event. Called by NMI contexts, 959 * Save and restart an expired event. Called by NMI contexts,
946 * so it has to be careful about preempting normal event ops: 960 * so it has to be careful about preempting normal event ops:
947 */ 961 */
948static int intel_pmu_save_and_restart(struct perf_event *event) 962int intel_pmu_save_and_restart(struct perf_event *event)
949{ 963{
950 x86_perf_event_update(event); 964 x86_perf_event_update(event);
951 return x86_perf_event_set_period(event); 965 return x86_perf_event_set_period(event);
@@ -1197,6 +1211,21 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1197 return c; 1211 return c;
1198} 1212}
1199 1213
1214struct event_constraint *
1215x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1216{
1217 struct event_constraint *c;
1218
1219 if (x86_pmu.event_constraints) {
1220 for_each_event_constraint(c, x86_pmu.event_constraints) {
1221 if ((event->hw.config & c->cmask) == c->code)
1222 return c;
1223 }
1224 }
1225
1226 return &unconstrained;
1227}
1228
1200static struct event_constraint * 1229static struct event_constraint *
1201intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) 1230intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1202{ 1231{
@@ -1284,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event)
1284 return 0; 1313 return 0;
1285} 1314}
1286 1315
1316struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1317{
1318 if (x86_pmu.guest_get_msrs)
1319 return x86_pmu.guest_get_msrs(nr);
1320 *nr = 0;
1321 return NULL;
1322}
1323EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1324
1325static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1326{
1327 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1328 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1329
1330 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1331 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1332 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1333
1334 *nr = 1;
1335 return arr;
1336}
1337
1338static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1339{
1340 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1341 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1342 int idx;
1343
1344 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1345 struct perf_event *event = cpuc->events[idx];
1346
1347 arr[idx].msr = x86_pmu_config_addr(idx);
1348 arr[idx].host = arr[idx].guest = 0;
1349
1350 if (!test_bit(idx, cpuc->active_mask))
1351 continue;
1352
1353 arr[idx].host = arr[idx].guest =
1354 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1355
1356 if (event->attr.exclude_host)
1357 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1358 else if (event->attr.exclude_guest)
1359 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1360 }
1361
1362 *nr = x86_pmu.num_counters;
1363 return arr;
1364}
1365
1366static void core_pmu_enable_event(struct perf_event *event)
1367{
1368 if (!event->attr.exclude_host)
1369 x86_pmu_enable_event(event);
1370}
1371
1372static void core_pmu_enable_all(int added)
1373{
1374 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1375 int idx;
1376
1377 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1378 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1379
1380 if (!test_bit(idx, cpuc->active_mask) ||
1381 cpuc->events[idx]->attr.exclude_host)
1382 continue;
1383
1384 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1385 }
1386}
1387
1287static __initconst const struct x86_pmu core_pmu = { 1388static __initconst const struct x86_pmu core_pmu = {
1288 .name = "core", 1389 .name = "core",
1289 .handle_irq = x86_pmu_handle_irq, 1390 .handle_irq = x86_pmu_handle_irq,
1290 .disable_all = x86_pmu_disable_all, 1391 .disable_all = x86_pmu_disable_all,
1291 .enable_all = x86_pmu_enable_all, 1392 .enable_all = core_pmu_enable_all,
1292 .enable = x86_pmu_enable_event, 1393 .enable = core_pmu_enable_event,
1293 .disable = x86_pmu_disable_event, 1394 .disable = x86_pmu_disable_event,
1294 .hw_config = x86_pmu_hw_config, 1395 .hw_config = x86_pmu_hw_config,
1295 .schedule_events = x86_schedule_events, 1396 .schedule_events = x86_schedule_events,
@@ -1307,9 +1408,10 @@ static __initconst const struct x86_pmu core_pmu = {
1307 .get_event_constraints = intel_get_event_constraints, 1408 .get_event_constraints = intel_get_event_constraints,
1308 .put_event_constraints = intel_put_event_constraints, 1409 .put_event_constraints = intel_put_event_constraints,
1309 .event_constraints = intel_core_event_constraints, 1410 .event_constraints = intel_core_event_constraints,
1411 .guest_get_msrs = core_guest_get_msrs,
1310}; 1412};
1311 1413
1312static struct intel_shared_regs *allocate_shared_regs(int cpu) 1414struct intel_shared_regs *allocate_shared_regs(int cpu)
1313{ 1415{
1314 struct intel_shared_regs *regs; 1416 struct intel_shared_regs *regs;
1315 int i; 1417 int i;
@@ -1362,7 +1464,7 @@ static void intel_pmu_cpu_starting(int cpu)
1362 1464
1363 pc = per_cpu(cpu_hw_events, i).shared_regs; 1465 pc = per_cpu(cpu_hw_events, i).shared_regs;
1364 if (pc && pc->core_id == core_id) { 1466 if (pc && pc->core_id == core_id) {
1365 kfree(cpuc->shared_regs); 1467 cpuc->kfree_on_online = cpuc->shared_regs;
1366 cpuc->shared_regs = pc; 1468 cpuc->shared_regs = pc;
1367 break; 1469 break;
1368 } 1470 }
@@ -1413,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
1413 .cpu_prepare = intel_pmu_cpu_prepare, 1515 .cpu_prepare = intel_pmu_cpu_prepare,
1414 .cpu_starting = intel_pmu_cpu_starting, 1516 .cpu_starting = intel_pmu_cpu_starting,
1415 .cpu_dying = intel_pmu_cpu_dying, 1517 .cpu_dying = intel_pmu_cpu_dying,
1518 .guest_get_msrs = intel_guest_get_msrs,
1416}; 1519};
1417 1520
1418static void intel_clovertown_quirks(void) 1521static void intel_clovertown_quirks(void)
@@ -1441,7 +1544,7 @@ static void intel_clovertown_quirks(void)
1441 x86_pmu.pebs_constraints = NULL; 1544 x86_pmu.pebs_constraints = NULL;
1442} 1545}
1443 1546
1444static __init int intel_pmu_init(void) 1547__init int intel_pmu_init(void)
1445{ 1548{
1446 union cpuid10_edx edx; 1549 union cpuid10_edx edx;
1447 union cpuid10_eax eax; 1550 union cpuid10_eax eax;
@@ -1597,7 +1700,7 @@ static __init int intel_pmu_init(void)
1597 intel_pmu_lbr_init_nhm(); 1700 intel_pmu_lbr_init_nhm();
1598 1701
1599 x86_pmu.event_constraints = intel_snb_event_constraints; 1702 x86_pmu.event_constraints = intel_snb_event_constraints;
1600 x86_pmu.pebs_constraints = intel_snb_pebs_events; 1703 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
1601 x86_pmu.extra_regs = intel_snb_extra_regs; 1704 x86_pmu.extra_regs = intel_snb_extra_regs;
1602 /* all extra regs are per-cpu when HT is on */ 1705 /* all extra regs are per-cpu when HT is on */
1603 x86_pmu.er_flags |= ERF_HAS_RSP_1; 1706 x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -1628,16 +1731,3 @@ static __init int intel_pmu_init(void)
1628 } 1731 }
1629 return 0; 1732 return 0;
1630} 1733}
1631
1632#else /* CONFIG_CPU_SUP_INTEL */
1633
1634static int intel_pmu_init(void)
1635{
1636 return 0;
1637}
1638
1639static struct intel_shared_regs *allocate_shared_regs(int cpu)
1640{
1641 return NULL;
1642}
1643#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 1b1ef3addcf..c0d238f49db 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -1,7 +1,10 @@
1#ifdef CONFIG_CPU_SUP_INTEL 1#include <linux/bitops.h>
2#include <linux/types.h>
3#include <linux/slab.h>
2 4
3/* The maximal number of PEBS events: */ 5#include <asm/perf_event.h>
4#define MAX_PEBS_EVENTS 4 6
7#include "perf_event.h"
5 8
6/* The size of a BTS record in bytes: */ 9/* The size of a BTS record in bytes: */
7#define BTS_RECORD_SIZE 24 10#define BTS_RECORD_SIZE 24
@@ -37,24 +40,7 @@ struct pebs_record_nhm {
37 u64 status, dla, dse, lat; 40 u64 status, dla, dse, lat;
38}; 41};
39 42
40/* 43void init_debug_store_on_cpu(int cpu)
41 * A debug store configuration.
42 *
43 * We only support architectures that use 64bit fields.
44 */
45struct debug_store {
46 u64 bts_buffer_base;
47 u64 bts_index;
48 u64 bts_absolute_maximum;
49 u64 bts_interrupt_threshold;
50 u64 pebs_buffer_base;
51 u64 pebs_index;
52 u64 pebs_absolute_maximum;
53 u64 pebs_interrupt_threshold;
54 u64 pebs_event_reset[MAX_PEBS_EVENTS];
55};
56
57static void init_debug_store_on_cpu(int cpu)
58{ 44{
59 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 45 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
60 46
@@ -66,7 +52,7 @@ static void init_debug_store_on_cpu(int cpu)
66 (u32)((u64)(unsigned long)ds >> 32)); 52 (u32)((u64)(unsigned long)ds >> 32));
67} 53}
68 54
69static void fini_debug_store_on_cpu(int cpu) 55void fini_debug_store_on_cpu(int cpu)
70{ 56{
71 if (!per_cpu(cpu_hw_events, cpu).ds) 57 if (!per_cpu(cpu_hw_events, cpu).ds)
72 return; 58 return;
@@ -175,7 +161,7 @@ static void release_ds_buffer(int cpu)
175 kfree(ds); 161 kfree(ds);
176} 162}
177 163
178static void release_ds_buffers(void) 164void release_ds_buffers(void)
179{ 165{
180 int cpu; 166 int cpu;
181 167
@@ -194,7 +180,7 @@ static void release_ds_buffers(void)
194 put_online_cpus(); 180 put_online_cpus();
195} 181}
196 182
197static void reserve_ds_buffers(void) 183void reserve_ds_buffers(void)
198{ 184{
199 int bts_err = 0, pebs_err = 0; 185 int bts_err = 0, pebs_err = 0;
200 int cpu; 186 int cpu;
@@ -260,10 +246,10 @@ static void reserve_ds_buffers(void)
260 * BTS 246 * BTS
261 */ 247 */
262 248
263static struct event_constraint bts_constraint = 249struct event_constraint bts_constraint =
264 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); 250 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
265 251
266static void intel_pmu_enable_bts(u64 config) 252void intel_pmu_enable_bts(u64 config)
267{ 253{
268 unsigned long debugctlmsr; 254 unsigned long debugctlmsr;
269 255
@@ -282,7 +268,7 @@ static void intel_pmu_enable_bts(u64 config)
282 update_debugctlmsr(debugctlmsr); 268 update_debugctlmsr(debugctlmsr);
283} 269}
284 270
285static void intel_pmu_disable_bts(void) 271void intel_pmu_disable_bts(void)
286{ 272{
287 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 273 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
288 unsigned long debugctlmsr; 274 unsigned long debugctlmsr;
@@ -299,7 +285,7 @@ static void intel_pmu_disable_bts(void)
299 update_debugctlmsr(debugctlmsr); 285 update_debugctlmsr(debugctlmsr);
300} 286}
301 287
302static int intel_pmu_drain_bts_buffer(void) 288int intel_pmu_drain_bts_buffer(void)
303{ 289{
304 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 290 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
305 struct debug_store *ds = cpuc->ds; 291 struct debug_store *ds = cpuc->ds;
@@ -361,7 +347,7 @@ static int intel_pmu_drain_bts_buffer(void)
361/* 347/*
362 * PEBS 348 * PEBS
363 */ 349 */
364static struct event_constraint intel_core2_pebs_event_constraints[] = { 350struct event_constraint intel_core2_pebs_event_constraints[] = {
365 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 351 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
366 INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ 352 INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
367 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ 353 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
@@ -370,14 +356,14 @@ static struct event_constraint intel_core2_pebs_event_constraints[] = {
370 EVENT_CONSTRAINT_END 356 EVENT_CONSTRAINT_END
371}; 357};
372 358
373static struct event_constraint intel_atom_pebs_event_constraints[] = { 359struct event_constraint intel_atom_pebs_event_constraints[] = {
374 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 360 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
375 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ 361 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
376 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ 362 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
377 EVENT_CONSTRAINT_END 363 EVENT_CONSTRAINT_END
378}; 364};
379 365
380static struct event_constraint intel_nehalem_pebs_event_constraints[] = { 366struct event_constraint intel_nehalem_pebs_event_constraints[] = {
381 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 367 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
382 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 368 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
383 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 369 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -392,7 +378,7 @@ static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
392 EVENT_CONSTRAINT_END 378 EVENT_CONSTRAINT_END
393}; 379};
394 380
395static struct event_constraint intel_westmere_pebs_event_constraints[] = { 381struct event_constraint intel_westmere_pebs_event_constraints[] = {
396 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 382 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
397 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 383 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
398 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 384 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -407,7 +393,7 @@ static struct event_constraint intel_westmere_pebs_event_constraints[] = {
407 EVENT_CONSTRAINT_END 393 EVENT_CONSTRAINT_END
408}; 394};
409 395
410static struct event_constraint intel_snb_pebs_events[] = { 396struct event_constraint intel_snb_pebs_event_constraints[] = {
411 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ 397 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
412 INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ 398 INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
413 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ 399 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
@@ -428,8 +414,7 @@ static struct event_constraint intel_snb_pebs_events[] = {
428 EVENT_CONSTRAINT_END 414 EVENT_CONSTRAINT_END
429}; 415};
430 416
431static struct event_constraint * 417struct event_constraint *intel_pebs_constraints(struct perf_event *event)
432intel_pebs_constraints(struct perf_event *event)
433{ 418{
434 struct event_constraint *c; 419 struct event_constraint *c;
435 420
@@ -446,7 +431,7 @@ intel_pebs_constraints(struct perf_event *event)
446 return &emptyconstraint; 431 return &emptyconstraint;
447} 432}
448 433
449static void intel_pmu_pebs_enable(struct perf_event *event) 434void intel_pmu_pebs_enable(struct perf_event *event)
450{ 435{
451 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 436 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
452 struct hw_perf_event *hwc = &event->hw; 437 struct hw_perf_event *hwc = &event->hw;
@@ -460,7 +445,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event)
460 intel_pmu_lbr_enable(event); 445 intel_pmu_lbr_enable(event);
461} 446}
462 447
463static void intel_pmu_pebs_disable(struct perf_event *event) 448void intel_pmu_pebs_disable(struct perf_event *event)
464{ 449{
465 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 450 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
466 struct hw_perf_event *hwc = &event->hw; 451 struct hw_perf_event *hwc = &event->hw;
@@ -475,7 +460,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event)
475 intel_pmu_lbr_disable(event); 460 intel_pmu_lbr_disable(event);
476} 461}
477 462
478static void intel_pmu_pebs_enable_all(void) 463void intel_pmu_pebs_enable_all(void)
479{ 464{
480 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 465 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
481 466
@@ -483,7 +468,7 @@ static void intel_pmu_pebs_enable_all(void)
483 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 468 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
484} 469}
485 470
486static void intel_pmu_pebs_disable_all(void) 471void intel_pmu_pebs_disable_all(void)
487{ 472{
488 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 473 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
489 474
@@ -576,8 +561,6 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
576 return 0; 561 return 0;
577} 562}
578 563
579static int intel_pmu_save_and_restart(struct perf_event *event);
580
581static void __intel_pmu_pebs_event(struct perf_event *event, 564static void __intel_pmu_pebs_event(struct perf_event *event,
582 struct pt_regs *iregs, void *__pebs) 565 struct pt_regs *iregs, void *__pebs)
583{ 566{
@@ -716,7 +699,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
716 * BTS, PEBS probe and setup 699 * BTS, PEBS probe and setup
717 */ 700 */
718 701
719static void intel_ds_init(void) 702void intel_ds_init(void)
720{ 703{
721 /* 704 /*
722 * No support for 32bit formats 705 * No support for 32bit formats
@@ -749,15 +732,3 @@ static void intel_ds_init(void)
749 } 732 }
750 } 733 }
751} 734}
752
753#else /* CONFIG_CPU_SUP_INTEL */
754
755static void reserve_ds_buffers(void)
756{
757}
758
759static void release_ds_buffers(void)
760{
761}
762
763#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d202c1bece1..3fab3de3ce9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -1,4 +1,10 @@
1#ifdef CONFIG_CPU_SUP_INTEL 1#include <linux/perf_event.h>
2#include <linux/types.h>
3
4#include <asm/perf_event.h>
5#include <asm/msr.h>
6
7#include "perf_event.h"
2 8
3enum { 9enum {
4 LBR_FORMAT_32 = 0x00, 10 LBR_FORMAT_32 = 0x00,
@@ -48,7 +54,7 @@ static void intel_pmu_lbr_reset_64(void)
48 } 54 }
49} 55}
50 56
51static void intel_pmu_lbr_reset(void) 57void intel_pmu_lbr_reset(void)
52{ 58{
53 if (!x86_pmu.lbr_nr) 59 if (!x86_pmu.lbr_nr)
54 return; 60 return;
@@ -59,7 +65,7 @@ static void intel_pmu_lbr_reset(void)
59 intel_pmu_lbr_reset_64(); 65 intel_pmu_lbr_reset_64();
60} 66}
61 67
62static void intel_pmu_lbr_enable(struct perf_event *event) 68void intel_pmu_lbr_enable(struct perf_event *event)
63{ 69{
64 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 70 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
65 71
@@ -81,7 +87,7 @@ static void intel_pmu_lbr_enable(struct perf_event *event)
81 cpuc->lbr_users++; 87 cpuc->lbr_users++;
82} 88}
83 89
84static void intel_pmu_lbr_disable(struct perf_event *event) 90void intel_pmu_lbr_disable(struct perf_event *event)
85{ 91{
86 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 92 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
87 93
@@ -95,7 +101,7 @@ static void intel_pmu_lbr_disable(struct perf_event *event)
95 __intel_pmu_lbr_disable(); 101 __intel_pmu_lbr_disable();
96} 102}
97 103
98static void intel_pmu_lbr_enable_all(void) 104void intel_pmu_lbr_enable_all(void)
99{ 105{
100 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 106 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
101 107
@@ -103,7 +109,7 @@ static void intel_pmu_lbr_enable_all(void)
103 __intel_pmu_lbr_enable(); 109 __intel_pmu_lbr_enable();
104} 110}
105 111
106static void intel_pmu_lbr_disable_all(void) 112void intel_pmu_lbr_disable_all(void)
107{ 113{
108 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 114 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
109 115
@@ -178,7 +184,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
178 cpuc->lbr_stack.nr = i; 184 cpuc->lbr_stack.nr = i;
179} 185}
180 186
181static void intel_pmu_lbr_read(void) 187void intel_pmu_lbr_read(void)
182{ 188{
183 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 189 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
184 190
@@ -191,7 +197,7 @@ static void intel_pmu_lbr_read(void)
191 intel_pmu_lbr_read_64(cpuc); 197 intel_pmu_lbr_read_64(cpuc);
192} 198}
193 199
194static void intel_pmu_lbr_init_core(void) 200void intel_pmu_lbr_init_core(void)
195{ 201{
196 x86_pmu.lbr_nr = 4; 202 x86_pmu.lbr_nr = 4;
197 x86_pmu.lbr_tos = 0x01c9; 203 x86_pmu.lbr_tos = 0x01c9;
@@ -199,7 +205,7 @@ static void intel_pmu_lbr_init_core(void)
199 x86_pmu.lbr_to = 0x60; 205 x86_pmu.lbr_to = 0x60;
200} 206}
201 207
202static void intel_pmu_lbr_init_nhm(void) 208void intel_pmu_lbr_init_nhm(void)
203{ 209{
204 x86_pmu.lbr_nr = 16; 210 x86_pmu.lbr_nr = 16;
205 x86_pmu.lbr_tos = 0x01c9; 211 x86_pmu.lbr_tos = 0x01c9;
@@ -207,12 +213,10 @@ static void intel_pmu_lbr_init_nhm(void)
207 x86_pmu.lbr_to = 0x6c0; 213 x86_pmu.lbr_to = 0x6c0;
208} 214}
209 215
210static void intel_pmu_lbr_init_atom(void) 216void intel_pmu_lbr_init_atom(void)
211{ 217{
212 x86_pmu.lbr_nr = 8; 218 x86_pmu.lbr_nr = 8;
213 x86_pmu.lbr_tos = 0x01c9; 219 x86_pmu.lbr_tos = 0x01c9;
214 x86_pmu.lbr_from = 0x40; 220 x86_pmu.lbr_from = 0x40;
215 x86_pmu.lbr_to = 0x60; 221 x86_pmu.lbr_to = 0x60;
216} 222}
217
218#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7809d2bcb20..492bf1358a7 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -7,9 +7,13 @@
7 * For licencing details see kernel-base/COPYING 7 * For licencing details see kernel-base/COPYING
8 */ 8 */
9 9
10#ifdef CONFIG_CPU_SUP_INTEL 10#include <linux/perf_event.h>
11 11
12#include <asm/perf_event_p4.h> 12#include <asm/perf_event_p4.h>
13#include <asm/hardirq.h>
14#include <asm/apic.h>
15
16#include "perf_event.h"
13 17
14#define P4_CNTR_LIMIT 3 18#define P4_CNTR_LIMIT 3
15/* 19/*
@@ -1303,7 +1307,7 @@ static __initconst const struct x86_pmu p4_pmu = {
1303 .perfctr_second_write = 1, 1307 .perfctr_second_write = 1,
1304}; 1308};
1305 1309
1306static __init int p4_pmu_init(void) 1310__init int p4_pmu_init(void)
1307{ 1311{
1308 unsigned int low, high; 1312 unsigned int low, high;
1309 1313
@@ -1326,5 +1330,3 @@ static __init int p4_pmu_init(void)
1326 1330
1327 return 0; 1331 return 0;
1328} 1332}
1329
1330#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 20c097e3386..c7181befecd 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -1,4 +1,7 @@
1#ifdef CONFIG_CPU_SUP_INTEL 1#include <linux/perf_event.h>
2#include <linux/types.h>
3
4#include "perf_event.h"
2 5
3/* 6/*
4 * Not sure about some of these 7 * Not sure about some of these
@@ -114,7 +117,7 @@ static __initconst const struct x86_pmu p6_pmu = {
114 .event_constraints = p6_event_constraints, 117 .event_constraints = p6_event_constraints,
115}; 118};
116 119
117static __init int p6_pmu_init(void) 120__init int p6_pmu_init(void)
118{ 121{
119 switch (boot_cpu_data.x86_model) { 122 switch (boot_cpu_data.x86_model) {
120 case 1: 123 case 1:
@@ -138,5 +141,3 @@ static __init int p6_pmu_init(void)
138 141
139 return 0; 142 return 0;
140} 143}
141
142#endif /* CONFIG_CPU_SUP_INTEL */