author    Linus Torvalds <torvalds@linux-foundation.org>  2012-07-22 14:10:36 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-07-22 14:10:36 -0400
commit    2eafeb6a415864bc4c59df79151cf40f6ac74b9e (patch)
tree      331ee730275276aebbda5dd278a97c941067d5fd /arch
parent    16d286e656250859946786de0df0fb01f8f241bc (diff)
parent    6e0f17be0361444862637e8986c8c1a3b3f8dcf8 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf events changes from Ingo Molnar:
 "- kernel side:

    - Intel uncore PMU support for Nehalem and Sandy Bridge CPUs, we
      support both the events available via the MSR and via the PCI
      access space.

    - various uprobes cleanups and restructurings

    - PMU driver quirks by microcode version and required x86 microcode
      loader cleanups/robustization

    - various tracing robustness updates

    - static keys: remove obsolete static_branch()

  - tooling side:

    - GTK browser improvements

    - perf report browser: support screenshots to file

    - more automated tests

    - perf kvm improvements

    - perf bench refinements

    - build environment improvements

    - pipe mode improvements

    - libtraceevent updates, we have now hopefully merged most bits with
      the out of tree forked code base

  ... and many other goodies."

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (138 commits)
  tracing: Check for allocation failure in __tracing_open()
  perf/x86: Fix intel_perfmon_event_map formatting
  jump label: Remove static_branch()
  tracepoint: Use static_key_false(), since static_branch() is deprecated
  perf/x86: Uncore filter support for SandyBridge-EP
  perf/x86: Detect number of instances of uncore CBox
  perf/x86: Fix event constraint for SandyBridge-EP C-Box
  perf/x86: Use 0xff as pseudo code for fixed uncore event
  perf/x86: Save a few bytes in 'struct x86_pmu'
  perf/x86: Add a microcode revision check for SNB-PEBS
  perf/x86: Improve debug output in check_hw_exists()
  perf/x86/amd: Unify AMD's generic and family 15h pmus
  perf/x86: Move Intel specific code to intel_pmu_init()
  perf/x86: Rename Intel specific macros
  perf/x86: Fix USER/KERNEL tagging of samples
  perf tools: Split event symbols arrays to hw and sw parts
  perf tools: Split out PE_VALUE_SYM parsing token to SW and HW tokens
  perf tools: Add empty rule for new line in event syntax parsing
  perf test: Use ARRAY_SIZE in parse events tests
  tools lib traceevent: Cleanup realloc use
  ...
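The "static keys: remove obsolete static_branch()" item above is the jump-label API cleanup; the shortlog's "tracepoint: Use static_key_false()" commit is the in-tree user being converted. As a rough illustration only (my_key, my_hot_path() and the enable/disable helpers below are invented names, not code from this pull), the replacement pattern looks like this:

	#include <linux/jump_label.h>

	static struct static_key my_key = STATIC_KEY_INIT_FALSE;

	static void do_slow_path(void) { /* rarely-enabled extra work */ }

	static inline void my_hot_path(void)
	{
		/* old, now-removed form:  if (static_branch(&my_key)) ... */
		if (static_key_false(&my_key))	/* patched jump, default disabled */
			do_slow_path();
	}

	/* flipping the key patches the branch sites at runtime */
	void my_feature_enable(void)  { static_key_slow_inc(&my_key); }
	void my_feature_disable(void) { static_key_slow_dec(&my_key); }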
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h                |    4
-rw-r--r--  arch/x86/include/asm/msr.h                     |   46
-rw-r--r--  arch/x86/include/asm/paravirt.h                |   41
-rw-r--r--  arch/x86/include/asm/paravirt_types.h          |    2
-rw-r--r--  arch/x86/include/asm/perf_event.h              |   22
-rw-r--r--  arch/x86/include/asm/uprobes.h                 |    2
-rw-r--r--  arch/x86/kernel/cpu/Makefile                   |    4
-rw-r--r--  arch/x86/kernel/cpu/amd.c                      |   39
-rw-r--r--  arch/x86/kernel/cpu/common.c                   |    2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c               |  111
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h               |   26
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c           |  103
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c         |  122
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c      |   12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c  | 1850
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h  |  424
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c            |   16
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c            |    4
-rw-r--r--  arch/x86/kernel/microcode_core.c               |   64
-rw-r--r--  arch/x86/kernel/paravirt.c                     |    2
-rw-r--r--  arch/x86/kernel/process_64.c                   |    4
-rw-r--r--  arch/x86/kernel/uprobes.c                      |    3
-rw-r--r--  arch/x86/kvm/pmu.c                             |   22
-rw-r--r--  arch/x86/kvm/trace.h                           |   12
-rw-r--r--  arch/x86/lib/msr-reg-export.c                  |    4
-rw-r--r--  arch/x86/lib/msr-reg.S                         |   10
-rw-r--r--  arch/x86/oprofile/op_model_amd.c               |    4
-rw-r--r--  arch/x86/vdso/vdso32-setup.c                   |    6
-rw-r--r--  arch/x86/xen/enlighten.c                       |    2
29 files changed, 2642 insertions, 321 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index db7c1f2709a2..2da88c0cda14 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -313,8 +313,8 @@ struct kvm_pmu {
 	u64 counter_bitmask[2];
 	u64 global_ctrl_mask;
 	u8 version;
-	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
-	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
 };
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274cd..813ed103f45e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 
 extern unsigned long long native_read_tsc(void);
 
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
 
 static __always_inline unsigned long long __native_read_tsc(void)
 {
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	return err;
 }
 
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-	u32 gprs[8] = { 0 };
-	int err;
-
-	gprs[1] = msr;
-	gprs[7] = 0x9c5a203a;
-
-	err = native_rdmsr_safe_regs(gprs);
-
-	*p = gprs[0] | ((u64)gprs[2] << 32);
-
-	return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-	u32 gprs[8] = { 0 };
-
-	gprs[0] = (u32)val;
-	gprs[1] = msr;
-	gprs[2] = val >> 32;
-	gprs[7] = 0x9c5a203a;
-
-	return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
-	return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
-	return native_wrmsr_safe_regs(regs);
-}
-
 #define rdtscl(low)						\
 	((low) = (u32)__native_read_tsc())
 
@@ -237,6 +200,8 @@ do { \
 	(high) = (u32)(_l >> 32);				\
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
 #define rdtscp(low, high, aux)					\
 do {								\
 	unsigned long long _val = native_read_tscp(&(aux));	\
@@ -248,8 +213,7 @@ do { \
 
 #endif	/* !CONFIG_PARAVIRT */
 
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),		\
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),		\
 					     (u32)((val) >> 32))
 
 #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
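For context, a minimal sketch of how callers use the renamed helper: the *_safe() variants return 0 on success and a non-zero error if the MSR access faulted, so an MSR that may not exist can be probed without an oops. This mirrors the arch/x86/kernel/cpu/amd.c hunk further down; example_quirk() and MY_MSR are placeholder names, not part of the patch, and <asm/msr.h> is assumed to be included.

	#define MY_MSR	0xc0011005	/* placeholder model-specific register */

	static void example_quirk(void)
	{
		u64 val;

		if (!rdmsrl_safe(MY_MSR, &val)) {	/* non-zero means the read #GP'd */
			val |= 1ULL << 54;
			wrmsrl_safe(MY_MSR, val);	/* was: checking_wrmsrl(MY_MSR, val) */
		}
	}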
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6cbbabf52707..0b47ddb6f00b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
 
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
-	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
-	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2)			\
 do {						\
@@ -176,9 +166,6 @@ do { \
 	_err;					\
 })
 
-#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)
-
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-	u32 gprs[8] = { 0 };
-	int err;
-
-	gprs[1] = msr;
-	gprs[7] = 0x9c5a203a;
-
-	err = paravirt_rdmsr_regs(gprs);
-
-	*p = gprs[0] | ((u64)gprs[2] << 32);
-
-	return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-	u32 gprs[8] = { 0 };
-
-	gprs[0] = (u32)val;
-	gprs[1] = msr;
-	gprs[2] = val >> 32;
-	gprs[7] = 0x9c5a203a;
-
-	return paravirt_wrmsr_regs(gprs);
-}
 
 static inline u64 paravirt_read_tsc(void)
 {
@@ -252,6 +213,8 @@ do { \
 	high = _l >> 32;			\
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
 	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4987ee..8613cbb7ba41 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
 	u64 (*read_msr)(unsigned int msr, int *err);
-	int (*rdmsr_regs)(u32 *regs);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-	int (*wrmsr_regs)(u32 *regs);
 
 	u64 (*read_tsc)(void);
 	u64 (*read_pmc)(int counter);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 588f52ea810e..c78f14a0df00 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,11 +5,10 @@
  * Performance event hw details:
  */
 
-#define X86_PMC_MAX_GENERIC	32
-#define X86_PMC_MAX_FIXED	3
+#define INTEL_PMC_MAX_GENERIC	32
+#define INTEL_PMC_MAX_FIXED	3
+#define INTEL_PMC_IDX_FIXED	32
 
-#define X86_PMC_IDX_GENERIC	0
-#define X86_PMC_IDX_FIXED	32
 #define X86_PMC_IDX_MAX		64
 
 #define MSR_ARCH_PERFMON_PERFCTR0	0xc1
@@ -48,8 +47,7 @@
 	(X86_RAW_EVENT_MASK | \
 	 AMD64_EVENTSEL_EVENT)
 #define AMD64_NUM_COUNTERS		4
-#define AMD64_NUM_COUNTERS_F15H		6
-#define AMD64_NUM_COUNTERS_MAX		AMD64_NUM_COUNTERS_F15H
+#define AMD64_NUM_COUNTERS_CORE		6
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
@@ -121,16 +119,16 @@ struct x86_pmu_capability {
 
 /* Instr_Retired.Any: */
 #define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)
 
 /* CPU_CLK_Unhalted.Core: */
 #define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)
 
 /* CPU_CLK_Unhalted.Ref: */
 #define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
 
 /*
  * We model BTS tracing as another fixed-mode PMC.
@@ -139,7 +137,7 @@ struct x86_pmu_capability {
  * values are used by actual fixed events and higher values are used
  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
  */
-#define X86_PMC_IDX_FIXED_BTS	(X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS	(INTEL_PMC_IDX_FIXED + 16)
 
 /*
  * IBS cpuid feature detection
@@ -234,6 +232,7 @@ struct perf_guest_switch_msr {
 
 extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
 extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
+extern void perf_check_microcode(void);
 #else
 static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
 {
@@ -247,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 }
 
 static inline void perf_events_lapic_init(void)	{ }
+static inline void perf_check_microcode(void) { }
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 1e9bed14f7ae..f3971bbcd1de 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -48,7 +48,7 @@ struct arch_uprobe_task {
 #endif
 };
 
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
 extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6ab6aa2fdfdd..bac4c3804cc7 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -32,7 +32,9 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
 obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_p4.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_uncore.o
 endif
 
 obj-$(CONFIG_X86_MCE)			+= mcheck/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 146bb6218eec..9d92e19039f0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -19,6 +19,39 @@
 
 #include "cpu.h"
 
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+	u32 gprs[8] = { 0 };
+	int err;
+
+	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+	gprs[1] = msr;
+	gprs[7] = 0x9c5a203a;
+
+	err = rdmsr_safe_regs(gprs);
+
+	*p = gprs[0] | ((u64)gprs[2] << 32);
+
+	return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+	u32 gprs[8] = { 0 };
+
+	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+	gprs[0] = (u32)val;
+	gprs[1] = msr;
+	gprs[2] = val >> 32;
+	gprs[7] = 0x9c5a203a;
+
+	return wrmsr_safe_regs(gprs);
+}
+
 #ifdef CONFIG_X86_32
 /*
  * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -586,9 +619,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 			u64 val;
 
-			if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+			if (!rdmsrl_safe(0xc0011005, &val)) {
 				val |= 1ULL << 54;
-				wrmsrl_amd_safe(0xc0011005, val);
+				wrmsrl_safe(0xc0011005, val);
 				rdmsrl(0xc0011005, val);
 				if (val & (1ULL << 54)) {
 					set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -679,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
 		if (err == 0) {
 			mask |= (1 << 10);
-			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
 		}
 	}
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b9333b429ba..5bbc082c47ad 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void)
 		index_max = msr_range_array[i].max;
 
 		for (index = index_min; index < index_max; index++) {
-			if (rdmsrl_amd_safe(index, &val))
+			if (rdmsrl_safe(index, &val))
 				continue;
 			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
 		}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c4706cf9c011..29557aa06dda 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -35,17 +35,6 @@
35 35
36#include "perf_event.h" 36#include "perf_event.h"
37 37
38#if 0
39#undef wrmsrl
40#define wrmsrl(msr, val) \
41do { \
42 trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
43 (unsigned long)(val)); \
44 native_write_msr((msr), (u32)((u64)(val)), \
45 (u32)((u64)(val) >> 32)); \
46} while (0)
47#endif
48
49struct x86_pmu x86_pmu __read_mostly; 38struct x86_pmu x86_pmu __read_mostly;
50 39
51DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { 40DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -74,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
74 int idx = hwc->idx; 63 int idx = hwc->idx;
75 s64 delta; 64 s64 delta;
76 65
77 if (idx == X86_PMC_IDX_FIXED_BTS) 66 if (idx == INTEL_PMC_IDX_FIXED_BTS)
78 return 0; 67 return 0;
79 68
80 /* 69 /*
@@ -86,7 +75,7 @@ u64 x86_perf_event_update(struct perf_event *event)
86 */ 75 */
87again: 76again:
88 prev_raw_count = local64_read(&hwc->prev_count); 77 prev_raw_count = local64_read(&hwc->prev_count);
89 rdmsrl(hwc->event_base, new_raw_count); 78 rdpmcl(hwc->event_base_rdpmc, new_raw_count);
90 79
91 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, 80 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
92 new_raw_count) != prev_raw_count) 81 new_raw_count) != prev_raw_count)
@@ -189,7 +178,7 @@ static void release_pmc_hardware(void) {}
189 178
190static bool check_hw_exists(void) 179static bool check_hw_exists(void)
191{ 180{
192 u64 val, val_new = 0; 181 u64 val, val_new = ~0;
193 int i, reg, ret = 0; 182 int i, reg, ret = 0;
194 183
195 /* 184 /*
@@ -222,8 +211,9 @@ static bool check_hw_exists(void)
222 * that don't trap on the MSR access and always return 0s. 211 * that don't trap on the MSR access and always return 0s.
223 */ 212 */
224 val = 0xabcdUL; 213 val = 0xabcdUL;
225 ret = checking_wrmsrl(x86_pmu_event_addr(0), val); 214 reg = x86_pmu_event_addr(0);
226 ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new); 215 ret = wrmsrl_safe(reg, val);
216 ret |= rdmsrl_safe(reg, &val_new);
227 if (ret || val != val_new) 217 if (ret || val != val_new)
228 goto msr_fail; 218 goto msr_fail;
229 219
@@ -240,6 +230,7 @@ bios_fail:
240 230
241msr_fail: 231msr_fail:
242 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); 232 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
233 printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
243 234
244 return false; 235 return false;
245} 236}
@@ -388,7 +379,7 @@ int x86_pmu_hw_config(struct perf_event *event)
388 int precise = 0; 379 int precise = 0;
389 380
390 /* Support for constant skid */ 381 /* Support for constant skid */
391 if (x86_pmu.pebs_active) { 382 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
392 precise++; 383 precise++;
393 384
394 /* Support for IP fixup */ 385 /* Support for IP fixup */
@@ -637,8 +628,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
637 c = sched->constraints[sched->state.event]; 628 c = sched->constraints[sched->state.event];
638 629
639 /* Prefer fixed purpose counters */ 630 /* Prefer fixed purpose counters */
640 if (x86_pmu.num_counters_fixed) { 631 if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
641 idx = X86_PMC_IDX_FIXED; 632 idx = INTEL_PMC_IDX_FIXED;
642 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { 633 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
643 if (!__test_and_set_bit(idx, sched->state.used)) 634 if (!__test_and_set_bit(idx, sched->state.used))
644 goto done; 635 goto done;
@@ -646,7 +637,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
646 } 637 }
647 /* Grab the first unused counter starting with idx */ 638 /* Grab the first unused counter starting with idx */
648 idx = sched->state.counter; 639 idx = sched->state.counter;
649 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) { 640 for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
650 if (!__test_and_set_bit(idx, sched->state.used)) 641 if (!__test_and_set_bit(idx, sched->state.used))
651 goto done; 642 goto done;
652 } 643 }
@@ -704,8 +695,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
704/* 695/*
705 * Assign a counter for each event. 696 * Assign a counter for each event.
706 */ 697 */
707static int perf_assign_events(struct event_constraint **constraints, int n, 698int perf_assign_events(struct event_constraint **constraints, int n,
708 int wmin, int wmax, int *assign) 699 int wmin, int wmax, int *assign)
709{ 700{
710 struct perf_sched sched; 701 struct perf_sched sched;
711 702
@@ -824,15 +815,17 @@ static inline void x86_assign_hw_event(struct perf_event *event,
824 hwc->last_cpu = smp_processor_id(); 815 hwc->last_cpu = smp_processor_id();
825 hwc->last_tag = ++cpuc->tags[i]; 816 hwc->last_tag = ++cpuc->tags[i];
826 817
827 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) { 818 if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
828 hwc->config_base = 0; 819 hwc->config_base = 0;
829 hwc->event_base = 0; 820 hwc->event_base = 0;
830 } else if (hwc->idx >= X86_PMC_IDX_FIXED) { 821 } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
831 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; 822 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
832 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED); 823 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
824 hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
833 } else { 825 } else {
834 hwc->config_base = x86_pmu_config_addr(hwc->idx); 826 hwc->config_base = x86_pmu_config_addr(hwc->idx);
835 hwc->event_base = x86_pmu_event_addr(hwc->idx); 827 hwc->event_base = x86_pmu_event_addr(hwc->idx);
828 hwc->event_base_rdpmc = hwc->idx;
836 } 829 }
837} 830}
838 831
@@ -930,7 +923,7 @@ int x86_perf_event_set_period(struct perf_event *event)
930 s64 period = hwc->sample_period; 923 s64 period = hwc->sample_period;
931 int ret = 0, idx = hwc->idx; 924 int ret = 0, idx = hwc->idx;
932 925
933 if (idx == X86_PMC_IDX_FIXED_BTS) 926 if (idx == INTEL_PMC_IDX_FIXED_BTS)
934 return 0; 927 return 0;
935 928
936 /* 929 /*
@@ -1316,7 +1309,6 @@ static struct attribute_group x86_pmu_format_group = {
1316static int __init init_hw_perf_events(void) 1309static int __init init_hw_perf_events(void)
1317{ 1310{
1318 struct x86_pmu_quirk *quirk; 1311 struct x86_pmu_quirk *quirk;
1319 struct event_constraint *c;
1320 int err; 1312 int err;
1321 1313
1322 pr_info("Performance Events: "); 1314 pr_info("Performance Events: ");
@@ -1347,21 +1339,8 @@ static int __init init_hw_perf_events(void)
1347 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) 1339 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1348 quirk->func(); 1340 quirk->func();
1349 1341
1350 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { 1342 if (!x86_pmu.intel_ctrl)
1351 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", 1343 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1352 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1353 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
1354 }
1355 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1356
1357 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
1358 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1359 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1360 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
1361 }
1362
1363 x86_pmu.intel_ctrl |=
1364 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
1365 1344
1366 perf_events_lapic_init(); 1345 perf_events_lapic_init();
1367 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI"); 1346 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1370,22 +1349,6 @@ static int __init init_hw_perf_events(void)
1370 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 1349 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1371 0, x86_pmu.num_counters, 0); 1350 0, x86_pmu.num_counters, 0);
1372 1351
1373 if (x86_pmu.event_constraints) {
1374 /*
1375 * event on fixed counter2 (REF_CYCLES) only works on this
1376 * counter, so do not extend mask to generic counters
1377 */
1378 for_each_event_constraint(c, x86_pmu.event_constraints) {
1379 if (c->cmask != X86_RAW_EVENT_MASK
1380 || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
1381 continue;
1382 }
1383
1384 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1385 c->weight += x86_pmu.num_counters;
1386 }
1387 }
1388
1389 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1352 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1390 x86_pmu_format_group.attrs = x86_pmu.format_attrs; 1353 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1391 1354
@@ -1620,8 +1583,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
1620 if (!x86_pmu.attr_rdpmc) 1583 if (!x86_pmu.attr_rdpmc)
1621 return 0; 1584 return 0;
1622 1585
1623 if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) { 1586 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
1624 idx -= X86_PMC_IDX_FIXED; 1587 idx -= INTEL_PMC_IDX_FIXED;
1625 idx |= 1 << 30; 1588 idx |= 1 << 30;
1626 } 1589 }
1627 1590
@@ -1649,7 +1612,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
1649 struct device_attribute *attr, 1612 struct device_attribute *attr,
1650 const char *buf, size_t count) 1613 const char *buf, size_t count)
1651{ 1614{
1652 unsigned long val = simple_strtoul(buf, NULL, 0); 1615 unsigned long val;
1616 ssize_t ret;
1617
1618 ret = kstrtoul(buf, 0, &val);
1619 if (ret)
1620 return ret;
1653 1621
1654 if (!!val != !!x86_pmu.attr_rdpmc) { 1622 if (!!val != !!x86_pmu.attr_rdpmc) {
1655 x86_pmu.attr_rdpmc = !!val; 1623 x86_pmu.attr_rdpmc = !!val;
@@ -1682,13 +1650,20 @@ static void x86_pmu_flush_branch_stack(void)
1682 x86_pmu.flush_branch_stack(); 1650 x86_pmu.flush_branch_stack();
1683} 1651}
1684 1652
1653void perf_check_microcode(void)
1654{
1655 if (x86_pmu.check_microcode)
1656 x86_pmu.check_microcode();
1657}
1658EXPORT_SYMBOL_GPL(perf_check_microcode);
1659
1685static struct pmu pmu = { 1660static struct pmu pmu = {
1686 .pmu_enable = x86_pmu_enable, 1661 .pmu_enable = x86_pmu_enable,
1687 .pmu_disable = x86_pmu_disable, 1662 .pmu_disable = x86_pmu_disable,
1688 1663
1689 .attr_groups = x86_pmu_attr_groups, 1664 .attr_groups = x86_pmu_attr_groups,
1690 1665
1691 .event_init = x86_pmu_event_init, 1666 .event_init = x86_pmu_event_init,
1692 1667
1693 .add = x86_pmu_add, 1668 .add = x86_pmu_add,
1694 .del = x86_pmu_del, 1669 .del = x86_pmu_del,
@@ -1696,11 +1671,11 @@ static struct pmu pmu = {
1696 .stop = x86_pmu_stop, 1671 .stop = x86_pmu_stop,
1697 .read = x86_pmu_read, 1672 .read = x86_pmu_read,
1698 1673
1699 .start_txn = x86_pmu_start_txn, 1674 .start_txn = x86_pmu_start_txn,
1700 .cancel_txn = x86_pmu_cancel_txn, 1675 .cancel_txn = x86_pmu_cancel_txn,
1701 .commit_txn = x86_pmu_commit_txn, 1676 .commit_txn = x86_pmu_commit_txn,
1702 1677
1703 .event_idx = x86_pmu_event_idx, 1678 .event_idx = x86_pmu_event_idx,
1704 .flush_branch_stack = x86_pmu_flush_branch_stack, 1679 .flush_branch_stack = x86_pmu_flush_branch_stack,
1705}; 1680};
1706 1681
@@ -1863,7 +1838,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
1863 else 1838 else
1864 misc |= PERF_RECORD_MISC_GUEST_KERNEL; 1839 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1865 } else { 1840 } else {
1866 if (user_mode(regs)) 1841 if (!kernel_ip(regs->ip))
1867 misc |= PERF_RECORD_MISC_USER; 1842 misc |= PERF_RECORD_MISC_USER;
1868 else 1843 else
1869 misc |= PERF_RECORD_MISC_KERNEL; 1844 misc |= PERF_RECORD_MISC_KERNEL;
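The event_base_rdpmc/rdpmcl() plumbing in the hunks above records which hardware counter the RDPMC instruction should read. As a hedged aside (example_rdpmc() is an illustrative helper, not code from this series): RDPMC takes the counter index in ECX, and setting bit 30 selects the fixed-function counters, which is why a fixed counter is encoded as (idx - INTEL_PMC_IDX_FIXED) | 1<<30 above.

	static inline u64 example_rdpmc(unsigned int idx)
	{
		u32 lo, hi;

		/* reads general counter 'idx', or fixed counter (idx & ~(1u << 30))
		 * when bit 30 is set */
		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
		return lo | ((u64)hi << 32);
	}

	/* e.g. example_rdpmc(0) reads general counter 0,
	 *      example_rdpmc(1 << 30) reads fixed counter 0 (Instr_Retired.Any). */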
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 7241e2fc3c17..a15df4be151f 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -14,6 +14,18 @@
14 14
15#include <linux/perf_event.h> 15#include <linux/perf_event.h>
16 16
17#if 0
18#undef wrmsrl
19#define wrmsrl(msr, val) \
20do { \
21 unsigned int _msr = (msr); \
22 u64 _val = (val); \
23 trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr), \
24 (unsigned long long)(_val)); \
25 native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32)); \
26} while (0)
27#endif
28
17/* 29/*
18 * | NHM/WSM | SNB | 30 * | NHM/WSM | SNB |
19 * register ------------------------------- 31 * register -------------------------------
@@ -57,7 +69,7 @@ struct amd_nb {
57}; 69};
58 70
59/* The maximal number of PEBS events: */ 71/* The maximal number of PEBS events: */
60#define MAX_PEBS_EVENTS 4 72#define MAX_PEBS_EVENTS 8
61 73
62/* 74/*
63 * A debug store configuration. 75 * A debug store configuration.
@@ -349,6 +361,8 @@ struct x86_pmu {
349 void (*cpu_starting)(int cpu); 361 void (*cpu_starting)(int cpu);
350 void (*cpu_dying)(int cpu); 362 void (*cpu_dying)(int cpu);
351 void (*cpu_dead)(int cpu); 363 void (*cpu_dead)(int cpu);
364
365 void (*check_microcode)(void);
352 void (*flush_branch_stack)(void); 366 void (*flush_branch_stack)(void);
353 367
354 /* 368 /*
@@ -360,12 +374,16 @@ struct x86_pmu {
360 /* 374 /*
361 * Intel DebugStore bits 375 * Intel DebugStore bits
362 */ 376 */
363 int bts, pebs; 377 int bts :1,
364 int bts_active, pebs_active; 378 bts_active :1,
379 pebs :1,
380 pebs_active :1,
381 pebs_broken :1;
365 int pebs_record_size; 382 int pebs_record_size;
366 void (*drain_pebs)(struct pt_regs *regs); 383 void (*drain_pebs)(struct pt_regs *regs);
367 struct event_constraint *pebs_constraints; 384 struct event_constraint *pebs_constraints;
368 void (*pebs_aliases)(struct perf_event *event); 385 void (*pebs_aliases)(struct perf_event *event);
386 int max_pebs_events;
369 387
370 /* 388 /*
371 * Intel LBR 389 * Intel LBR
@@ -468,6 +486,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
468 486
469void x86_pmu_enable_all(int added); 487void x86_pmu_enable_all(int added);
470 488
489int perf_assign_events(struct event_constraint **constraints, int n,
490 int wmin, int wmax, int *assign);
471int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); 491int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
472 492
473void x86_pmu_stop(struct perf_event *event, int flags); 493void x86_pmu_stop(struct perf_event *event, int flags);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 11a4eb9131d5..4528ae7b6ec4 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -366,7 +366,7 @@ static void amd_pmu_cpu_starting(int cpu)
366 366
367 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; 367 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
368 368
369 if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15) 369 if (boot_cpu_data.x86_max_cores < 2)
370 return; 370 return;
371 371
372 nb_id = amd_get_nb_id(cpu); 372 nb_id = amd_get_nb_id(cpu);
@@ -422,35 +422,6 @@ static struct attribute *amd_format_attr[] = {
422 NULL, 422 NULL,
423}; 423};
424 424
425static __initconst const struct x86_pmu amd_pmu = {
426 .name = "AMD",
427 .handle_irq = x86_pmu_handle_irq,
428 .disable_all = x86_pmu_disable_all,
429 .enable_all = x86_pmu_enable_all,
430 .enable = x86_pmu_enable_event,
431 .disable = x86_pmu_disable_event,
432 .hw_config = amd_pmu_hw_config,
433 .schedule_events = x86_schedule_events,
434 .eventsel = MSR_K7_EVNTSEL0,
435 .perfctr = MSR_K7_PERFCTR0,
436 .event_map = amd_pmu_event_map,
437 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
438 .num_counters = AMD64_NUM_COUNTERS,
439 .cntval_bits = 48,
440 .cntval_mask = (1ULL << 48) - 1,
441 .apic = 1,
442 /* use highest bit to detect overflow */
443 .max_period = (1ULL << 47) - 1,
444 .get_event_constraints = amd_get_event_constraints,
445 .put_event_constraints = amd_put_event_constraints,
446
447 .format_attrs = amd_format_attr,
448
449 .cpu_prepare = amd_pmu_cpu_prepare,
450 .cpu_starting = amd_pmu_cpu_starting,
451 .cpu_dead = amd_pmu_cpu_dead,
452};
453
454/* AMD Family 15h */ 425/* AMD Family 15h */
455 426
456#define AMD_EVENT_TYPE_MASK 0x000000F0ULL 427#define AMD_EVENT_TYPE_MASK 0x000000F0ULL
@@ -597,8 +568,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
597 } 568 }
598} 569}
599 570
600static __initconst const struct x86_pmu amd_pmu_f15h = { 571static __initconst const struct x86_pmu amd_pmu = {
601 .name = "AMD Family 15h", 572 .name = "AMD",
602 .handle_irq = x86_pmu_handle_irq, 573 .handle_irq = x86_pmu_handle_irq,
603 .disable_all = x86_pmu_disable_all, 574 .disable_all = x86_pmu_disable_all,
604 .enable_all = x86_pmu_enable_all, 575 .enable_all = x86_pmu_enable_all,
@@ -606,50 +577,68 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
606 .disable = x86_pmu_disable_event, 577 .disable = x86_pmu_disable_event,
607 .hw_config = amd_pmu_hw_config, 578 .hw_config = amd_pmu_hw_config,
608 .schedule_events = x86_schedule_events, 579 .schedule_events = x86_schedule_events,
609 .eventsel = MSR_F15H_PERF_CTL, 580 .eventsel = MSR_K7_EVNTSEL0,
610 .perfctr = MSR_F15H_PERF_CTR, 581 .perfctr = MSR_K7_PERFCTR0,
611 .event_map = amd_pmu_event_map, 582 .event_map = amd_pmu_event_map,
612 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 583 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
613 .num_counters = AMD64_NUM_COUNTERS_F15H, 584 .num_counters = AMD64_NUM_COUNTERS,
614 .cntval_bits = 48, 585 .cntval_bits = 48,
615 .cntval_mask = (1ULL << 48) - 1, 586 .cntval_mask = (1ULL << 48) - 1,
616 .apic = 1, 587 .apic = 1,
617 /* use highest bit to detect overflow */ 588 /* use highest bit to detect overflow */
618 .max_period = (1ULL << 47) - 1, 589 .max_period = (1ULL << 47) - 1,
619 .get_event_constraints = amd_get_event_constraints_f15h, 590 .get_event_constraints = amd_get_event_constraints,
620 /* nortbridge counters not yet implemented: */
621#if 0
622 .put_event_constraints = amd_put_event_constraints, 591 .put_event_constraints = amd_put_event_constraints,
623 592
593 .format_attrs = amd_format_attr,
594
624 .cpu_prepare = amd_pmu_cpu_prepare, 595 .cpu_prepare = amd_pmu_cpu_prepare,
625 .cpu_dead = amd_pmu_cpu_dead,
626#endif
627 .cpu_starting = amd_pmu_cpu_starting, 596 .cpu_starting = amd_pmu_cpu_starting,
628 .format_attrs = amd_format_attr, 597 .cpu_dead = amd_pmu_cpu_dead,
629}; 598};
630 599
600static int setup_event_constraints(void)
601{
602 if (boot_cpu_data.x86 >= 0x15)
603 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
604 return 0;
605}
606
607static int setup_perfctr_core(void)
608{
609 if (!cpu_has_perfctr_core) {
610 WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
611 KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
612 return -ENODEV;
613 }
614
615 WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
616 KERN_ERR "hw perf events core counters need constraints handler!");
617
618 /*
619 * If core performance counter extensions exists, we must use
620 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
621 * x86_pmu_addr_offset().
622 */
623 x86_pmu.eventsel = MSR_F15H_PERF_CTL;
624 x86_pmu.perfctr = MSR_F15H_PERF_CTR;
625 x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
626
627 printk(KERN_INFO "perf: AMD core performance counters detected\n");
628
629 return 0;
630}
631
631__init int amd_pmu_init(void) 632__init int amd_pmu_init(void)
632{ 633{
633 /* Performance-monitoring supported from K7 and later: */ 634 /* Performance-monitoring supported from K7 and later: */
634 if (boot_cpu_data.x86 < 6) 635 if (boot_cpu_data.x86 < 6)
635 return -ENODEV; 636 return -ENODEV;
636 637
637 /* 638 x86_pmu = amd_pmu;
638 * If core performance counter extensions exists, it must be 639
639 * family 15h, otherwise fail. See x86_pmu_addr_offset(). 640 setup_event_constraints();
640 */ 641 setup_perfctr_core();
641 switch (boot_cpu_data.x86) {
642 case 0x15:
643 if (!cpu_has_perfctr_core)
644 return -ENODEV;
645 x86_pmu = amd_pmu_f15h;
646 break;
647 default:
648 if (cpu_has_perfctr_core)
649 return -ENODEV;
650 x86_pmu = amd_pmu;
651 break;
652 }
653 642
654 /* Events are common for all AMDs */ 643 /* Events are common for all AMDs */
655 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, 644 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 187c294bc658..1f4c8add6759 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -21,14 +21,14 @@
21 */ 21 */
22static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = 22static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
23{ 23{
24 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, 24 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
25 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 25 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
26 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, 26 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
27 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, 27 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
28 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, 28 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
29 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 29 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
30 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, 30 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
31 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */ 31 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
32}; 32};
33 33
34static struct event_constraint intel_core_event_constraints[] __read_mostly = 34static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -747,7 +747,7 @@ static void intel_pmu_disable_all(void)
747 747
748 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 748 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
749 749
750 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 750 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
751 intel_pmu_disable_bts(); 751 intel_pmu_disable_bts();
752 752
753 intel_pmu_pebs_disable_all(); 753 intel_pmu_pebs_disable_all();
@@ -763,9 +763,9 @@ static void intel_pmu_enable_all(int added)
763 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 763 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
764 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); 764 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
765 765
766 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 766 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
767 struct perf_event *event = 767 struct perf_event *event =
768 cpuc->events[X86_PMC_IDX_FIXED_BTS]; 768 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
769 769
770 if (WARN_ON_ONCE(!event)) 770 if (WARN_ON_ONCE(!event))
771 return; 771 return;
@@ -871,7 +871,7 @@ static inline void intel_pmu_ack_status(u64 ack)
871 871
872static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) 872static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
873{ 873{
874 int idx = hwc->idx - X86_PMC_IDX_FIXED; 874 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
875 u64 ctrl_val, mask; 875 u64 ctrl_val, mask;
876 876
877 mask = 0xfULL << (idx * 4); 877 mask = 0xfULL << (idx * 4);
@@ -886,7 +886,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
886 struct hw_perf_event *hwc = &event->hw; 886 struct hw_perf_event *hwc = &event->hw;
887 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 887 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
888 888
889 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { 889 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
890 intel_pmu_disable_bts(); 890 intel_pmu_disable_bts();
891 intel_pmu_drain_bts_buffer(); 891 intel_pmu_drain_bts_buffer();
892 return; 892 return;
@@ -915,7 +915,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
915 915
916static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) 916static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
917{ 917{
918 int idx = hwc->idx - X86_PMC_IDX_FIXED; 918 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
919 u64 ctrl_val, bits, mask; 919 u64 ctrl_val, bits, mask;
920 920
921 /* 921 /*
@@ -949,7 +949,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
949 struct hw_perf_event *hwc = &event->hw; 949 struct hw_perf_event *hwc = &event->hw;
950 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 950 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
951 951
952 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { 952 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
953 if (!__this_cpu_read(cpu_hw_events.enabled)) 953 if (!__this_cpu_read(cpu_hw_events.enabled))
954 return; 954 return;
955 955
@@ -1003,11 +1003,11 @@ static void intel_pmu_reset(void)
1003 printk("clearing PMU state on CPU#%d\n", smp_processor_id()); 1003 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1004 1004
1005 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1005 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1006 checking_wrmsrl(x86_pmu_config_addr(idx), 0ull); 1006 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1007 checking_wrmsrl(x86_pmu_event_addr(idx), 0ull); 1007 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
1008 } 1008 }
1009 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) 1009 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1010 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 1010 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1011 1011
1012 if (ds) 1012 if (ds)
1013 ds->bts_index = ds->bts_buffer_base; 1013 ds->bts_index = ds->bts_buffer_base;
@@ -1712,11 +1712,56 @@ static __init void intel_clovertown_quirk(void)
1712 x86_pmu.pebs_constraints = NULL; 1712 x86_pmu.pebs_constraints = NULL;
1713} 1713}
1714 1714
1715static int intel_snb_pebs_broken(int cpu)
1716{
1717 u32 rev = UINT_MAX; /* default to broken for unknown models */
1718
1719 switch (cpu_data(cpu).x86_model) {
1720 case 42: /* SNB */
1721 rev = 0x28;
1722 break;
1723
1724 case 45: /* SNB-EP */
1725 switch (cpu_data(cpu).x86_mask) {
1726 case 6: rev = 0x618; break;
1727 case 7: rev = 0x70c; break;
1728 }
1729 }
1730
1731 return (cpu_data(cpu).microcode < rev);
1732}
1733
1734static void intel_snb_check_microcode(void)
1735{
1736 int pebs_broken = 0;
1737 int cpu;
1738
1739 get_online_cpus();
1740 for_each_online_cpu(cpu) {
1741 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
1742 break;
1743 }
1744 put_online_cpus();
1745
1746 if (pebs_broken == x86_pmu.pebs_broken)
1747 return;
1748
1749 /*
1750 * Serialized by the microcode lock..
1751 */
1752 if (x86_pmu.pebs_broken) {
1753 pr_info("PEBS enabled due to microcode update\n");
1754 x86_pmu.pebs_broken = 0;
1755 } else {
1756 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
1757 x86_pmu.pebs_broken = 1;
1758 }
1759}
1760
1715static __init void intel_sandybridge_quirk(void) 1761static __init void intel_sandybridge_quirk(void)
1716{ 1762{
1717 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); 1763 x86_pmu.check_microcode = intel_snb_check_microcode;
1718 x86_pmu.pebs = 0; 1764 intel_snb_check_microcode();
1719 x86_pmu.pebs_constraints = NULL;
1720} 1765}
1721 1766
1722static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { 1767static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
@@ -1765,6 +1810,7 @@ __init int intel_pmu_init(void)
1765 union cpuid10_edx edx; 1810 union cpuid10_edx edx;
1766 union cpuid10_eax eax; 1811 union cpuid10_eax eax;
1767 union cpuid10_ebx ebx; 1812 union cpuid10_ebx ebx;
1813 struct event_constraint *c;
1768 unsigned int unused; 1814 unsigned int unused;
1769 int version; 1815 int version;
1770 1816
@@ -1800,6 +1846,8 @@ __init int intel_pmu_init(void)
1800 x86_pmu.events_maskl = ebx.full; 1846 x86_pmu.events_maskl = ebx.full;
1801 x86_pmu.events_mask_len = eax.split.mask_length; 1847 x86_pmu.events_mask_len = eax.split.mask_length;
1802 1848
1849 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
1850
1803 /* 1851 /*
1804 * Quirk: v2 perfmon does not report fixed-purpose events, so 1852 * Quirk: v2 perfmon does not report fixed-purpose events, so
1805 * assume at least 3 events: 1853 * assume at least 3 events:
@@ -1951,5 +1999,37 @@ __init int intel_pmu_init(void)
1951 } 1999 }
1952 } 2000 }
1953 2001
2002 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2003 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2004 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2005 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2006 }
2007 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2008
2009 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2010 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2011 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2012 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2013 }
2014
2015 x86_pmu.intel_ctrl |=
2016 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
2017
2018 if (x86_pmu.event_constraints) {
2019 /*
2020 * event on fixed counter2 (REF_CYCLES) only works on this
2021 * counter, so do not extend mask to generic counters
2022 */
2023 for_each_event_constraint(c, x86_pmu.event_constraints) {
2024 if (c->cmask != X86_RAW_EVENT_MASK
2025 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2026 continue;
2027 }
2028
2029 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2030 c->weight += x86_pmu.num_counters;
2031 }
2032 }
2033
1954 return 0; 2034 return 0;
1955} 2035}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 35e2192df9f4..629ae0b7ad90 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -248,7 +248,7 @@ void reserve_ds_buffers(void)
248 */ 248 */
249 249
250struct event_constraint bts_constraint = 250struct event_constraint bts_constraint =
251 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); 251 EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
252 252
253void intel_pmu_enable_bts(u64 config) 253void intel_pmu_enable_bts(u64 config)
254{ 254{
@@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void)
295 u64 to; 295 u64 to;
296 u64 flags; 296 u64 flags;
297 }; 297 };
298 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; 298 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
299 struct bts_record *at, *top; 299 struct bts_record *at, *top;
300 struct perf_output_handle handle; 300 struct perf_output_handle handle;
301 struct perf_event_header header; 301 struct perf_event_header header;
@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
620 * Should not happen, we program the threshold at 1 and do not 620 * Should not happen, we program the threshold at 1 and do not
621 * set a reset value. 621 * set a reset value.
622 */ 622 */
623 WARN_ON_ONCE(n > 1); 623 WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
624 at += n - 1; 624 at += n - 1;
625 625
626 __intel_pmu_pebs_event(event, iregs, at); 626 __intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
651 * Should not happen, we program the threshold at 1 and do not 651 * Should not happen, we program the threshold at 1 and do not
652 * set a reset value. 652 * set a reset value.
653 */ 653 */
654 WARN_ON_ONCE(n > MAX_PEBS_EVENTS); 654 WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
655 655
656 for ( ; at < top; at++) { 656 for ( ; at < top; at++) {
657 for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) { 657 for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
658 event = cpuc->events[bit]; 658 event = cpuc->events[bit];
659 if (!test_bit(bit, cpuc->active_mask)) 659 if (!test_bit(bit, cpuc->active_mask))
660 continue; 660 continue;
@@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
670 break; 670 break;
671 } 671 }
672 672
673 if (!event || bit >= MAX_PEBS_EVENTS) 673 if (!event || bit >= x86_pmu.max_pebs_events)
674 continue; 674 continue;
675 675
676 __intel_pmu_pebs_event(event, iregs, at); 676 __intel_pmu_pebs_event(event, iregs, at);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
new file mode 100644
index 000000000000..19faffc60886
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -0,0 +1,1850 @@
1#include "perf_event_intel_uncore.h"
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
4static struct intel_uncore_type **msr_uncores = empty_uncore;
5static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9static DEFINE_RAW_SPINLOCK(uncore_box_lock);
10
11/* mask of cpus that collect uncore events */
12static cpumask_t uncore_cpu_mask;
13
14/* constraint for the fixed counter */
15static struct event_constraint constraint_fixed =
16 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
17static struct event_constraint constraint_empty =
18 EVENT_CONSTRAINT(0, 0, 0);
19
20DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
21DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
22DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
23DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
24DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
25DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
26DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
27DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
28DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
29DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
30DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
31DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
32DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
33DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
34DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
35DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
36DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7");
37DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15");
38DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23");
39DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31");
40
41/* Sandy Bridge-EP uncore support */
42static struct intel_uncore_type snbep_uncore_cbox;
43static struct intel_uncore_type snbep_uncore_pcu;
44
45static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
46{
47 struct pci_dev *pdev = box->pci_dev;
48 int box_ctl = uncore_pci_box_ctl(box);
49 u32 config;
50
51 pci_read_config_dword(pdev, box_ctl, &config);
52 config |= SNBEP_PMON_BOX_CTL_FRZ;
53 pci_write_config_dword(pdev, box_ctl, config);
54}
55
56static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
57{
58 struct pci_dev *pdev = box->pci_dev;
59 int box_ctl = uncore_pci_box_ctl(box);
60 u32 config;
61
62 pci_read_config_dword(pdev, box_ctl, &config);
63 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
64 pci_write_config_dword(pdev, box_ctl, config);
65}
66
67static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
68 struct perf_event *event)
69{
70 struct pci_dev *pdev = box->pci_dev;
71 struct hw_perf_event *hwc = &event->hw;
72
73 pci_write_config_dword(pdev, hwc->config_base, hwc->config |
74 SNBEP_PMON_CTL_EN);
75}
76
77static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
78 struct perf_event *event)
79{
80 struct pci_dev *pdev = box->pci_dev;
81 struct hw_perf_event *hwc = &event->hw;
82
83 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
84}
85
86static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
87 struct perf_event *event)
88{
89 struct pci_dev *pdev = box->pci_dev;
90 struct hw_perf_event *hwc = &event->hw;
91 u64 count;
92
93 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
94 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
95 return count;
96}
97
98static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
99{
100 struct pci_dev *pdev = box->pci_dev;
101 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
102 SNBEP_PMON_BOX_CTL_INT);
103}
104
105static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
106{
107 u64 config;
108 unsigned msr;
109
110 msr = uncore_msr_box_ctl(box);
111 if (msr) {
112 rdmsrl(msr, config);
113 config |= SNBEP_PMON_BOX_CTL_FRZ;
114 wrmsrl(msr, config);
115 return;
116 }
117}
118
119static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
120{
121 u64 config;
122 unsigned msr;
123
124 msr = uncore_msr_box_ctl(box);
125 if (msr) {
126 rdmsrl(msr, config);
127 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
128 wrmsrl(msr, config);
129 return;
130 }
131}
132
133static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
134 struct perf_event *event)
135{
136 struct hw_perf_event *hwc = &event->hw;
137 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
138
139 if (reg1->idx != EXTRA_REG_NONE)
140 wrmsrl(reg1->reg, reg1->config);
141
142 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
143}
144
145static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
146 struct perf_event *event)
147{
148 struct hw_perf_event *hwc = &event->hw;
149
150 wrmsrl(hwc->config_base, hwc->config);
151}
152
153static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
154 struct perf_event *event)
155{
156 struct hw_perf_event *hwc = &event->hw;
157 u64 count;
158
159 rdmsrl(hwc->event_base, count);
160 return count;
161}
162
163static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
164{
165 unsigned msr = uncore_msr_box_ctl(box);
166 if (msr)
167 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
168}
169
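/*
 * The Cbox and PCU have a single filter register shared by all counters
 * in a box.  An event that uses the filter can only be scheduled if the
 * register is currently unused or already programmed with the same value;
 * otherwise the empty constraint is returned and the event cannot be
 * scheduled.  The ref count in the shared_regs slot tracks how many
 * scheduled events depend on the current filter value.
 */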
170static struct event_constraint *
171snbep_uncore_get_constraint(struct intel_uncore_box *box,
172 struct perf_event *event)
173{
174 struct intel_uncore_extra_reg *er;
175 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
176 unsigned long flags;
177 bool ok = false;
178
179 if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc))
180 return NULL;
181
182 er = &box->shared_regs[reg1->idx];
183 raw_spin_lock_irqsave(&er->lock, flags);
184 if (!atomic_read(&er->ref) || er->config1 == reg1->config) {
185 atomic_inc(&er->ref);
186 er->config1 = reg1->config;
187 ok = true;
188 }
189 raw_spin_unlock_irqrestore(&er->lock, flags);
190
191 if (ok) {
192 if (box->phys_id >= 0)
193 reg1->alloc = 1;
194 return NULL;
195 }
196 return &constraint_empty;
197}
198
199static void snbep_uncore_put_constraint(struct intel_uncore_box *box,
200 struct perf_event *event)
201{
202 struct intel_uncore_extra_reg *er;
203 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
204
205 if (box->phys_id < 0 || !reg1->alloc)
206 return;
207
208 er = &box->shared_regs[reg1->idx];
209 atomic_dec(&er->ref);
210 reg1->alloc = 0;
211}
212
213static int snbep_uncore_hw_config(struct intel_uncore_box *box,
214 struct perf_event *event)
215{
216 struct hw_perf_event *hwc = &event->hw;
217 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
218
219 if (box->pmu->type == &snbep_uncore_cbox) {
220 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
221 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
222 reg1->config = event->attr.config1 &
223 SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
224 } else if (box->pmu->type == &snbep_uncore_pcu) {
225 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
226 reg1->config = event->attr.config1 &
227 SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
228 } else {
229 return 0;
230 }
231 reg1->idx = 0;
232 return 0;
233}
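/*
 * The Cbox filter MSR programmed above lives at
 * SNBEP_C0_MSR_PMON_BOX_FILTER + pmu_idx * SNBEP_CBO_MSR_OFFSET; with the
 * values from the header (0xd14, 0x20) Cbox 2, for example, uses MSR 0xd54.
 */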
234
235static struct attribute *snbep_uncore_formats_attr[] = {
236 &format_attr_event.attr,
237 &format_attr_umask.attr,
238 &format_attr_edge.attr,
239 &format_attr_inv.attr,
240 &format_attr_thresh8.attr,
241 NULL,
242};
243
244static struct attribute *snbep_uncore_ubox_formats_attr[] = {
245 &format_attr_event.attr,
246 &format_attr_umask.attr,
247 &format_attr_edge.attr,
248 &format_attr_inv.attr,
249 &format_attr_thresh5.attr,
250 NULL,
251};
252
253static struct attribute *snbep_uncore_cbox_formats_attr[] = {
254 &format_attr_event.attr,
255 &format_attr_umask.attr,
256 &format_attr_edge.attr,
257 &format_attr_tid_en.attr,
258 &format_attr_inv.attr,
259 &format_attr_thresh8.attr,
260 &format_attr_filter_tid.attr,
261 &format_attr_filter_nid.attr,
262 &format_attr_filter_state.attr,
263 &format_attr_filter_opc.attr,
264 NULL,
265};
266
267static struct attribute *snbep_uncore_pcu_formats_attr[] = {
268 &format_attr_event.attr,
269 &format_attr_occ_sel.attr,
270 &format_attr_edge.attr,
271 &format_attr_inv.attr,
272 &format_attr_thresh5.attr,
273 &format_attr_occ_invert.attr,
274 &format_attr_occ_edge.attr,
275 &format_attr_filter_brand0.attr,
276 &format_attr_filter_brand1.attr,
277 &format_attr_filter_brand2.attr,
278 &format_attr_filter_brand3.attr,
279 NULL,
280};
281
282static struct uncore_event_desc snbep_uncore_imc_events[] = {
283 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
284 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
285 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
286 { /* end: all zeroes */ },
287};
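/*
 * These descriptors are exported under the pmu's "events" sysfs group, so
 * the config strings above can be used directly, e.g. (illustrative
 * invocation, exact pmu name depends on the registered instance):
 *
 *   perf stat -a -e 'uncore_imc_0/event=0x04,umask=0x03/' sleep 1
 *
 * which counts cas_count_read on one memory controller channel.
 */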
288
289static struct uncore_event_desc snbep_uncore_qpi_events[] = {
290 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
291 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
292 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
293 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
294 { /* end: all zeroes */ },
295};
296
297static struct attribute_group snbep_uncore_format_group = {
298 .name = "format",
299 .attrs = snbep_uncore_formats_attr,
300};
301
302static struct attribute_group snbep_uncore_ubox_format_group = {
303 .name = "format",
304 .attrs = snbep_uncore_ubox_formats_attr,
305};
306
307static struct attribute_group snbep_uncore_cbox_format_group = {
308 .name = "format",
309 .attrs = snbep_uncore_cbox_formats_attr,
310};
311
312static struct attribute_group snbep_uncore_pcu_format_group = {
313 .name = "format",
314 .attrs = snbep_uncore_pcu_formats_attr,
315};
316
317static struct intel_uncore_ops snbep_uncore_msr_ops = {
318 .init_box = snbep_uncore_msr_init_box,
319 .disable_box = snbep_uncore_msr_disable_box,
320 .enable_box = snbep_uncore_msr_enable_box,
321 .disable_event = snbep_uncore_msr_disable_event,
322 .enable_event = snbep_uncore_msr_enable_event,
323 .read_counter = snbep_uncore_msr_read_counter,
324 .get_constraint = snbep_uncore_get_constraint,
325 .put_constraint = snbep_uncore_put_constraint,
326 .hw_config = snbep_uncore_hw_config,
327};
328
329static struct intel_uncore_ops snbep_uncore_pci_ops = {
330 .init_box = snbep_uncore_pci_init_box,
331 .disable_box = snbep_uncore_pci_disable_box,
332 .enable_box = snbep_uncore_pci_enable_box,
333 .disable_event = snbep_uncore_pci_disable_event,
334 .enable_event = snbep_uncore_pci_enable_event,
335 .read_counter = snbep_uncore_pci_read_counter,
336};
337
338static struct event_constraint snbep_uncore_cbox_constraints[] = {
339 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
340 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
341 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
342 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
343 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
344 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
345 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
346 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
347 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
348 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
349 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
350 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
351 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
352 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
353 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
354 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
355 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
356 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
357 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
358 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
359 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
360 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
361 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
362 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
363 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
364 EVENT_CONSTRAINT_END
365};
366
367static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
368 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
369 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
370 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
371 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
372 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
373 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
374 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
375 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
376 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
377 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
378 EVENT_CONSTRAINT_END
379};
380
381static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
382 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
383 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
384 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
385 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
386 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
387 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
388 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
389 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
390 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
391 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
392 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
393 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
394 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
395 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
396 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
397 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
398 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
399 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
400 EVENT_CONSTRAINT_END
401};
402
403static struct intel_uncore_type snbep_uncore_ubox = {
404 .name = "ubox",
405 .num_counters = 2,
406 .num_boxes = 1,
407 .perf_ctr_bits = 44,
408 .fixed_ctr_bits = 48,
409 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
410 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
411 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
412 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
413 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
414 .ops = &snbep_uncore_msr_ops,
415 .format_group = &snbep_uncore_ubox_format_group,
416};
417
418static struct intel_uncore_type snbep_uncore_cbox = {
419 .name = "cbox",
420 .num_counters = 4,
421 .num_boxes = 8,
422 .perf_ctr_bits = 44,
423 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
424 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
425 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
426 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
427 .msr_offset = SNBEP_CBO_MSR_OFFSET,
428 .num_shared_regs = 1,
429 .constraints = snbep_uncore_cbox_constraints,
430 .ops = &snbep_uncore_msr_ops,
431 .format_group = &snbep_uncore_cbox_format_group,
432};
433
434static struct intel_uncore_type snbep_uncore_pcu = {
435 .name = "pcu",
436 .num_counters = 4,
437 .num_boxes = 1,
438 .perf_ctr_bits = 48,
439 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
440 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
441 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
442 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
443 .num_shared_regs = 1,
444 .ops = &snbep_uncore_msr_ops,
445 .format_group = &snbep_uncore_pcu_format_group,
446};
447
448static struct intel_uncore_type *snbep_msr_uncores[] = {
449 &snbep_uncore_ubox,
450 &snbep_uncore_cbox,
451 &snbep_uncore_pcu,
452 NULL,
453};
454
455#define SNBEP_UNCORE_PCI_COMMON_INIT() \
456 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
457 .event_ctl = SNBEP_PCI_PMON_CTL0, \
458 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
459 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
460 .ops = &snbep_uncore_pci_ops, \
461 .format_group = &snbep_uncore_format_group
462
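/*
 * SNBEP_UNCORE_PCI_COMMON_INIT() above supplies the register layout shared
 * by all SNB-EP PCI-based boxes (counters at SNBEP_PCI_PMON_CTR0, control
 * at SNBEP_PCI_PMON_CTL0, box control at SNBEP_PCI_PMON_BOX_CTL) together
 * with the common ops and format group, so each type below only declares
 * its name, counter/box counts and any extras such as fixed counters,
 * event descriptors or constraints.
 */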
463static struct intel_uncore_type snbep_uncore_ha = {
464 .name = "ha",
465 .num_counters = 4,
466 .num_boxes = 1,
467 .perf_ctr_bits = 48,
468 SNBEP_UNCORE_PCI_COMMON_INIT(),
469};
470
471static struct intel_uncore_type snbep_uncore_imc = {
472 .name = "imc",
473 .num_counters = 4,
474 .num_boxes = 4,
475 .perf_ctr_bits = 48,
476 .fixed_ctr_bits = 48,
477 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
478 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
479 .event_descs = snbep_uncore_imc_events,
480 SNBEP_UNCORE_PCI_COMMON_INIT(),
481};
482
483static struct intel_uncore_type snbep_uncore_qpi = {
484 .name = "qpi",
485 .num_counters = 4,
486 .num_boxes = 2,
487 .perf_ctr_bits = 48,
488 .event_descs = snbep_uncore_qpi_events,
489 SNBEP_UNCORE_PCI_COMMON_INIT(),
490};
491
492
493static struct intel_uncore_type snbep_uncore_r2pcie = {
494 .name = "r2pcie",
495 .num_counters = 4,
496 .num_boxes = 1,
497 .perf_ctr_bits = 44,
498 .constraints = snbep_uncore_r2pcie_constraints,
499 SNBEP_UNCORE_PCI_COMMON_INIT(),
500};
501
502static struct intel_uncore_type snbep_uncore_r3qpi = {
503 .name = "r3qpi",
504 .num_counters = 3,
505 .num_boxes = 2,
506 .perf_ctr_bits = 44,
507 .constraints = snbep_uncore_r3qpi_constraints,
508 SNBEP_UNCORE_PCI_COMMON_INIT(),
509};
510
511static struct intel_uncore_type *snbep_pci_uncores[] = {
512 &snbep_uncore_ha,
513 &snbep_uncore_imc,
514 &snbep_uncore_qpi,
515 &snbep_uncore_r2pcie,
516 &snbep_uncore_r3qpi,
517 NULL,
518};
519
520static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
521 { /* Home Agent */
522 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
523 .driver_data = (unsigned long)&snbep_uncore_ha,
524 },
525 { /* MC Channel 0 */
526 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
527 .driver_data = (unsigned long)&snbep_uncore_imc,
528 },
529 { /* MC Channel 1 */
530 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
531 .driver_data = (unsigned long)&snbep_uncore_imc,
532 },
533 { /* MC Channel 2 */
534 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
535 .driver_data = (unsigned long)&snbep_uncore_imc,
536 },
537 { /* MC Channel 3 */
538 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
539 .driver_data = (unsigned long)&snbep_uncore_imc,
540 },
541 { /* QPI Port 0 */
542 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
543 .driver_data = (unsigned long)&snbep_uncore_qpi,
544 },
545 { /* QPI Port 1 */
546 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
547 .driver_data = (unsigned long)&snbep_uncore_qpi,
548 },
549	{ /* R2PCIe */
550 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
551 .driver_data = (unsigned long)&snbep_uncore_r2pcie,
552 },
553 { /* R3QPI Link 0 */
554 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
555 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
556 },
557 { /* R3QPI Link 1 */
558 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
559 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
560 },
561 { /* end: all zeroes */ }
562};
563
564static struct pci_driver snbep_uncore_pci_driver = {
565 .name = "snbep_uncore",
566 .id_table = snbep_uncore_pci_ids,
567};
568
569/* build the pci bus to socket mapping: decode each UBOX device's Node ID
570 * registers into the physical package id behind that pci bus
571 */
572static void snbep_pci2phy_map_init(void)
573{
574 struct pci_dev *ubox_dev = NULL;
575 int i, bus, nodeid;
576 u32 config;
577
578 while (1) {
579 /* find the UBOX device */
580 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
581 PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
582 ubox_dev);
583 if (!ubox_dev)
584 break;
585 bus = ubox_dev->bus->number;
586		/* read the local Node ID register */
587 pci_read_config_dword(ubox_dev, 0x40, &config);
588 nodeid = config;
589 /* get the Node ID mapping */
590 pci_read_config_dword(ubox_dev, 0x54, &config);
591		/*
592		 * each three-bit field in the Node ID mapping register
593		 * corresponds to a particular node.
594		 */
595 for (i = 0; i < 8; i++) {
596 if (nodeid == ((config >> (3 * i)) & 0x7)) {
597 pcibus_to_physid[bus] = i;
598 break;
599 }
600 }
601	}
602 return;
603}
604/* end of Sandy Bridge-EP uncore support */
605
606
607/* Sandy Bridge uncore support */
608static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
609 struct perf_event *event)
610{
611 struct hw_perf_event *hwc = &event->hw;
612
613 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
614 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
615 else
616 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
617}
618
619static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
620 struct perf_event *event)
621{
622 wrmsrl(event->hw.config_base, 0);
623}
624
625static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
626 struct perf_event *event)
627{
628 u64 count;
629 rdmsrl(event->hw.event_base, count);
630 return count;
631}
632
633static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
634{
635 if (box->pmu->pmu_idx == 0) {
636 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
637 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
638 }
639}
640
641static struct attribute *snb_uncore_formats_attr[] = {
642 &format_attr_event.attr,
643 &format_attr_umask.attr,
644 &format_attr_edge.attr,
645 &format_attr_inv.attr,
646 &format_attr_cmask5.attr,
647 NULL,
648};
649
650static struct attribute_group snb_uncore_format_group = {
651 .name = "format",
652 .attrs = snb_uncore_formats_attr,
653};
654
655static struct intel_uncore_ops snb_uncore_msr_ops = {
656 .init_box = snb_uncore_msr_init_box,
657 .disable_event = snb_uncore_msr_disable_event,
658 .enable_event = snb_uncore_msr_enable_event,
659 .read_counter = snb_uncore_msr_read_counter,
660};
661
662static struct event_constraint snb_uncore_cbox_constraints[] = {
663 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
664 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
665 EVENT_CONSTRAINT_END
666};
667
668static struct intel_uncore_type snb_uncore_cbox = {
669 .name = "cbox",
670 .num_counters = 2,
671 .num_boxes = 4,
672 .perf_ctr_bits = 44,
673 .fixed_ctr_bits = 48,
674 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
675 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
676 .fixed_ctr = SNB_UNC_FIXED_CTR,
677 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
678 .single_fixed = 1,
679 .event_mask = SNB_UNC_RAW_EVENT_MASK,
680 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
681 .constraints = snb_uncore_cbox_constraints,
682 .ops = &snb_uncore_msr_ops,
683 .format_group = &snb_uncore_format_group,
684};
685
686static struct intel_uncore_type *snb_msr_uncores[] = {
687 &snb_uncore_cbox,
688 NULL,
689};
690/* end of Sandy Bridge uncore support */
691
692/* Nehalem uncore support */
693static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
694{
695 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
696}
697
698static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
699{
700 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
701 NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
702}
703
704static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
705 struct perf_event *event)
706{
707 struct hw_perf_event *hwc = &event->hw;
708
709 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
710 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
711 else
712 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
713}
714
715static struct attribute *nhm_uncore_formats_attr[] = {
716 &format_attr_event.attr,
717 &format_attr_umask.attr,
718 &format_attr_edge.attr,
719 &format_attr_inv.attr,
720 &format_attr_cmask8.attr,
721 NULL,
722};
723
724static struct attribute_group nhm_uncore_format_group = {
725 .name = "format",
726 .attrs = nhm_uncore_formats_attr,
727};
728
729static struct uncore_event_desc nhm_uncore_events[] = {
730 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
731 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
732 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
733 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
734 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
735 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
736 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
737 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
738 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
739 { /* end: all zeroes */ },
740};
741
742static struct intel_uncore_ops nhm_uncore_msr_ops = {
743 .disable_box = nhm_uncore_msr_disable_box,
744 .enable_box = nhm_uncore_msr_enable_box,
745 .disable_event = snb_uncore_msr_disable_event,
746 .enable_event = nhm_uncore_msr_enable_event,
747 .read_counter = snb_uncore_msr_read_counter,
748};
749
750static struct intel_uncore_type nhm_uncore = {
751 .name = "",
752 .num_counters = 8,
753 .num_boxes = 1,
754 .perf_ctr_bits = 48,
755 .fixed_ctr_bits = 48,
756 .event_ctl = NHM_UNC_PERFEVTSEL0,
757 .perf_ctr = NHM_UNC_UNCORE_PMC0,
758 .fixed_ctr = NHM_UNC_FIXED_CTR,
759 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
760 .event_mask = NHM_UNC_RAW_EVENT_MASK,
761 .event_descs = nhm_uncore_events,
762 .ops = &nhm_uncore_msr_ops,
763 .format_group = &nhm_uncore_format_group,
764};
765
766static struct intel_uncore_type *nhm_msr_uncores[] = {
767 &nhm_uncore,
768 NULL,
769};
770/* end of Nehalem uncore support */
771
772static void uncore_assign_hw_event(struct intel_uncore_box *box,
773 struct perf_event *event, int idx)
774{
775 struct hw_perf_event *hwc = &event->hw;
776
777 hwc->idx = idx;
778 hwc->last_tag = ++box->tags[idx];
779
780 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
781 hwc->event_base = uncore_fixed_ctr(box);
782 hwc->config_base = uncore_fixed_ctl(box);
783 return;
784 }
785
786 hwc->config_base = uncore_event_ctl(box, hwc->idx);
787 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
788}
789
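/*
 * Uncore counters are only perf_ctr_bits (44 or 48) wide, so the raw
 * difference is shifted up to bit 63 and back down to discard the unused
 * high bits; that way a single wrap between two reads still produces the
 * right delta.  The xchg loop below guards against the hrtimer callback
 * racing with this update.
 */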
790static void uncore_perf_event_update(struct intel_uncore_box *box,
791 struct perf_event *event)
792{
793 u64 prev_count, new_count, delta;
794 int shift;
795
796 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
797 shift = 64 - uncore_fixed_ctr_bits(box);
798 else
799 shift = 64 - uncore_perf_ctr_bits(box);
800
801 /* the hrtimer might modify the previous event value */
802again:
803 prev_count = local64_read(&event->hw.prev_count);
804 new_count = uncore_read_counter(box, event);
805 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
806 goto again;
807
808 delta = (new_count << shift) - (prev_count << shift);
809 delta >>= shift;
810
811 local64_add(delta, &event->count);
812}
813
814/*
815 * The overflow interrupt is unavailable for SandyBridge-EP and broken
816 * for SandyBridge, so we use an hrtimer to periodically poll the
817 * counters often enough that overflows are not missed.
818 */
819static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
820{
821 struct intel_uncore_box *box;
822 unsigned long flags;
823 int bit;
824
825 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
826 if (!box->n_active || box->cpu != smp_processor_id())
827 return HRTIMER_NORESTART;
828	/*
829	 * disable local interrupts to prevent uncore_pmu_event_start/stop
830	 * from interrupting the update process
831	 */
832 local_irq_save(flags);
833
834 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
835 uncore_perf_event_update(box, box->events[bit]);
836
837 local_irq_restore(flags);
838
839 hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
840 return HRTIMER_RESTART;
841}
842
843static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
844{
845 __hrtimer_start_range_ns(&box->hrtimer,
846 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
847 HRTIMER_MODE_REL_PINNED, 0);
848}
849
850static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
851{
852 hrtimer_cancel(&box->hrtimer);
853}
854
855static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
856{
857 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
858 box->hrtimer.function = uncore_pmu_hrtimer;
859}
860
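/*
 * The allocation is sized to append type->num_shared_regs
 * intel_uncore_extra_reg slots to the box (the shared_regs[] flexible
 * array at the end of struct intel_uncore_box) and is placed on the node
 * of the requesting cpu.
 */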
861struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
862 int cpu)
863{
864 struct intel_uncore_box *box;
865 int i, size;
866
867 size = sizeof(*box) + type->num_shared_regs *
868 sizeof(struct intel_uncore_extra_reg);
869
870 box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
871 if (!box)
872 return NULL;
873
874 for (i = 0; i < type->num_shared_regs; i++)
875 raw_spin_lock_init(&box->shared_regs[i].lock);
876
877 uncore_pmu_init_hrtimer(box);
878 atomic_set(&box->refcnt, 1);
879 box->cpu = -1;
880 box->phys_id = -1;
881
882 return box;
883}
884
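/*
 * The per-cpu box pointer acts as a cache: on a miss the pmu's box_list
 * (filled by uncore_pci_add) is searched for the box that belongs to this
 * cpu's physical package, a reference is taken and the pointer is cached
 * for later lookups.
 */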
885static struct intel_uncore_box *
886uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
887{
888	struct intel_uncore_box *box;
889
890 box = *per_cpu_ptr(pmu->box, cpu);
891 if (box)
892 return box;
893
894 raw_spin_lock(&uncore_box_lock);
895 list_for_each_entry(box, &pmu->box_list, list) {
896 if (box->phys_id == topology_physical_package_id(cpu)) {
897 atomic_inc(&box->refcnt);
898 *per_cpu_ptr(pmu->box, cpu) = box;
899 break;
900 }
901 }
902 raw_spin_unlock(&uncore_box_lock);
903
904 return *per_cpu_ptr(pmu->box, cpu);
905}
906
907static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
908{
909 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
910}
911
912static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
913{
914	/*
915	 * the perf core schedules events on a per-cpu basis; uncore events
916	 * are collected by one of the cpus inside a physical package.
917	 */
918 return uncore_pmu_to_box(uncore_event_to_pmu(event),
919 smp_processor_id());
920}
921
922static int uncore_collect_events(struct intel_uncore_box *box,
923 struct perf_event *leader, bool dogrp)
924{
925 struct perf_event *event;
926 int n, max_count;
927
928 max_count = box->pmu->type->num_counters;
929 if (box->pmu->type->fixed_ctl)
930 max_count++;
931
932 if (box->n_events >= max_count)
933 return -EINVAL;
934
935 n = box->n_events;
936 box->event_list[n] = leader;
937 n++;
938 if (!dogrp)
939 return n;
940
941 list_for_each_entry(event, &leader->sibling_list, group_entry) {
942 if (event->state <= PERF_EVENT_STATE_OFF)
943 continue;
944
945 if (n >= max_count)
946 return -EINVAL;
947
948 box->event_list[n] = event;
949 n++;
950 }
951 return n;
952}
953
954static struct event_constraint *
955uncore_get_event_constraint(struct intel_uncore_box *box,
956 struct perf_event *event)
957{
958 struct intel_uncore_type *type = box->pmu->type;
959 struct event_constraint *c;
960
961 if (type->ops->get_constraint) {
962 c = type->ops->get_constraint(box, event);
963 if (c)
964 return c;
965 }
966
967 if (event->hw.config == ~0ULL)
968 return &constraint_fixed;
969
970 if (type->constraints) {
971 for_each_event_constraint(c, type->constraints) {
972 if ((event->hw.config & c->cmask) == c->code)
973 return c;
974 }
975 }
976
977 return &type->unconstrainted;
978}
979
980static void uncore_put_event_constraint(struct intel_uncore_box *box,
981 struct perf_event *event)
982{
983 if (box->pmu->type->ops->put_constraint)
984 box->pmu->type->ops->put_constraint(box, event);
985}
986
987static int uncore_assign_events(struct intel_uncore_box *box,
988 int assign[], int n)
989{
990 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
991 struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
992 int i, wmin, wmax, ret = 0;
993 struct hw_perf_event *hwc;
994
995 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
996
997 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
998 c = uncore_get_event_constraint(box, box->event_list[i]);
999 constraints[i] = c;
1000 wmin = min(wmin, c->weight);
1001 wmax = max(wmax, c->weight);
1002 }
1003
1004 /* fastpath, try to reuse previous register */
1005 for (i = 0; i < n; i++) {
1006 hwc = &box->event_list[i]->hw;
1007 c = constraints[i];
1008
1009 /* never assigned */
1010 if (hwc->idx == -1)
1011 break;
1012
1013 /* constraint still honored */
1014 if (!test_bit(hwc->idx, c->idxmsk))
1015 break;
1016
1017 /* not already used */
1018 if (test_bit(hwc->idx, used_mask))
1019 break;
1020
1021 __set_bit(hwc->idx, used_mask);
1022 if (assign)
1023 assign[i] = hwc->idx;
1024 }
1025 /* slow path */
1026 if (i != n)
1027 ret = perf_assign_events(constraints, n, wmin, wmax, assign);
1028
1029 if (!assign || ret) {
1030 for (i = 0; i < n; i++)
1031 uncore_put_event_constraint(box, box->event_list[i]);
1032 }
1033 return ret ? -EINVAL : 0;
1034}
1035
1036static void uncore_pmu_event_start(struct perf_event *event, int flags)
1037{
1038 struct intel_uncore_box *box = uncore_event_to_box(event);
1039 int idx = event->hw.idx;
1040
1041 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1042 return;
1043
1044 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
1045 return;
1046
1047 event->hw.state = 0;
1048 box->events[idx] = event;
1049 box->n_active++;
1050 __set_bit(idx, box->active_mask);
1051
1052 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
1053 uncore_enable_event(box, event);
1054
1055 if (box->n_active == 1) {
1056 uncore_enable_box(box);
1057 uncore_pmu_start_hrtimer(box);
1058 }
1059}
1060
1061static void uncore_pmu_event_stop(struct perf_event *event, int flags)
1062{
1063 struct intel_uncore_box *box = uncore_event_to_box(event);
1064 struct hw_perf_event *hwc = &event->hw;
1065
1066 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
1067 uncore_disable_event(box, event);
1068 box->n_active--;
1069 box->events[hwc->idx] = NULL;
1070 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1071 hwc->state |= PERF_HES_STOPPED;
1072
1073 if (box->n_active == 0) {
1074 uncore_disable_box(box);
1075 uncore_pmu_cancel_hrtimer(box);
1076 }
1077 }
1078
1079 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1080		/*
1081		 * Drain the remaining delta count out of an event
1082		 * that we are disabling:
1083		 */
1084 uncore_perf_event_update(box, event);
1085 hwc->state |= PERF_HES_UPTODATE;
1086 }
1087}
1088
1089static int uncore_pmu_event_add(struct perf_event *event, int flags)
1090{
1091 struct intel_uncore_box *box = uncore_event_to_box(event);
1092 struct hw_perf_event *hwc = &event->hw;
1093 int assign[UNCORE_PMC_IDX_MAX];
1094 int i, n, ret;
1095
1096 if (!box)
1097 return -ENODEV;
1098
1099 ret = n = uncore_collect_events(box, event, false);
1100 if (ret < 0)
1101 return ret;
1102
1103 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1104 if (!(flags & PERF_EF_START))
1105 hwc->state |= PERF_HES_ARCH;
1106
1107 ret = uncore_assign_events(box, assign, n);
1108 if (ret)
1109 return ret;
1110
1111 /* save events moving to new counters */
1112 for (i = 0; i < box->n_events; i++) {
1113 event = box->event_list[i];
1114 hwc = &event->hw;
1115
1116 if (hwc->idx == assign[i] &&
1117 hwc->last_tag == box->tags[assign[i]])
1118 continue;
1119 /*
1120 * Ensure we don't accidentally enable a stopped
1121 * counter simply because we rescheduled.
1122 */
1123 if (hwc->state & PERF_HES_STOPPED)
1124 hwc->state |= PERF_HES_ARCH;
1125
1126 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
1127 }
1128
1129 /* reprogram moved events into new counters */
1130 for (i = 0; i < n; i++) {
1131 event = box->event_list[i];
1132 hwc = &event->hw;
1133
1134 if (hwc->idx != assign[i] ||
1135 hwc->last_tag != box->tags[assign[i]])
1136 uncore_assign_hw_event(box, event, assign[i]);
1137 else if (i < box->n_events)
1138 continue;
1139
1140 if (hwc->state & PERF_HES_ARCH)
1141 continue;
1142
1143 uncore_pmu_event_start(event, 0);
1144 }
1145 box->n_events = n;
1146
1147 return 0;
1148}
1149
1150static void uncore_pmu_event_del(struct perf_event *event, int flags)
1151{
1152 struct intel_uncore_box *box = uncore_event_to_box(event);
1153 int i;
1154
1155 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
1156
1157 for (i = 0; i < box->n_events; i++) {
1158 if (event == box->event_list[i]) {
1159 uncore_put_event_constraint(box, event);
1160
1161 while (++i < box->n_events)
1162 box->event_list[i - 1] = box->event_list[i];
1163
1164 --box->n_events;
1165 break;
1166 }
1167 }
1168
1169 event->hw.idx = -1;
1170 event->hw.last_tag = ~0ULL;
1171}
1172
1173static void uncore_pmu_event_read(struct perf_event *event)
1174{
1175 struct intel_uncore_box *box = uncore_event_to_box(event);
1176 uncore_perf_event_update(box, event);
1177}
1178
1179/*
1180 * validation ensures the group could be scheduled onto the
1181 * PMU if it were the only group available.
1182 */
1183static int uncore_validate_group(struct intel_uncore_pmu *pmu,
1184 struct perf_event *event)
1185{
1186 struct perf_event *leader = event->group_leader;
1187 struct intel_uncore_box *fake_box;
1188 int ret = -EINVAL, n;
1189
1190 fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
1191 if (!fake_box)
1192 return -ENOMEM;
1193
1194 fake_box->pmu = pmu;
1195	/*
1196	 * the event is not yet connected with its
1197	 * siblings, therefore we must first collect
1198	 * the existing siblings, then add the new event
1199	 * before we can simulate the scheduling
1200	 */
1201 n = uncore_collect_events(fake_box, leader, true);
1202 if (n < 0)
1203 goto out;
1204
1205 fake_box->n_events = n;
1206 n = uncore_collect_events(fake_box, event, false);
1207 if (n < 0)
1208 goto out;
1209
1210 fake_box->n_events = n;
1211
1212 ret = uncore_assign_events(fake_box, NULL, n);
1213out:
1214 kfree(fake_box);
1215 return ret;
1216}
1217
1218int uncore_pmu_event_init(struct perf_event *event)
1219{
1220 struct intel_uncore_pmu *pmu;
1221 struct intel_uncore_box *box;
1222 struct hw_perf_event *hwc = &event->hw;
1223 int ret;
1224
1225 if (event->attr.type != event->pmu->type)
1226 return -ENOENT;
1227
1228 pmu = uncore_event_to_pmu(event);
1229 /* no device found for this pmu */
1230 if (pmu->func_id < 0)
1231 return -ENOENT;
1232
1233	/*
1234	 * The uncore PMU measures at all privilege levels all the time,
1235	 * so it doesn't make sense to specify any exclude bits.
1236	 */
1237 if (event->attr.exclude_user || event->attr.exclude_kernel ||
1238 event->attr.exclude_hv || event->attr.exclude_idle)
1239 return -EINVAL;
1240
1241 /* Sampling not supported yet */
1242 if (hwc->sample_period)
1243 return -EINVAL;
1244
1245 /*
1246 * Place all uncore events for a particular physical package
1247 * onto a single cpu
1248 */
1249 if (event->cpu < 0)
1250 return -EINVAL;
1251 box = uncore_pmu_to_box(pmu, event->cpu);
1252 if (!box || box->cpu < 0)
1253 return -EINVAL;
1254 event->cpu = box->cpu;
1255
1256 event->hw.idx = -1;
1257 event->hw.last_tag = ~0ULL;
1258 event->hw.extra_reg.idx = EXTRA_REG_NONE;
1259
1260 if (event->attr.config == UNCORE_FIXED_EVENT) {
1261 /* no fixed counter */
1262 if (!pmu->type->fixed_ctl)
1263 return -EINVAL;
1264 /*
1265 * if there is only one fixed counter, only the first pmu
1266 * can access the fixed counter
1267 */
1268 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
1269 return -EINVAL;
1270 hwc->config = ~0ULL;
1271 } else {
1272 hwc->config = event->attr.config & pmu->type->event_mask;
1273 if (pmu->type->ops->hw_config) {
1274 ret = pmu->type->ops->hw_config(box, event);
1275 if (ret)
1276 return ret;
1277 }
1278 }
1279
1280 if (event->group_leader != event)
1281 ret = uncore_validate_group(pmu, event);
1282 else
1283 ret = 0;
1284
1285 return ret;
1286}
1287
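/*
 * Types with a single box register as "uncore_<name>" (or plain "uncore"
 * for the unnamed Nehalem type); multi-box types get an index appended,
 * e.g. the four SandyBridge Cboxes show up as uncore_cbox_0 ...
 * uncore_cbox_3.
 */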
1288static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
1289{
1290 int ret;
1291
1292 pmu->pmu = (struct pmu) {
1293 .attr_groups = pmu->type->attr_groups,
1294 .task_ctx_nr = perf_invalid_context,
1295 .event_init = uncore_pmu_event_init,
1296 .add = uncore_pmu_event_add,
1297 .del = uncore_pmu_event_del,
1298 .start = uncore_pmu_event_start,
1299 .stop = uncore_pmu_event_stop,
1300 .read = uncore_pmu_event_read,
1301 };
1302
1303 if (pmu->type->num_boxes == 1) {
1304 if (strlen(pmu->type->name) > 0)
1305 sprintf(pmu->name, "uncore_%s", pmu->type->name);
1306 else
1307 sprintf(pmu->name, "uncore");
1308 } else {
1309 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
1310 pmu->pmu_idx);
1311 }
1312
1313 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
1314 return ret;
1315}
1316
1317static void __init uncore_type_exit(struct intel_uncore_type *type)
1318{
1319 int i;
1320
1321 for (i = 0; i < type->num_boxes; i++)
1322 free_percpu(type->pmus[i].box);
1323 kfree(type->pmus);
1324 type->pmus = NULL;
1325 kfree(type->attr_groups[1]);
1326 type->attr_groups[1] = NULL;
1327}
1328
1329static void uncore_types_exit(struct intel_uncore_type **types)
1330{
1331 int i;
1332 for (i = 0; types[i]; i++)
1333 uncore_type_exit(types[i]);
1334}
1335
1336static int __init uncore_type_init(struct intel_uncore_type *type)
1337{
1338 struct intel_uncore_pmu *pmus;
1339 struct attribute_group *events_group;
1340 struct attribute **attrs;
1341 int i, j;
1342
1343 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
1344 if (!pmus)
1345 return -ENOMEM;
1346
1347 type->unconstrainted = (struct event_constraint)
1348 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
1349 0, type->num_counters, 0);
1350
1351 for (i = 0; i < type->num_boxes; i++) {
1352 pmus[i].func_id = -1;
1353 pmus[i].pmu_idx = i;
1354 pmus[i].type = type;
1355 INIT_LIST_HEAD(&pmus[i].box_list);
1356 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
1357 if (!pmus[i].box)
1358 goto fail;
1359 }
1360
1361 if (type->event_descs) {
1362 i = 0;
1363 while (type->event_descs[i].attr.attr.name)
1364 i++;
1365
1366 events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
1367 sizeof(*events_group), GFP_KERNEL);
1368 if (!events_group)
1369 goto fail;
1370
1371 attrs = (struct attribute **)(events_group + 1);
1372 events_group->name = "events";
1373 events_group->attrs = attrs;
1374
1375 for (j = 0; j < i; j++)
1376 attrs[j] = &type->event_descs[j].attr.attr;
1377
1378 type->attr_groups[1] = events_group;
1379 }
1380
1381 type->pmus = pmus;
1382 return 0;
1383fail:
1384 uncore_type_exit(type);
1385 return -ENOMEM;
1386}
1387
1388static int __init uncore_types_init(struct intel_uncore_type **types)
1389{
1390 int i, ret;
1391
1392 for (i = 0; types[i]; i++) {
1393 ret = uncore_type_init(types[i]);
1394 if (ret)
1395 goto fail;
1396 }
1397 return 0;
1398fail:
1399 while (--i >= 0)
1400 uncore_type_exit(types[i]);
1401 return ret;
1402}
1403
1404static struct pci_driver *uncore_pci_driver;
1405static bool pcidrv_registered;
1406
1407/*
1408 * add a pci uncore device
1409 */
1410static int __devinit uncore_pci_add(struct intel_uncore_type *type,
1411 struct pci_dev *pdev)
1412{
1413 struct intel_uncore_pmu *pmu;
1414 struct intel_uncore_box *box;
1415 int i, phys_id;
1416
1417 phys_id = pcibus_to_physid[pdev->bus->number];
1418 if (phys_id < 0)
1419 return -ENODEV;
1420
1421 box = uncore_alloc_box(type, 0);
1422 if (!box)
1423 return -ENOMEM;
1424
1425	/*
1426	 * for a performance monitoring unit with multiple boxes,
1427	 * each box has a different function id.
1428	 */
1429 for (i = 0; i < type->num_boxes; i++) {
1430 pmu = &type->pmus[i];
1431 if (pmu->func_id == pdev->devfn)
1432 break;
1433 if (pmu->func_id < 0) {
1434 pmu->func_id = pdev->devfn;
1435 break;
1436 }
1437 pmu = NULL;
1438 }
1439
1440 if (!pmu) {
1441 kfree(box);
1442 return -EINVAL;
1443 }
1444
1445 box->phys_id = phys_id;
1446 box->pci_dev = pdev;
1447 box->pmu = pmu;
1448 uncore_box_init(box);
1449 pci_set_drvdata(pdev, box);
1450
1451 raw_spin_lock(&uncore_box_lock);
1452 list_add_tail(&box->list, &pmu->box_list);
1453 raw_spin_unlock(&uncore_box_lock);
1454
1455 return 0;
1456}
1457
1458static void uncore_pci_remove(struct pci_dev *pdev)
1459{
1460 struct intel_uncore_box *box = pci_get_drvdata(pdev);
1461 struct intel_uncore_pmu *pmu = box->pmu;
1462 int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
1463
1464 if (WARN_ON_ONCE(phys_id != box->phys_id))
1465 return;
1466
1467 raw_spin_lock(&uncore_box_lock);
1468 list_del(&box->list);
1469 raw_spin_unlock(&uncore_box_lock);
1470
1471 for_each_possible_cpu(cpu) {
1472 if (*per_cpu_ptr(pmu->box, cpu) == box) {
1473 *per_cpu_ptr(pmu->box, cpu) = NULL;
1474 atomic_dec(&box->refcnt);
1475 }
1476 }
1477
1478 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
1479 kfree(box);
1480}
1481
1482static int __devinit uncore_pci_probe(struct pci_dev *pdev,
1483 const struct pci_device_id *id)
1484{
1485 struct intel_uncore_type *type;
1486
1487 type = (struct intel_uncore_type *)id->driver_data;
1488 return uncore_pci_add(type, pdev);
1489}
1490
1491static int __init uncore_pci_init(void)
1492{
1493 int ret;
1494
1495 switch (boot_cpu_data.x86_model) {
1496 case 45: /* Sandy Bridge-EP */
1497 pci_uncores = snbep_pci_uncores;
1498 uncore_pci_driver = &snbep_uncore_pci_driver;
1499 snbep_pci2phy_map_init();
1500 break;
1501 default:
1502 return 0;
1503 }
1504
1505 ret = uncore_types_init(pci_uncores);
1506 if (ret)
1507 return ret;
1508
1509 uncore_pci_driver->probe = uncore_pci_probe;
1510 uncore_pci_driver->remove = uncore_pci_remove;
1511
1512 ret = pci_register_driver(uncore_pci_driver);
1513 if (ret == 0)
1514 pcidrv_registered = true;
1515 else
1516 uncore_types_exit(pci_uncores);
1517
1518 return ret;
1519}
1520
1521static void __init uncore_pci_exit(void)
1522{
1523 if (pcidrv_registered) {
1524 pcidrv_registered = false;
1525 pci_unregister_driver(uncore_pci_driver);
1526 uncore_types_exit(pci_uncores);
1527 }
1528}
1529
1530static void __cpuinit uncore_cpu_dying(int cpu)
1531{
1532 struct intel_uncore_type *type;
1533 struct intel_uncore_pmu *pmu;
1534 struct intel_uncore_box *box;
1535 int i, j;
1536
1537 for (i = 0; msr_uncores[i]; i++) {
1538 type = msr_uncores[i];
1539 for (j = 0; j < type->num_boxes; j++) {
1540 pmu = &type->pmus[j];
1541 box = *per_cpu_ptr(pmu->box, cpu);
1542 *per_cpu_ptr(pmu->box, cpu) = NULL;
1543 if (box && atomic_dec_and_test(&box->refcnt))
1544 kfree(box);
1545 }
1546 }
1547}
1548
1549static int __cpuinit uncore_cpu_starting(int cpu)
1550{
1551 struct intel_uncore_type *type;
1552 struct intel_uncore_pmu *pmu;
1553 struct intel_uncore_box *box, *exist;
1554 int i, j, k, phys_id;
1555
1556 phys_id = topology_physical_package_id(cpu);
1557
1558 for (i = 0; msr_uncores[i]; i++) {
1559 type = msr_uncores[i];
1560 for (j = 0; j < type->num_boxes; j++) {
1561 pmu = &type->pmus[j];
1562 box = *per_cpu_ptr(pmu->box, cpu);
1563			/* box already set up by uncore_cpu_init()? */
1564 if (box && box->phys_id >= 0) {
1565 uncore_box_init(box);
1566 continue;
1567 }
1568
1569 for_each_online_cpu(k) {
1570 exist = *per_cpu_ptr(pmu->box, k);
1571 if (exist && exist->phys_id == phys_id) {
1572 atomic_inc(&exist->refcnt);
1573 *per_cpu_ptr(pmu->box, cpu) = exist;
1574 kfree(box);
1575 box = NULL;
1576 break;
1577 }
1578 }
1579
1580 if (box) {
1581 box->phys_id = phys_id;
1582 uncore_box_init(box);
1583 }
1584 }
1585 }
1586 return 0;
1587}
1588
1589static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
1590{
1591 struct intel_uncore_type *type;
1592 struct intel_uncore_pmu *pmu;
1593 struct intel_uncore_box *box;
1594 int i, j;
1595
1596 for (i = 0; msr_uncores[i]; i++) {
1597 type = msr_uncores[i];
1598 for (j = 0; j < type->num_boxes; j++) {
1599 pmu = &type->pmus[j];
1600 if (pmu->func_id < 0)
1601 pmu->func_id = j;
1602
1603 box = uncore_alloc_box(type, cpu);
1604 if (!box)
1605 return -ENOMEM;
1606
1607 box->pmu = pmu;
1608 box->phys_id = phys_id;
1609 *per_cpu_ptr(pmu->box, cpu) = box;
1610 }
1611 }
1612 return 0;
1613}
1614
1615static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
1616 int old_cpu, int new_cpu)
1617{
1618 struct intel_uncore_type *type;
1619 struct intel_uncore_pmu *pmu;
1620 struct intel_uncore_box *box;
1621 int i, j;
1622
1623 for (i = 0; uncores[i]; i++) {
1624 type = uncores[i];
1625 for (j = 0; j < type->num_boxes; j++) {
1626 pmu = &type->pmus[j];
1627 if (old_cpu < 0)
1628 box = uncore_pmu_to_box(pmu, new_cpu);
1629 else
1630 box = uncore_pmu_to_box(pmu, old_cpu);
1631 if (!box)
1632 continue;
1633
1634 if (old_cpu < 0) {
1635 WARN_ON_ONCE(box->cpu != -1);
1636 box->cpu = new_cpu;
1637 continue;
1638 }
1639
1640 WARN_ON_ONCE(box->cpu != old_cpu);
1641 if (new_cpu >= 0) {
1642 uncore_pmu_cancel_hrtimer(box);
1643 perf_pmu_migrate_context(&pmu->pmu,
1644 old_cpu, new_cpu);
1645 box->cpu = new_cpu;
1646 } else {
1647 box->cpu = -1;
1648 }
1649 }
1650 }
1651}
1652
1653static void __cpuinit uncore_event_exit_cpu(int cpu)
1654{
1655 int i, phys_id, target;
1656
1657	/* nothing to do if the exiting cpu is not used for collecting uncore events */
1658 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1659 return;
1660
1661 /* find a new cpu to collect uncore events */
1662 phys_id = topology_physical_package_id(cpu);
1663 target = -1;
1664 for_each_online_cpu(i) {
1665 if (i == cpu)
1666 continue;
1667 if (phys_id == topology_physical_package_id(i)) {
1668 target = i;
1669 break;
1670 }
1671 }
1672
1673 /* migrate uncore events to the new cpu */
1674 if (target >= 0)
1675 cpumask_set_cpu(target, &uncore_cpu_mask);
1676
1677 uncore_change_context(msr_uncores, cpu, target);
1678 uncore_change_context(pci_uncores, cpu, target);
1679}
1680
1681static void __cpuinit uncore_event_init_cpu(int cpu)
1682{
1683 int i, phys_id;
1684
1685 phys_id = topology_physical_package_id(cpu);
1686 for_each_cpu(i, &uncore_cpu_mask) {
1687 if (phys_id == topology_physical_package_id(i))
1688 return;
1689 }
1690
1691 cpumask_set_cpu(cpu, &uncore_cpu_mask);
1692
1693 uncore_change_context(msr_uncores, -1, cpu);
1694 uncore_change_context(pci_uncores, -1, cpu);
1695}
1696
1697static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
1698 unsigned long action, void *hcpu)
1699{
1700 unsigned int cpu = (long)hcpu;
1701
1702 /* allocate/free data structure for uncore box */
1703 switch (action & ~CPU_TASKS_FROZEN) {
1704 case CPU_UP_PREPARE:
1705 uncore_cpu_prepare(cpu, -1);
1706 break;
1707 case CPU_STARTING:
1708 uncore_cpu_starting(cpu);
1709 break;
1710 case CPU_UP_CANCELED:
1711 case CPU_DYING:
1712 uncore_cpu_dying(cpu);
1713 break;
1714 default:
1715 break;
1716 }
1717
1718 /* select the cpu that collects uncore events */
1719 switch (action & ~CPU_TASKS_FROZEN) {
1720 case CPU_DOWN_FAILED:
1721 case CPU_STARTING:
1722 uncore_event_init_cpu(cpu);
1723 break;
1724 case CPU_DOWN_PREPARE:
1725 uncore_event_exit_cpu(cpu);
1726 break;
1727 default:
1728 break;
1729 }
1730
1731 return NOTIFY_OK;
1732}
1733
1734static struct notifier_block uncore_cpu_nb __cpuinitdata = {
1735 .notifier_call = uncore_cpu_notifier,
1736 /*
1737 * to migrate uncore events, our notifier should be executed
1738 * before perf core's notifier.
1739 */
1740 .priority = CPU_PRI_PERF + 1,
1741};
1742
1743static void __init uncore_cpu_setup(void *dummy)
1744{
1745 uncore_cpu_starting(smp_processor_id());
1746}
1747
1748static int __init uncore_cpu_init(void)
1749{
1750 int ret, cpu, max_cores;
1751
1752 max_cores = boot_cpu_data.x86_max_cores;
1753 switch (boot_cpu_data.x86_model) {
1754 case 26: /* Nehalem */
1755 case 30:
1756 case 37: /* Westmere */
1757 case 44:
1758 msr_uncores = nhm_msr_uncores;
1759 break;
1760 case 42: /* Sandy Bridge */
1761 if (snb_uncore_cbox.num_boxes > max_cores)
1762 snb_uncore_cbox.num_boxes = max_cores;
1763 msr_uncores = snb_msr_uncores;
1764 break;
1765	case 45: /* Sandy Bridge-EP */
1766 if (snbep_uncore_cbox.num_boxes > max_cores)
1767 snbep_uncore_cbox.num_boxes = max_cores;
1768 msr_uncores = snbep_msr_uncores;
1769 break;
1770 default:
1771 return 0;
1772 }
1773
1774 ret = uncore_types_init(msr_uncores);
1775 if (ret)
1776 return ret;
1777
1778 get_online_cpus();
1779
1780 for_each_online_cpu(cpu) {
1781 int i, phys_id = topology_physical_package_id(cpu);
1782
1783 for_each_cpu(i, &uncore_cpu_mask) {
1784 if (phys_id == topology_physical_package_id(i)) {
1785 phys_id = -1;
1786 break;
1787 }
1788 }
1789 if (phys_id < 0)
1790 continue;
1791
1792 uncore_cpu_prepare(cpu, phys_id);
1793 uncore_event_init_cpu(cpu);
1794 }
1795 on_each_cpu(uncore_cpu_setup, NULL, 1);
1796
1797 register_cpu_notifier(&uncore_cpu_nb);
1798
1799 put_online_cpus();
1800
1801 return 0;
1802}
1803
1804static int __init uncore_pmus_register(void)
1805{
1806 struct intel_uncore_pmu *pmu;
1807 struct intel_uncore_type *type;
1808 int i, j;
1809
1810 for (i = 0; msr_uncores[i]; i++) {
1811 type = msr_uncores[i];
1812 for (j = 0; j < type->num_boxes; j++) {
1813 pmu = &type->pmus[j];
1814 uncore_pmu_register(pmu);
1815 }
1816 }
1817
1818 for (i = 0; pci_uncores[i]; i++) {
1819 type = pci_uncores[i];
1820 for (j = 0; j < type->num_boxes; j++) {
1821 pmu = &type->pmus[j];
1822 uncore_pmu_register(pmu);
1823 }
1824 }
1825
1826 return 0;
1827}
1828
1829static int __init intel_uncore_init(void)
1830{
1831 int ret;
1832
1833 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
1834 return -ENODEV;
1835
1836 ret = uncore_pci_init();
1837 if (ret)
1838 goto fail;
1839 ret = uncore_cpu_init();
1840 if (ret) {
1841 uncore_pci_exit();
1842 goto fail;
1843 }
1844
1845 uncore_pmus_register();
1846 return 0;
1847fail:
1848 return ret;
1849}
1850device_initcall(intel_uncore_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
new file mode 100644
index 000000000000..b13e9ea81def
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -0,0 +1,424 @@
1#include <linux/module.h>
2#include <linux/slab.h>
3#include <linux/pci.h>
4#include <linux/perf_event.h>
5#include "perf_event.h"
6
7#define UNCORE_PMU_NAME_LEN 32
8#define UNCORE_BOX_HASH_SIZE 8
9
10#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
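/*
 * Polling once a minute is comfortably below the wrap time of the
 * narrowest counters: assuming roughly 4 billion increments per second,
 * a 44-bit counter takes about 2^44 / 4e9 ~= 4400 seconds (over an hour)
 * to wrap.
 */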
11
12#define UNCORE_FIXED_EVENT 0xff
13#define UNCORE_PMC_IDX_MAX_GENERIC 8
14#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
15#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
16
17#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
18
19/* SNB event control */
20#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
21#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
22#define SNB_UNC_CTL_EDGE_DET (1 << 18)
23#define SNB_UNC_CTL_EN (1 << 22)
24#define SNB_UNC_CTL_INVERT (1 << 23)
25#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
26#define NHM_UNC_CTL_CMASK_MASK 0xff000000
27#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)
28
29#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
30 SNB_UNC_CTL_UMASK_MASK | \
31 SNB_UNC_CTL_EDGE_DET | \
32 SNB_UNC_CTL_INVERT | \
33 SNB_UNC_CTL_CMASK_MASK)
34
35#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
36 SNB_UNC_CTL_UMASK_MASK | \
37 SNB_UNC_CTL_EDGE_DET | \
38 SNB_UNC_CTL_INVERT | \
39 NHM_UNC_CTL_CMASK_MASK)
40
41/* SNB global control register */
42#define SNB_UNC_PERF_GLOBAL_CTL 0x391
43#define SNB_UNC_FIXED_CTR_CTRL 0x394
44#define SNB_UNC_FIXED_CTR 0x395
45
46/* SNB uncore global control */
47#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
48#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)
49
50/* SNB Cbo register */
51#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
52#define SNB_UNC_CBO_0_PER_CTR0 0x706
53#define SNB_UNC_CBO_MSR_OFFSET 0x10
54
55/* NHM global control register */
56#define NHM_UNC_PERF_GLOBAL_CTL 0x391
57#define NHM_UNC_FIXED_CTR 0x394
58#define NHM_UNC_FIXED_CTR_CTRL 0x395
59
60/* NHM uncore global control */
61#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
62#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)
63
64/* NHM uncore register */
65#define NHM_UNC_PERFEVTSEL0 0x3c0
66#define NHM_UNC_UNCORE_PMC0 0x3b0
67
68/* SNB-EP Box level control */
69#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
70#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
71#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
72#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
73#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
74 SNBEP_PMON_BOX_CTL_RST_CTRS | \
75 SNBEP_PMON_BOX_CTL_FRZ_EN)
76/* SNB-EP event control */
77#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
78#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
79#define SNBEP_PMON_CTL_RST (1 << 17)
80#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
81#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */
82#define SNBEP_PMON_CTL_EN (1 << 22)
83#define SNBEP_PMON_CTL_INVERT (1 << 23)
84#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
85#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
86 SNBEP_PMON_CTL_UMASK_MASK | \
87 SNBEP_PMON_CTL_EDGE_DET | \
88 SNBEP_PMON_CTL_INVERT | \
89 SNBEP_PMON_CTL_TRESH_MASK)
90
91/* SNB-EP Ubox event control */
92#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
93#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
94 (SNBEP_PMON_CTL_EV_SEL_MASK | \
95 SNBEP_PMON_CTL_UMASK_MASK | \
96 SNBEP_PMON_CTL_EDGE_DET | \
97 SNBEP_PMON_CTL_INVERT | \
98 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
99
100#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
101#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
102 SNBEP_CBO_PMON_CTL_TID_EN)
103
104/* SNB-EP PCU event control */
105#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
106#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
107#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
108#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
109#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
110 (SNBEP_PMON_CTL_EV_SEL_MASK | \
111 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
112 SNBEP_PMON_CTL_EDGE_DET | \
113 SNBEP_PMON_CTL_INVERT | \
114 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
115 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
116 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
117
118/* SNB-EP pci control register */
119#define SNBEP_PCI_PMON_BOX_CTL 0xf4
120#define SNBEP_PCI_PMON_CTL0 0xd8
121/* SNB-EP pci counter register */
122#define SNBEP_PCI_PMON_CTR0 0xa0
123
124/* SNB-EP home agent register */
125#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
126#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
127#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
128/* SNB-EP memory controller register */
129#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
130#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
131/* SNB-EP QPI register */
132#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
133#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
134#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
135#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
136
137/* SNB-EP Ubox register */
138#define SNBEP_U_MSR_PMON_CTR0 0xc16
139#define SNBEP_U_MSR_PMON_CTL0 0xc10
140
141#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
142#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
143
144/* SNB-EP Cbo register */
145#define SNBEP_C0_MSR_PMON_CTR0 0xd16
146#define SNBEP_C0_MSR_PMON_CTL0 0xd10
147#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
148#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
149#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
150#define SNBEP_CBO_MSR_OFFSET 0x20
151
152/* SNB-EP PCU register */
153#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
154#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
155#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
156#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
157#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
158#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
159#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
160
161struct intel_uncore_ops;
162struct intel_uncore_pmu;
163struct intel_uncore_box;
164struct uncore_event_desc;
165
166struct intel_uncore_type {
167 const char *name;
168 int num_counters;
169 int num_boxes;
170 int perf_ctr_bits;
171 int fixed_ctr_bits;
172 unsigned perf_ctr;
173 unsigned event_ctl;
174 unsigned event_mask;
175 unsigned fixed_ctr;
176 unsigned fixed_ctl;
177 unsigned box_ctl;
178 unsigned msr_offset;
179 unsigned num_shared_regs:8;
180 unsigned single_fixed:1;
181 struct event_constraint unconstrainted;
182 struct event_constraint *constraints;
183 struct intel_uncore_pmu *pmus;
184 struct intel_uncore_ops *ops;
185 struct uncore_event_desc *event_descs;
186 const struct attribute_group *attr_groups[3];
187};
188
189#define format_group attr_groups[0]
190
191struct intel_uncore_ops {
192 void (*init_box)(struct intel_uncore_box *);
193 void (*disable_box)(struct intel_uncore_box *);
194 void (*enable_box)(struct intel_uncore_box *);
195 void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
196 void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
197 u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
198 int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
199 struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
200 struct perf_event *);
201 void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
202};
203
204struct intel_uncore_pmu {
205 struct pmu pmu;
206 char name[UNCORE_PMU_NAME_LEN];
207 int pmu_idx;
208 int func_id;
209 struct intel_uncore_type *type;
210 struct intel_uncore_box ** __percpu box;
211 struct list_head box_list;
212};
213
214struct intel_uncore_extra_reg {
215 raw_spinlock_t lock;
216 u64 config1;
217 atomic_t ref;
218};
219
220struct intel_uncore_box {
221 int phys_id;
222 int n_active; /* number of active events */
223 int n_events;
224 int cpu; /* cpu to collect events */
225 unsigned long flags;
226 atomic_t refcnt;
227 struct perf_event *events[UNCORE_PMC_IDX_MAX];
228 struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
229 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
230 u64 tags[UNCORE_PMC_IDX_MAX];
231 struct pci_dev *pci_dev;
232 struct intel_uncore_pmu *pmu;
233 struct hrtimer hrtimer;
234 struct list_head list;
235 struct intel_uncore_extra_reg shared_regs[0];
236};
237
238#define UNCORE_BOX_FLAG_INITIATED 0
239
240struct uncore_event_desc {
241 struct kobj_attribute attr;
242 const char *config;
243};
244
245#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
246{ \
247 .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
248 .config = _config, \
249}
250
251#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
252static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
253 struct kobj_attribute *attr, \
254 char *page) \
255{ \
256 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
257 return sprintf(page, _format "\n"); \
258} \
259static struct kobj_attribute format_attr_##_var = \
260 __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
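/*
 * Each DEFINE_UNCORE_FORMAT_ATTR() instance becomes a read-only file in
 * the pmu's "format" sysfs group; e.g.
 * DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15") creates
 * format_attr_occ_sel, whose file prints "config:14-15" and tells
 * userspace which config bits the occ_sel field occupies.
 */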
261
262
263static ssize_t uncore_event_show(struct kobject *kobj,
264 struct kobj_attribute *attr, char *buf)
265{
266 struct uncore_event_desc *event =
267 container_of(attr, struct uncore_event_desc, attr);
268 return sprintf(buf, "%s", event->config);
269}
270
271static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
272{
273 return box->pmu->type->box_ctl;
274}
275
276static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
277{
278 return box->pmu->type->fixed_ctl;
279}
280
281static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
282{
283 return box->pmu->type->fixed_ctr;
284}
285
286static inline
287unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
288{
289 return idx * 4 + box->pmu->type->event_ctl;
290}
291
292static inline
293unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
294{
295 return idx * 8 + box->pmu->type->perf_ctr;
296}
297
298static inline
299unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
300{
301 if (!box->pmu->type->box_ctl)
302 return 0;
303 return box->pmu->type->box_ctl +
304 box->pmu->type->msr_offset * box->pmu->pmu_idx;
305}
306
307static inline
308unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
309{
310 if (!box->pmu->type->fixed_ctl)
311 return 0;
312 return box->pmu->type->fixed_ctl +
313 box->pmu->type->msr_offset * box->pmu->pmu_idx;
314}
315
316static inline
317unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
318{
319 return box->pmu->type->fixed_ctr +
320 box->pmu->type->msr_offset * box->pmu->pmu_idx;
321}
322
323static inline
324unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
325{
326 return idx + box->pmu->type->event_ctl +
327 box->pmu->type->msr_offset * box->pmu->pmu_idx;
328}
329
330static inline
331unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
332{
333 return idx + box->pmu->type->perf_ctr +
334 box->pmu->type->msr_offset * box->pmu->pmu_idx;
335}
336
337static inline
338unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
339{
340 if (box->pci_dev)
341 return uncore_pci_fixed_ctl(box);
342 else
343 return uncore_msr_fixed_ctl(box);
344}
345
346static inline
347unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
348{
349 if (box->pci_dev)
350 return uncore_pci_fixed_ctr(box);
351 else
352 return uncore_msr_fixed_ctr(box);
353}
354
355static inline
356unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
357{
358 if (box->pci_dev)
359 return uncore_pci_event_ctl(box, idx);
360 else
361 return uncore_msr_event_ctl(box, idx);
362}
363
364static inline
365unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
366{
367 if (box->pci_dev)
368 return uncore_pci_perf_ctr(box, idx);
369 else
370 return uncore_msr_perf_ctr(box, idx);
371}
372
373static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
374{
375 return box->pmu->type->perf_ctr_bits;
376}
377
378static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
379{
380 return box->pmu->type->fixed_ctr_bits;
381}
382
383static inline int uncore_num_counters(struct intel_uncore_box *box)
384{
385 return box->pmu->type->num_counters;
386}
387
388static inline void uncore_disable_box(struct intel_uncore_box *box)
389{
390 if (box->pmu->type->ops->disable_box)
391 box->pmu->type->ops->disable_box(box);
392}
393
394static inline void uncore_enable_box(struct intel_uncore_box *box)
395{
396 if (box->pmu->type->ops->enable_box)
397 box->pmu->type->ops->enable_box(box);
398}
399
400static inline void uncore_disable_event(struct intel_uncore_box *box,
401 struct perf_event *event)
402{
403 box->pmu->type->ops->disable_event(box, event);
404}
405
406static inline void uncore_enable_event(struct intel_uncore_box *box,
407 struct perf_event *event)
408{
409 box->pmu->type->ops->enable_event(box, event);
410}
411
412static inline u64 uncore_read_counter(struct intel_uncore_box *box,
413 struct perf_event *event)
414{
415 return box->pmu->type->ops->read_counter(box, event);
416}
417
418static inline void uncore_box_init(struct intel_uncore_box *box)
419{
420 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
421 if (box->pmu->type->ops->init_box)
422 box->pmu->type->ops->init_box(box);
423 }
424}
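
The MSR-side helpers above are plain base-plus-offset arithmetic: control/counter idx of box pmu_idx lives at the type's base register plus idx, plus msr_offset times the box index, while the PCI-side helpers stride by 4 bytes per control and 8 bytes per counter in config space; uncore_event_ctl() and uncore_perf_ctr() then simply pick the PCI or MSR variant depending on box->pci_dev. A stand-alone sketch of the same arithmetic, with made-up offsets (example_type and the 0xd00/0xd10/0x20 values are illustrative, not real hardware registers):

#include <stdio.h>

/* hypothetical stand-in for the relevant intel_uncore_type fields */
struct example_type {
	unsigned perf_ctr;	/* counter 0 of box 0 */
	unsigned event_ctl;	/* control 0 of box 0 */
	unsigned msr_offset;	/* distance between consecutive boxes */
};

/* mirrors uncore_msr_event_ctl(): consecutive controls are 1 MSR apart */
static unsigned msr_event_ctl(const struct example_type *t, int box, int idx)
{
	return t->event_ctl + idx + t->msr_offset * box;
}

/* mirrors uncore_msr_perf_ctr() */
static unsigned msr_perf_ctr(const struct example_type *t, int box, int idx)
{
	return t->perf_ctr + idx + t->msr_offset * box;
}

int main(void)
{
	/* made-up layout: controls at 0xd00, counters at 0xd10, boxes 0x20 apart */
	struct example_type cbox = {
		.perf_ctr   = 0xd10,
		.event_ctl  = 0xd00,
		.msr_offset = 0x20,
	};

	printf("box 2, counter 3: ctl=%#x ctr=%#x\n",
	       msr_event_ctl(&cbox, 2, 3), msr_perf_ctr(&cbox, 2, 3));
	return 0;
}
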
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 47124a73dd73..92c7e39a079f 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
895 * So at moment let leave metrics turned on forever -- it's 895 * So at moment let leave metrics turned on forever -- it's
896 * ok for now but need to be revisited! 896 * ok for now but need to be revisited!
897 * 897 *
898 * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0); 898 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
899 * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0); 899 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
900 */ 900 */
901} 901}
902 902
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
909 * state we need to clear P4_CCCR_OVF, otherwise interrupt get 909 * state we need to clear P4_CCCR_OVF, otherwise interrupt get
910 * asserted again and again 910 * asserted again and again
911 */ 911 */
912 (void)checking_wrmsrl(hwc->config_base, 912 (void)wrmsrl_safe(hwc->config_base,
913 (u64)(p4_config_unpack_cccr(hwc->config)) & 913 (u64)(p4_config_unpack_cccr(hwc->config)) &
914 ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); 914 ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
915} 915}
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
943 943
944 bind = &p4_pebs_bind_map[idx]; 944 bind = &p4_pebs_bind_map[idx];
945 945
946 (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs); 946 (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
947 (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert); 947 (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
948} 948}
949 949
950static void p4_pmu_enable_event(struct perf_event *event) 950static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
978 */ 978 */
979 p4_pmu_enable_pebs(hwc->config); 979 p4_pmu_enable_pebs(hwc->config);
980 980
981 (void)checking_wrmsrl(escr_addr, escr_conf); 981 (void)wrmsrl_safe(escr_addr, escr_conf);
982 (void)checking_wrmsrl(hwc->config_base, 982 (void)wrmsrl_safe(hwc->config_base,
983 (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); 983 (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
984} 984}
985 985
@@ -1325,7 +1325,7 @@ __init int p4_pmu_init(void)
1325 unsigned int low, high; 1325 unsigned int low, high;
1326 1326
1327 /* If we get stripped -- indexing fails */ 1327 /* If we get stripped -- indexing fails */
1328 BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); 1328 BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
1329 1329
1330 rdmsr(MSR_IA32_MISC_ENABLE, low, high); 1330 rdmsr(MSR_IA32_MISC_ENABLE, low, high);
1331 if (!(low & (1 << 7))) { 1331 if (!(low & (1 << 7))) {
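
The checking_wrmsrl() call sites above (and the similar ones in the p6, process_64 and vdso32 hunks below) move to the new wrmsrl_safe() name; the semantics are unchanged: the write traps the #GP of a missing or read-only MSR and returns a negative error instead of oopsing. A minimal usage sketch (EXAMPLE_MSR is a made-up placeholder, not a real register):

#include <asm/msr.h>
#include <linux/printk.h>

#define EXAMPLE_MSR	0x00000123	/* hypothetical index, for illustration only */

static void example_probe_msr(void)
{
	/* returns 0 on success, a negative error if the write faulted */
	if (wrmsrl_safe(EXAMPLE_MSR, 0ULL))
		pr_warn("EXAMPLE_MSR is not writable on this CPU\n");
}
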
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 32bcfc7dd230..e4dd0f7a0453 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
71 if (cpuc->enabled) 71 if (cpuc->enabled)
72 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 72 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
73 73
74 (void)checking_wrmsrl(hwc->config_base, val); 74 (void)wrmsrl_safe(hwc->config_base, val);
75} 75}
76 76
77static void p6_pmu_enable_event(struct perf_event *event) 77static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
84 if (cpuc->enabled) 84 if (cpuc->enabled)
85 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 85 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
86 86
87 (void)checking_wrmsrl(hwc->config_base, val); 87 (void)wrmsrl_safe(hwc->config_base, val);
88} 88}
89 89
90PMU_FORMAT_ATTR(event, "config:0-7" ); 90PMU_FORMAT_ATTR(event, "config:0-7" );
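
The PMU_FORMAT_ATTR(event, "config:0-7") line kept as context above is how a PMU advertises its config bit layout; the resulting files under /sys/bus/event_source/devices/<pmu>/format/ let tooling translate named fields into perf_event_attr::config bits. A sketch of the same pattern for a hypothetical PMU (the example_* names are not from this patch set):

#include <linux/perf_event.h>
#include <linux/sysfs.h>

PMU_FORMAT_ATTR(example_event, "config:0-7");
PMU_FORMAT_ATTR(example_umask, "config:8-15");

static struct attribute *example_format_attrs[] = {
	&format_attr_example_event.attr,
	&format_attr_example_umask.attr,
	NULL,
};

static struct attribute_group example_format_group = {
	.name	= "format",
	.attrs	= example_format_attrs,
};
/* assigned to a struct pmu's attr_groups before perf_pmu_register() */
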
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index fbdfc6917180..1649cf899ad6 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -87,6 +87,7 @@
87#include <asm/microcode.h> 87#include <asm/microcode.h>
88#include <asm/processor.h> 88#include <asm/processor.h>
89#include <asm/cpu_device_id.h> 89#include <asm/cpu_device_id.h>
90#include <asm/perf_event.h>
90 91
91MODULE_DESCRIPTION("Microcode Update Driver"); 92MODULE_DESCRIPTION("Microcode Update Driver");
92MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); 93MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -277,7 +278,6 @@ static int reload_for_cpu(int cpu)
277 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 278 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
278 int err = 0; 279 int err = 0;
279 280
280 mutex_lock(&microcode_mutex);
281 if (uci->valid) { 281 if (uci->valid) {
282 enum ucode_state ustate; 282 enum ucode_state ustate;
283 283
@@ -288,7 +288,6 @@ static int reload_for_cpu(int cpu)
288 if (ustate == UCODE_ERROR) 288 if (ustate == UCODE_ERROR)
289 err = -EINVAL; 289 err = -EINVAL;
290 } 290 }
291 mutex_unlock(&microcode_mutex);
292 291
293 return err; 292 return err;
294} 293}
@@ -298,19 +297,31 @@ static ssize_t reload_store(struct device *dev,
298 const char *buf, size_t size) 297 const char *buf, size_t size)
299{ 298{
300 unsigned long val; 299 unsigned long val;
301 int cpu = dev->id; 300 int cpu;
302 ssize_t ret = 0; 301 ssize_t ret = 0, tmp_ret;
303 302
304 ret = kstrtoul(buf, 0, &val); 303 ret = kstrtoul(buf, 0, &val);
305 if (ret) 304 if (ret)
306 return ret; 305 return ret;
307 306
308 if (val == 1) { 307 if (val != 1)
309 get_online_cpus(); 308 return size;
310 if (cpu_online(cpu)) 309
311 ret = reload_for_cpu(cpu); 310 get_online_cpus();
312 put_online_cpus(); 311 mutex_lock(&microcode_mutex);
312 for_each_online_cpu(cpu) {
313 tmp_ret = reload_for_cpu(cpu);
314 if (tmp_ret != 0)
315 pr_warn("Error reloading microcode on CPU %d\n", cpu);
316
317 /* save retval of the first encountered reload error */
318 if (!ret)
319 ret = tmp_ret;
313 } 320 }
321 if (!ret)
322 perf_check_microcode();
323 mutex_unlock(&microcode_mutex);
324 put_online_cpus();
314 325
315 if (!ret) 326 if (!ret)
316 ret = size; 327 ret = size;
@@ -339,7 +350,6 @@ static DEVICE_ATTR(version, 0400, version_show, NULL);
339static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL); 350static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
340 351
341static struct attribute *mc_default_attrs[] = { 352static struct attribute *mc_default_attrs[] = {
342 &dev_attr_reload.attr,
343 &dev_attr_version.attr, 353 &dev_attr_version.attr,
344 &dev_attr_processor_flags.attr, 354 &dev_attr_processor_flags.attr,
345 NULL 355 NULL
@@ -516,6 +526,16 @@ static const struct x86_cpu_id microcode_id[] = {
516MODULE_DEVICE_TABLE(x86cpu, microcode_id); 526MODULE_DEVICE_TABLE(x86cpu, microcode_id);
517#endif 527#endif
518 528
529static struct attribute *cpu_root_microcode_attrs[] = {
530 &dev_attr_reload.attr,
531 NULL
532};
533
534static struct attribute_group cpu_root_microcode_group = {
535 .name = "microcode",
536 .attrs = cpu_root_microcode_attrs,
537};
538
519static int __init microcode_init(void) 539static int __init microcode_init(void)
520{ 540{
521 struct cpuinfo_x86 *c = &cpu_data(0); 541 struct cpuinfo_x86 *c = &cpu_data(0);
@@ -540,16 +560,25 @@ static int __init microcode_init(void)
540 mutex_lock(&microcode_mutex); 560 mutex_lock(&microcode_mutex);
541 561
542 error = subsys_interface_register(&mc_cpu_interface); 562 error = subsys_interface_register(&mc_cpu_interface);
543 563 if (!error)
564 perf_check_microcode();
544 mutex_unlock(&microcode_mutex); 565 mutex_unlock(&microcode_mutex);
545 put_online_cpus(); 566 put_online_cpus();
546 567
547 if (error) 568 if (error)
548 goto out_pdev; 569 goto out_pdev;
549 570
571 error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
572 &cpu_root_microcode_group);
573
574 if (error) {
575 pr_err("Error creating microcode group!\n");
576 goto out_driver;
577 }
578
550 error = microcode_dev_init(); 579 error = microcode_dev_init();
551 if (error) 580 if (error)
552 goto out_driver; 581 goto out_ucode_group;
553 582
554 register_syscore_ops(&mc_syscore_ops); 583 register_syscore_ops(&mc_syscore_ops);
555 register_hotcpu_notifier(&mc_cpu_notifier); 584 register_hotcpu_notifier(&mc_cpu_notifier);
@@ -559,7 +588,11 @@ static int __init microcode_init(void)
559 588
560 return 0; 589 return 0;
561 590
562out_driver: 591 out_ucode_group:
592 sysfs_remove_group(&cpu_subsys.dev_root->kobj,
593 &cpu_root_microcode_group);
594
595 out_driver:
563 get_online_cpus(); 596 get_online_cpus();
564 mutex_lock(&microcode_mutex); 597 mutex_lock(&microcode_mutex);
565 598
@@ -568,7 +601,7 @@ out_driver:
568 mutex_unlock(&microcode_mutex); 601 mutex_unlock(&microcode_mutex);
569 put_online_cpus(); 602 put_online_cpus();
570 603
571out_pdev: 604 out_pdev:
572 platform_device_unregister(microcode_pdev); 605 platform_device_unregister(microcode_pdev);
573 return error; 606 return error;
574 607
@@ -584,6 +617,9 @@ static void __exit microcode_exit(void)
584 unregister_hotcpu_notifier(&mc_cpu_notifier); 617 unregister_hotcpu_notifier(&mc_cpu_notifier);
585 unregister_syscore_ops(&mc_syscore_ops); 618 unregister_syscore_ops(&mc_syscore_ops);
586 619
620 sysfs_remove_group(&cpu_subsys.dev_root->kobj,
621 &cpu_root_microcode_group);
622
587 get_online_cpus(); 623 get_online_cpus();
588 mutex_lock(&microcode_mutex); 624 mutex_lock(&microcode_mutex);
589 625
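
With the changes above, writing to the single cpu-wide reload file updates every online CPU under microcode_mutex and then lets perf re-check microcode-dependent quirks via perf_check_microcode(). A minimal user-space sketch of triggering it through the new sysfs location (error handling kept to a minimum):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* the "microcode" group is created on the cpu subsystem root above */
	int fd = open("/sys/devices/system/cpu/microcode/reload", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the kernel only acts on the value 1 */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
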
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9ce885996fd7..17fff18a1031 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -352,9 +352,7 @@ struct pv_cpu_ops pv_cpu_ops = {
352#endif 352#endif
353 .wbinvd = native_wbinvd, 353 .wbinvd = native_wbinvd,
354 .read_msr = native_read_msr_safe, 354 .read_msr = native_read_msr_safe,
355 .rdmsr_regs = native_rdmsr_safe_regs,
356 .write_msr = native_write_msr_safe, 355 .write_msr = native_write_msr_safe,
357 .wrmsr_regs = native_wrmsr_safe_regs,
358 .read_tsc = native_read_tsc, 356 .read_tsc = native_read_tsc,
359 .read_pmc = native_read_pmc, 357 .read_pmc = native_read_pmc,
360 .read_tscp = native_read_tscp, 358 .read_tscp = native_read_tscp,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61cdf7fdf099..3e215ba68766 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
466 task->thread.gs = addr; 466 task->thread.gs = addr;
467 if (doit) { 467 if (doit) {
468 load_gs_index(0); 468 load_gs_index(0);
469 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); 469 ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
470 } 470 }
471 } 471 }
472 put_cpu(); 472 put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
494 /* set the selector to 0 to not confuse 494 /* set the selector to 0 to not confuse
495 __switch_to */ 495 __switch_to */
496 loadsegment(fs, 0); 496 loadsegment(fs, 0);
497 ret = checking_wrmsrl(MSR_FS_BASE, addr); 497 ret = wrmsrl_safe(MSR_FS_BASE, addr);
498 } 498 }
499 } 499 }
500 put_cpu(); 500 put_cpu();
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index dc4e910a7d96..36fd42091fa7 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -409,9 +409,10 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
409 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. 409 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
410 * @mm: the probed address space. 410 * @mm: the probed address space.
411 * @arch_uprobe: the probepoint information. 411 * @arch_uprobe: the probepoint information.
412 * @addr: virtual address at which to install the probepoint
412 * Return 0 on success or a -ve number on error. 413 * Return 0 on success or a -ve number on error.
413 */ 414 */
414int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm) 415int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
415{ 416{
416 int ret; 417 int ret;
417 struct insn insn; 418 struct insn insn;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 2e88438ffd83..9b7ec1150ab0 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -80,10 +80,10 @@ static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
80 80
81static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx) 81static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
82{ 82{
83 if (idx < X86_PMC_IDX_FIXED) 83 if (idx < INTEL_PMC_IDX_FIXED)
84 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0); 84 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
85 else 85 else
86 return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED); 86 return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
87} 87}
88 88
89void kvm_deliver_pmi(struct kvm_vcpu *vcpu) 89void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
@@ -291,7 +291,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
291 if (pmc_is_gp(pmc)) 291 if (pmc_is_gp(pmc))
292 reprogram_gp_counter(pmc, pmc->eventsel); 292 reprogram_gp_counter(pmc, pmc->eventsel);
293 else { 293 else {
294 int fidx = idx - X86_PMC_IDX_FIXED; 294 int fidx = idx - INTEL_PMC_IDX_FIXED;
295 reprogram_fixed_counter(pmc, 295 reprogram_fixed_counter(pmc,
296 fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx); 296 fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
297 } 297 }
@@ -452,7 +452,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
452 return; 452 return;
453 453
454 pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff, 454 pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
455 X86_PMC_MAX_GENERIC); 455 INTEL_PMC_MAX_GENERIC);
456 pmu->counter_bitmask[KVM_PMC_GP] = 456 pmu->counter_bitmask[KVM_PMC_GP] =
457 ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1; 457 ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
458 bitmap_len = (entry->eax >> 24) & 0xff; 458 bitmap_len = (entry->eax >> 24) & 0xff;
@@ -462,13 +462,13 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
462 pmu->nr_arch_fixed_counters = 0; 462 pmu->nr_arch_fixed_counters = 0;
463 } else { 463 } else {
464 pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), 464 pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
465 X86_PMC_MAX_FIXED); 465 INTEL_PMC_MAX_FIXED);
466 pmu->counter_bitmask[KVM_PMC_FIXED] = 466 pmu->counter_bitmask[KVM_PMC_FIXED] =
467 ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; 467 ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
468 } 468 }
469 469
470 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | 470 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
471 (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED); 471 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
472 pmu->global_ctrl_mask = ~pmu->global_ctrl; 472 pmu->global_ctrl_mask = ~pmu->global_ctrl;
473} 473}
474 474
@@ -478,15 +478,15 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
478 struct kvm_pmu *pmu = &vcpu->arch.pmu; 478 struct kvm_pmu *pmu = &vcpu->arch.pmu;
479 479
480 memset(pmu, 0, sizeof(*pmu)); 480 memset(pmu, 0, sizeof(*pmu));
481 for (i = 0; i < X86_PMC_MAX_GENERIC; i++) { 481 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
482 pmu->gp_counters[i].type = KVM_PMC_GP; 482 pmu->gp_counters[i].type = KVM_PMC_GP;
483 pmu->gp_counters[i].vcpu = vcpu; 483 pmu->gp_counters[i].vcpu = vcpu;
484 pmu->gp_counters[i].idx = i; 484 pmu->gp_counters[i].idx = i;
485 } 485 }
486 for (i = 0; i < X86_PMC_MAX_FIXED; i++) { 486 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
487 pmu->fixed_counters[i].type = KVM_PMC_FIXED; 487 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
488 pmu->fixed_counters[i].vcpu = vcpu; 488 pmu->fixed_counters[i].vcpu = vcpu;
489 pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED; 489 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
490 } 490 }
491 init_irq_work(&pmu->irq_work, trigger_pmi); 491 init_irq_work(&pmu->irq_work, trigger_pmi);
492 kvm_pmu_cpuid_update(vcpu); 492 kvm_pmu_cpuid_update(vcpu);
@@ -498,13 +498,13 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
498 int i; 498 int i;
499 499
500 irq_work_sync(&pmu->irq_work); 500 irq_work_sync(&pmu->irq_work);
501 for (i = 0; i < X86_PMC_MAX_GENERIC; i++) { 501 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
502 struct kvm_pmc *pmc = &pmu->gp_counters[i]; 502 struct kvm_pmc *pmc = &pmu->gp_counters[i];
503 stop_counter(pmc); 503 stop_counter(pmc);
504 pmc->counter = pmc->eventsel = 0; 504 pmc->counter = pmc->eventsel = 0;
505 } 505 }
506 506
507 for (i = 0; i < X86_PMC_MAX_FIXED; i++) 507 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
508 stop_counter(&pmu->fixed_counters[i]); 508 stop_counter(&pmu->fixed_counters[i]);
509 509
510 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 510 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
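
The renamed INTEL_PMC_IDX_FIXED constant is 32, so the global_ctrl value built in kvm_pmu_cpuid_update() packs general purpose enable bits in the low word and fixed counter enable bits from bit 32 upward. A small worked example of that expression with plausible counts (4 general purpose, 3 fixed):

#include <stdio.h>
#include <stdint.h>

#define INTEL_PMC_IDX_FIXED	32	/* fixed counters start at bit 32 of GLOBAL_CTRL */

int main(void)
{
	int nr_gp = 4, nr_fixed = 3;	/* example values, as reported via CPUID leaf 0xa */
	uint64_t global_ctrl;

	global_ctrl = ((1u << nr_gp) - 1) |
		      (((1ull << nr_fixed) - 1) << INTEL_PMC_IDX_FIXED);

	/* bits 0-3 and 32-34 set: 0x70000000f */
	printf("global_ctrl = %#llx\n", (unsigned long long)global_ctrl);
	return 0;
}
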
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 911d2641f14c..62d02e3c3ed6 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -710,16 +710,6 @@ TRACE_EVENT(kvm_skinit,
710 __entry->rip, __entry->slb) 710 __entry->rip, __entry->slb)
711); 711);
712 712
713#define __print_insn(insn, ilen) ({ \
714 int i; \
715 const char *ret = p->buffer + p->len; \
716 \
717 for (i = 0; i < ilen; ++i) \
718 trace_seq_printf(p, " %02x", insn[i]); \
719 trace_seq_printf(p, "%c", 0); \
720 ret; \
721 })
722
723#define KVM_EMUL_INSN_F_CR0_PE (1 << 0) 713#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
724#define KVM_EMUL_INSN_F_EFL_VM (1 << 1) 714#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
725#define KVM_EMUL_INSN_F_CS_D (1 << 2) 715#define KVM_EMUL_INSN_F_CS_D (1 << 2)
@@ -786,7 +776,7 @@ TRACE_EVENT(kvm_emulate_insn,
786 776
787 TP_printk("%x:%llx:%s (%s)%s", 777 TP_printk("%x:%llx:%s (%s)%s",
788 __entry->csbase, __entry->rip, 778 __entry->csbase, __entry->rip,
789 __print_insn(__entry->insn, __entry->len), 779 __print_hex(__entry->insn, __entry->len),
790 __print_symbolic(__entry->flags, 780 __print_symbolic(__entry->flags,
791 kvm_trace_symbol_emul_flags), 781 kvm_trace_symbol_emul_flags),
792 __entry->failed ? " failed" : "" 782 __entry->failed ? " failed" : ""
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
index a311cc59b65d..8d6ef78b5d01 100644
--- a/arch/x86/lib/msr-reg-export.c
+++ b/arch/x86/lib/msr-reg-export.c
@@ -1,5 +1,5 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <asm/msr.h> 2#include <asm/msr.h>
3 3
4EXPORT_SYMBOL(native_rdmsr_safe_regs); 4EXPORT_SYMBOL(rdmsr_safe_regs);
5EXPORT_SYMBOL(native_wrmsr_safe_regs); 5EXPORT_SYMBOL(wrmsr_safe_regs);
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 69fa10623f21..f6d13eefad10 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -6,13 +6,13 @@
6 6
7#ifdef CONFIG_X86_64 7#ifdef CONFIG_X86_64
8/* 8/*
9 * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]); 9 * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
10 * 10 *
11 * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi] 11 * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
12 * 12 *
13 */ 13 */
14.macro op_safe_regs op 14.macro op_safe_regs op
15ENTRY(native_\op\()_safe_regs) 15ENTRY(\op\()_safe_regs)
16 CFI_STARTPROC 16 CFI_STARTPROC
17 pushq_cfi %rbx 17 pushq_cfi %rbx
18 pushq_cfi %rbp 18 pushq_cfi %rbp
@@ -45,13 +45,13 @@ ENTRY(native_\op\()_safe_regs)
45 45
46 _ASM_EXTABLE(1b, 3b) 46 _ASM_EXTABLE(1b, 3b)
47 CFI_ENDPROC 47 CFI_ENDPROC
48ENDPROC(native_\op\()_safe_regs) 48ENDPROC(\op\()_safe_regs)
49.endm 49.endm
50 50
51#else /* X86_32 */ 51#else /* X86_32 */
52 52
53.macro op_safe_regs op 53.macro op_safe_regs op
54ENTRY(native_\op\()_safe_regs) 54ENTRY(\op\()_safe_regs)
55 CFI_STARTPROC 55 CFI_STARTPROC
56 pushl_cfi %ebx 56 pushl_cfi %ebx
57 pushl_cfi %ebp 57 pushl_cfi %ebp
@@ -92,7 +92,7 @@ ENTRY(native_\op\()_safe_regs)
92 92
93 _ASM_EXTABLE(1b, 3b) 93 _ASM_EXTABLE(1b, 3b)
94 CFI_ENDPROC 94 CFI_ENDPROC
95ENDPROC(native_\op\()_safe_regs) 95ENDPROC(\op\()_safe_regs)
96.endm 96.endm
97 97
98#endif 98#endif
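
Dropping the native_ prefix leaves {rd,wr}msr_safe_regs() as the common entry points; per the register layout comment above, callers pass a u32 gprs[8] where gprs[1] (ecx) selects the MSR and gprs[0]/gprs[2] (eax/edx) carry the value. An illustrative kernel-side read helper (EXAMPLE_MSR is a made-up placeholder):

#include <linux/types.h>
#include <asm/msr.h>

#define EXAMPLE_MSR	0x00000123	/* hypothetical index */

static int example_read_via_regs(u64 *val)
{
	u32 gprs[8] = { 0 };
	int err;

	gprs[1] = EXAMPLE_MSR;				/* ecx: MSR number */
	err = rdmsr_safe_regs(gprs);
	if (!err)
		*val = gprs[0] | ((u64)gprs[2] << 32);	/* edx:eax */
	return err;
}
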
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 303f08637826..b2b94438ff05 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -312,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
312 goto fail; 312 goto fail;
313 } 313 }
314 /* both registers must be reserved */ 314 /* both registers must be reserved */
315 if (num_counters == AMD64_NUM_COUNTERS_F15H) { 315 if (num_counters == AMD64_NUM_COUNTERS_CORE) {
316 msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1); 316 msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
317 msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1); 317 msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
318 } else { 318 } else {
@@ -514,7 +514,7 @@ static int op_amd_init(struct oprofile_operations *ops)
514 ops->create_files = setup_ibs_files; 514 ops->create_files = setup_ibs_files;
515 515
516 if (boot_cpu_data.x86 == 0x15) { 516 if (boot_cpu_data.x86 == 0x15) {
517 num_counters = AMD64_NUM_COUNTERS_F15H; 517 num_counters = AMD64_NUM_COUNTERS_CORE;
518 } else { 518 } else {
519 num_counters = AMD64_NUM_COUNTERS; 519 num_counters = AMD64_NUM_COUNTERS;
520 } 520 }
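
The AMD64_NUM_COUNTERS_CORE rename reflects that the family 15h core PMU exposes its counters as interleaved control/counter MSR pairs, which is why the addresses above advance by (i << 1). A short sketch that only prints the layout the driver programs (AMD64_NUM_COUNTERS_CORE and the MSR_F15H_* constants are assumed to come from the x86 perf and MSR headers):

#include <linux/printk.h>
#include <asm/msr-index.h>
#include <asm/perf_event.h>

static void example_dump_f15h_layout(void)
{
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++)
		pr_info("counter %d: ctl=%#x ctr=%#x\n", i,
			MSR_F15H_PERF_CTL + (i << 1),
			MSR_F15H_PERF_CTR + (i << 1));
}
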
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 66e6d9359826..0faad646f5fd 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
205{ 205{
206 /* Load these always in case some future AMD CPU supports 206 /* Load these always in case some future AMD CPU supports
207 SYSENTER from compat mode too. */ 207 SYSENTER from compat mode too. */
208 checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); 208 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
209 checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL); 209 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
210 checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); 210 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
211 211
212 wrmsrl(MSR_CSTAR, ia32_cstar_target); 212 wrmsrl(MSR_CSTAR, ia32_cstar_target);
213} 213}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ff962d4b821e..ed7d54985d0c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1124,9 +1124,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1124 .wbinvd = native_wbinvd, 1124 .wbinvd = native_wbinvd,
1125 1125
1126 .read_msr = native_read_msr_safe, 1126 .read_msr = native_read_msr_safe,
1127 .rdmsr_regs = native_rdmsr_safe_regs,
1128 .write_msr = xen_write_msr_safe, 1127 .write_msr = xen_write_msr_safe,
1129 .wrmsr_regs = native_wrmsr_safe_regs,
1130 1128
1131 .read_tsc = native_read_tsc, 1129 .read_tsc = native_read_tsc,
1132 .read_pmc = native_read_pmc, 1130 .read_pmc = native_read_pmc,