author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-26 11:03:38 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-26 11:03:38 -0400
commit | 7115e3fcf45514db7525a05365b10454ff7f345e (patch) |
tree | 17450e6337d559cc35dae6a7a73abab01ac63f00 /arch |
parent | 1f6e05171bb5cc32a4d6437ab2269fc21d169ca7 (diff) |
parent | c752d04066a36ae30b29795f3fa3f536292c1f8c (diff) |
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (121 commits)
perf symbols: Increase symbol KSYM_NAME_LEN size
perf hists browser: Refuse 'a' hotkey on non symbolic views
perf ui browser: Use libslang to read keys
perf tools: Fix tracing info recording
perf hists browser: Elide DSO column when it is set to just one DSO, ditto for threads
perf hists: Don't consider filtered entries when calculating column widths
perf hists: Don't decay total_period for filtered entries
perf hists browser: Honour symbol_conf.show_{nr_samples,total_period}
perf hists browser: Do not exit on tab key with single event
perf annotate browser: Don't change selection line when returning from callq
perf tools: handle endianness of feature bitmap
perf tools: Add prelink suggestion to dso update message
perf script: Fix unknown feature comment
perf hists browser: Apply the dso and thread filters when merging new batches
perf hists: Move the dso and thread filters from hist_browser
perf ui browser: Honour the xterm colors
perf top tui: Give color hints just on the percentage, like on --stdio
perf ui browser: Make the colors configurable and change the defaults
perf tui: Remove unneeded call to newtCls on startup
perf hists: Don't format the percentage on hist_entry__snprintf
...
Fix up conflicts in arch/x86/kernel/kprobes.c manually.
Ingo's tree did the insane "add volatile to const array", which just
doesn't make sense ("volatile const"?). But we could remove the const
*and* make the array volatile to make doubly sure that gcc doesn't
optimize it away..
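The style of that resolution, as a hedged sketch (the actual table lives in arch/x86/kernel/kprobes.c and is a macro-built bitmap; "insn_table" below is an invented name):

```c
/* Sketch of the conflict-resolution style described above; "insn_table"
 * is an illustrative identifier, not the real kprobes one. */

/* What the merged branch effectively asked for -- "volatile const": */
/* static volatile const u32 insn_table[8]; */

/* Resolution: drop the const and keep the array volatile, so gcc cannot
 * optimize accesses to it away: */
static volatile u32 insn_table[8] = { 0 };
```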
Also fix up kernel/trace/ring_buffer.c non-data-conflicts manually: the
reader_lock has been turned into a raw lock by the core locking merge,
and there was a new user of it introduced in this perf core merge. Make
sure that new use also uses the raw accessor functions.
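"The raw accessor functions" are the raw_spin_lock_*() family that goes with a raw_spinlock_t. A minimal sketch of the pattern the new ring-buffer user has to follow, assuming only the reader_lock field named above (the surrounding struct and function are illustrative):

```c
#include <linux/spinlock.h>

/* Illustrative stand-in for the per-CPU ring buffer structure. */
struct rb_per_cpu_sketch {
	raw_spinlock_t reader_lock;	/* converted from spinlock_t */
};

static void new_user_of_reader_lock(struct rb_per_cpu_sketch *cpu_buffer)
{
	unsigned long flags;

	/* Once the lock is a raw_spinlock_t, every user must go through
	 * the raw_* accessors. */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	/* ... touch reader state ... */
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
```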
Diffstat (limited to 'arch')
33 files changed, 1647 insertions, 1117 deletions
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index b92b9445255d..6c4e9aaa70c1 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,6 +10,7 @@ config M32R | |||
10 | select HAVE_GENERIC_HARDIRQS | 10 | select HAVE_GENERIC_HARDIRQS |
11 | select GENERIC_IRQ_PROBE | 11 | select GENERIC_IRQ_PROBE |
12 | select GENERIC_IRQ_SHOW | 12 | select GENERIC_IRQ_SHOW |
13 | select GENERIC_ATOMIC64 | ||
13 | 14 | ||
14 | config SBUS | 15 | config SBUS |
15 | bool | 16 | bool |
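The single select above pulls in the generic 64-bit atomics from lib/atomic64.c, presumably because the perf core relies on atomic64_t/local64_t counters that m32r cannot provide natively. A minimal usage sketch of the interface GENERIC_ATOMIC64 backs (illustrative only, not code from this merge):

```c
#include <linux/atomic.h>

/* With GENERIC_ATOMIC64 selected, atomic64_t works even though the CPU has
 * no native 64-bit atomic instructions; lib/atomic64.c emulates them with a
 * hashed spinlock. */
static atomic64_t sample_count = ATOMIC64_INIT(0);

static u64 account_sample(void)
{
	atomic64_inc(&sample_count);
	return atomic64_read(&sample_count);
}
```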
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 4886a68f267e..fd3f9f18cf3f 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -22,27 +22,26 @@ void arch_trigger_all_cpu_backtrace(void); | |||
22 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | 22 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | /* | 25 | #define NMI_FLAG_FIRST 1 |
26 | * Define some priorities for the nmi notifier call chain. | 26 | |
27 | * | 27 | enum { |
28 | * Create a local nmi bit that has a higher priority than | 28 | NMI_LOCAL=0, |
29 | * external nmis, because the local ones are more frequent. | 29 | NMI_UNKNOWN, |
30 | * | 30 | NMI_MAX |
31 | * Also setup some default high/normal/low settings for | 31 | }; |
32 | * subsystems to registers with. Using 4 bits to separate | 32 | |
33 | * the priorities. This can go a lot higher if needed be. | 33 | #define NMI_DONE 0 |
34 | */ | 34 | #define NMI_HANDLED 1 |
35 | 35 | ||
36 | #define NMI_LOCAL_SHIFT 16 /* randomly picked */ | 36 | typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); |
37 | #define NMI_LOCAL_BIT (1ULL << NMI_LOCAL_SHIFT) | 37 | |
38 | #define NMI_HIGH_PRIOR (1ULL << 8) | 38 | int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long, |
39 | #define NMI_NORMAL_PRIOR (1ULL << 4) | 39 | const char *); |
40 | #define NMI_LOW_PRIOR (1ULL << 0) | 40 | |
41 | #define NMI_LOCAL_HIGH_PRIOR (NMI_LOCAL_BIT | NMI_HIGH_PRIOR) | 41 | void unregister_nmi_handler(unsigned int, const char *); |
42 | #define NMI_LOCAL_NORMAL_PRIOR (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR) | ||
43 | #define NMI_LOCAL_LOW_PRIOR (NMI_LOCAL_BIT | NMI_LOW_PRIOR) | ||
44 | 42 | ||
45 | void stop_nmi(void); | 43 | void stop_nmi(void); |
46 | void restart_nmi(void); | 44 | void restart_nmi(void); |
45 | void local_touch_nmi(void); | ||
47 | 46 | ||
48 | #endif /* _ASM_X86_NMI_H */ | 47 | #endif /* _ASM_X86_NMI_H */ |
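The header above is the whole of the new NMI handler API that the rest of this merge converts callers to: a handler receives the NMI type and the interrupted registers, and answers NMI_HANDLED or NMI_DONE. A hedged usage sketch (all "mydev" names are invented):

```c
#include <linux/module.h>
#include <asm/nmi.h>

/* Hypothetical device hooks, stubbed out for the sketch: */
static bool mydev_caused_this_nmi(void) { return false; }
static void mydev_ack_nmi(void) { }

static int mydev_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	if (!mydev_caused_this_nmi())
		return NMI_DONE;	/* not ours; let other handlers run */

	mydev_ack_nmi();
	return NMI_HANDLED;		/* consumed this NMI */
}

static int __init mydev_nmi_init(void)
{
	/* type NMI_LOCAL, no flags, name is used again when unregistering */
	return register_nmi_handler(NMI_LOCAL, mydev_nmi_handler, 0, "mydev");
}

static void __exit mydev_nmi_exit(void)
{
	unregister_nmi_handler(NMI_LOCAL, "mydev");
}

module_init(mydev_nmi_init);
module_exit(mydev_nmi_exit);
```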
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 094fb30817ab..f61c62f7d5d8 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,6 +29,9 @@ | |||
29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) | 29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) |
30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL | 30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL |
31 | 31 | ||
32 | #define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40) | ||
33 | #define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41) | ||
34 | |||
32 | #define AMD64_EVENTSEL_EVENT \ | 35 | #define AMD64_EVENTSEL_EVENT \ |
33 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) | 36 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) |
34 | #define INTEL_ARCH_EVENT_MASK \ | 37 | #define INTEL_ARCH_EVENT_MASK \ |
@@ -43,14 +46,17 @@ | |||
43 | #define AMD64_RAW_EVENT_MASK \ | 46 | #define AMD64_RAW_EVENT_MASK \ |
44 | (X86_RAW_EVENT_MASK | \ | 47 | (X86_RAW_EVENT_MASK | \ |
45 | AMD64_EVENTSEL_EVENT) | 48 | AMD64_EVENTSEL_EVENT) |
49 | #define AMD64_NUM_COUNTERS 4 | ||
50 | #define AMD64_NUM_COUNTERS_F15H 6 | ||
51 | #define AMD64_NUM_COUNTERS_MAX AMD64_NUM_COUNTERS_F15H | ||
46 | 52 | ||
47 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 53 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
48 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 54 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
49 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 | 55 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 |
50 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ | 56 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ |
51 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) | 57 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) |
52 | 58 | ||
53 | #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 | 59 | #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 |
54 | 60 | ||
55 | /* | 61 | /* |
56 | * Intel "Architectural Performance Monitoring" CPUID | 62 | * Intel "Architectural Performance Monitoring" CPUID |
@@ -110,6 +116,35 @@ union cpuid10_edx { | |||
110 | */ | 116 | */ |
111 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) | 117 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) |
112 | 118 | ||
119 | /* | ||
120 | * IBS cpuid feature detection | ||
121 | */ | ||
122 | |||
123 | #define IBS_CPUID_FEATURES 0x8000001b | ||
124 | |||
125 | /* | ||
126 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but | ||
127 | * bit 0 is used to indicate the existence of IBS. | ||
128 | */ | ||
129 | #define IBS_CAPS_AVAIL (1U<<0) | ||
130 | #define IBS_CAPS_FETCHSAM (1U<<1) | ||
131 | #define IBS_CAPS_OPSAM (1U<<2) | ||
132 | #define IBS_CAPS_RDWROPCNT (1U<<3) | ||
133 | #define IBS_CAPS_OPCNT (1U<<4) | ||
134 | #define IBS_CAPS_BRNTRGT (1U<<5) | ||
135 | #define IBS_CAPS_OPCNTEXT (1U<<6) | ||
136 | |||
137 | #define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ | ||
138 | | IBS_CAPS_FETCHSAM \ | ||
139 | | IBS_CAPS_OPSAM) | ||
140 | |||
141 | /* | ||
142 | * IBS APIC setup | ||
143 | */ | ||
144 | #define IBSCTL 0x1cc | ||
145 | #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) | ||
146 | #define IBSCTL_LVT_OFFSET_MASK 0x0F | ||
147 | |||
113 | /* IbsFetchCtl bits/masks */ | 148 | /* IbsFetchCtl bits/masks */ |
114 | #define IBS_FETCH_RAND_EN (1ULL<<57) | 149 | #define IBS_FETCH_RAND_EN (1ULL<<57) |
115 | #define IBS_FETCH_VAL (1ULL<<49) | 150 | #define IBS_FETCH_VAL (1ULL<<49) |
@@ -124,6 +159,8 @@ union cpuid10_edx { | |||
124 | #define IBS_OP_MAX_CNT 0x0000FFFFULL | 159 | #define IBS_OP_MAX_CNT 0x0000FFFFULL |
125 | #define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ | 160 | #define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ |
126 | 161 | ||
162 | extern u32 get_ibs_caps(void); | ||
163 | |||
127 | #ifdef CONFIG_PERF_EVENTS | 164 | #ifdef CONFIG_PERF_EVENTS |
128 | extern void perf_events_lapic_init(void); | 165 | extern void perf_events_lapic_init(void); |
129 | 166 | ||
@@ -159,7 +196,19 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); | |||
159 | ); \ | 196 | ); \ |
160 | } | 197 | } |
161 | 198 | ||
199 | struct perf_guest_switch_msr { | ||
200 | unsigned msr; | ||
201 | u64 host, guest; | ||
202 | }; | ||
203 | |||
204 | extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); | ||
162 | #else | 205 | #else |
206 | static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | ||
207 | { | ||
208 | *nr = 0; | ||
209 | return NULL; | ||
210 | } | ||
211 | |||
163 | static inline void perf_events_lapic_init(void) { } | 212 | static inline void perf_events_lapic_init(void) { } |
164 | #endif | 213 | #endif |
165 | 214 | ||
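Two additions above are worth calling out: get_ibs_caps() exposes AMD IBS feature detection, and perf_guest_get_msrs() hands a hypervisor the list of PMU MSRs whose host and guest values must be swapped across guest entry/exit. A hedged sketch of a consumer follows; add_atomic_switch_msr() stands in for whatever MSR-switching primitive the hypervisor provides and is only assumed here:

```c
#include <asm/perf_event.h>

/* Hypothetical hypervisor hook, assumed to exist for this sketch only: */
extern void add_atomic_switch_msr(unsigned msr, u64 guest_val, u64 host_val);

static void sync_pmu_msrs_for_guest(void)
{
	struct perf_guest_switch_msr *msrs;
	int nr, i;

	msrs = perf_guest_get_msrs(&nr);	/* NULL / nr == 0 without a PMU */
	if (!msrs)
		return;

	for (i = 0; i < nr; i++)
		add_atomic_switch_msr(msrs[i].msr, msrs[i].guest, msrs[i].host);
}
```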
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 3250e3d605d9..92f297069e87 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type); | |||
23 | #define MRR_BIOS 0 | 23 | #define MRR_BIOS 0 |
24 | #define MRR_APM 1 | 24 | #define MRR_APM 1 |
25 | 25 | ||
26 | typedef void (*nmi_shootdown_cb)(int, struct die_args*); | 26 | typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); |
27 | void nmi_shootdown_cpus(nmi_shootdown_cb callback); | 27 | void nmi_shootdown_cpus(nmi_shootdown_cb callback); |
28 | 28 | ||
29 | #endif /* _ASM_X86_REBOOT_H */ | 29 | #endif /* _ASM_X86_REBOOT_H */ |
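The nmi_shootdown_cb callback now receives the interrupted CPU's pt_regs directly instead of a struct die_args, matching the new NMI handler convention. A minimal sketch of a callback with the new signature (the real kdump callback in arch/x86/kernel/crash.c does considerably more):

```c
#include <asm/reboot.h>
#include <asm/processor.h>

static void park_cpu_for_crash(int cpu, struct pt_regs *regs)
{
	/* Illustrative only: a real callback would save state for the crash
	 * kernel (e.g. crash_save_cpu(regs, cpu)) before parking the CPU. */
	for (;;)
		cpu_relax();
}

static void example_shootdown(void)
{
	nmi_shootdown_cpus(park_cpu_for_crash);
}
```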
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 82f2912155a5..8baca3c4871c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -19,7 +19,7 @@ endif | |||
19 | 19 | ||
20 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o | 20 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o |
21 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o | 21 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o |
22 | obj-y += time.o ioport.o ldt.o dumpstack.o | 22 | obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o |
23 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o | 23 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o |
24 | obj-$(CONFIG_IRQ_WORK) += irq_work.o | 24 | obj-$(CONFIG_IRQ_WORK) += irq_work.o |
25 | obj-y += probe_roms.o | 25 | obj-y += probe_roms.o |
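The new nmi.o object is arch/x86/kernel/nmi.c, which is not part of this excerpt; broadly, it keeps a list of registered handlers per NMI type and walks that list from the NMI vector. A hedged sketch of the idea (structure and function names are invented, and the real file adds locking and back-to-back-NMI bookkeeping):

```c
#include <linux/list.h>
#include <asm/nmi.h>

struct nmi_action_sketch {
	struct list_head	list;
	nmi_handler_t		handler;
	const char		*name;
};

/* One handler list per NMI type (NMI_LOCAL, NMI_UNKNOWN, ...);
 * initialization of the list heads is elided here. */
static struct list_head nmi_handlers_sketch[NMI_MAX];

static int nmi_handle_sketch(unsigned int type, struct pt_regs *regs)
{
	struct nmi_action_sketch *a;
	int handled = 0;

	/* Every registered handler gets a look; NMI_HANDLED returns add up,
	 * so the caller can tell whether anyone claimed the NMI. */
	list_for_each_entry(a, &nmi_handlers_sketch[type], list)
		handled += a->handler(type, regs);

	return handled;
}
```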
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index d5e57db0f7be..31cb9ae992b7 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void) | |||
60 | } | 60 | } |
61 | 61 | ||
62 | static int __kprobes | 62 | static int __kprobes |
63 | arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self, | 63 | arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) |
64 | unsigned long cmd, void *__args) | ||
65 | { | 64 | { |
66 | struct die_args *args = __args; | ||
67 | struct pt_regs *regs; | ||
68 | int cpu; | 65 | int cpu; |
69 | 66 | ||
70 | switch (cmd) { | ||
71 | case DIE_NMI: | ||
72 | break; | ||
73 | |||
74 | default: | ||
75 | return NOTIFY_DONE; | ||
76 | } | ||
77 | |||
78 | regs = args->regs; | ||
79 | cpu = smp_processor_id(); | 67 | cpu = smp_processor_id(); |
80 | 68 | ||
81 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { | 69 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self, | |||
86 | show_regs(regs); | 74 | show_regs(regs); |
87 | arch_spin_unlock(&lock); | 75 | arch_spin_unlock(&lock); |
88 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); | 76 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
89 | return NOTIFY_STOP; | 77 | return NMI_HANDLED; |
90 | } | 78 | } |
91 | 79 | ||
92 | return NOTIFY_DONE; | 80 | return NMI_DONE; |
93 | } | 81 | } |
94 | 82 | ||
95 | static __read_mostly struct notifier_block backtrace_notifier = { | ||
96 | .notifier_call = arch_trigger_all_cpu_backtrace_handler, | ||
97 | .next = NULL, | ||
98 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
99 | }; | ||
100 | |||
101 | static int __init register_trigger_all_cpu_backtrace(void) | 83 | static int __init register_trigger_all_cpu_backtrace(void) |
102 | { | 84 | { |
103 | register_die_notifier(&backtrace_notifier); | 85 | register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler, |
86 | 0, "arch_bt"); | ||
104 | return 0; | 87 | return 0; |
105 | } | 88 | } |
106 | early_initcall(register_trigger_all_cpu_backtrace); | 89 | early_initcall(register_trigger_all_cpu_backtrace); |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 34b18594e724..75be00ecfff2 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void) | |||
672 | /* | 672 | /* |
673 | * When NMI is received, print a stack trace. | 673 | * When NMI is received, print a stack trace. |
674 | */ | 674 | */ |
675 | int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | 675 | int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) |
676 | { | 676 | { |
677 | unsigned long real_uv_nmi; | 677 | unsigned long real_uv_nmi; |
678 | int bid; | 678 | int bid; |
679 | 679 | ||
680 | if (reason != DIE_NMIUNKNOWN) | ||
681 | return NOTIFY_OK; | ||
682 | |||
683 | if (in_crash_kexec) | ||
684 | /* do nothing if entering the crash kernel */ | ||
685 | return NOTIFY_OK; | ||
686 | |||
687 | /* | 680 | /* |
688 | * Each blade has an MMR that indicates when an NMI has been sent | 681 | * Each blade has an MMR that indicates when an NMI has been sent |
689 | * to cpus on the blade. If an NMI is detected, atomically | 682 | * to cpus on the blade. If an NMI is detected, atomically |
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | |||
704 | } | 697 | } |
705 | 698 | ||
706 | if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) | 699 | if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) |
707 | return NOTIFY_DONE; | 700 | return NMI_DONE; |
708 | 701 | ||
709 | __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; | 702 | __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; |
710 | 703 | ||
@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | |||
717 | dump_stack(); | 710 | dump_stack(); |
718 | spin_unlock(&uv_nmi_lock); | 711 | spin_unlock(&uv_nmi_lock); |
719 | 712 | ||
720 | return NOTIFY_STOP; | 713 | return NMI_HANDLED; |
721 | } | 714 | } |
722 | 715 | ||
723 | static struct notifier_block uv_dump_stack_nmi_nb = { | ||
724 | .notifier_call = uv_handle_nmi, | ||
725 | .priority = NMI_LOCAL_LOW_PRIOR - 1, | ||
726 | }; | ||
727 | |||
728 | void uv_register_nmi_notifier(void) | 716 | void uv_register_nmi_notifier(void) |
729 | { | 717 | { |
730 | if (register_die_notifier(&uv_dump_stack_nmi_nb)) | 718 | if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv")) |
731 | printk(KERN_WARNING "UV NMI handler failed to register\n"); | 719 | printk(KERN_WARNING "UV NMI handler failed to register\n"); |
732 | } | 720 | } |
733 | 721 | ||
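Note the type change above: the UV handler used to sit on the die chain just below the local-NMI priority; it now registers under NMI_UNKNOWN, so it only runs for NMIs that no NMI_LOCAL handler claimed. A hedged sketch of that registration pattern (names invented):

```c
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <asm/nmi.h>

/* Runs only after every NMI_LOCAL handler has returned NMI_DONE. */
static int last_resort_nmi(unsigned int reason, struct pt_regs *regs)
{
	pr_warning("unclaimed NMI on CPU %d\n", smp_processor_id());
	return NMI_HANDLED;	/* claim it, suppressing the unknown-NMI path */
}

static int __init last_resort_init(void)
{
	return register_nmi_handler(NMI_UNKNOWN, last_resort_nmi, 0,
				    "last_resort");
}
early_initcall(last_resort_init);
```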
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6042981d0309..fe6eb197f848 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,10 +28,15 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o | |||
28 | 28 | ||
29 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | 29 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o |
30 | 30 | ||
31 | ifdef CONFIG_PERF_EVENTS | ||
32 | obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o | ||
33 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o | ||
34 | endif | ||
35 | |||
31 | obj-$(CONFIG_X86_MCE) += mcheck/ | 36 | obj-$(CONFIG_X86_MCE) += mcheck/ |
32 | obj-$(CONFIG_MTRR) += mtrr/ | 37 | obj-$(CONFIG_MTRR) += mtrr/ |
33 | 38 | ||
34 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | 39 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o |
35 | 40 | ||
36 | quiet_cmd_mkcapflags = MKCAP $@ | 41 | quiet_cmd_mkcapflags = MKCAP $@ |
37 | cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ | 42 | cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 0ed633c5048b..6199232161cf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs) | |||
78 | 78 | ||
79 | static cpumask_var_t mce_inject_cpumask; | 79 | static cpumask_var_t mce_inject_cpumask; |
80 | 80 | ||
81 | static int mce_raise_notify(struct notifier_block *self, | 81 | static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) |
82 | unsigned long val, void *data) | ||
83 | { | 82 | { |
84 | struct die_args *args = (struct die_args *)data; | ||
85 | int cpu = smp_processor_id(); | 83 | int cpu = smp_processor_id(); |
86 | struct mce *m = &__get_cpu_var(injectm); | 84 | struct mce *m = &__get_cpu_var(injectm); |
87 | if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask)) | 85 | if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) |
88 | return NOTIFY_DONE; | 86 | return NMI_DONE; |
89 | cpumask_clear_cpu(cpu, mce_inject_cpumask); | 87 | cpumask_clear_cpu(cpu, mce_inject_cpumask); |
90 | if (m->inject_flags & MCJ_EXCEPTION) | 88 | if (m->inject_flags & MCJ_EXCEPTION) |
91 | raise_exception(m, args->regs); | 89 | raise_exception(m, regs); |
92 | else if (m->status) | 90 | else if (m->status) |
93 | raise_poll(m); | 91 | raise_poll(m); |
94 | return NOTIFY_STOP; | 92 | return NMI_HANDLED; |
95 | } | 93 | } |
96 | 94 | ||
97 | static struct notifier_block mce_raise_nb = { | ||
98 | .notifier_call = mce_raise_notify, | ||
99 | .priority = NMI_LOCAL_NORMAL_PRIOR, | ||
100 | }; | ||
101 | |||
102 | /* Inject mce on current CPU */ | 95 | /* Inject mce on current CPU */ |
103 | static int raise_local(void) | 96 | static int raise_local(void) |
104 | { | 97 | { |
@@ -216,7 +209,8 @@ static int inject_init(void) | |||
216 | return -ENOMEM; | 209 | return -ENOMEM; |
217 | printk(KERN_INFO "Machine check injector initialized\n"); | 210 | printk(KERN_INFO "Machine check injector initialized\n"); |
218 | mce_chrdev_ops.write = mce_write; | 211 | mce_chrdev_ops.write = mce_write; |
219 | register_die_notifier(&mce_raise_nb); | 212 | register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, |
213 | "mce_notify"); | ||
220 | return 0; | 214 | return 0; |
221 | } | 215 | } |
222 | 216 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 08363b042122..fce51ad1f362 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
908 | 908 | ||
909 | percpu_inc(mce_exception_count); | 909 | percpu_inc(mce_exception_count); |
910 | 910 | ||
911 | if (notify_die(DIE_NMI, "machine check", regs, error_code, | ||
912 | 18, SIGKILL) == NOTIFY_STOP) | ||
913 | goto out; | ||
914 | if (!banks) | 911 | if (!banks) |
915 | goto out; | 912 | goto out; |
916 | 913 | ||
@@ -1140,6 +1137,15 @@ static void mce_start_timer(unsigned long data) | |||
1140 | add_timer_on(t, smp_processor_id()); | 1137 | add_timer_on(t, smp_processor_id()); |
1141 | } | 1138 | } |
1142 | 1139 | ||
1140 | /* Must not be called in IRQ context where del_timer_sync() can deadlock */ | ||
1141 | static void mce_timer_delete_all(void) | ||
1142 | { | ||
1143 | int cpu; | ||
1144 | |||
1145 | for_each_online_cpu(cpu) | ||
1146 | del_timer_sync(&per_cpu(mce_timer, cpu)); | ||
1147 | } | ||
1148 | |||
1143 | static void mce_do_trigger(struct work_struct *work) | 1149 | static void mce_do_trigger(struct work_struct *work) |
1144 | { | 1150 | { |
1145 | call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); | 1151 | call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); |
@@ -1750,7 +1756,6 @@ static struct syscore_ops mce_syscore_ops = { | |||
1750 | 1756 | ||
1751 | static void mce_cpu_restart(void *data) | 1757 | static void mce_cpu_restart(void *data) |
1752 | { | 1758 | { |
1753 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
1754 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 1759 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1755 | return; | 1760 | return; |
1756 | __mcheck_cpu_init_generic(); | 1761 | __mcheck_cpu_init_generic(); |
@@ -1760,16 +1765,15 @@ static void mce_cpu_restart(void *data) | |||
1760 | /* Reinit MCEs after user configuration changes */ | 1765 | /* Reinit MCEs after user configuration changes */ |
1761 | static void mce_restart(void) | 1766 | static void mce_restart(void) |
1762 | { | 1767 | { |
1768 | mce_timer_delete_all(); | ||
1763 | on_each_cpu(mce_cpu_restart, NULL, 1); | 1769 | on_each_cpu(mce_cpu_restart, NULL, 1); |
1764 | } | 1770 | } |
1765 | 1771 | ||
1766 | /* Toggle features for corrected errors */ | 1772 | /* Toggle features for corrected errors */ |
1767 | static void mce_disable_ce(void *all) | 1773 | static void mce_disable_cmci(void *data) |
1768 | { | 1774 | { |
1769 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 1775 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1770 | return; | 1776 | return; |
1771 | if (all) | ||
1772 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
1773 | cmci_clear(); | 1777 | cmci_clear(); |
1774 | } | 1778 | } |
1775 | 1779 | ||
@@ -1852,7 +1856,8 @@ static ssize_t set_ignore_ce(struct sys_device *s, | |||
1852 | if (mce_ignore_ce ^ !!new) { | 1856 | if (mce_ignore_ce ^ !!new) { |
1853 | if (new) { | 1857 | if (new) { |
1854 | /* disable ce features */ | 1858 | /* disable ce features */ |
1855 | on_each_cpu(mce_disable_ce, (void *)1, 1); | 1859 | mce_timer_delete_all(); |
1860 | on_each_cpu(mce_disable_cmci, NULL, 1); | ||
1856 | mce_ignore_ce = 1; | 1861 | mce_ignore_ce = 1; |
1857 | } else { | 1862 | } else { |
1858 | /* enable ce features */ | 1863 | /* enable ce features */ |
@@ -1875,7 +1880,7 @@ static ssize_t set_cmci_disabled(struct sys_device *s, | |||
1875 | if (mce_cmci_disabled ^ !!new) { | 1880 | if (mce_cmci_disabled ^ !!new) { |
1876 | if (new) { | 1881 | if (new) { |
1877 | /* disable cmci */ | 1882 | /* disable cmci */ |
1878 | on_each_cpu(mce_disable_ce, NULL, 1); | 1883 | on_each_cpu(mce_disable_cmci, NULL, 1); |
1879 | mce_cmci_disabled = 1; | 1884 | mce_cmci_disabled = 1; |
1880 | } else { | 1885 | } else { |
1881 | /* enable cmci */ | 1886 | /* enable cmci */ |
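The new mce_timer_delete_all() exists because del_timer_sync() must not be called from interrupt context, and the old mce_disable_ce(all) path did exactly that from inside an on_each_cpu() IPI. The ordering the patch establishes, as a sketch (function names are the ones in the diff, bodies elided):

```c
#include <linux/smp.h>

static void reconfigure_mce_sketch(void)
{
	/* 1) Process context: the only safe place for del_timer_sync(). */
	mce_timer_delete_all();

	/* 2) IPI every CPU for the parts that must run locally; no timer
	 *    deletion happens inside the interrupt handler any more. */
	on_each_cpu(mce_cpu_restart, NULL, 1);
}
```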
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index cfa62ec090ec..640891014b2a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@ | |||
32 | #include <asm/smp.h> | 32 | #include <asm/smp.h> |
33 | #include <asm/alternative.h> | 33 | #include <asm/alternative.h> |
34 | 34 | ||
35 | #include "perf_event.h" | ||
36 | |||
35 | #if 0 | 37 | #if 0 |
36 | #undef wrmsrl | 38 | #undef wrmsrl |
37 | #define wrmsrl(msr, val) \ | 39 | #define wrmsrl(msr, val) \ |
@@ -43,283 +45,17 @@ do { \ | |||
43 | } while (0) | 45 | } while (0) |
44 | #endif | 46 | #endif |
45 | 47 | ||
46 | /* | 48 | struct x86_pmu x86_pmu __read_mostly; |
47 | * | NHM/WSM | SNB | | ||
48 | * register ------------------------------- | ||
49 | * | HT | no HT | HT | no HT | | ||
50 | *----------------------------------------- | ||
51 | * offcore | core | core | cpu | core | | ||
52 | * lbr_sel | core | core | cpu | core | | ||
53 | * ld_lat | cpu | core | cpu | core | | ||
54 | *----------------------------------------- | ||
55 | * | ||
56 | * Given that there is a small number of shared regs, | ||
57 | * we can pre-allocate their slot in the per-cpu | ||
58 | * per-core reg tables. | ||
59 | */ | ||
60 | enum extra_reg_type { | ||
61 | EXTRA_REG_NONE = -1, /* not used */ | ||
62 | |||
63 | EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ | ||
64 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ | ||
65 | |||
66 | EXTRA_REG_MAX /* number of entries needed */ | ||
67 | }; | ||
68 | |||
69 | struct event_constraint { | ||
70 | union { | ||
71 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
72 | u64 idxmsk64; | ||
73 | }; | ||
74 | u64 code; | ||
75 | u64 cmask; | ||
76 | int weight; | ||
77 | }; | ||
78 | |||
79 | struct amd_nb { | ||
80 | int nb_id; /* NorthBridge id */ | ||
81 | int refcnt; /* reference count */ | ||
82 | struct perf_event *owners[X86_PMC_IDX_MAX]; | ||
83 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; | ||
84 | }; | ||
85 | |||
86 | struct intel_percore; | ||
87 | |||
88 | #define MAX_LBR_ENTRIES 16 | ||
89 | |||
90 | struct cpu_hw_events { | ||
91 | /* | ||
92 | * Generic x86 PMC bits | ||
93 | */ | ||
94 | struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ | ||
95 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
96 | unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
97 | int enabled; | ||
98 | |||
99 | int n_events; | ||
100 | int n_added; | ||
101 | int n_txn; | ||
102 | int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ | ||
103 | u64 tags[X86_PMC_IDX_MAX]; | ||
104 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ | ||
105 | |||
106 | unsigned int group_flag; | ||
107 | |||
108 | /* | ||
109 | * Intel DebugStore bits | ||
110 | */ | ||
111 | struct debug_store *ds; | ||
112 | u64 pebs_enabled; | ||
113 | |||
114 | /* | ||
115 | * Intel LBR bits | ||
116 | */ | ||
117 | int lbr_users; | ||
118 | void *lbr_context; | ||
119 | struct perf_branch_stack lbr_stack; | ||
120 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | ||
121 | |||
122 | /* | ||
123 | * manage shared (per-core, per-cpu) registers | ||
124 | * used on Intel NHM/WSM/SNB | ||
125 | */ | ||
126 | struct intel_shared_regs *shared_regs; | ||
127 | |||
128 | /* | ||
129 | * AMD specific bits | ||
130 | */ | ||
131 | struct amd_nb *amd_nb; | ||
132 | }; | ||
133 | |||
134 | #define __EVENT_CONSTRAINT(c, n, m, w) {\ | ||
135 | { .idxmsk64 = (n) }, \ | ||
136 | .code = (c), \ | ||
137 | .cmask = (m), \ | ||
138 | .weight = (w), \ | ||
139 | } | ||
140 | |||
141 | #define EVENT_CONSTRAINT(c, n, m) \ | ||
142 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) | ||
143 | |||
144 | /* | ||
145 | * Constraint on the Event code. | ||
146 | */ | ||
147 | #define INTEL_EVENT_CONSTRAINT(c, n) \ | ||
148 | EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) | ||
149 | |||
150 | /* | ||
151 | * Constraint on the Event code + UMask + fixed-mask | ||
152 | * | ||
153 | * filter mask to validate fixed counter events. | ||
154 | * the following filters disqualify for fixed counters: | ||
155 | * - inv | ||
156 | * - edge | ||
157 | * - cnt-mask | ||
158 | * The other filters are supported by fixed counters. | ||
159 | * The any-thread option is supported starting with v3. | ||
160 | */ | ||
161 | #define FIXED_EVENT_CONSTRAINT(c, n) \ | ||
162 | EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK) | ||
163 | |||
164 | /* | ||
165 | * Constraint on the Event code + UMask | ||
166 | */ | ||
167 | #define INTEL_UEVENT_CONSTRAINT(c, n) \ | ||
168 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) | ||
169 | |||
170 | #define EVENT_CONSTRAINT_END \ | ||
171 | EVENT_CONSTRAINT(0, 0, 0) | ||
172 | |||
173 | #define for_each_event_constraint(e, c) \ | ||
174 | for ((e) = (c); (e)->weight; (e)++) | ||
175 | |||
176 | /* | ||
177 | * Per register state. | ||
178 | */ | ||
179 | struct er_account { | ||
180 | raw_spinlock_t lock; /* per-core: protect structure */ | ||
181 | u64 config; /* extra MSR config */ | ||
182 | u64 reg; /* extra MSR number */ | ||
183 | atomic_t ref; /* reference count */ | ||
184 | }; | ||
185 | |||
186 | /* | ||
187 | * Extra registers for specific events. | ||
188 | * | ||
189 | * Some events need large masks and require external MSRs. | ||
190 | * Those extra MSRs end up being shared for all events on | ||
191 | * a PMU and sometimes between PMU of sibling HT threads. | ||
192 | * In either case, the kernel needs to handle conflicting | ||
193 | * accesses to those extra, shared, regs. The data structure | ||
194 | * to manage those registers is stored in cpu_hw_event. | ||
195 | */ | ||
196 | struct extra_reg { | ||
197 | unsigned int event; | ||
198 | unsigned int msr; | ||
199 | u64 config_mask; | ||
200 | u64 valid_mask; | ||
201 | int idx; /* per_xxx->regs[] reg index */ | ||
202 | }; | ||
203 | |||
204 | #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ | ||
205 | .event = (e), \ | ||
206 | .msr = (ms), \ | ||
207 | .config_mask = (m), \ | ||
208 | .valid_mask = (vm), \ | ||
209 | .idx = EXTRA_REG_##i \ | ||
210 | } | ||
211 | |||
212 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ | ||
213 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) | ||
214 | |||
215 | #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) | ||
216 | |||
217 | union perf_capabilities { | ||
218 | struct { | ||
219 | u64 lbr_format : 6; | ||
220 | u64 pebs_trap : 1; | ||
221 | u64 pebs_arch_reg : 1; | ||
222 | u64 pebs_format : 4; | ||
223 | u64 smm_freeze : 1; | ||
224 | }; | ||
225 | u64 capabilities; | ||
226 | }; | ||
227 | |||
228 | /* | ||
229 | * struct x86_pmu - generic x86 pmu | ||
230 | */ | ||
231 | struct x86_pmu { | ||
232 | /* | ||
233 | * Generic x86 PMC bits | ||
234 | */ | ||
235 | const char *name; | ||
236 | int version; | ||
237 | int (*handle_irq)(struct pt_regs *); | ||
238 | void (*disable_all)(void); | ||
239 | void (*enable_all)(int added); | ||
240 | void (*enable)(struct perf_event *); | ||
241 | void (*disable)(struct perf_event *); | ||
242 | int (*hw_config)(struct perf_event *event); | ||
243 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); | ||
244 | unsigned eventsel; | ||
245 | unsigned perfctr; | ||
246 | u64 (*event_map)(int); | ||
247 | int max_events; | ||
248 | int num_counters; | ||
249 | int num_counters_fixed; | ||
250 | int cntval_bits; | ||
251 | u64 cntval_mask; | ||
252 | int apic; | ||
253 | u64 max_period; | ||
254 | struct event_constraint * | ||
255 | (*get_event_constraints)(struct cpu_hw_events *cpuc, | ||
256 | struct perf_event *event); | ||
257 | |||
258 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | ||
259 | struct perf_event *event); | ||
260 | struct event_constraint *event_constraints; | ||
261 | void (*quirks)(void); | ||
262 | int perfctr_second_write; | ||
263 | |||
264 | int (*cpu_prepare)(int cpu); | ||
265 | void (*cpu_starting)(int cpu); | ||
266 | void (*cpu_dying)(int cpu); | ||
267 | void (*cpu_dead)(int cpu); | ||
268 | |||
269 | /* | ||
270 | * Intel Arch Perfmon v2+ | ||
271 | */ | ||
272 | u64 intel_ctrl; | ||
273 | union perf_capabilities intel_cap; | ||
274 | 49 | ||
275 | /* | 50 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { |
276 | * Intel DebugStore bits | ||
277 | */ | ||
278 | int bts, pebs; | ||
279 | int bts_active, pebs_active; | ||
280 | int pebs_record_size; | ||
281 | void (*drain_pebs)(struct pt_regs *regs); | ||
282 | struct event_constraint *pebs_constraints; | ||
283 | |||
284 | /* | ||
285 | * Intel LBR | ||
286 | */ | ||
287 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ | ||
288 | int lbr_nr; /* hardware stack size */ | ||
289 | |||
290 | /* | ||
291 | * Extra registers for events | ||
292 | */ | ||
293 | struct extra_reg *extra_regs; | ||
294 | unsigned int er_flags; | ||
295 | }; | ||
296 | |||
297 | #define ERF_NO_HT_SHARING 1 | ||
298 | #define ERF_HAS_RSP_1 2 | ||
299 | |||
300 | static struct x86_pmu x86_pmu __read_mostly; | ||
301 | |||
302 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { | ||
303 | .enabled = 1, | 51 | .enabled = 1, |
304 | }; | 52 | }; |
305 | 53 | ||
306 | static int x86_perf_event_set_period(struct perf_event *event); | 54 | u64 __read_mostly hw_cache_event_ids |
307 | |||
308 | /* | ||
309 | * Generalized hw caching related hw_event table, filled | ||
310 | * in on a per model basis. A value of 0 means | ||
311 | * 'not supported', -1 means 'hw_event makes no sense on | ||
312 | * this CPU', any other value means the raw hw_event | ||
313 | * ID. | ||
314 | */ | ||
315 | |||
316 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
317 | |||
318 | static u64 __read_mostly hw_cache_event_ids | ||
319 | [PERF_COUNT_HW_CACHE_MAX] | 55 | [PERF_COUNT_HW_CACHE_MAX] |
320 | [PERF_COUNT_HW_CACHE_OP_MAX] | 56 | [PERF_COUNT_HW_CACHE_OP_MAX] |
321 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | 57 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
322 | static u64 __read_mostly hw_cache_extra_regs | 58 | u64 __read_mostly hw_cache_extra_regs |
323 | [PERF_COUNT_HW_CACHE_MAX] | 59 | [PERF_COUNT_HW_CACHE_MAX] |
324 | [PERF_COUNT_HW_CACHE_OP_MAX] | 60 | [PERF_COUNT_HW_CACHE_OP_MAX] |
325 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | 61 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
@@ -329,8 +65,7 @@ static u64 __read_mostly hw_cache_extra_regs | |||
329 | * Can only be executed on the CPU where the event is active. | 65 | * Can only be executed on the CPU where the event is active. |
330 | * Returns the delta events processed. | 66 | * Returns the delta events processed. |
331 | */ | 67 | */ |
332 | static u64 | 68 | u64 x86_perf_event_update(struct perf_event *event) |
333 | x86_perf_event_update(struct perf_event *event) | ||
334 | { | 69 | { |
335 | struct hw_perf_event *hwc = &event->hw; | 70 | struct hw_perf_event *hwc = &event->hw; |
336 | int shift = 64 - x86_pmu.cntval_bits; | 71 | int shift = 64 - x86_pmu.cntval_bits; |
@@ -373,30 +108,6 @@ again: | |||
373 | return new_raw_count; | 108 | return new_raw_count; |
374 | } | 109 | } |
375 | 110 | ||
376 | static inline int x86_pmu_addr_offset(int index) | ||
377 | { | ||
378 | int offset; | ||
379 | |||
380 | /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ | ||
381 | alternative_io(ASM_NOP2, | ||
382 | "shll $1, %%eax", | ||
383 | X86_FEATURE_PERFCTR_CORE, | ||
384 | "=a" (offset), | ||
385 | "a" (index)); | ||
386 | |||
387 | return offset; | ||
388 | } | ||
389 | |||
390 | static inline unsigned int x86_pmu_config_addr(int index) | ||
391 | { | ||
392 | return x86_pmu.eventsel + x86_pmu_addr_offset(index); | ||
393 | } | ||
394 | |||
395 | static inline unsigned int x86_pmu_event_addr(int index) | ||
396 | { | ||
397 | return x86_pmu.perfctr + x86_pmu_addr_offset(index); | ||
398 | } | ||
399 | |||
400 | /* | 111 | /* |
401 | * Find and validate any extra registers to set up. | 112 | * Find and validate any extra registers to set up. |
402 | */ | 113 | */ |
@@ -532,9 +243,6 @@ msr_fail: | |||
532 | return false; | 243 | return false; |
533 | } | 244 | } |
534 | 245 | ||
535 | static void reserve_ds_buffers(void); | ||
536 | static void release_ds_buffers(void); | ||
537 | |||
538 | static void hw_perf_event_destroy(struct perf_event *event) | 246 | static void hw_perf_event_destroy(struct perf_event *event) |
539 | { | 247 | { |
540 | if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { | 248 | if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { |
@@ -583,7 +291,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) | |||
583 | return x86_pmu_extra_regs(val, event); | 291 | return x86_pmu_extra_regs(val, event); |
584 | } | 292 | } |
585 | 293 | ||
586 | static int x86_setup_perfctr(struct perf_event *event) | 294 | int x86_setup_perfctr(struct perf_event *event) |
587 | { | 295 | { |
588 | struct perf_event_attr *attr = &event->attr; | 296 | struct perf_event_attr *attr = &event->attr; |
589 | struct hw_perf_event *hwc = &event->hw; | 297 | struct hw_perf_event *hwc = &event->hw; |
@@ -647,7 +355,7 @@ static int x86_setup_perfctr(struct perf_event *event) | |||
647 | return 0; | 355 | return 0; |
648 | } | 356 | } |
649 | 357 | ||
650 | static int x86_pmu_hw_config(struct perf_event *event) | 358 | int x86_pmu_hw_config(struct perf_event *event) |
651 | { | 359 | { |
652 | if (event->attr.precise_ip) { | 360 | if (event->attr.precise_ip) { |
653 | int precise = 0; | 361 | int precise = 0; |
@@ -723,7 +431,7 @@ static int __x86_pmu_event_init(struct perf_event *event) | |||
723 | return x86_pmu.hw_config(event); | 431 | return x86_pmu.hw_config(event); |
724 | } | 432 | } |
725 | 433 | ||
726 | static void x86_pmu_disable_all(void) | 434 | void x86_pmu_disable_all(void) |
727 | { | 435 | { |
728 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 436 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
729 | int idx; | 437 | int idx; |
@@ -758,15 +466,7 @@ static void x86_pmu_disable(struct pmu *pmu) | |||
758 | x86_pmu.disable_all(); | 466 | x86_pmu.disable_all(); |
759 | } | 467 | } |
760 | 468 | ||
761 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | 469 | void x86_pmu_enable_all(int added) |
762 | u64 enable_mask) | ||
763 | { | ||
764 | if (hwc->extra_reg.reg) | ||
765 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); | ||
766 | wrmsrl(hwc->config_base, hwc->config | enable_mask); | ||
767 | } | ||
768 | |||
769 | static void x86_pmu_enable_all(int added) | ||
770 | { | 470 | { |
771 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 471 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
772 | int idx; | 472 | int idx; |
@@ -788,7 +488,7 @@ static inline int is_x86_event(struct perf_event *event) | |||
788 | return event->pmu == &pmu; | 488 | return event->pmu == &pmu; |
789 | } | 489 | } |
790 | 490 | ||
791 | static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | 491 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) |
792 | { | 492 | { |
793 | struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; | 493 | struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; |
794 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 494 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
@@ -959,7 +659,6 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc, | |||
959 | } | 659 | } |
960 | 660 | ||
961 | static void x86_pmu_start(struct perf_event *event, int flags); | 661 | static void x86_pmu_start(struct perf_event *event, int flags); |
962 | static void x86_pmu_stop(struct perf_event *event, int flags); | ||
963 | 662 | ||
964 | static void x86_pmu_enable(struct pmu *pmu) | 663 | static void x86_pmu_enable(struct pmu *pmu) |
965 | { | 664 | { |
@@ -1031,21 +730,13 @@ static void x86_pmu_enable(struct pmu *pmu) | |||
1031 | x86_pmu.enable_all(added); | 730 | x86_pmu.enable_all(added); |
1032 | } | 731 | } |
1033 | 732 | ||
1034 | static inline void x86_pmu_disable_event(struct perf_event *event) | ||
1035 | { | ||
1036 | struct hw_perf_event *hwc = &event->hw; | ||
1037 | |||
1038 | wrmsrl(hwc->config_base, hwc->config); | ||
1039 | } | ||
1040 | |||
1041 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | 733 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
1042 | 734 | ||
1043 | /* | 735 | /* |
1044 | * Set the next IRQ period, based on the hwc->period_left value. | 736 | * Set the next IRQ period, based on the hwc->period_left value. |
1045 | * To be called with the event disabled in hw: | 737 | * To be called with the event disabled in hw: |
1046 | */ | 738 | */ |
1047 | static int | 739 | int x86_perf_event_set_period(struct perf_event *event) |
1048 | x86_perf_event_set_period(struct perf_event *event) | ||
1049 | { | 740 | { |
1050 | struct hw_perf_event *hwc = &event->hw; | 741 | struct hw_perf_event *hwc = &event->hw; |
1051 | s64 left = local64_read(&hwc->period_left); | 742 | s64 left = local64_read(&hwc->period_left); |
@@ -1105,7 +796,7 @@ x86_perf_event_set_period(struct perf_event *event) | |||
1105 | return ret; | 796 | return ret; |
1106 | } | 797 | } |
1107 | 798 | ||
1108 | static void x86_pmu_enable_event(struct perf_event *event) | 799 | void x86_pmu_enable_event(struct perf_event *event) |
1109 | { | 800 | { |
1110 | if (__this_cpu_read(cpu_hw_events.enabled)) | 801 | if (__this_cpu_read(cpu_hw_events.enabled)) |
1111 | __x86_pmu_enable_event(&event->hw, | 802 | __x86_pmu_enable_event(&event->hw, |
@@ -1244,7 +935,7 @@ void perf_event_print_debug(void) | |||
1244 | local_irq_restore(flags); | 935 | local_irq_restore(flags); |
1245 | } | 936 | } |
1246 | 937 | ||
1247 | static void x86_pmu_stop(struct perf_event *event, int flags) | 938 | void x86_pmu_stop(struct perf_event *event, int flags) |
1248 | { | 939 | { |
1249 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 940 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1250 | struct hw_perf_event *hwc = &event->hw; | 941 | struct hw_perf_event *hwc = &event->hw; |
@@ -1297,7 +988,7 @@ static void x86_pmu_del(struct perf_event *event, int flags) | |||
1297 | perf_event_update_userpage(event); | 988 | perf_event_update_userpage(event); |
1298 | } | 989 | } |
1299 | 990 | ||
1300 | static int x86_pmu_handle_irq(struct pt_regs *regs) | 991 | int x86_pmu_handle_irq(struct pt_regs *regs) |
1301 | { | 992 | { |
1302 | struct perf_sample_data data; | 993 | struct perf_sample_data data; |
1303 | struct cpu_hw_events *cpuc; | 994 | struct cpu_hw_events *cpuc; |
@@ -1367,109 +1058,28 @@ void perf_events_lapic_init(void) | |||
1367 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1058 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
1368 | } | 1059 | } |
1369 | 1060 | ||
1370 | struct pmu_nmi_state { | ||
1371 | unsigned int marked; | ||
1372 | int handled; | ||
1373 | }; | ||
1374 | |||
1375 | static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi); | ||
1376 | |||
1377 | static int __kprobes | 1061 | static int __kprobes |
1378 | perf_event_nmi_handler(struct notifier_block *self, | 1062 | perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) |
1379 | unsigned long cmd, void *__args) | ||
1380 | { | 1063 | { |
1381 | struct die_args *args = __args; | ||
1382 | unsigned int this_nmi; | ||
1383 | int handled; | ||
1384 | |||
1385 | if (!atomic_read(&active_events)) | 1064 | if (!atomic_read(&active_events)) |
1386 | return NOTIFY_DONE; | 1065 | return NMI_DONE; |
1387 | |||
1388 | switch (cmd) { | ||
1389 | case DIE_NMI: | ||
1390 | break; | ||
1391 | case DIE_NMIUNKNOWN: | ||
1392 | this_nmi = percpu_read(irq_stat.__nmi_count); | ||
1393 | if (this_nmi != __this_cpu_read(pmu_nmi.marked)) | ||
1394 | /* let the kernel handle the unknown nmi */ | ||
1395 | return NOTIFY_DONE; | ||
1396 | /* | ||
1397 | * This one is a PMU back-to-back nmi. Two events | ||
1398 | * trigger 'simultaneously' raising two back-to-back | ||
1399 | * NMIs. If the first NMI handles both, the latter | ||
1400 | * will be empty and daze the CPU. So, we drop it to | ||
1401 | * avoid false-positive 'unknown nmi' messages. | ||
1402 | */ | ||
1403 | return NOTIFY_STOP; | ||
1404 | default: | ||
1405 | return NOTIFY_DONE; | ||
1406 | } | ||
1407 | |||
1408 | handled = x86_pmu.handle_irq(args->regs); | ||
1409 | if (!handled) | ||
1410 | return NOTIFY_DONE; | ||
1411 | |||
1412 | this_nmi = percpu_read(irq_stat.__nmi_count); | ||
1413 | if ((handled > 1) || | ||
1414 | /* the next nmi could be a back-to-back nmi */ | ||
1415 | ((__this_cpu_read(pmu_nmi.marked) == this_nmi) && | ||
1416 | (__this_cpu_read(pmu_nmi.handled) > 1))) { | ||
1417 | /* | ||
1418 | * We could have two subsequent back-to-back nmis: The | ||
1419 | * first handles more than one counter, the 2nd | ||
1420 | * handles only one counter and the 3rd handles no | ||
1421 | * counter. | ||
1422 | * | ||
1423 | * This is the 2nd nmi because the previous was | ||
1424 | * handling more than one counter. We will mark the | ||
1425 | * next (3rd) and then drop it if unhandled. | ||
1426 | */ | ||
1427 | __this_cpu_write(pmu_nmi.marked, this_nmi + 1); | ||
1428 | __this_cpu_write(pmu_nmi.handled, handled); | ||
1429 | } | ||
1430 | 1066 | ||
1431 | return NOTIFY_STOP; | 1067 | return x86_pmu.handle_irq(regs); |
1432 | } | 1068 | } |
1433 | 1069 | ||
1434 | static __read_mostly struct notifier_block perf_event_nmi_notifier = { | 1070 | struct event_constraint emptyconstraint; |
1435 | .notifier_call = perf_event_nmi_handler, | 1071 | struct event_constraint unconstrained; |
1436 | .next = NULL, | ||
1437 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
1438 | }; | ||
1439 | |||
1440 | static struct event_constraint unconstrained; | ||
1441 | static struct event_constraint emptyconstraint; | ||
1442 | |||
1443 | static struct event_constraint * | ||
1444 | x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
1445 | { | ||
1446 | struct event_constraint *c; | ||
1447 | |||
1448 | if (x86_pmu.event_constraints) { | ||
1449 | for_each_event_constraint(c, x86_pmu.event_constraints) { | ||
1450 | if ((event->hw.config & c->cmask) == c->code) | ||
1451 | return c; | ||
1452 | } | ||
1453 | } | ||
1454 | |||
1455 | return &unconstrained; | ||
1456 | } | ||
1457 | |||
1458 | #include "perf_event_amd.c" | ||
1459 | #include "perf_event_p6.c" | ||
1460 | #include "perf_event_p4.c" | ||
1461 | #include "perf_event_intel_lbr.c" | ||
1462 | #include "perf_event_intel_ds.c" | ||
1463 | #include "perf_event_intel.c" | ||
1464 | 1072 | ||
1465 | static int __cpuinit | 1073 | static int __cpuinit |
1466 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 1074 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
1467 | { | 1075 | { |
1468 | unsigned int cpu = (long)hcpu; | 1076 | unsigned int cpu = (long)hcpu; |
1077 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
1469 | int ret = NOTIFY_OK; | 1078 | int ret = NOTIFY_OK; |
1470 | 1079 | ||
1471 | switch (action & ~CPU_TASKS_FROZEN) { | 1080 | switch (action & ~CPU_TASKS_FROZEN) { |
1472 | case CPU_UP_PREPARE: | 1081 | case CPU_UP_PREPARE: |
1082 | cpuc->kfree_on_online = NULL; | ||
1473 | if (x86_pmu.cpu_prepare) | 1083 | if (x86_pmu.cpu_prepare) |
1474 | ret = x86_pmu.cpu_prepare(cpu); | 1084 | ret = x86_pmu.cpu_prepare(cpu); |
1475 | break; | 1085 | break; |
@@ -1479,6 +1089,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1479 | x86_pmu.cpu_starting(cpu); | 1089 | x86_pmu.cpu_starting(cpu); |
1480 | break; | 1090 | break; |
1481 | 1091 | ||
1092 | case CPU_ONLINE: | ||
1093 | kfree(cpuc->kfree_on_online); | ||
1094 | break; | ||
1095 | |||
1482 | case CPU_DYING: | 1096 | case CPU_DYING: |
1483 | if (x86_pmu.cpu_dying) | 1097 | if (x86_pmu.cpu_dying) |
1484 | x86_pmu.cpu_dying(cpu); | 1098 | x86_pmu.cpu_dying(cpu); |
@@ -1557,7 +1171,7 @@ static int __init init_hw_perf_events(void) | |||
1557 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; | 1171 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; |
1558 | 1172 | ||
1559 | perf_events_lapic_init(); | 1173 | perf_events_lapic_init(); |
1560 | register_die_notifier(&perf_event_nmi_notifier); | 1174 | register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI"); |
1561 | 1175 | ||
1562 | unconstrained = (struct event_constraint) | 1176 | unconstrained = (struct event_constraint) |
1563 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, | 1177 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, |
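Besides the handler conversion, this file gains a kfree_on_online hand-off: a cpu_prepare/cpu_starting callback that discovers its allocation is redundant cannot safely kfree() it in that early, atomic bring-up context, so it parks the pointer and the hotplug notifier frees it at CPU_ONLINE time. A hedged reconstruction of how the AMD northbridge code might use it (the helper functions are invented):

```c
#include <linux/percpu.h>
#include "perf_event.h"		/* struct cpu_hw_events, struct amd_nb */

/* Hypothetical helpers, assumed for the sketch only: */
extern bool sibling_already_has_nb(struct amd_nb *nb);
extern struct amd_nb *shared_nb(struct amd_nb *nb);

static void example_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb = cpuc->amd_nb;

	if (nb && sibling_already_has_nb(nb)) {
		cpuc->kfree_on_online = nb;	/* freed at CPU_ONLINE */
		cpuc->amd_nb = shared_nb(nb);	/* reuse the sibling's copy */
	}
}
```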
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
new file mode 100644
index 000000000000..b9698d40ac4b
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -0,0 +1,505 @@ | |||
1 | /* | ||
2 | * Performance events x86 architecture header | ||
3 | * | ||
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2009 Jaswinder Singh Rajput | ||
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | ||
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | ||
10 | * Copyright (C) 2009 Google, Inc., Stephane Eranian | ||
11 | * | ||
12 | * For licencing details see kernel-base/COPYING | ||
13 | */ | ||
14 | |||
15 | #include <linux/perf_event.h> | ||
16 | |||
17 | /* | ||
18 | * | NHM/WSM | SNB | | ||
19 | * register ------------------------------- | ||
20 | * | HT | no HT | HT | no HT | | ||
21 | *----------------------------------------- | ||
22 | * offcore | core | core | cpu | core | | ||
23 | * lbr_sel | core | core | cpu | core | | ||
24 | * ld_lat | cpu | core | cpu | core | | ||
25 | *----------------------------------------- | ||
26 | * | ||
27 | * Given that there is a small number of shared regs, | ||
28 | * we can pre-allocate their slot in the per-cpu | ||
29 | * per-core reg tables. | ||
30 | */ | ||
31 | enum extra_reg_type { | ||
32 | EXTRA_REG_NONE = -1, /* not used */ | ||
33 | |||
34 | EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ | ||
35 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ | ||
36 | |||
37 | EXTRA_REG_MAX /* number of entries needed */ | ||
38 | }; | ||
39 | |||
40 | struct event_constraint { | ||
41 | union { | ||
42 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
43 | u64 idxmsk64; | ||
44 | }; | ||
45 | u64 code; | ||
46 | u64 cmask; | ||
47 | int weight; | ||
48 | }; | ||
49 | |||
50 | struct amd_nb { | ||
51 | int nb_id; /* NorthBridge id */ | ||
52 | int refcnt; /* reference count */ | ||
53 | struct perf_event *owners[X86_PMC_IDX_MAX]; | ||
54 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; | ||
55 | }; | ||
56 | |||
57 | /* The maximal number of PEBS events: */ | ||
58 | #define MAX_PEBS_EVENTS 4 | ||
59 | |||
60 | /* | ||
61 | * A debug store configuration. | ||
62 | * | ||
63 | * We only support architectures that use 64bit fields. | ||
64 | */ | ||
65 | struct debug_store { | ||
66 | u64 bts_buffer_base; | ||
67 | u64 bts_index; | ||
68 | u64 bts_absolute_maximum; | ||
69 | u64 bts_interrupt_threshold; | ||
70 | u64 pebs_buffer_base; | ||
71 | u64 pebs_index; | ||
72 | u64 pebs_absolute_maximum; | ||
73 | u64 pebs_interrupt_threshold; | ||
74 | u64 pebs_event_reset[MAX_PEBS_EVENTS]; | ||
75 | }; | ||
76 | |||
77 | /* | ||
78 | * Per register state. | ||
79 | */ | ||
80 | struct er_account { | ||
81 | raw_spinlock_t lock; /* per-core: protect structure */ | ||
82 | u64 config; /* extra MSR config */ | ||
83 | u64 reg; /* extra MSR number */ | ||
84 | atomic_t ref; /* reference count */ | ||
85 | }; | ||
86 | |||
87 | /* | ||
88 | * Per core/cpu state | ||
89 | * | ||
90 | * Used to coordinate shared registers between HT threads or | ||
91 | * among events on a single PMU. | ||
92 | */ | ||
93 | struct intel_shared_regs { | ||
94 | struct er_account regs[EXTRA_REG_MAX]; | ||
95 | int refcnt; /* per-core: #HT threads */ | ||
96 | unsigned core_id; /* per-core: core id */ | ||
97 | }; | ||
98 | |||
99 | #define MAX_LBR_ENTRIES 16 | ||
100 | |||
101 | struct cpu_hw_events { | ||
102 | /* | ||
103 | * Generic x86 PMC bits | ||
104 | */ | ||
105 | struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ | ||
106 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
107 | unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
108 | int enabled; | ||
109 | |||
110 | int n_events; | ||
111 | int n_added; | ||
112 | int n_txn; | ||
113 | int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ | ||
114 | u64 tags[X86_PMC_IDX_MAX]; | ||
115 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ | ||
116 | |||
117 | unsigned int group_flag; | ||
118 | |||
119 | /* | ||
120 | * Intel DebugStore bits | ||
121 | */ | ||
122 | struct debug_store *ds; | ||
123 | u64 pebs_enabled; | ||
124 | |||
125 | /* | ||
126 | * Intel LBR bits | ||
127 | */ | ||
128 | int lbr_users; | ||
129 | void *lbr_context; | ||
130 | struct perf_branch_stack lbr_stack; | ||
131 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | ||
132 | |||
133 | /* | ||
134 | * Intel host/guest exclude bits | ||
135 | */ | ||
136 | u64 intel_ctrl_guest_mask; | ||
137 | u64 intel_ctrl_host_mask; | ||
138 | struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX]; | ||
139 | |||
140 | /* | ||
141 | * manage shared (per-core, per-cpu) registers | ||
142 | * used on Intel NHM/WSM/SNB | ||
143 | */ | ||
144 | struct intel_shared_regs *shared_regs; | ||
145 | |||
146 | /* | ||
147 | * AMD specific bits | ||
148 | */ | ||
149 | struct amd_nb *amd_nb; | ||
150 | |||
151 | void *kfree_on_online; | ||
152 | }; | ||
153 | |||
154 | #define __EVENT_CONSTRAINT(c, n, m, w) {\ | ||
155 | { .idxmsk64 = (n) }, \ | ||
156 | .code = (c), \ | ||
157 | .cmask = (m), \ | ||
158 | .weight = (w), \ | ||
159 | } | ||
160 | |||
161 | #define EVENT_CONSTRAINT(c, n, m) \ | ||
162 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) | ||
163 | |||
164 | /* | ||
165 | * Constraint on the Event code. | ||
166 | */ | ||
167 | #define INTEL_EVENT_CONSTRAINT(c, n) \ | ||
168 | EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) | ||
169 | |||
170 | /* | ||
171 | * Constraint on the Event code + UMask + fixed-mask | ||
172 | * | ||
173 | * filter mask to validate fixed counter events. | ||
174 | * the following filters disqualify for fixed counters: | ||
175 | * - inv | ||
176 | * - edge | ||
177 | * - cnt-mask | ||
178 | * The other filters are supported by fixed counters. | ||
179 | * The any-thread option is supported starting with v3. | ||
180 | */ | ||
181 | #define FIXED_EVENT_CONSTRAINT(c, n) \ | ||
182 | EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK) | ||
183 | |||
184 | /* | ||
185 | * Constraint on the Event code + UMask | ||
186 | */ | ||
187 | #define INTEL_UEVENT_CONSTRAINT(c, n) \ | ||
188 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) | ||
189 | |||
190 | #define EVENT_CONSTRAINT_END \ | ||
191 | EVENT_CONSTRAINT(0, 0, 0) | ||
192 | |||
193 | #define for_each_event_constraint(e, c) \ | ||
194 | for ((e) = (c); (e)->weight; (e)++) | ||
195 | |||
196 | /* | ||
197 | * Extra registers for specific events. | ||
198 | * | ||
199 | * Some events need large masks and require external MSRs. | ||
200 | * Those extra MSRs end up being shared for all events on | ||
201 | * a PMU and sometimes between PMU of sibling HT threads. | ||
202 | * In either case, the kernel needs to handle conflicting | ||
203 | * accesses to those extra, shared, regs. The data structure | ||
204 | * to manage those registers is stored in cpu_hw_event. | ||
205 | */ | ||
206 | struct extra_reg { | ||
207 | unsigned int event; | ||
208 | unsigned int msr; | ||
209 | u64 config_mask; | ||
210 | u64 valid_mask; | ||
211 | int idx; /* per_xxx->regs[] reg index */ | ||
212 | }; | ||
213 | |||
214 | #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ | ||
215 | .event = (e), \ | ||
216 | .msr = (ms), \ | ||
217 | .config_mask = (m), \ | ||
218 | .valid_mask = (vm), \ | ||
219 | .idx = EXTRA_REG_##i \ | ||
220 | } | ||
221 | |||
222 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ | ||
223 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) | ||
224 | |||
225 | #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) | ||
226 | |||
227 | union perf_capabilities { | ||
228 | struct { | ||
229 | u64 lbr_format:6; | ||
230 | u64 pebs_trap:1; | ||
231 | u64 pebs_arch_reg:1; | ||
232 | u64 pebs_format:4; | ||
233 | u64 smm_freeze:1; | ||
234 | }; | ||
235 | u64 capabilities; | ||
236 | }; | ||
237 | |||
238 | /* | ||
239 | * struct x86_pmu - generic x86 pmu | ||
240 | */ | ||
241 | struct x86_pmu { | ||
242 | /* | ||
243 | * Generic x86 PMC bits | ||
244 | */ | ||
245 | const char *name; | ||
246 | int version; | ||
247 | int (*handle_irq)(struct pt_regs *); | ||
248 | void (*disable_all)(void); | ||
249 | void (*enable_all)(int added); | ||
250 | void (*enable)(struct perf_event *); | ||
251 | void (*disable)(struct perf_event *); | ||
252 | int (*hw_config)(struct perf_event *event); | ||
253 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); | ||
254 | unsigned eventsel; | ||
255 | unsigned perfctr; | ||
256 | u64 (*event_map)(int); | ||
257 | int max_events; | ||
258 | int num_counters; | ||
259 | int num_counters_fixed; | ||
260 | int cntval_bits; | ||
261 | u64 cntval_mask; | ||
262 | int apic; | ||
263 | u64 max_period; | ||
264 | struct event_constraint * | ||
265 | (*get_event_constraints)(struct cpu_hw_events *cpuc, | ||
266 | struct perf_event *event); | ||
267 | |||
268 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | ||
269 | struct perf_event *event); | ||
270 | struct event_constraint *event_constraints; | ||
271 | void (*quirks)(void); | ||
272 | int perfctr_second_write; | ||
273 | |||
274 | int (*cpu_prepare)(int cpu); | ||
275 | void (*cpu_starting)(int cpu); | ||
276 | void (*cpu_dying)(int cpu); | ||
277 | void (*cpu_dead)(int cpu); | ||
278 | |||
279 | /* | ||
280 | * Intel Arch Perfmon v2+ | ||
281 | */ | ||
282 | u64 intel_ctrl; | ||
283 | union perf_capabilities intel_cap; | ||
284 | |||
285 | /* | ||
286 | * Intel DebugStore bits | ||
287 | */ | ||
288 | int bts, pebs; | ||
289 | int bts_active, pebs_active; | ||
290 | int pebs_record_size; | ||
291 | void (*drain_pebs)(struct pt_regs *regs); | ||
292 | struct event_constraint *pebs_constraints; | ||
293 | |||
294 | /* | ||
295 | * Intel LBR | ||
296 | */ | ||
297 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ | ||
298 | int lbr_nr; /* hardware stack size */ | ||
299 | |||
300 | /* | ||
301 | * Extra registers for events | ||
302 | */ | ||
303 | struct extra_reg *extra_regs; | ||
304 | unsigned int er_flags; | ||
305 | |||
306 | /* | ||
307 | * Intel host/guest support (KVM) | ||
308 | */ | ||
309 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | ||
310 | }; | ||
311 | |||
312 | #define ERF_NO_HT_SHARING 1 | ||
313 | #define ERF_HAS_RSP_1 2 | ||
314 | |||
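A small sketch (helper name made up) of how the ERF_* bits are meant to be tested against x86_pmu.er_flags:

```c
/* Sketch: extra-reg state may be shared across HT siblings unless opted out. */
static inline bool example_extra_regs_shared_across_ht(void)
{
	return !(x86_pmu.er_flags & ERF_NO_HT_SHARING);
}
```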
315 | extern struct x86_pmu x86_pmu __read_mostly; | ||
316 | |||
317 | DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
318 | |||
319 | int x86_perf_event_set_period(struct perf_event *event); | ||
320 | |||
321 | /* | ||
322 | * Generalized hw caching related hw_event table, filled | ||
323 | * in on a per-model basis. A value of 0 means | ||
324 | * 'not supported', -1 means 'hw_event makes no sense on | ||
325 | * this CPU', any other value means the raw hw_event | ||
326 | * ID. | ||
327 | */ | ||
328 | |||
329 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
330 | |||
331 | extern u64 __read_mostly hw_cache_event_ids | ||
332 | [PERF_COUNT_HW_CACHE_MAX] | ||
333 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
334 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
335 | extern u64 __read_mostly hw_cache_extra_regs | ||
336 | [PERF_COUNT_HW_CACHE_MAX] | ||
337 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
338 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
339 | |||
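A short sketch (helper name made up) of how the generalized cache table is indexed with the C() helper, following the 0 / -1 / raw-id convention described above:

```c
/* Sketch: translate an L1D read-miss cache event into a raw hw_event id. */
static int example_cache_event_id(u64 *config)
{
	u64 val = hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)];

	if (val == 0)
		return -ENOENT;		/* not supported on this model */
	if (val == -1)
		return -EINVAL;		/* combination makes no sense here */

	*config = val;			/* raw hw_event id to program */
	return 0;
}
```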
340 | u64 x86_perf_event_update(struct perf_event *event); | ||
341 | |||
342 | static inline int x86_pmu_addr_offset(int index) | ||
343 | { | ||
344 | int offset; | ||
345 | |||
346 | /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ | ||
347 | alternative_io(ASM_NOP2, | ||
348 | "shll $1, %%eax", | ||
349 | X86_FEATURE_PERFCTR_CORE, | ||
350 | "=a" (offset), | ||
351 | "a" (index)); | ||
352 | |||
353 | return offset; | ||
354 | } | ||
355 | |||
356 | static inline unsigned int x86_pmu_config_addr(int index) | ||
357 | { | ||
358 | return x86_pmu.eventsel + x86_pmu_addr_offset(index); | ||
359 | } | ||
360 | |||
361 | static inline unsigned int x86_pmu_event_addr(int index) | ||
362 | { | ||
363 | return x86_pmu.perfctr + x86_pmu_addr_offset(index); | ||
364 | } | ||
365 | |||
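For illustration (helper name made up), callers program a counter through these helpers rather than hard-coding the MSR layout; on CPUs with X86_FEATURE_PERFCTR_CORE the alternative above doubles the offset because control and counter MSRs are interleaved:

```c
/* Sketch: program eventsel and counter MSRs for hardware counter 'idx'. */
static void example_program_counter(int idx, u64 config, u64 count)
{
	wrmsrl(x86_pmu_config_addr(idx), config);	/* eventsel MSR */
	wrmsrl(x86_pmu_event_addr(idx), count);		/* counter MSR */
}
```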
366 | int x86_setup_perfctr(struct perf_event *event); | ||
367 | |||
368 | int x86_pmu_hw_config(struct perf_event *event); | ||
369 | |||
370 | void x86_pmu_disable_all(void); | ||
371 | |||
372 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | ||
373 | u64 enable_mask) | ||
374 | { | ||
375 | if (hwc->extra_reg.reg) | ||
376 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); | ||
377 | wrmsrl(hwc->config_base, hwc->config | enable_mask); | ||
378 | } | ||
379 | |||
380 | void x86_pmu_enable_all(int added); | ||
381 | |||
382 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); | ||
383 | |||
384 | void x86_pmu_stop(struct perf_event *event, int flags); | ||
385 | |||
386 | static inline void x86_pmu_disable_event(struct perf_event *event) | ||
387 | { | ||
388 | struct hw_perf_event *hwc = &event->hw; | ||
389 | |||
390 | wrmsrl(hwc->config_base, hwc->config); | ||
391 | } | ||
392 | |||
393 | void x86_pmu_enable_event(struct perf_event *event); | ||
394 | |||
395 | int x86_pmu_handle_irq(struct pt_regs *regs); | ||
396 | |||
397 | extern struct event_constraint emptyconstraint; | ||
398 | |||
399 | extern struct event_constraint unconstrained; | ||
400 | |||
401 | #ifdef CONFIG_CPU_SUP_AMD | ||
402 | |||
403 | int amd_pmu_init(void); | ||
404 | |||
405 | #else /* CONFIG_CPU_SUP_AMD */ | ||
406 | |||
407 | static inline int amd_pmu_init(void) | ||
408 | { | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | #endif /* CONFIG_CPU_SUP_AMD */ | ||
413 | |||
414 | #ifdef CONFIG_CPU_SUP_INTEL | ||
415 | |||
416 | int intel_pmu_save_and_restart(struct perf_event *event); | ||
417 | |||
418 | struct event_constraint * | ||
419 | x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event); | ||
420 | |||
421 | struct intel_shared_regs *allocate_shared_regs(int cpu); | ||
422 | |||
423 | int intel_pmu_init(void); | ||
424 | |||
425 | void init_debug_store_on_cpu(int cpu); | ||
426 | |||
427 | void fini_debug_store_on_cpu(int cpu); | ||
428 | |||
429 | void release_ds_buffers(void); | ||
430 | |||
431 | void reserve_ds_buffers(void); | ||
432 | |||
433 | extern struct event_constraint bts_constraint; | ||
434 | |||
435 | void intel_pmu_enable_bts(u64 config); | ||
436 | |||
437 | void intel_pmu_disable_bts(void); | ||
438 | |||
439 | int intel_pmu_drain_bts_buffer(void); | ||
440 | |||
441 | extern struct event_constraint intel_core2_pebs_event_constraints[]; | ||
442 | |||
443 | extern struct event_constraint intel_atom_pebs_event_constraints[]; | ||
444 | |||
445 | extern struct event_constraint intel_nehalem_pebs_event_constraints[]; | ||
446 | |||
447 | extern struct event_constraint intel_westmere_pebs_event_constraints[]; | ||
448 | |||
449 | extern struct event_constraint intel_snb_pebs_event_constraints[]; | ||
450 | |||
451 | struct event_constraint *intel_pebs_constraints(struct perf_event *event); | ||
452 | |||
453 | void intel_pmu_pebs_enable(struct perf_event *event); | ||
454 | |||
455 | void intel_pmu_pebs_disable(struct perf_event *event); | ||
456 | |||
457 | void intel_pmu_pebs_enable_all(void); | ||
458 | |||
459 | void intel_pmu_pebs_disable_all(void); | ||
460 | |||
461 | void intel_ds_init(void); | ||
462 | |||
463 | void intel_pmu_lbr_reset(void); | ||
464 | |||
465 | void intel_pmu_lbr_enable(struct perf_event *event); | ||
466 | |||
467 | void intel_pmu_lbr_disable(struct perf_event *event); | ||
468 | |||
469 | void intel_pmu_lbr_enable_all(void); | ||
470 | |||
471 | void intel_pmu_lbr_disable_all(void); | ||
472 | |||
473 | void intel_pmu_lbr_read(void); | ||
474 | |||
475 | void intel_pmu_lbr_init_core(void); | ||
476 | |||
477 | void intel_pmu_lbr_init_nhm(void); | ||
478 | |||
479 | void intel_pmu_lbr_init_atom(void); | ||
480 | |||
481 | int p4_pmu_init(void); | ||
482 | |||
483 | int p6_pmu_init(void); | ||
484 | |||
485 | #else /* CONFIG_CPU_SUP_INTEL */ | ||
486 | |||
487 | static inline void reserve_ds_buffers(void) | ||
488 | { | ||
489 | } | ||
490 | |||
491 | static inline void release_ds_buffers(void) | ||
492 | { | ||
493 | } | ||
494 | |||
495 | static inline int intel_pmu_init(void) | ||
496 | { | ||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static inline struct intel_shared_regs *allocate_shared_regs(int cpu) | ||
501 | { | ||
502 | return NULL; | ||
503 | } | ||
504 | |||
505 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 941caa2e449b..aeefd45697a2 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -1,4 +1,10 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_AMD | 1 | #include <linux/perf_event.h> |
2 | #include <linux/types.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/slab.h> | ||
5 | #include <asm/apicdef.h> | ||
6 | |||
7 | #include "perf_event.h" | ||
2 | 8 | ||
3 | static __initconst const u64 amd_hw_cache_event_ids | 9 | static __initconst const u64 amd_hw_cache_event_ids |
4 | [PERF_COUNT_HW_CACHE_MAX] | 10 | [PERF_COUNT_HW_CACHE_MAX] |
@@ -132,6 +138,19 @@ static int amd_pmu_hw_config(struct perf_event *event) | |||
132 | if (ret) | 138 | if (ret) |
133 | return ret; | 139 | return ret; |
134 | 140 | ||
141 | if (event->attr.exclude_host && event->attr.exclude_guest) | ||
142 | /* | ||
143 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 | ||
144 | * and will count in both modes. We don't want to count in that | ||
145 | * case so we emulate no-counting by setting US = OS = 0. | ||
146 | */ | ||
147 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | | ||
148 | ARCH_PERFMON_EVENTSEL_OS); | ||
149 | else if (event->attr.exclude_host) | ||
150 | event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; | ||
151 | else if (event->attr.exclude_guest) | ||
152 | event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; | ||
153 | |||
135 | if (event->attr.type != PERF_TYPE_RAW) | 154 | if (event->attr.type != PERF_TYPE_RAW) |
136 | return 0; | 155 | return 0; |
137 | 156 | ||
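From userspace, the HO/GO handling above is driven by the exclude_guest/exclude_host bits in perf_event_attr. A self-contained sketch follows (the helper is hypothetical, not part of this patch):

```c
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Count CPU cycles in the host only; time spent in a guest is excluded. */
static int open_host_only_cycles(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_guest = 1;		/* don't count while a guest runs */

	/* glibc has no wrapper for perf_event_open; use the raw syscall */
	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}
```

With exclude_guest set, amd_pmu_hw_config() above selects AMD_PERFMON_EVENTSEL_HOSTONLY; setting both exclude bits clears USR/OS so the event effectively never counts.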
@@ -350,7 +369,7 @@ static void amd_pmu_cpu_starting(int cpu) | |||
350 | continue; | 369 | continue; |
351 | 370 | ||
352 | if (nb->nb_id == nb_id) { | 371 | if (nb->nb_id == nb_id) { |
353 | kfree(cpuc->amd_nb); | 372 | cpuc->kfree_on_online = cpuc->amd_nb; |
354 | cpuc->amd_nb = nb; | 373 | cpuc->amd_nb = nb; |
355 | break; | 374 | break; |
356 | } | 375 | } |
@@ -392,7 +411,7 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
392 | .perfctr = MSR_K7_PERFCTR0, | 411 | .perfctr = MSR_K7_PERFCTR0, |
393 | .event_map = amd_pmu_event_map, | 412 | .event_map = amd_pmu_event_map, |
394 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | 413 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), |
395 | .num_counters = 4, | 414 | .num_counters = AMD64_NUM_COUNTERS, |
396 | .cntval_bits = 48, | 415 | .cntval_bits = 48, |
397 | .cntval_mask = (1ULL << 48) - 1, | 416 | .cntval_mask = (1ULL << 48) - 1, |
398 | .apic = 1, | 417 | .apic = 1, |
@@ -556,7 +575,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = { | |||
556 | .perfctr = MSR_F15H_PERF_CTR, | 575 | .perfctr = MSR_F15H_PERF_CTR, |
557 | .event_map = amd_pmu_event_map, | 576 | .event_map = amd_pmu_event_map, |
558 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | 577 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), |
559 | .num_counters = 6, | 578 | .num_counters = AMD64_NUM_COUNTERS_F15H, |
560 | .cntval_bits = 48, | 579 | .cntval_bits = 48, |
561 | .cntval_mask = (1ULL << 48) - 1, | 580 | .cntval_mask = (1ULL << 48) - 1, |
562 | .apic = 1, | 581 | .apic = 1, |
@@ -573,7 +592,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = { | |||
573 | #endif | 592 | #endif |
574 | }; | 593 | }; |
575 | 594 | ||
576 | static __init int amd_pmu_init(void) | 595 | __init int amd_pmu_init(void) |
577 | { | 596 | { |
578 | /* Performance-monitoring supported from K7 and later: */ | 597 | /* Performance-monitoring supported from K7 and later: */ |
579 | if (boot_cpu_data.x86 < 6) | 598 | if (boot_cpu_data.x86 < 6) |
@@ -602,12 +621,3 @@ static __init int amd_pmu_init(void) | |||
602 | 621 | ||
603 | return 0; | 622 | return 0; |
604 | } | 623 | } |
605 | |||
606 | #else /* CONFIG_CPU_SUP_AMD */ | ||
607 | |||
608 | static int amd_pmu_init(void) | ||
609 | { | ||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | #endif | ||
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c new file mode 100644 index 000000000000..ab6343d21825 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c | |||
@@ -0,0 +1,294 @@ | |||
1 | /* | ||
2 | * Performance events - AMD IBS | ||
3 | * | ||
4 | * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter | ||
5 | * | ||
6 | * For licencing details see kernel-base/COPYING | ||
7 | */ | ||
8 | |||
9 | #include <linux/perf_event.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/pci.h> | ||
12 | |||
13 | #include <asm/apic.h> | ||
14 | |||
15 | static u32 ibs_caps; | ||
16 | |||
17 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) | ||
18 | |||
19 | static struct pmu perf_ibs; | ||
20 | |||
21 | static int perf_ibs_init(struct perf_event *event) | ||
22 | { | ||
23 | if (perf_ibs.type != event->attr.type) | ||
24 | return -ENOENT; | ||
25 | return 0; | ||
26 | } | ||
27 | |||
28 | static int perf_ibs_add(struct perf_event *event, int flags) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | static void perf_ibs_del(struct perf_event *event, int flags) | ||
34 | { | ||
35 | } | ||
36 | |||
37 | static struct pmu perf_ibs = { | ||
38 | .event_init = perf_ibs_init, | ||
39 | .add = perf_ibs_add, | ||
40 | .del = perf_ibs_del, | ||
41 | }; | ||
42 | |||
43 | static __init int perf_event_ibs_init(void) | ||
44 | { | ||
45 | if (!ibs_caps) | ||
46 | return -ENODEV; /* ibs not supported by the cpu */ | ||
47 | |||
48 | perf_pmu_register(&perf_ibs, "ibs", -1); | ||
49 | printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps); | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */ | ||
55 | |||
56 | static __init int perf_event_ibs_init(void) { return 0; } | ||
57 | |||
58 | #endif | ||
59 | |||
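The skeleton PMU above is registered as "ibs" with a dynamically allocated type. A hedged userspace sketch (helper and sysfs path assumed, not part of this patch) of how such a dynamic PMU is typically opened, reading its type id from sysfs:

```c
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a (so far unconfigured) event on the dynamically typed "ibs" PMU. */
static int open_ibs_event(void)
{
	struct perf_event_attr attr;
	FILE *f;
	int type;

	f = fopen("/sys/bus/event_source/devices/ibs/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* dynamic id assigned by perf_pmu_register() */

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
```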
60 | /* IBS - apic initialization, for perf and oprofile */ | ||
61 | |||
62 | static __init u32 __get_ibs_caps(void) | ||
63 | { | ||
64 | u32 caps; | ||
65 | unsigned int max_level; | ||
66 | |||
67 | if (!boot_cpu_has(X86_FEATURE_IBS)) | ||
68 | return 0; | ||
69 | |||
70 | /* check IBS cpuid feature flags */ | ||
71 | max_level = cpuid_eax(0x80000000); | ||
72 | if (max_level < IBS_CPUID_FEATURES) | ||
73 | return IBS_CAPS_DEFAULT; | ||
74 | |||
75 | caps = cpuid_eax(IBS_CPUID_FEATURES); | ||
76 | if (!(caps & IBS_CAPS_AVAIL)) | ||
77 | /* cpuid flags not valid */ | ||
78 | return IBS_CAPS_DEFAULT; | ||
79 | |||
80 | return caps; | ||
81 | } | ||
82 | |||
83 | u32 get_ibs_caps(void) | ||
84 | { | ||
85 | return ibs_caps; | ||
86 | } | ||
87 | |||
88 | EXPORT_SYMBOL(get_ibs_caps); | ||
89 | |||
90 | static inline int get_eilvt(int offset) | ||
91 | { | ||
92 | return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); | ||
93 | } | ||
94 | |||
95 | static inline int put_eilvt(int offset) | ||
96 | { | ||
97 | return !setup_APIC_eilvt(offset, 0, 0, 1); | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Check and reserve APIC extended interrupt LVT offset for IBS if available. | ||
102 | */ | ||
103 | static inline int ibs_eilvt_valid(void) | ||
104 | { | ||
105 | int offset; | ||
106 | u64 val; | ||
107 | int valid = 0; | ||
108 | |||
109 | preempt_disable(); | ||
110 | |||
111 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
112 | offset = val & IBSCTL_LVT_OFFSET_MASK; | ||
113 | |||
114 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) { | ||
115 | pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n", | ||
116 | smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); | ||
117 | goto out; | ||
118 | } | ||
119 | |||
120 | if (!get_eilvt(offset)) { | ||
121 | pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n", | ||
122 | smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); | ||
123 | goto out; | ||
124 | } | ||
125 | |||
126 | valid = 1; | ||
127 | out: | ||
128 | preempt_enable(); | ||
129 | |||
130 | return valid; | ||
131 | } | ||
132 | |||
133 | static int setup_ibs_ctl(int ibs_eilvt_off) | ||
134 | { | ||
135 | struct pci_dev *cpu_cfg; | ||
136 | int nodes; | ||
137 | u32 value = 0; | ||
138 | |||
139 | nodes = 0; | ||
140 | cpu_cfg = NULL; | ||
141 | do { | ||
142 | cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD, | ||
143 | PCI_DEVICE_ID_AMD_10H_NB_MISC, | ||
144 | cpu_cfg); | ||
145 | if (!cpu_cfg) | ||
146 | break; | ||
147 | ++nodes; | ||
148 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off | ||
149 | | IBSCTL_LVT_OFFSET_VALID); | ||
150 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | ||
151 | if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { | ||
152 | pci_dev_put(cpu_cfg); | ||
153 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | ||
154 | "IBSCTL = 0x%08x\n", value); | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | } while (1); | ||
158 | |||
159 | if (!nodes) { | ||
160 | printk(KERN_DEBUG "No CPU node configured for IBS\n"); | ||
161 | return -ENODEV; | ||
162 | } | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * This runs only on the current cpu. We try to find an LVT offset and | ||
169 | * set up the local APIC. For this we must disable preemption. On | ||
170 | * success we initialize all nodes with this offset. This then updates | ||
171 | * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of | ||
172 | * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which | ||
173 | * uses the new offset. | ||
174 | */ | ||
175 | static int force_ibs_eilvt_setup(void) | ||
176 | { | ||
177 | int offset; | ||
178 | int ret; | ||
179 | |||
180 | preempt_disable(); | ||
181 | /* find the next free available EILVT entry, skip offset 0 */ | ||
182 | for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { | ||
183 | if (get_eilvt(offset)) | ||
184 | break; | ||
185 | } | ||
186 | preempt_enable(); | ||
187 | |||
188 | if (offset == APIC_EILVT_NR_MAX) { | ||
189 | printk(KERN_DEBUG "No EILVT entry available\n"); | ||
190 | return -EBUSY; | ||
191 | } | ||
192 | |||
193 | ret = setup_ibs_ctl(offset); | ||
194 | if (ret) | ||
195 | goto out; | ||
196 | |||
197 | if (!ibs_eilvt_valid()) { | ||
198 | ret = -EFAULT; | ||
199 | goto out; | ||
200 | } | ||
201 | |||
202 | pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset); | ||
203 | pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); | ||
204 | |||
205 | return 0; | ||
206 | out: | ||
207 | preempt_disable(); | ||
208 | put_eilvt(offset); | ||
209 | preempt_enable(); | ||
210 | return ret; | ||
211 | } | ||
212 | |||
213 | static inline int get_ibs_lvt_offset(void) | ||
214 | { | ||
215 | u64 val; | ||
216 | |||
217 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
218 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) | ||
219 | return -EINVAL; | ||
220 | |||
221 | return val & IBSCTL_LVT_OFFSET_MASK; | ||
222 | } | ||
223 | |||
224 | static void setup_APIC_ibs(void *dummy) | ||
225 | { | ||
226 | int offset; | ||
227 | |||
228 | offset = get_ibs_lvt_offset(); | ||
229 | if (offset < 0) | ||
230 | goto failed; | ||
231 | |||
232 | if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) | ||
233 | return; | ||
234 | failed: | ||
235 | pr_warn("perf: IBS APIC setup failed on cpu #%d\n", | ||
236 | smp_processor_id()); | ||
237 | } | ||
238 | |||
239 | static void clear_APIC_ibs(void *dummy) | ||
240 | { | ||
241 | int offset; | ||
242 | |||
243 | offset = get_ibs_lvt_offset(); | ||
244 | if (offset >= 0) | ||
245 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | ||
246 | } | ||
247 | |||
248 | static int __cpuinit | ||
249 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
250 | { | ||
251 | switch (action & ~CPU_TASKS_FROZEN) { | ||
252 | case CPU_STARTING: | ||
253 | setup_APIC_ibs(NULL); | ||
254 | break; | ||
255 | case CPU_DYING: | ||
256 | clear_APIC_ibs(NULL); | ||
257 | break; | ||
258 | default: | ||
259 | break; | ||
260 | } | ||
261 | |||
262 | return NOTIFY_OK; | ||
263 | } | ||
264 | |||
265 | static __init int amd_ibs_init(void) | ||
266 | { | ||
267 | u32 caps; | ||
268 | int ret; | ||
269 | |||
270 | caps = __get_ibs_caps(); | ||
271 | if (!caps) | ||
272 | return -ENODEV; /* ibs not supported by the cpu */ | ||
273 | |||
274 | if (!ibs_eilvt_valid()) { | ||
275 | ret = force_ibs_eilvt_setup(); | ||
276 | if (ret) { | ||
277 | pr_err("Failed to setup IBS, %d\n", ret); | ||
278 | return ret; | ||
279 | } | ||
280 | } | ||
281 | |||
282 | get_online_cpus(); | ||
283 | ibs_caps = caps; | ||
284 | /* make ibs_caps visible to other cpus: */ | ||
285 | smp_mb(); | ||
286 | perf_cpu_notifier(perf_ibs_cpu_notifier); | ||
287 | smp_call_function(setup_APIC_ibs, NULL, 1); | ||
288 | put_online_cpus(); | ||
289 | |||
290 | return perf_event_ibs_init(); | ||
291 | } | ||
292 | |||
293 | /* Since we need the pci subsystem to init ibs we can't do this earlier: */ | ||
294 | device_initcall(amd_ibs_init); | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index f88af2c2a561..e09ca20e86ee 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -1,16 +1,19 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | ||
2 | |||
3 | /* | 1 | /* |
4 | * Per core/cpu state | 2 | * Per core/cpu state |
5 | * | 3 | * |
6 | * Used to coordinate shared registers between HT threads or | 4 | * Used to coordinate shared registers between HT threads or |
7 | * among events on a single PMU. | 5 | * among events on a single PMU. |
8 | */ | 6 | */ |
9 | struct intel_shared_regs { | 7 | |
10 | struct er_account regs[EXTRA_REG_MAX]; | 8 | #include <linux/stddef.h> |
11 | int refcnt; /* per-core: #HT threads */ | 9 | #include <linux/types.h> |
12 | unsigned core_id; /* per-core: core id */ | 10 | #include <linux/init.h> |
13 | }; | 11 | #include <linux/slab.h> |
12 | |||
13 | #include <asm/hardirq.h> | ||
14 | #include <asm/apic.h> | ||
15 | |||
16 | #include "perf_event.h" | ||
14 | 17 | ||
15 | /* | 18 | /* |
16 | * Intel PerfMon, used on Core and later. | 19 | * Intel PerfMon, used on Core and later. |
@@ -746,7 +749,8 @@ static void intel_pmu_enable_all(int added) | |||
746 | 749 | ||
747 | intel_pmu_pebs_enable_all(); | 750 | intel_pmu_pebs_enable_all(); |
748 | intel_pmu_lbr_enable_all(); | 751 | intel_pmu_lbr_enable_all(); |
749 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 752 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, |
753 | x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); | ||
750 | 754 | ||
751 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | 755 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { |
752 | struct perf_event *event = | 756 | struct perf_event *event = |
@@ -869,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) | |||
869 | static void intel_pmu_disable_event(struct perf_event *event) | 873 | static void intel_pmu_disable_event(struct perf_event *event) |
870 | { | 874 | { |
871 | struct hw_perf_event *hwc = &event->hw; | 875 | struct hw_perf_event *hwc = &event->hw; |
876 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
872 | 877 | ||
873 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | 878 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { |
874 | intel_pmu_disable_bts(); | 879 | intel_pmu_disable_bts(); |
@@ -876,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event) | |||
876 | return; | 881 | return; |
877 | } | 882 | } |
878 | 883 | ||
884 | cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); | ||
885 | cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); | ||
886 | |||
879 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 887 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
880 | intel_pmu_disable_fixed(hwc); | 888 | intel_pmu_disable_fixed(hwc); |
881 | return; | 889 | return; |
@@ -921,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) | |||
921 | static void intel_pmu_enable_event(struct perf_event *event) | 929 | static void intel_pmu_enable_event(struct perf_event *event) |
922 | { | 930 | { |
923 | struct hw_perf_event *hwc = &event->hw; | 931 | struct hw_perf_event *hwc = &event->hw; |
932 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
924 | 933 | ||
925 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | 934 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { |
926 | if (!__this_cpu_read(cpu_hw_events.enabled)) | 935 | if (!__this_cpu_read(cpu_hw_events.enabled)) |
@@ -930,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
930 | return; | 939 | return; |
931 | } | 940 | } |
932 | 941 | ||
942 | if (event->attr.exclude_host) | ||
943 | cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); | ||
944 | if (event->attr.exclude_guest) | ||
945 | cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); | ||
946 | |||
933 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 947 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
934 | intel_pmu_enable_fixed(hwc); | 948 | intel_pmu_enable_fixed(hwc); |
935 | return; | 949 | return; |
@@ -945,7 +959,7 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
945 | * Save and restart an expired event. Called by NMI contexts, | 959 | * Save and restart an expired event. Called by NMI contexts, |
946 | * so it has to be careful about preempting normal event ops: | 960 | * so it has to be careful about preempting normal event ops: |
947 | */ | 961 | */ |
948 | static int intel_pmu_save_and_restart(struct perf_event *event) | 962 | int intel_pmu_save_and_restart(struct perf_event *event) |
949 | { | 963 | { |
950 | x86_perf_event_update(event); | 964 | x86_perf_event_update(event); |
951 | return x86_perf_event_set_period(event); | 965 | return x86_perf_event_set_period(event); |
@@ -1197,6 +1211,21 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc, | |||
1197 | return c; | 1211 | return c; |
1198 | } | 1212 | } |
1199 | 1213 | ||
1214 | struct event_constraint * | ||
1215 | x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
1216 | { | ||
1217 | struct event_constraint *c; | ||
1218 | |||
1219 | if (x86_pmu.event_constraints) { | ||
1220 | for_each_event_constraint(c, x86_pmu.event_constraints) { | ||
1221 | if ((event->hw.config & c->cmask) == c->code) | ||
1222 | return c; | ||
1223 | } | ||
1224 | } | ||
1225 | |||
1226 | return &unconstrained; | ||
1227 | } | ||
1228 | |||
1200 | static struct event_constraint * | 1229 | static struct event_constraint * |
1201 | intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | 1230 | intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) |
1202 | { | 1231 | { |
@@ -1284,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
1284 | return 0; | 1313 | return 0; |
1285 | } | 1314 | } |
1286 | 1315 | ||
1316 | struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | ||
1317 | { | ||
1318 | if (x86_pmu.guest_get_msrs) | ||
1319 | return x86_pmu.guest_get_msrs(nr); | ||
1320 | *nr = 0; | ||
1321 | return NULL; | ||
1322 | } | ||
1323 | EXPORT_SYMBOL_GPL(perf_guest_get_msrs); | ||
1324 | |||
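The consumer of perf_guest_get_msrs() is the hypervisor entry path. A simplified sketch follows (the real user, KVM, feeds these pairs into the VMCS MSR-load lists rather than writing them directly like this):

```c
/* Sketch: switch PMU MSRs to their guest values before entering a guest. */
static void example_switch_to_guest_pmu_msrs(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	if (!msrs)
		return;

	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);	/* use .host on VM-exit */
}
```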
1325 | static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) | ||
1326 | { | ||
1327 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1328 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | ||
1329 | |||
1330 | arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; | ||
1331 | arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; | ||
1332 | arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; | ||
1333 | |||
1334 | *nr = 1; | ||
1335 | return arr; | ||
1336 | } | ||
1337 | |||
1338 | static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) | ||
1339 | { | ||
1340 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1341 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | ||
1342 | int idx; | ||
1343 | |||
1344 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
1345 | struct perf_event *event = cpuc->events[idx]; | ||
1346 | |||
1347 | arr[idx].msr = x86_pmu_config_addr(idx); | ||
1348 | arr[idx].host = arr[idx].guest = 0; | ||
1349 | |||
1350 | if (!test_bit(idx, cpuc->active_mask)) | ||
1351 | continue; | ||
1352 | |||
1353 | arr[idx].host = arr[idx].guest = | ||
1354 | event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; | ||
1355 | |||
1356 | if (event->attr.exclude_host) | ||
1357 | arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | ||
1358 | else if (event->attr.exclude_guest) | ||
1359 | arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | ||
1360 | } | ||
1361 | |||
1362 | *nr = x86_pmu.num_counters; | ||
1363 | return arr; | ||
1364 | } | ||
1365 | |||
1366 | static void core_pmu_enable_event(struct perf_event *event) | ||
1367 | { | ||
1368 | if (!event->attr.exclude_host) | ||
1369 | x86_pmu_enable_event(event); | ||
1370 | } | ||
1371 | |||
1372 | static void core_pmu_enable_all(int added) | ||
1373 | { | ||
1374 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1375 | int idx; | ||
1376 | |||
1377 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
1378 | struct hw_perf_event *hwc = &cpuc->events[idx]->hw; | ||
1379 | |||
1380 | if (!test_bit(idx, cpuc->active_mask) || | ||
1381 | cpuc->events[idx]->attr.exclude_host) | ||
1382 | continue; | ||
1383 | |||
1384 | __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); | ||
1385 | } | ||
1386 | } | ||
1387 | |||
1287 | static __initconst const struct x86_pmu core_pmu = { | 1388 | static __initconst const struct x86_pmu core_pmu = { |
1288 | .name = "core", | 1389 | .name = "core", |
1289 | .handle_irq = x86_pmu_handle_irq, | 1390 | .handle_irq = x86_pmu_handle_irq, |
1290 | .disable_all = x86_pmu_disable_all, | 1391 | .disable_all = x86_pmu_disable_all, |
1291 | .enable_all = x86_pmu_enable_all, | 1392 | .enable_all = core_pmu_enable_all, |
1292 | .enable = x86_pmu_enable_event, | 1393 | .enable = core_pmu_enable_event, |
1293 | .disable = x86_pmu_disable_event, | 1394 | .disable = x86_pmu_disable_event, |
1294 | .hw_config = x86_pmu_hw_config, | 1395 | .hw_config = x86_pmu_hw_config, |
1295 | .schedule_events = x86_schedule_events, | 1396 | .schedule_events = x86_schedule_events, |
@@ -1307,9 +1408,10 @@ static __initconst const struct x86_pmu core_pmu = { | |||
1307 | .get_event_constraints = intel_get_event_constraints, | 1408 | .get_event_constraints = intel_get_event_constraints, |
1308 | .put_event_constraints = intel_put_event_constraints, | 1409 | .put_event_constraints = intel_put_event_constraints, |
1309 | .event_constraints = intel_core_event_constraints, | 1410 | .event_constraints = intel_core_event_constraints, |
1411 | .guest_get_msrs = core_guest_get_msrs, | ||
1310 | }; | 1412 | }; |
1311 | 1413 | ||
1312 | static struct intel_shared_regs *allocate_shared_regs(int cpu) | 1414 | struct intel_shared_regs *allocate_shared_regs(int cpu) |
1313 | { | 1415 | { |
1314 | struct intel_shared_regs *regs; | 1416 | struct intel_shared_regs *regs; |
1315 | int i; | 1417 | int i; |
@@ -1362,7 +1464,7 @@ static void intel_pmu_cpu_starting(int cpu) | |||
1362 | 1464 | ||
1363 | pc = per_cpu(cpu_hw_events, i).shared_regs; | 1465 | pc = per_cpu(cpu_hw_events, i).shared_regs; |
1364 | if (pc && pc->core_id == core_id) { | 1466 | if (pc && pc->core_id == core_id) { |
1365 | kfree(cpuc->shared_regs); | 1467 | cpuc->kfree_on_online = cpuc->shared_regs; |
1366 | cpuc->shared_regs = pc; | 1468 | cpuc->shared_regs = pc; |
1367 | break; | 1469 | break; |
1368 | } | 1470 | } |
@@ -1413,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
1413 | .cpu_prepare = intel_pmu_cpu_prepare, | 1515 | .cpu_prepare = intel_pmu_cpu_prepare, |
1414 | .cpu_starting = intel_pmu_cpu_starting, | 1516 | .cpu_starting = intel_pmu_cpu_starting, |
1415 | .cpu_dying = intel_pmu_cpu_dying, | 1517 | .cpu_dying = intel_pmu_cpu_dying, |
1518 | .guest_get_msrs = intel_guest_get_msrs, | ||
1416 | }; | 1519 | }; |
1417 | 1520 | ||
1418 | static void intel_clovertown_quirks(void) | 1521 | static void intel_clovertown_quirks(void) |
@@ -1441,7 +1544,7 @@ static void intel_clovertown_quirks(void) | |||
1441 | x86_pmu.pebs_constraints = NULL; | 1544 | x86_pmu.pebs_constraints = NULL; |
1442 | } | 1545 | } |
1443 | 1546 | ||
1444 | static __init int intel_pmu_init(void) | 1547 | __init int intel_pmu_init(void) |
1445 | { | 1548 | { |
1446 | union cpuid10_edx edx; | 1549 | union cpuid10_edx edx; |
1447 | union cpuid10_eax eax; | 1550 | union cpuid10_eax eax; |
@@ -1597,7 +1700,7 @@ static __init int intel_pmu_init(void) | |||
1597 | intel_pmu_lbr_init_nhm(); | 1700 | intel_pmu_lbr_init_nhm(); |
1598 | 1701 | ||
1599 | x86_pmu.event_constraints = intel_snb_event_constraints; | 1702 | x86_pmu.event_constraints = intel_snb_event_constraints; |
1600 | x86_pmu.pebs_constraints = intel_snb_pebs_events; | 1703 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; |
1601 | x86_pmu.extra_regs = intel_snb_extra_regs; | 1704 | x86_pmu.extra_regs = intel_snb_extra_regs; |
1602 | /* all extra regs are per-cpu when HT is on */ | 1705 | /* all extra regs are per-cpu when HT is on */ |
1603 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 1706 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
@@ -1628,16 +1731,3 @@ static __init int intel_pmu_init(void) | |||
1628 | } | 1731 | } |
1629 | return 0; | 1732 | return 0; |
1630 | } | 1733 | } |
1631 | |||
1632 | #else /* CONFIG_CPU_SUP_INTEL */ | ||
1633 | |||
1634 | static int intel_pmu_init(void) | ||
1635 | { | ||
1636 | return 0; | ||
1637 | } | ||
1638 | |||
1639 | static struct intel_shared_regs *allocate_shared_regs(int cpu) | ||
1640 | { | ||
1641 | return NULL; | ||
1642 | } | ||
1643 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 1b1ef3addcfd..c0d238f49db8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -1,7 +1,10 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | 1 | #include <linux/bitops.h> |
2 | #include <linux/types.h> | ||
3 | #include <linux/slab.h> | ||
2 | 4 | ||
3 | /* The maximal number of PEBS events: */ | 5 | #include <asm/perf_event.h> |
4 | #define MAX_PEBS_EVENTS 4 | 6 | |
7 | #include "perf_event.h" | ||
5 | 8 | ||
6 | /* The size of a BTS record in bytes: */ | 9 | /* The size of a BTS record in bytes: */ |
7 | #define BTS_RECORD_SIZE 24 | 10 | #define BTS_RECORD_SIZE 24 |
@@ -37,24 +40,7 @@ struct pebs_record_nhm { | |||
37 | u64 status, dla, dse, lat; | 40 | u64 status, dla, dse, lat; |
38 | }; | 41 | }; |
39 | 42 | ||
40 | /* | 43 | void init_debug_store_on_cpu(int cpu) |
41 | * A debug store configuration. | ||
42 | * | ||
43 | * We only support architectures that use 64bit fields. | ||
44 | */ | ||
45 | struct debug_store { | ||
46 | u64 bts_buffer_base; | ||
47 | u64 bts_index; | ||
48 | u64 bts_absolute_maximum; | ||
49 | u64 bts_interrupt_threshold; | ||
50 | u64 pebs_buffer_base; | ||
51 | u64 pebs_index; | ||
52 | u64 pebs_absolute_maximum; | ||
53 | u64 pebs_interrupt_threshold; | ||
54 | u64 pebs_event_reset[MAX_PEBS_EVENTS]; | ||
55 | }; | ||
56 | |||
57 | static void init_debug_store_on_cpu(int cpu) | ||
58 | { | 44 | { |
59 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; | 45 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; |
60 | 46 | ||
@@ -66,7 +52,7 @@ static void init_debug_store_on_cpu(int cpu) | |||
66 | (u32)((u64)(unsigned long)ds >> 32)); | 52 | (u32)((u64)(unsigned long)ds >> 32)); |
67 | } | 53 | } |
68 | 54 | ||
69 | static void fini_debug_store_on_cpu(int cpu) | 55 | void fini_debug_store_on_cpu(int cpu) |
70 | { | 56 | { |
71 | if (!per_cpu(cpu_hw_events, cpu).ds) | 57 | if (!per_cpu(cpu_hw_events, cpu).ds) |
72 | return; | 58 | return; |
@@ -175,7 +161,7 @@ static void release_ds_buffer(int cpu) | |||
175 | kfree(ds); | 161 | kfree(ds); |
176 | } | 162 | } |
177 | 163 | ||
178 | static void release_ds_buffers(void) | 164 | void release_ds_buffers(void) |
179 | { | 165 | { |
180 | int cpu; | 166 | int cpu; |
181 | 167 | ||
@@ -194,7 +180,7 @@ static void release_ds_buffers(void) | |||
194 | put_online_cpus(); | 180 | put_online_cpus(); |
195 | } | 181 | } |
196 | 182 | ||
197 | static void reserve_ds_buffers(void) | 183 | void reserve_ds_buffers(void) |
198 | { | 184 | { |
199 | int bts_err = 0, pebs_err = 0; | 185 | int bts_err = 0, pebs_err = 0; |
200 | int cpu; | 186 | int cpu; |
@@ -260,10 +246,10 @@ static void reserve_ds_buffers(void) | |||
260 | * BTS | 246 | * BTS |
261 | */ | 247 | */ |
262 | 248 | ||
263 | static struct event_constraint bts_constraint = | 249 | struct event_constraint bts_constraint = |
264 | EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); | 250 | EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); |
265 | 251 | ||
266 | static void intel_pmu_enable_bts(u64 config) | 252 | void intel_pmu_enable_bts(u64 config) |
267 | { | 253 | { |
268 | unsigned long debugctlmsr; | 254 | unsigned long debugctlmsr; |
269 | 255 | ||
@@ -282,7 +268,7 @@ static void intel_pmu_enable_bts(u64 config) | |||
282 | update_debugctlmsr(debugctlmsr); | 268 | update_debugctlmsr(debugctlmsr); |
283 | } | 269 | } |
284 | 270 | ||
285 | static void intel_pmu_disable_bts(void) | 271 | void intel_pmu_disable_bts(void) |
286 | { | 272 | { |
287 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 273 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
288 | unsigned long debugctlmsr; | 274 | unsigned long debugctlmsr; |
@@ -299,7 +285,7 @@ static void intel_pmu_disable_bts(void) | |||
299 | update_debugctlmsr(debugctlmsr); | 285 | update_debugctlmsr(debugctlmsr); |
300 | } | 286 | } |
301 | 287 | ||
302 | static int intel_pmu_drain_bts_buffer(void) | 288 | int intel_pmu_drain_bts_buffer(void) |
303 | { | 289 | { |
304 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 290 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
305 | struct debug_store *ds = cpuc->ds; | 291 | struct debug_store *ds = cpuc->ds; |
@@ -361,7 +347,7 @@ static int intel_pmu_drain_bts_buffer(void) | |||
361 | /* | 347 | /* |
362 | * PEBS | 348 | * PEBS |
363 | */ | 349 | */ |
364 | static struct event_constraint intel_core2_pebs_event_constraints[] = { | 350 | struct event_constraint intel_core2_pebs_event_constraints[] = { |
365 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ | 351 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
366 | INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ | 352 | INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ |
367 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ | 353 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ |
@@ -370,14 +356,14 @@ static struct event_constraint intel_core2_pebs_event_constraints[] = { | |||
370 | EVENT_CONSTRAINT_END | 356 | EVENT_CONSTRAINT_END |
371 | }; | 357 | }; |
372 | 358 | ||
373 | static struct event_constraint intel_atom_pebs_event_constraints[] = { | 359 | struct event_constraint intel_atom_pebs_event_constraints[] = { |
374 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ | 360 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
375 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ | 361 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ |
376 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ | 362 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ |
377 | EVENT_CONSTRAINT_END | 363 | EVENT_CONSTRAINT_END |
378 | }; | 364 | }; |
379 | 365 | ||
380 | static struct event_constraint intel_nehalem_pebs_event_constraints[] = { | 366 | struct event_constraint intel_nehalem_pebs_event_constraints[] = { |
381 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ | 367 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ |
382 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ | 368 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
383 | INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ | 369 | INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ |
@@ -392,7 +378,7 @@ static struct event_constraint intel_nehalem_pebs_event_constraints[] = { | |||
392 | EVENT_CONSTRAINT_END | 378 | EVENT_CONSTRAINT_END |
393 | }; | 379 | }; |
394 | 380 | ||
395 | static struct event_constraint intel_westmere_pebs_event_constraints[] = { | 381 | struct event_constraint intel_westmere_pebs_event_constraints[] = { |
396 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ | 382 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ |
397 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ | 383 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
398 | INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ | 384 | INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ |
@@ -407,7 +393,7 @@ static struct event_constraint intel_westmere_pebs_event_constraints[] = { | |||
407 | EVENT_CONSTRAINT_END | 393 | EVENT_CONSTRAINT_END |
408 | }; | 394 | }; |
409 | 395 | ||
410 | static struct event_constraint intel_snb_pebs_events[] = { | 396 | struct event_constraint intel_snb_pebs_event_constraints[] = { |
411 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | 397 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ |
412 | INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ | 398 | INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ |
413 | INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ | 399 | INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ |
@@ -428,8 +414,7 @@ static struct event_constraint intel_snb_pebs_events[] = { | |||
428 | EVENT_CONSTRAINT_END | 414 | EVENT_CONSTRAINT_END |
429 | }; | 415 | }; |
430 | 416 | ||
431 | static struct event_constraint * | 417 | struct event_constraint *intel_pebs_constraints(struct perf_event *event) |
432 | intel_pebs_constraints(struct perf_event *event) | ||
433 | { | 418 | { |
434 | struct event_constraint *c; | 419 | struct event_constraint *c; |
435 | 420 | ||
@@ -446,7 +431,7 @@ intel_pebs_constraints(struct perf_event *event) | |||
446 | return &emptyconstraint; | 431 | return &emptyconstraint; |
447 | } | 432 | } |
448 | 433 | ||
449 | static void intel_pmu_pebs_enable(struct perf_event *event) | 434 | void intel_pmu_pebs_enable(struct perf_event *event) |
450 | { | 435 | { |
451 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 436 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
452 | struct hw_perf_event *hwc = &event->hw; | 437 | struct hw_perf_event *hwc = &event->hw; |
@@ -460,7 +445,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event) | |||
460 | intel_pmu_lbr_enable(event); | 445 | intel_pmu_lbr_enable(event); |
461 | } | 446 | } |
462 | 447 | ||
463 | static void intel_pmu_pebs_disable(struct perf_event *event) | 448 | void intel_pmu_pebs_disable(struct perf_event *event) |
464 | { | 449 | { |
465 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 450 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
466 | struct hw_perf_event *hwc = &event->hw; | 451 | struct hw_perf_event *hwc = &event->hw; |
@@ -475,7 +460,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event) | |||
475 | intel_pmu_lbr_disable(event); | 460 | intel_pmu_lbr_disable(event); |
476 | } | 461 | } |
477 | 462 | ||
478 | static void intel_pmu_pebs_enable_all(void) | 463 | void intel_pmu_pebs_enable_all(void) |
479 | { | 464 | { |
480 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 465 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
481 | 466 | ||
@@ -483,7 +468,7 @@ static void intel_pmu_pebs_enable_all(void) | |||
483 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); | 468 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); |
484 | } | 469 | } |
485 | 470 | ||
486 | static void intel_pmu_pebs_disable_all(void) | 471 | void intel_pmu_pebs_disable_all(void) |
487 | { | 472 | { |
488 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 473 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
489 | 474 | ||
@@ -576,8 +561,6 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) | |||
576 | return 0; | 561 | return 0; |
577 | } | 562 | } |
578 | 563 | ||
579 | static int intel_pmu_save_and_restart(struct perf_event *event); | ||
580 | |||
581 | static void __intel_pmu_pebs_event(struct perf_event *event, | 564 | static void __intel_pmu_pebs_event(struct perf_event *event, |
582 | struct pt_regs *iregs, void *__pebs) | 565 | struct pt_regs *iregs, void *__pebs) |
583 | { | 566 | { |
@@ -716,7 +699,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
716 | * BTS, PEBS probe and setup | 699 | * BTS, PEBS probe and setup |
717 | */ | 700 | */ |
718 | 701 | ||
719 | static void intel_ds_init(void) | 702 | void intel_ds_init(void) |
720 | { | 703 | { |
721 | /* | 704 | /* |
722 | * No support for 32bit formats | 705 | * No support for 32bit formats |
@@ -749,15 +732,3 @@ static void intel_ds_init(void) | |||
749 | } | 732 | } |
750 | } | 733 | } |
751 | } | 734 | } |
752 | |||
753 | #else /* CONFIG_CPU_SUP_INTEL */ | ||
754 | |||
755 | static void reserve_ds_buffers(void) | ||
756 | { | ||
757 | } | ||
758 | |||
759 | static void release_ds_buffers(void) | ||
760 | { | ||
761 | } | ||
762 | |||
763 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index d202c1bece1a..3fab3de3ce96 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c | |||
@@ -1,4 +1,10 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | 1 | #include <linux/perf_event.h> |
2 | #include <linux/types.h> | ||
3 | |||
4 | #include <asm/perf_event.h> | ||
5 | #include <asm/msr.h> | ||
6 | |||
7 | #include "perf_event.h" | ||
2 | 8 | ||
3 | enum { | 9 | enum { |
4 | LBR_FORMAT_32 = 0x00, | 10 | LBR_FORMAT_32 = 0x00, |
@@ -48,7 +54,7 @@ static void intel_pmu_lbr_reset_64(void) | |||
48 | } | 54 | } |
49 | } | 55 | } |
50 | 56 | ||
51 | static void intel_pmu_lbr_reset(void) | 57 | void intel_pmu_lbr_reset(void) |
52 | { | 58 | { |
53 | if (!x86_pmu.lbr_nr) | 59 | if (!x86_pmu.lbr_nr) |
54 | return; | 60 | return; |
@@ -59,7 +65,7 @@ static void intel_pmu_lbr_reset(void) | |||
59 | intel_pmu_lbr_reset_64(); | 65 | intel_pmu_lbr_reset_64(); |
60 | } | 66 | } |
61 | 67 | ||
62 | static void intel_pmu_lbr_enable(struct perf_event *event) | 68 | void intel_pmu_lbr_enable(struct perf_event *event) |
63 | { | 69 | { |
64 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 70 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
65 | 71 | ||
@@ -81,7 +87,7 @@ static void intel_pmu_lbr_enable(struct perf_event *event) | |||
81 | cpuc->lbr_users++; | 87 | cpuc->lbr_users++; |
82 | } | 88 | } |
83 | 89 | ||
84 | static void intel_pmu_lbr_disable(struct perf_event *event) | 90 | void intel_pmu_lbr_disable(struct perf_event *event) |
85 | { | 91 | { |
86 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 92 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
87 | 93 | ||
@@ -95,7 +101,7 @@ static void intel_pmu_lbr_disable(struct perf_event *event) | |||
95 | __intel_pmu_lbr_disable(); | 101 | __intel_pmu_lbr_disable(); |
96 | } | 102 | } |
97 | 103 | ||
98 | static void intel_pmu_lbr_enable_all(void) | 104 | void intel_pmu_lbr_enable_all(void) |
99 | { | 105 | { |
100 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 106 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
101 | 107 | ||
@@ -103,7 +109,7 @@ static void intel_pmu_lbr_enable_all(void) | |||
103 | __intel_pmu_lbr_enable(); | 109 | __intel_pmu_lbr_enable(); |
104 | } | 110 | } |
105 | 111 | ||
106 | static void intel_pmu_lbr_disable_all(void) | 112 | void intel_pmu_lbr_disable_all(void) |
107 | { | 113 | { |
108 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 114 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
109 | 115 | ||
@@ -178,7 +184,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) | |||
178 | cpuc->lbr_stack.nr = i; | 184 | cpuc->lbr_stack.nr = i; |
179 | } | 185 | } |
180 | 186 | ||
181 | static void intel_pmu_lbr_read(void) | 187 | void intel_pmu_lbr_read(void) |
182 | { | 188 | { |
183 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 189 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
184 | 190 | ||
@@ -191,7 +197,7 @@ static void intel_pmu_lbr_read(void) | |||
191 | intel_pmu_lbr_read_64(cpuc); | 197 | intel_pmu_lbr_read_64(cpuc); |
192 | } | 198 | } |
193 | 199 | ||
194 | static void intel_pmu_lbr_init_core(void) | 200 | void intel_pmu_lbr_init_core(void) |
195 | { | 201 | { |
196 | x86_pmu.lbr_nr = 4; | 202 | x86_pmu.lbr_nr = 4; |
197 | x86_pmu.lbr_tos = 0x01c9; | 203 | x86_pmu.lbr_tos = 0x01c9; |
@@ -199,7 +205,7 @@ static void intel_pmu_lbr_init_core(void) | |||
199 | x86_pmu.lbr_to = 0x60; | 205 | x86_pmu.lbr_to = 0x60; |
200 | } | 206 | } |
201 | 207 | ||
202 | static void intel_pmu_lbr_init_nhm(void) | 208 | void intel_pmu_lbr_init_nhm(void) |
203 | { | 209 | { |
204 | x86_pmu.lbr_nr = 16; | 210 | x86_pmu.lbr_nr = 16; |
205 | x86_pmu.lbr_tos = 0x01c9; | 211 | x86_pmu.lbr_tos = 0x01c9; |
@@ -207,12 +213,10 @@ static void intel_pmu_lbr_init_nhm(void) | |||
207 | x86_pmu.lbr_to = 0x6c0; | 213 | x86_pmu.lbr_to = 0x6c0; |
208 | } | 214 | } |
209 | 215 | ||
210 | static void intel_pmu_lbr_init_atom(void) | 216 | void intel_pmu_lbr_init_atom(void) |
211 | { | 217 | { |
212 | x86_pmu.lbr_nr = 8; | 218 | x86_pmu.lbr_nr = 8; |
213 | x86_pmu.lbr_tos = 0x01c9; | 219 | x86_pmu.lbr_tos = 0x01c9; |
214 | x86_pmu.lbr_from = 0x40; | 220 | x86_pmu.lbr_from = 0x40; |
215 | x86_pmu.lbr_to = 0x60; | 221 | x86_pmu.lbr_to = 0x60; |
216 | } | 222 | } |
217 | |||
218 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 7809d2bcb209..492bf1358a7c 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -7,9 +7,13 @@ | |||
7 | * For licencing details see kernel-base/COPYING | 7 | * For licencing details see kernel-base/COPYING |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifdef CONFIG_CPU_SUP_INTEL | 10 | #include <linux/perf_event.h> |
11 | 11 | ||
12 | #include <asm/perf_event_p4.h> | 12 | #include <asm/perf_event_p4.h> |
13 | #include <asm/hardirq.h> | ||
14 | #include <asm/apic.h> | ||
15 | |||
16 | #include "perf_event.h" | ||
13 | 17 | ||
14 | #define P4_CNTR_LIMIT 3 | 18 | #define P4_CNTR_LIMIT 3 |
15 | /* | 19 | /* |
@@ -1303,7 +1307,7 @@ static __initconst const struct x86_pmu p4_pmu = { | |||
1303 | .perfctr_second_write = 1, | 1307 | .perfctr_second_write = 1, |
1304 | }; | 1308 | }; |
1305 | 1309 | ||
1306 | static __init int p4_pmu_init(void) | 1310 | __init int p4_pmu_init(void) |
1307 | { | 1311 | { |
1308 | unsigned int low, high; | 1312 | unsigned int low, high; |
1309 | 1313 | ||
@@ -1326,5 +1330,3 @@ static __init int p4_pmu_init(void) | |||
1326 | 1330 | ||
1327 | return 0; | 1331 | return 0; |
1328 | } | 1332 | } |
1329 | |||
1330 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index 20c097e33860..c7181befecde 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c | |||
@@ -1,4 +1,7 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | 1 | #include <linux/perf_event.h> |
2 | #include <linux/types.h> | ||
3 | |||
4 | #include "perf_event.h" | ||
2 | 5 | ||
3 | /* | 6 | /* |
4 | * Not sure about some of these | 7 | * Not sure about some of these |
@@ -114,7 +117,7 @@ static __initconst const struct x86_pmu p6_pmu = { | |||
114 | .event_constraints = p6_event_constraints, | 117 | .event_constraints = p6_event_constraints, |
115 | }; | 118 | }; |
116 | 119 | ||
117 | static __init int p6_pmu_init(void) | 120 | __init int p6_pmu_init(void) |
118 | { | 121 | { |
119 | switch (boot_cpu_data.x86_model) { | 122 | switch (boot_cpu_data.x86_model) { |
120 | case 1: | 123 | case 1: |
@@ -138,5 +141,3 @@ static __init int p6_pmu_init(void) | |||
138 | 141 | ||
139 | return 0; | 142 | return 0; |
140 | } | 143 | } |
141 | |||
142 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 764c7c2b1811..13ad89971d47 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -32,15 +32,12 @@ int in_crash_kexec; | |||
32 | 32 | ||
33 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) | 33 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
34 | 34 | ||
35 | static void kdump_nmi_callback(int cpu, struct die_args *args) | 35 | static void kdump_nmi_callback(int cpu, struct pt_regs *regs) |
36 | { | 36 | { |
37 | struct pt_regs *regs; | ||
38 | #ifdef CONFIG_X86_32 | 37 | #ifdef CONFIG_X86_32 |
39 | struct pt_regs fixed_regs; | 38 | struct pt_regs fixed_regs; |
40 | #endif | 39 | #endif |
41 | 40 | ||
42 | regs = args->regs; | ||
43 | |||
44 | #ifdef CONFIG_X86_32 | 41 | #ifdef CONFIG_X86_32 |
45 | if (!user_mode_vm(regs)) { | 42 | if (!user_mode_vm(regs)) { |
46 | crash_fixup_ss_esp(&fixed_regs, regs); | 43 | crash_fixup_ss_esp(&fixed_regs, regs); |
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 3fee346ef545..cacdd46d184d 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c | |||
@@ -42,7 +42,7 @@ void arch_jump_label_transform(struct jump_entry *entry, | |||
42 | put_online_cpus(); | 42 | put_online_cpus(); |
43 | } | 43 | } |
44 | 44 | ||
45 | void arch_jump_label_text_poke_early(jump_label_t addr) | 45 | void __init_or_module arch_jump_label_text_poke_early(jump_label_t addr) |
46 | { | 46 | { |
47 | text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5], | 47 | text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5], |
48 | JUMP_LABEL_NOP_SIZE); | 48 | JUMP_LABEL_NOP_SIZE); |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 00354d4919a9..faba5771acad 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args) | |||
511 | 511 | ||
512 | static int was_in_debug_nmi[NR_CPUS]; | 512 | static int was_in_debug_nmi[NR_CPUS]; |
513 | 513 | ||
514 | static int __kgdb_notify(struct die_args *args, unsigned long cmd) | 514 | static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs) |
515 | { | 515 | { |
516 | struct pt_regs *regs = args->regs; | ||
517 | |||
518 | switch (cmd) { | 516 | switch (cmd) { |
519 | case DIE_NMI: | 517 | case NMI_LOCAL: |
520 | if (atomic_read(&kgdb_active) != -1) { | 518 | if (atomic_read(&kgdb_active) != -1) { |
521 | /* KGDB CPU roundup */ | 519 | /* KGDB CPU roundup */ |
522 | kgdb_nmicallback(raw_smp_processor_id(), regs); | 520 | kgdb_nmicallback(raw_smp_processor_id(), regs); |
523 | was_in_debug_nmi[raw_smp_processor_id()] = 1; | 521 | was_in_debug_nmi[raw_smp_processor_id()] = 1; |
524 | touch_nmi_watchdog(); | 522 | touch_nmi_watchdog(); |
525 | return NOTIFY_STOP; | 523 | return NMI_HANDLED; |
526 | } | 524 | } |
527 | return NOTIFY_DONE; | 525 | break; |
528 | 526 | ||
529 | case DIE_NMIUNKNOWN: | 527 | case NMI_UNKNOWN: |
530 | if (was_in_debug_nmi[raw_smp_processor_id()]) { | 528 | if (was_in_debug_nmi[raw_smp_processor_id()]) { |
531 | was_in_debug_nmi[raw_smp_processor_id()] = 0; | 529 | was_in_debug_nmi[raw_smp_processor_id()] = 0; |
532 | return NOTIFY_STOP; | 530 | return NMI_HANDLED; |
533 | } | 531 | } |
534 | return NOTIFY_DONE; | 532 | break; |
533 | default: | ||
534 | /* do nothing */ | ||
535 | break; | ||
536 | } | ||
537 | return NMI_DONE; | ||
538 | } | ||
539 | |||
540 | static int __kgdb_notify(struct die_args *args, unsigned long cmd) | ||
541 | { | ||
542 | struct pt_regs *regs = args->regs; | ||
535 | 543 | ||
544 | switch (cmd) { | ||
536 | case DIE_DEBUG: | 545 | case DIE_DEBUG: |
537 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { | 546 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { |
538 | if (user_mode(regs)) | 547 | if (user_mode(regs)) |
@@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) | |||
590 | 599 | ||
591 | static struct notifier_block kgdb_notifier = { | 600 | static struct notifier_block kgdb_notifier = { |
592 | .notifier_call = kgdb_notify, | 601 | .notifier_call = kgdb_notify, |
593 | |||
594 | /* | ||
595 | * Lowest-prio notifier priority, we want to be notified last: | ||
596 | */ | ||
597 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
598 | }; | 602 | }; |
599 | 603 | ||
600 | /** | 604 | /** |
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = { | |||
605 | */ | 609 | */ |
606 | int kgdb_arch_init(void) | 610 | int kgdb_arch_init(void) |
607 | { | 611 | { |
608 | return register_die_notifier(&kgdb_notifier); | 612 | int retval; |
613 | |||
614 | retval = register_die_notifier(&kgdb_notifier); | ||
615 | if (retval) | ||
616 | goto out; | ||
617 | |||
618 | retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler, | ||
619 | 0, "kgdb"); | ||
620 | if (retval) | ||
621 | goto out1; | ||
622 | |||
623 | retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler, | ||
624 | 0, "kgdb"); | ||
625 | |||
626 | if (retval) | ||
627 | goto out2; | ||
628 | |||
629 | return retval; | ||
630 | |||
631 | out2: | ||
632 | unregister_nmi_handler(NMI_LOCAL, "kgdb"); | ||
633 | out1: | ||
634 | unregister_die_notifier(&kgdb_notifier); | ||
635 | out: | ||
636 | return retval; | ||
609 | } | 637 | } |
610 | 638 | ||
611 | static void kgdb_hw_overflow_handler(struct perf_event *event, | 639 | static void kgdb_hw_overflow_handler(struct perf_event *event, |
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void) | |||
673 | breakinfo[i].pev = NULL; | 701 | breakinfo[i].pev = NULL; |
674 | } | 702 | } |
675 | } | 703 | } |
704 | unregister_nmi_handler(NMI_UNKNOWN, "kgdb"); | ||
705 | unregister_nmi_handler(NMI_LOCAL, "kgdb"); | ||
676 | unregister_die_notifier(&kgdb_notifier); | 706 | unregister_die_notifier(&kgdb_notifier); |
677 | } | 707 | } |
678 | 708 | ||
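The kgdb conversion above registers the same handler twice, once on the NMI_LOCAL list and once on the NMI_UNKNOWN list, and unwinds whatever already succeeded when a later step fails. A minimal sketch of that register-and-roll-back pattern, assuming the declarations from <asm/nmi.h> and a hypothetical "mydbg" client (the name and the empty handler body are illustrative only, not part of this patch):

    #include <linux/init.h>
    #include <asm/nmi.h>

    static int mydbg_nmi_handler(unsigned int cmd, struct pt_regs *regs)
    {
            return NMI_DONE;        /* claim nothing in this sketch */
    }

    static int __init mydbg_init(void)
    {
            int err;

            err = register_nmi_handler(NMI_LOCAL, mydbg_nmi_handler, 0, "mydbg");
            if (err)
                    return err;

            err = register_nmi_handler(NMI_UNKNOWN, mydbg_nmi_handler, 0, "mydbg");
            if (err)
                    /* undo the first registration before reporting failure */
                    unregister_nmi_handler(NMI_LOCAL, "mydbg");

            return err;
    }

The handler is looked up by its string name on unregistration, which is why both calls pass the same "mydbg" tag.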
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 794bc95134cd..7da647d8b64c 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -75,10 +75,11 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | |||
75 | /* | 75 | /* |
76 | * Undefined/reserved opcodes, conditional jump, Opcode Extension | 76 | * Undefined/reserved opcodes, conditional jump, Opcode Extension |
77 | * Groups, and some special opcodes can not boost. | 77 | * Groups, and some special opcodes can not boost. |
78 | * This is non-const to keep gcc from statically optimizing it out, as | 78 | * This is non-const and volatile to keep gcc from statically |
79 | * variable_test_bit makes gcc think only *(unsigned long*) is used. | 79 | * optimizing it out, as variable_test_bit makes gcc think only |
80 | * *(unsigned long*) is used. | ||
80 | */ | 81 | */ |
81 | static u32 twobyte_is_boostable[256 / 32] = { | 82 | static volatile u32 twobyte_is_boostable[256 / 32] = { |
82 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | 83 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ |
83 | /* ---------------------------------------------- */ | 84 | /* ---------------------------------------------- */ |
84 | W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ | 85 | W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ |
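The comment in this hunk explains why the opcode table can no longer be const: it is only ever read through the bit-test helpers, and variable_test_bit's inline assembly makes gcc believe that only a single *(unsigned long *) is referenced, so a const initializer risks being optimized away. For context, a hedged sketch of how such a table is consulted (the helper name is illustrative; it is roughly what the file's can_boost() check does):

    /* illustrative only: test the opcode's bit in the 256-bit table above */
    static int twobyte_can_boost(u8 opcode)
    {
            return test_bit(opcode, (unsigned long *)twobyte_is_boostable);
    }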
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c new file mode 100644 index 000000000000..7ec5bd140b87 --- /dev/null +++ b/arch/x86/kernel/nmi.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs | ||
4 | * Copyright (C) 2011 Don Zickus Red Hat, Inc. | ||
5 | * | ||
6 | * Pentium III FXSR, SSE support | ||
7 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * Handle hardware traps and faults. | ||
12 | */ | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/kprobes.h> | ||
15 | #include <linux/kdebug.h> | ||
16 | #include <linux/nmi.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/hardirq.h> | ||
19 | #include <linux/slab.h> | ||
20 | |||
21 | #include <linux/mca.h> | ||
22 | |||
23 | #if defined(CONFIG_EDAC) | ||
24 | #include <linux/edac.h> | ||
25 | #endif | ||
26 | |||
27 | #include <linux/atomic.h> | ||
28 | #include <asm/traps.h> | ||
29 | #include <asm/mach_traps.h> | ||
30 | #include <asm/nmi.h> | ||
31 | |||
32 | #define NMI_MAX_NAMELEN 16 | ||
33 | struct nmiaction { | ||
34 | struct list_head list; | ||
35 | nmi_handler_t handler; | ||
36 | unsigned int flags; | ||
37 | char *name; | ||
38 | }; | ||
39 | |||
40 | struct nmi_desc { | ||
41 | spinlock_t lock; | ||
42 | struct list_head head; | ||
43 | }; | ||
44 | |||
45 | static struct nmi_desc nmi_desc[NMI_MAX] = | ||
46 | { | ||
47 | { | ||
48 | .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock), | ||
49 | .head = LIST_HEAD_INIT(nmi_desc[0].head), | ||
50 | }, | ||
51 | { | ||
52 | .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock), | ||
53 | .head = LIST_HEAD_INIT(nmi_desc[1].head), | ||
54 | }, | ||
55 | |||
56 | }; | ||
57 | |||
58 | struct nmi_stats { | ||
59 | unsigned int normal; | ||
60 | unsigned int unknown; | ||
61 | unsigned int external; | ||
62 | unsigned int swallow; | ||
63 | }; | ||
64 | |||
65 | static DEFINE_PER_CPU(struct nmi_stats, nmi_stats); | ||
66 | |||
67 | static int ignore_nmis; | ||
68 | |||
69 | int unknown_nmi_panic; | ||
70 | /* | ||
71 | * Prevent NMI reason port (0x61) being accessed simultaneously, can | ||
72 | * only be used in NMI handler. | ||
73 | */ | ||
74 | static DEFINE_RAW_SPINLOCK(nmi_reason_lock); | ||
75 | |||
76 | static int __init setup_unknown_nmi_panic(char *str) | ||
77 | { | ||
78 | unknown_nmi_panic = 1; | ||
79 | return 1; | ||
80 | } | ||
81 | __setup("unknown_nmi_panic", setup_unknown_nmi_panic); | ||
82 | |||
83 | #define nmi_to_desc(type) (&nmi_desc[type]) | ||
84 | |||
85 | static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) | ||
86 | { | ||
87 | struct nmi_desc *desc = nmi_to_desc(type); | ||
88 | struct nmiaction *a; | ||
89 | int handled=0; | ||
90 | |||
91 | rcu_read_lock(); | ||
92 | |||
93 | /* | ||
94 | * NMIs are edge-triggered, which means if you have enough | ||
95 | * of them concurrently, you can lose some because only one | ||
96 | * can be latched at any given time. Walk the whole list | ||
97 | * to handle those situations. | ||
98 | */ | ||
99 | list_for_each_entry_rcu(a, &desc->head, list) | ||
100 | handled += a->handler(type, regs); | ||
101 | |||
102 | rcu_read_unlock(); | ||
103 | |||
104 | /* return total number of NMI events handled */ | ||
105 | return handled; | ||
106 | } | ||
107 | |||
108 | static int __setup_nmi(unsigned int type, struct nmiaction *action) | ||
109 | { | ||
110 | struct nmi_desc *desc = nmi_to_desc(type); | ||
111 | unsigned long flags; | ||
112 | |||
113 | spin_lock_irqsave(&desc->lock, flags); | ||
114 | |||
115 | /* | ||
116 | * most handlers of type NMI_UNKNOWN never return because | ||
117 | * they just assume the NMI is theirs. Just a sanity check | ||
118 | * to manage expectations | ||
119 | */ | ||
120 | WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); | ||
121 | |||
122 | /* | ||
123 | * some handlers need to be executed first otherwise a fake | ||
124 | * event confuses some handlers (kdump uses this flag) | ||
125 | */ | ||
126 | if (action->flags & NMI_FLAG_FIRST) | ||
127 | list_add_rcu(&action->list, &desc->head); | ||
128 | else | ||
129 | list_add_tail_rcu(&action->list, &desc->head); | ||
130 | |||
131 | spin_unlock_irqrestore(&desc->lock, flags); | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static struct nmiaction *__free_nmi(unsigned int type, const char *name) | ||
136 | { | ||
137 | struct nmi_desc *desc = nmi_to_desc(type); | ||
138 | struct nmiaction *n; | ||
139 | unsigned long flags; | ||
140 | |||
141 | spin_lock_irqsave(&desc->lock, flags); | ||
142 | |||
143 | list_for_each_entry_rcu(n, &desc->head, list) { | ||
144 | /* | ||
145 | * the name passed in to describe the nmi handler | ||
146 | * is used as the lookup key | ||
147 | */ | ||
148 | if (!strcmp(n->name, name)) { | ||
149 | WARN(in_nmi(), | ||
150 | "Trying to free NMI (%s) from NMI context!\n", n->name); | ||
151 | list_del_rcu(&n->list); | ||
152 | break; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | spin_unlock_irqrestore(&desc->lock, flags); | ||
157 | synchronize_rcu(); | ||
158 | return (n); | ||
159 | } | ||
160 | |||
161 | int register_nmi_handler(unsigned int type, nmi_handler_t handler, | ||
162 | unsigned long nmiflags, const char *devname) | ||
163 | { | ||
164 | struct nmiaction *action; | ||
165 | int retval = -ENOMEM; | ||
166 | |||
167 | if (!handler) | ||
168 | return -EINVAL; | ||
169 | |||
170 | action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL); | ||
171 | if (!action) | ||
172 | goto fail_action; | ||
173 | |||
174 | action->handler = handler; | ||
175 | action->flags = nmiflags; | ||
176 | action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL); | ||
177 | if (!action->name) | ||
178 | goto fail_action_name; | ||
179 | |||
180 | retval = __setup_nmi(type, action); | ||
181 | |||
182 | if (retval) | ||
183 | goto fail_setup_nmi; | ||
184 | |||
185 | return retval; | ||
186 | |||
187 | fail_setup_nmi: | ||
188 | kfree(action->name); | ||
189 | fail_action_name: | ||
190 | kfree(action); | ||
191 | fail_action: | ||
192 | |||
193 | return retval; | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(register_nmi_handler); | ||
196 | |||
197 | void unregister_nmi_handler(unsigned int type, const char *name) | ||
198 | { | ||
199 | struct nmiaction *a; | ||
200 | |||
201 | a = __free_nmi(type, name); | ||
202 | if (a) { | ||
203 | kfree(a->name); | ||
204 | kfree(a); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | EXPORT_SYMBOL_GPL(unregister_nmi_handler); | ||
209 | |||
210 | static notrace __kprobes void | ||
211 | pci_serr_error(unsigned char reason, struct pt_regs *regs) | ||
212 | { | ||
213 | pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", | ||
214 | reason, smp_processor_id()); | ||
215 | |||
216 | /* | ||
217 | * On some machines, PCI SERR line is used to report memory | ||
218 | * errors. EDAC makes use of it. | ||
219 | */ | ||
220 | #if defined(CONFIG_EDAC) | ||
221 | if (edac_handler_set()) { | ||
222 | edac_atomic_assert_error(); | ||
223 | return; | ||
224 | } | ||
225 | #endif | ||
226 | |||
227 | if (panic_on_unrecovered_nmi) | ||
228 | panic("NMI: Not continuing"); | ||
229 | |||
230 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
231 | |||
232 | /* Clear and disable the PCI SERR error line. */ | ||
233 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; | ||
234 | outb(reason, NMI_REASON_PORT); | ||
235 | } | ||
236 | |||
237 | static notrace __kprobes void | ||
238 | io_check_error(unsigned char reason, struct pt_regs *regs) | ||
239 | { | ||
240 | unsigned long i; | ||
241 | |||
242 | pr_emerg( | ||
243 | "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", | ||
244 | reason, smp_processor_id()); | ||
245 | show_registers(regs); | ||
246 | |||
247 | if (panic_on_io_nmi) | ||
248 | panic("NMI IOCK error: Not continuing"); | ||
249 | |||
250 | /* Re-enable the IOCK line, wait for a few seconds */ | ||
251 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; | ||
252 | outb(reason, NMI_REASON_PORT); | ||
253 | |||
254 | i = 20000; | ||
255 | while (--i) { | ||
256 | touch_nmi_watchdog(); | ||
257 | udelay(100); | ||
258 | } | ||
259 | |||
260 | reason &= ~NMI_REASON_CLEAR_IOCHK; | ||
261 | outb(reason, NMI_REASON_PORT); | ||
262 | } | ||
263 | |||
264 | static notrace __kprobes void | ||
265 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) | ||
266 | { | ||
267 | int handled; | ||
268 | |||
269 | /* | ||
270 | * Use 'false' as back-to-back NMIs are dealt with one level up. | ||
271 | * Of course this makes having multiple 'unknown' handlers useless | ||
272 | * as only the first one is ever run (unless it can actually determine | ||
273 | * if it caused the NMI) | ||
274 | */ | ||
275 | handled = nmi_handle(NMI_UNKNOWN, regs, false); | ||
276 | if (handled) { | ||
277 | __this_cpu_add(nmi_stats.unknown, handled); | ||
278 | return; | ||
279 | } | ||
280 | |||
281 | __this_cpu_add(nmi_stats.unknown, 1); | ||
282 | |||
283 | #ifdef CONFIG_MCA | ||
284 | /* | ||
285 | * Might actually be able to figure out what the guilty party | ||
286 | * is: | ||
287 | */ | ||
288 | if (MCA_bus) { | ||
289 | mca_handle_nmi(); | ||
290 | return; | ||
291 | } | ||
292 | #endif | ||
293 | pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", | ||
294 | reason, smp_processor_id()); | ||
295 | |||
296 | pr_emerg("Do you have a strange power saving mode enabled?\n"); | ||
297 | if (unknown_nmi_panic || panic_on_unrecovered_nmi) | ||
298 | panic("NMI: Not continuing"); | ||
299 | |||
300 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
301 | } | ||
302 | |||
303 | static DEFINE_PER_CPU(bool, swallow_nmi); | ||
304 | static DEFINE_PER_CPU(unsigned long, last_nmi_rip); | ||
305 | |||
306 | static notrace __kprobes void default_do_nmi(struct pt_regs *regs) | ||
307 | { | ||
308 | unsigned char reason = 0; | ||
309 | int handled; | ||
310 | bool b2b = false; | ||
311 | |||
312 | /* | ||
313 | * CPU-specific NMI must be processed before non-CPU-specific | ||
314 | * NMI, otherwise we may lose it, because the CPU-specific | ||
315 | * NMI can not be detected/processed on other CPUs. | ||
316 | */ | ||
317 | |||
318 | /* | ||
319 | * Back-to-back NMIs are interesting because they can either | ||
320 | * be two NMIs or more than two NMIs (anything over two is dropped | ||
321 | * due to NMI being edge-triggered). If this is the second half | ||
322 | * of the back-to-back NMI, assume we dropped things and process | ||
323 | * more handlers. Otherwise reset the 'swallow' NMI behaviour | ||
324 | */ | ||
325 | if (regs->ip == __this_cpu_read(last_nmi_rip)) | ||
326 | b2b = true; | ||
327 | else | ||
328 | __this_cpu_write(swallow_nmi, false); | ||
329 | |||
330 | __this_cpu_write(last_nmi_rip, regs->ip); | ||
331 | |||
332 | handled = nmi_handle(NMI_LOCAL, regs, b2b); | ||
333 | __this_cpu_add(nmi_stats.normal, handled); | ||
334 | if (handled) { | ||
335 | /* | ||
336 | * There are cases when a NMI handler handles multiple | ||
337 | * events in the current NMI. One of these events may | ||
338 | * be queued up for the next NMI. Because the event is | ||
339 | * already handled, the next NMI will result in an unknown | ||
340 | * NMI. Instead let's flag this for a potential NMI to | ||
341 | * swallow. | ||
342 | */ | ||
343 | if (handled > 1) | ||
344 | __this_cpu_write(swallow_nmi, true); | ||
345 | return; | ||
346 | } | ||
347 | |||
348 | /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ | ||
349 | raw_spin_lock(&nmi_reason_lock); | ||
350 | reason = get_nmi_reason(); | ||
351 | |||
352 | if (reason & NMI_REASON_MASK) { | ||
353 | if (reason & NMI_REASON_SERR) | ||
354 | pci_serr_error(reason, regs); | ||
355 | else if (reason & NMI_REASON_IOCHK) | ||
356 | io_check_error(reason, regs); | ||
357 | #ifdef CONFIG_X86_32 | ||
358 | /* | ||
359 | * Reassert NMI in case it became active | ||
360 | * meanwhile as it's edge-triggered: | ||
361 | */ | ||
362 | reassert_nmi(); | ||
363 | #endif | ||
364 | __this_cpu_add(nmi_stats.external, 1); | ||
365 | raw_spin_unlock(&nmi_reason_lock); | ||
366 | return; | ||
367 | } | ||
368 | raw_spin_unlock(&nmi_reason_lock); | ||
369 | |||
370 | /* | ||
371 | * Only one NMI can be latched at a time. To handle | ||
372 | * this we may process multiple nmi handlers at once to | ||
373 | * cover the case where an NMI is dropped. The downside | ||
374 | * to this approach is we may process an NMI prematurely, | ||
375 | * while its real NMI is sitting latched. This will cause | ||
376 | * an unknown NMI on the next run of the NMI processing. | ||
377 | * | ||
378 | * We tried to flag that condition above, by setting the | ||
379 | * swallow_nmi flag when we process more than one event. | ||
380 | * This condition is also only present on the second half | ||
381 | * of a back-to-back NMI, so we flag that condition too. | ||
382 | * | ||
383 | * If both are true, we assume we already processed this | ||
384 | * NMI previously and we swallow it. Otherwise we reset | ||
385 | * the logic. | ||
386 | * | ||
387 | * There are scenarios where we may accidentally swallow | ||
388 | * a 'real' unknown NMI. For example, while processing | ||
389 | * a perf NMI another perf NMI comes in along with a | ||
390 | * 'real' unknown NMI. These two NMIs get combined into | ||
391 | * one (as described above). When the next NMI gets | ||
392 | * processed, it will be flagged by perf as handled, but | ||
393 | * no one will know that there was a 'real' unknown NMI sent | ||
394 | * also. As a result it gets swallowed. Or if the first | ||
395 | * perf NMI returns two events handled then the second | ||
396 | * NMI will get eaten by the logic below, again losing a | ||
397 | * 'real' unknown NMI. But this is the best we can do | ||
398 | * for now. | ||
399 | */ | ||
400 | if (b2b && __this_cpu_read(swallow_nmi)) | ||
401 | __this_cpu_add(nmi_stats.swallow, 1); | ||
402 | else | ||
403 | unknown_nmi_error(reason, regs); | ||
404 | } | ||
405 | |||
406 | dotraplinkage notrace __kprobes void | ||
407 | do_nmi(struct pt_regs *regs, long error_code) | ||
408 | { | ||
409 | nmi_enter(); | ||
410 | |||
411 | inc_irq_stat(__nmi_count); | ||
412 | |||
413 | if (!ignore_nmis) | ||
414 | default_do_nmi(regs); | ||
415 | |||
416 | nmi_exit(); | ||
417 | } | ||
418 | |||
419 | void stop_nmi(void) | ||
420 | { | ||
421 | ignore_nmis++; | ||
422 | } | ||
423 | |||
424 | void restart_nmi(void) | ||
425 | { | ||
426 | ignore_nmis--; | ||
427 | } | ||
428 | |||
429 | /* reset the back-to-back NMI logic */ | ||
430 | void local_touch_nmi(void) | ||
431 | { | ||
432 | __this_cpu_write(last_nmi_rip, 0); | ||
433 | } | ||
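nmi_handle() above sums the return values of every handler on a list, and default_do_nmi() arms the back-to-back "swallow" logic whenever that sum is greater than one, so a handler reports how many events it actually consumed rather than just a boolean. A minimal sketch of a client of the new API, assuming a hypothetical "mypmu" device; mypmu_pending() and mypmu_ack() are stand-ins for real hardware accessors and are not part of this patch:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <asm/nmi.h>

    /* hypothetical hardware helpers, stand-ins for real PMU accessors */
    static bool mypmu_pending(void) { return false; }
    static void mypmu_ack(void) { }

    static int mypmu_nmi_handler(unsigned int type, struct pt_regs *regs)
    {
            int handled = 0;

            /*
             * Drain everything that is pending right now; the core adds up
             * the return values, and a total greater than one flags a
             * possibly swallowed back-to-back NMI.
             */
            while (mypmu_pending()) {
                    mypmu_ack();
                    handled++;
            }

            return handled;         /* 0 means "not ours" (NMI_DONE) */
    }

    static int __init mypmu_init(void)
    {
            return register_nmi_handler(NMI_LOCAL, mypmu_nmi_handler, 0, "mypmu");
    }

    static void __exit mypmu_exit(void)
    {
            unregister_nmi_handler(NMI_LOCAL, "mypmu");
    }

    module_init(mypmu_init);
    module_exit(mypmu_exit);

Note that register_nmi_handler() copies the name with kstrndup() and __free_nmi() uses that string as the lookup key, so the tag passed to unregister_nmi_handler() must match exactly.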
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 2196c703c5e2..795b79f984c2 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <asm/idle.h> | 57 | #include <asm/idle.h> |
58 | #include <asm/syscalls.h> | 58 | #include <asm/syscalls.h> |
59 | #include <asm/debugreg.h> | 59 | #include <asm/debugreg.h> |
60 | #include <asm/nmi.h> | ||
60 | 61 | ||
61 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 62 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
62 | 63 | ||
@@ -107,6 +108,7 @@ void cpu_idle(void) | |||
107 | if (cpu_is_offline(cpu)) | 108 | if (cpu_is_offline(cpu)) |
108 | play_dead(); | 109 | play_dead(); |
109 | 110 | ||
111 | local_touch_nmi(); | ||
110 | local_irq_disable(); | 112 | local_irq_disable(); |
111 | /* Don't trace irqs off for idle */ | 113 | /* Don't trace irqs off for idle */ |
112 | stop_critical_timings(); | 114 | stop_critical_timings(); |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index f693e44e1bf6..3bd7e6eebf31 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/idle.h> | 51 | #include <asm/idle.h> |
52 | #include <asm/syscalls.h> | 52 | #include <asm/syscalls.h> |
53 | #include <asm/debugreg.h> | 53 | #include <asm/debugreg.h> |
54 | #include <asm/nmi.h> | ||
54 | 55 | ||
55 | asmlinkage extern void ret_from_fork(void); | 56 | asmlinkage extern void ret_from_fork(void); |
56 | 57 | ||
@@ -133,6 +134,7 @@ void cpu_idle(void) | |||
133 | * from here on, until they go to idle. | 134 | * from here on, until they go to idle. |
134 | * Otherwise, idle callbacks can misfire. | 135 | * Otherwise, idle callbacks can misfire. |
135 | */ | 136 | */ |
137 | local_touch_nmi(); | ||
136 | local_irq_disable(); | 138 | local_irq_disable(); |
137 | enter_idle(); | 139 | enter_idle(); |
138 | /* Don't trace irqs off for idle */ | 140 | /* Don't trace irqs off for idle */ |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 9242436e9937..e334be1182b9 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -464,7 +464,7 @@ static inline void kb_wait(void) | |||
464 | } | 464 | } |
465 | } | 465 | } |
466 | 466 | ||
467 | static void vmxoff_nmi(int cpu, struct die_args *args) | 467 | static void vmxoff_nmi(int cpu, struct pt_regs *regs) |
468 | { | 468 | { |
469 | cpu_emergency_vmxoff(); | 469 | cpu_emergency_vmxoff(); |
470 | } | 470 | } |
@@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback; | |||
736 | 736 | ||
737 | static atomic_t waiting_for_crash_ipi; | 737 | static atomic_t waiting_for_crash_ipi; |
738 | 738 | ||
739 | static int crash_nmi_callback(struct notifier_block *self, | 739 | static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) |
740 | unsigned long val, void *data) | ||
741 | { | 740 | { |
742 | int cpu; | 741 | int cpu; |
743 | 742 | ||
744 | if (val != DIE_NMI) | ||
745 | return NOTIFY_OK; | ||
746 | |||
747 | cpu = raw_smp_processor_id(); | 743 | cpu = raw_smp_processor_id(); |
748 | 744 | ||
749 | /* Don't do anything if this handler is invoked on crashing cpu. | 745 | /* Don't do anything if this handler is invoked on crashing cpu. |
@@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
751 | * an NMI if system was initially booted with nmi_watchdog parameter. | 747 | * an NMI if system was initially booted with nmi_watchdog parameter. |
752 | */ | 748 | */ |
753 | if (cpu == crashing_cpu) | 749 | if (cpu == crashing_cpu) |
754 | return NOTIFY_STOP; | 750 | return NMI_HANDLED; |
755 | local_irq_disable(); | 751 | local_irq_disable(); |
756 | 752 | ||
757 | shootdown_callback(cpu, (struct die_args *)data); | 753 | shootdown_callback(cpu, regs); |
758 | 754 | ||
759 | atomic_dec(&waiting_for_crash_ipi); | 755 | atomic_dec(&waiting_for_crash_ipi); |
760 | /* Assume hlt works */ | 756 | /* Assume hlt works */ |
@@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
762 | for (;;) | 758 | for (;;) |
763 | cpu_relax(); | 759 | cpu_relax(); |
764 | 760 | ||
765 | return 1; | 761 | return NMI_HANDLED; |
766 | } | 762 | } |
767 | 763 | ||
768 | static void smp_send_nmi_allbutself(void) | 764 | static void smp_send_nmi_allbutself(void) |
@@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void) | |||
770 | apic->send_IPI_allbutself(NMI_VECTOR); | 766 | apic->send_IPI_allbutself(NMI_VECTOR); |
771 | } | 767 | } |
772 | 768 | ||
773 | static struct notifier_block crash_nmi_nb = { | ||
774 | .notifier_call = crash_nmi_callback, | ||
775 | /* we want to be the first one called */ | ||
776 | .priority = NMI_LOCAL_HIGH_PRIOR+1, | ||
777 | }; | ||
778 | |||
779 | /* Halt all other CPUs, calling the specified function on each of them | 769 | /* Halt all other CPUs, calling the specified function on each of them |
780 | * | 770 | * |
781 | * This function can be used to halt all other CPUs on crash | 771 | * This function can be used to halt all other CPUs on crash |
@@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) | |||
794 | 784 | ||
795 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | 785 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); |
796 | /* Would it be better to replace the trap vector here? */ | 786 | /* Would it be better to replace the trap vector here? */ |
797 | if (register_die_notifier(&crash_nmi_nb)) | 787 | if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback, |
788 | NMI_FLAG_FIRST, "crash")) | ||
798 | return; /* return what? */ | 789 | return; /* return what? */ |
799 | /* Ensure the new callback function is set before sending | 790 | /* Ensure the new callback function is set before sending |
800 | * out the NMI | 791 | * out the NMI |
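Two things change in the crash path above: the shootdown callback now receives the pt_regs pointer directly instead of digging it out of a struct die_args, and the NMI callback is registered with NMI_FLAG_FIRST so that __setup_nmi() places it at the head of the NMI_LOCAL list and it runs before ordinary handlers. A brief sketch of a caller under the new convention, assuming the nmi_shootdown_cb typedef and nmi_shootdown_cpus() declaration come from <asm/reboot.h> (the callback name and body are illustrative):

    #include <asm/reboot.h>

    /* shootdown callbacks now take (cpu, regs) directly */
    static void my_shootdown_cb(int cpu, struct pt_regs *regs)
    {
            /* per-cpu teardown work, e.g. cpu_emergency_vmxoff() */
    }

    static void my_emergency_stop(void)
    {
            /* halt every other cpu and run the callback on each of them */
            nmi_shootdown_cpus(my_shootdown_cb);
    }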
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 6913369c234c..a8e3eb83466c 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -81,15 +81,6 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; | |||
81 | DECLARE_BITMAP(used_vectors, NR_VECTORS); | 81 | DECLARE_BITMAP(used_vectors, NR_VECTORS); |
82 | EXPORT_SYMBOL_GPL(used_vectors); | 82 | EXPORT_SYMBOL_GPL(used_vectors); |
83 | 83 | ||
84 | static int ignore_nmis; | ||
85 | |||
86 | int unknown_nmi_panic; | ||
87 | /* | ||
88 | * Prevent NMI reason port (0x61) being accessed simultaneously, can | ||
89 | * only be used in NMI handler. | ||
90 | */ | ||
91 | static DEFINE_RAW_SPINLOCK(nmi_reason_lock); | ||
92 | |||
93 | static inline void conditional_sti(struct pt_regs *regs) | 84 | static inline void conditional_sti(struct pt_regs *regs) |
94 | { | 85 | { |
95 | if (regs->flags & X86_EFLAGS_IF) | 86 | if (regs->flags & X86_EFLAGS_IF) |
@@ -307,152 +298,6 @@ gp_in_kernel: | |||
307 | die("general protection fault", regs, error_code); | 298 | die("general protection fault", regs, error_code); |
308 | } | 299 | } |
309 | 300 | ||
310 | static int __init setup_unknown_nmi_panic(char *str) | ||
311 | { | ||
312 | unknown_nmi_panic = 1; | ||
313 | return 1; | ||
314 | } | ||
315 | __setup("unknown_nmi_panic", setup_unknown_nmi_panic); | ||
316 | |||
317 | static notrace __kprobes void | ||
318 | pci_serr_error(unsigned char reason, struct pt_regs *regs) | ||
319 | { | ||
320 | pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", | ||
321 | reason, smp_processor_id()); | ||
322 | |||
323 | /* | ||
324 | * On some machines, PCI SERR line is used to report memory | ||
325 | * errors. EDAC makes use of it. | ||
326 | */ | ||
327 | #if defined(CONFIG_EDAC) | ||
328 | if (edac_handler_set()) { | ||
329 | edac_atomic_assert_error(); | ||
330 | return; | ||
331 | } | ||
332 | #endif | ||
333 | |||
334 | if (panic_on_unrecovered_nmi) | ||
335 | panic("NMI: Not continuing"); | ||
336 | |||
337 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
338 | |||
339 | /* Clear and disable the PCI SERR error line. */ | ||
340 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; | ||
341 | outb(reason, NMI_REASON_PORT); | ||
342 | } | ||
343 | |||
344 | static notrace __kprobes void | ||
345 | io_check_error(unsigned char reason, struct pt_regs *regs) | ||
346 | { | ||
347 | unsigned long i; | ||
348 | |||
349 | pr_emerg( | ||
350 | "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", | ||
351 | reason, smp_processor_id()); | ||
352 | show_registers(regs); | ||
353 | |||
354 | if (panic_on_io_nmi) | ||
355 | panic("NMI IOCK error: Not continuing"); | ||
356 | |||
357 | /* Re-enable the IOCK line, wait for a few seconds */ | ||
358 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; | ||
359 | outb(reason, NMI_REASON_PORT); | ||
360 | |||
361 | i = 20000; | ||
362 | while (--i) { | ||
363 | touch_nmi_watchdog(); | ||
364 | udelay(100); | ||
365 | } | ||
366 | |||
367 | reason &= ~NMI_REASON_CLEAR_IOCHK; | ||
368 | outb(reason, NMI_REASON_PORT); | ||
369 | } | ||
370 | |||
371 | static notrace __kprobes void | ||
372 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) | ||
373 | { | ||
374 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == | ||
375 | NOTIFY_STOP) | ||
376 | return; | ||
377 | #ifdef CONFIG_MCA | ||
378 | /* | ||
379 | * Might actually be able to figure out what the guilty party | ||
380 | * is: | ||
381 | */ | ||
382 | if (MCA_bus) { | ||
383 | mca_handle_nmi(); | ||
384 | return; | ||
385 | } | ||
386 | #endif | ||
387 | pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", | ||
388 | reason, smp_processor_id()); | ||
389 | |||
390 | pr_emerg("Do you have a strange power saving mode enabled?\n"); | ||
391 | if (unknown_nmi_panic || panic_on_unrecovered_nmi) | ||
392 | panic("NMI: Not continuing"); | ||
393 | |||
394 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
395 | } | ||
396 | |||
397 | static notrace __kprobes void default_do_nmi(struct pt_regs *regs) | ||
398 | { | ||
399 | unsigned char reason = 0; | ||
400 | |||
401 | /* | ||
402 | * CPU-specific NMI must be processed before non-CPU-specific | ||
403 | * NMI, otherwise we may lose it, because the CPU-specific | ||
404 | * NMI can not be detected/processed on other CPUs. | ||
405 | */ | ||
406 | if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP) | ||
407 | return; | ||
408 | |||
409 | /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ | ||
410 | raw_spin_lock(&nmi_reason_lock); | ||
411 | reason = get_nmi_reason(); | ||
412 | |||
413 | if (reason & NMI_REASON_MASK) { | ||
414 | if (reason & NMI_REASON_SERR) | ||
415 | pci_serr_error(reason, regs); | ||
416 | else if (reason & NMI_REASON_IOCHK) | ||
417 | io_check_error(reason, regs); | ||
418 | #ifdef CONFIG_X86_32 | ||
419 | /* | ||
420 | * Reassert NMI in case it became active | ||
421 | * meanwhile as it's edge-triggered: | ||
422 | */ | ||
423 | reassert_nmi(); | ||
424 | #endif | ||
425 | raw_spin_unlock(&nmi_reason_lock); | ||
426 | return; | ||
427 | } | ||
428 | raw_spin_unlock(&nmi_reason_lock); | ||
429 | |||
430 | unknown_nmi_error(reason, regs); | ||
431 | } | ||
432 | |||
433 | dotraplinkage notrace __kprobes void | ||
434 | do_nmi(struct pt_regs *regs, long error_code) | ||
435 | { | ||
436 | nmi_enter(); | ||
437 | |||
438 | inc_irq_stat(__nmi_count); | ||
439 | |||
440 | if (!ignore_nmis) | ||
441 | default_do_nmi(regs); | ||
442 | |||
443 | nmi_exit(); | ||
444 | } | ||
445 | |||
446 | void stop_nmi(void) | ||
447 | { | ||
448 | ignore_nmis++; | ||
449 | } | ||
450 | |||
451 | void restart_nmi(void) | ||
452 | { | ||
453 | ignore_nmis--; | ||
454 | } | ||
455 | |||
456 | /* May run on IST stack. */ | 301 | /* May run on IST stack. */ |
457 | dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) | 302 | dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) |
458 | { | 303 | { |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 96646b3aeca8..75f9528e0372 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | |||
61 | } | 61 | } |
62 | 62 | ||
63 | 63 | ||
64 | static int profile_exceptions_notify(struct notifier_block *self, | 64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) |
65 | unsigned long val, void *data) | ||
66 | { | 65 | { |
67 | struct die_args *args = (struct die_args *)data; | 66 | if (ctr_running) |
68 | int ret = NOTIFY_DONE; | 67 | model->check_ctrs(regs, &__get_cpu_var(cpu_msrs)); |
69 | 68 | else if (!nmi_enabled) | |
70 | switch (val) { | 69 | return NMI_DONE; |
71 | case DIE_NMI: | 70 | else |
72 | if (ctr_running) | 71 | model->stop(&__get_cpu_var(cpu_msrs)); |
73 | model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs)); | 72 | return NMI_HANDLED; |
74 | else if (!nmi_enabled) | ||
75 | break; | ||
76 | else | ||
77 | model->stop(&__get_cpu_var(cpu_msrs)); | ||
78 | ret = NOTIFY_STOP; | ||
79 | break; | ||
80 | default: | ||
81 | break; | ||
82 | } | ||
83 | return ret; | ||
84 | } | 73 | } |
85 | 74 | ||
86 | static void nmi_cpu_save_registers(struct op_msrs *msrs) | 75 | static void nmi_cpu_save_registers(struct op_msrs *msrs) |
@@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy) | |||
363 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 352 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
364 | } | 353 | } |
365 | 354 | ||
366 | static struct notifier_block profile_exceptions_nb = { | ||
367 | .notifier_call = profile_exceptions_notify, | ||
368 | .next = NULL, | ||
369 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
370 | }; | ||
371 | |||
372 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) | 355 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) |
373 | { | 356 | { |
374 | struct op_msr *counters = msrs->counters; | 357 | struct op_msr *counters = msrs->counters; |
@@ -402,8 +385,6 @@ static void nmi_cpu_shutdown(void *dummy) | |||
402 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); | 385 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); |
403 | apic_write(APIC_LVTERR, v); | 386 | apic_write(APIC_LVTERR, v); |
404 | nmi_cpu_restore_registers(msrs); | 387 | nmi_cpu_restore_registers(msrs); |
405 | if (model->cpu_down) | ||
406 | model->cpu_down(); | ||
407 | } | 388 | } |
408 | 389 | ||
409 | static void nmi_cpu_up(void *dummy) | 390 | static void nmi_cpu_up(void *dummy) |
@@ -508,7 +489,8 @@ static int nmi_setup(void) | |||
508 | ctr_running = 0; | 489 | ctr_running = 0; |
509 | /* make variables visible to the nmi handler: */ | 490 | /* make variables visible to the nmi handler: */ |
510 | smp_mb(); | 491 | smp_mb(); |
511 | err = register_die_notifier(&profile_exceptions_nb); | 492 | err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify, |
493 | 0, "oprofile"); | ||
512 | if (err) | 494 | if (err) |
513 | goto fail; | 495 | goto fail; |
514 | 496 | ||
@@ -538,7 +520,7 @@ static void nmi_shutdown(void) | |||
538 | put_online_cpus(); | 520 | put_online_cpus(); |
539 | /* make variables visible to the nmi handler: */ | 521 | /* make variables visible to the nmi handler: */ |
540 | smp_mb(); | 522 | smp_mb(); |
541 | unregister_die_notifier(&profile_exceptions_nb); | 523 | unregister_nmi_handler(NMI_LOCAL, "oprofile"); |
542 | msrs = &get_cpu_var(cpu_msrs); | 524 | msrs = &get_cpu_var(cpu_msrs); |
543 | model->shutdown(msrs); | 525 | model->shutdown(msrs); |
544 | free_msrs(); | 526 | free_msrs(); |
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c index 720bf5a53c51..7f8052cd6620 100644 --- a/arch/x86/oprofile/nmi_timer_int.c +++ b/arch/x86/oprofile/nmi_timer_int.c | |||
@@ -18,32 +18,16 @@ | |||
18 | #include <asm/apic.h> | 18 | #include <asm/apic.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | 20 | ||
21 | static int profile_timer_exceptions_notify(struct notifier_block *self, | 21 | static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs) |
22 | unsigned long val, void *data) | ||
23 | { | 22 | { |
24 | struct die_args *args = (struct die_args *)data; | 23 | oprofile_add_sample(regs, 0); |
25 | int ret = NOTIFY_DONE; | 24 | return NMI_HANDLED; |
26 | |||
27 | switch (val) { | ||
28 | case DIE_NMI: | ||
29 | oprofile_add_sample(args->regs, 0); | ||
30 | ret = NOTIFY_STOP; | ||
31 | break; | ||
32 | default: | ||
33 | break; | ||
34 | } | ||
35 | return ret; | ||
36 | } | 25 | } |
37 | 26 | ||
38 | static struct notifier_block profile_timer_exceptions_nb = { | ||
39 | .notifier_call = profile_timer_exceptions_notify, | ||
40 | .next = NULL, | ||
41 | .priority = NMI_LOW_PRIOR, | ||
42 | }; | ||
43 | |||
44 | static int timer_start(void) | 27 | static int timer_start(void) |
45 | { | 28 | { |
46 | if (register_die_notifier(&profile_timer_exceptions_nb)) | 29 | if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify, |
30 | 0, "oprofile-timer")) | ||
47 | return 1; | 31 | return 1; |
48 | return 0; | 32 | return 0; |
49 | } | 33 | } |
@@ -51,7 +35,7 @@ static int timer_start(void) | |||
51 | 35 | ||
52 | static void timer_stop(void) | 36 | static void timer_stop(void) |
53 | { | 37 | { |
54 | unregister_die_notifier(&profile_timer_exceptions_nb); | 38 | unregister_nmi_handler(NMI_LOCAL, "oprofile-timer"); |
55 | synchronize_sched(); /* Allow already-started NMIs to complete. */ | 39 | synchronize_sched(); /* Allow already-started NMIs to complete. */ |
56 | } | 40 | } |
57 | 41 | ||
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 9cbb710dc94b..303f08637826 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -29,8 +29,6 @@ | |||
29 | #include "op_x86_model.h" | 29 | #include "op_x86_model.h" |
30 | #include "op_counter.h" | 30 | #include "op_counter.h" |
31 | 31 | ||
32 | #define NUM_COUNTERS 4 | ||
33 | #define NUM_COUNTERS_F15H 6 | ||
34 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | 32 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX |
35 | #define NUM_VIRT_COUNTERS 32 | 33 | #define NUM_VIRT_COUNTERS 32 |
36 | #else | 34 | #else |
@@ -70,62 +68,12 @@ static struct ibs_config ibs_config; | |||
70 | static struct ibs_state ibs_state; | 68 | static struct ibs_state ibs_state; |
71 | 69 | ||
72 | /* | 70 | /* |
73 | * IBS cpuid feature detection | ||
74 | */ | ||
75 | |||
76 | #define IBS_CPUID_FEATURES 0x8000001b | ||
77 | |||
78 | /* | ||
79 | * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but | ||
80 | * bit 0 is used to indicate the existence of IBS. | ||
81 | */ | ||
82 | #define IBS_CAPS_AVAIL (1U<<0) | ||
83 | #define IBS_CAPS_FETCHSAM (1U<<1) | ||
84 | #define IBS_CAPS_OPSAM (1U<<2) | ||
85 | #define IBS_CAPS_RDWROPCNT (1U<<3) | ||
86 | #define IBS_CAPS_OPCNT (1U<<4) | ||
87 | #define IBS_CAPS_BRNTRGT (1U<<5) | ||
88 | #define IBS_CAPS_OPCNTEXT (1U<<6) | ||
89 | |||
90 | #define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ | ||
91 | | IBS_CAPS_FETCHSAM \ | ||
92 | | IBS_CAPS_OPSAM) | ||
93 | |||
94 | /* | ||
95 | * IBS APIC setup | ||
96 | */ | ||
97 | #define IBSCTL 0x1cc | ||
98 | #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) | ||
99 | #define IBSCTL_LVT_OFFSET_MASK 0x0F | ||
100 | |||
101 | /* | ||
102 | * IBS randomization macros | 71 | * IBS randomization macros |
103 | */ | 72 | */ |
104 | #define IBS_RANDOM_BITS 12 | 73 | #define IBS_RANDOM_BITS 12 |
105 | #define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1) | 74 | #define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1) |
106 | #define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5)) | 75 | #define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5)) |
107 | 76 | ||
108 | static u32 get_ibs_caps(void) | ||
109 | { | ||
110 | u32 ibs_caps; | ||
111 | unsigned int max_level; | ||
112 | |||
113 | if (!boot_cpu_has(X86_FEATURE_IBS)) | ||
114 | return 0; | ||
115 | |||
116 | /* check IBS cpuid feature flags */ | ||
117 | max_level = cpuid_eax(0x80000000); | ||
118 | if (max_level < IBS_CPUID_FEATURES) | ||
119 | return IBS_CAPS_DEFAULT; | ||
120 | |||
121 | ibs_caps = cpuid_eax(IBS_CPUID_FEATURES); | ||
122 | if (!(ibs_caps & IBS_CAPS_AVAIL)) | ||
123 | /* cpuid flags not valid */ | ||
124 | return IBS_CAPS_DEFAULT; | ||
125 | |||
126 | return ibs_caps; | ||
127 | } | ||
128 | |||
129 | /* | 77 | /* |
130 | * 16-bit Linear Feedback Shift Register (LFSR) | 78 | * 16-bit Linear Feedback Shift Register (LFSR) |
131 | * | 79 | * |
@@ -316,81 +264,6 @@ static void op_amd_stop_ibs(void) | |||
316 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); | 264 | wrmsrl(MSR_AMD64_IBSOPCTL, 0); |
317 | } | 265 | } |
318 | 266 | ||
319 | static inline int get_eilvt(int offset) | ||
320 | { | ||
321 | return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); | ||
322 | } | ||
323 | |||
324 | static inline int put_eilvt(int offset) | ||
325 | { | ||
326 | return !setup_APIC_eilvt(offset, 0, 0, 1); | ||
327 | } | ||
328 | |||
329 | static inline int ibs_eilvt_valid(void) | ||
330 | { | ||
331 | int offset; | ||
332 | u64 val; | ||
333 | int valid = 0; | ||
334 | |||
335 | preempt_disable(); | ||
336 | |||
337 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
338 | offset = val & IBSCTL_LVT_OFFSET_MASK; | ||
339 | |||
340 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) { | ||
341 | pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n", | ||
342 | smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); | ||
343 | goto out; | ||
344 | } | ||
345 | |||
346 | if (!get_eilvt(offset)) { | ||
347 | pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n", | ||
348 | smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); | ||
349 | goto out; | ||
350 | } | ||
351 | |||
352 | valid = 1; | ||
353 | out: | ||
354 | preempt_enable(); | ||
355 | |||
356 | return valid; | ||
357 | } | ||
358 | |||
359 | static inline int get_ibs_offset(void) | ||
360 | { | ||
361 | u64 val; | ||
362 | |||
363 | rdmsrl(MSR_AMD64_IBSCTL, val); | ||
364 | if (!(val & IBSCTL_LVT_OFFSET_VALID)) | ||
365 | return -EINVAL; | ||
366 | |||
367 | return val & IBSCTL_LVT_OFFSET_MASK; | ||
368 | } | ||
369 | |||
370 | static void setup_APIC_ibs(void) | ||
371 | { | ||
372 | int offset; | ||
373 | |||
374 | offset = get_ibs_offset(); | ||
375 | if (offset < 0) | ||
376 | goto failed; | ||
377 | |||
378 | if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) | ||
379 | return; | ||
380 | failed: | ||
381 | pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n", | ||
382 | smp_processor_id()); | ||
383 | } | ||
384 | |||
385 | static void clear_APIC_ibs(void) | ||
386 | { | ||
387 | int offset; | ||
388 | |||
389 | offset = get_ibs_offset(); | ||
390 | if (offset >= 0) | ||
391 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | ||
392 | } | ||
393 | |||
394 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX | 267 | #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX |
395 | 268 | ||
396 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, | 269 | static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, |
@@ -439,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs) | |||
439 | goto fail; | 312 | goto fail; |
440 | } | 313 | } |
441 | /* both registers must be reserved */ | 314 | /* both registers must be reserved */ |
442 | if (num_counters == NUM_COUNTERS_F15H) { | 315 | if (num_counters == AMD64_NUM_COUNTERS_F15H) { |
443 | msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1); | 316 | msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1); |
444 | msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1); | 317 | msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1); |
445 | } else { | 318 | } else { |
@@ -504,15 +377,6 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, | |||
504 | val |= op_x86_get_ctrl(model, &counter_config[virt]); | 377 | val |= op_x86_get_ctrl(model, &counter_config[virt]); |
505 | wrmsrl(msrs->controls[i].addr, val); | 378 | wrmsrl(msrs->controls[i].addr, val); |
506 | } | 379 | } |
507 | |||
508 | if (ibs_caps) | ||
509 | setup_APIC_ibs(); | ||
510 | } | ||
511 | |||
512 | static void op_amd_cpu_shutdown(void) | ||
513 | { | ||
514 | if (ibs_caps) | ||
515 | clear_APIC_ibs(); | ||
516 | } | 380 | } |
517 | 381 | ||
518 | static int op_amd_check_ctrs(struct pt_regs * const regs, | 382 | static int op_amd_check_ctrs(struct pt_regs * const regs, |
@@ -575,86 +439,6 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
575 | op_amd_stop_ibs(); | 439 | op_amd_stop_ibs(); |
576 | } | 440 | } |
577 | 441 | ||
578 | static int setup_ibs_ctl(int ibs_eilvt_off) | ||
579 | { | ||
580 | struct pci_dev *cpu_cfg; | ||
581 | int nodes; | ||
582 | u32 value = 0; | ||
583 | |||
584 | nodes = 0; | ||
585 | cpu_cfg = NULL; | ||
586 | do { | ||
587 | cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD, | ||
588 | PCI_DEVICE_ID_AMD_10H_NB_MISC, | ||
589 | cpu_cfg); | ||
590 | if (!cpu_cfg) | ||
591 | break; | ||
592 | ++nodes; | ||
593 | pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off | ||
594 | | IBSCTL_LVT_OFFSET_VALID); | ||
595 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | ||
596 | if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { | ||
597 | pci_dev_put(cpu_cfg); | ||
598 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | ||
599 | "IBSCTL = 0x%08x\n", value); | ||
600 | return -EINVAL; | ||
601 | } | ||
602 | } while (1); | ||
603 | |||
604 | if (!nodes) { | ||
605 | printk(KERN_DEBUG "No CPU node configured for IBS\n"); | ||
606 | return -ENODEV; | ||
607 | } | ||
608 | |||
609 | return 0; | ||
610 | } | ||
611 | |||
612 | /* | ||
613 | * This runs only on the current cpu. We try to find an LVT offset and | ||
614 | * setup the local APIC. For this we must disable preemption. On | ||
615 | * success we initialize all nodes with this offset. This updates then | ||
616 | * the offset in the IBS_CTL per-node msr. The per-core APIC setup of | ||
617 | * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_- | ||
618 | * amd_cpu_shutdown() using the new offset. | ||
619 | */ | ||
620 | static int force_ibs_eilvt_setup(void) | ||
621 | { | ||
622 | int offset; | ||
623 | int ret; | ||
624 | |||
625 | preempt_disable(); | ||
626 | /* find the next free available EILVT entry, skip offset 0 */ | ||
627 | for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { | ||
628 | if (get_eilvt(offset)) | ||
629 | break; | ||
630 | } | ||
631 | preempt_enable(); | ||
632 | |||
633 | if (offset == APIC_EILVT_NR_MAX) { | ||
634 | printk(KERN_DEBUG "No EILVT entry available\n"); | ||
635 | return -EBUSY; | ||
636 | } | ||
637 | |||
638 | ret = setup_ibs_ctl(offset); | ||
639 | if (ret) | ||
640 | goto out; | ||
641 | |||
642 | if (!ibs_eilvt_valid()) { | ||
643 | ret = -EFAULT; | ||
644 | goto out; | ||
645 | } | ||
646 | |||
647 | pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset); | ||
648 | pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); | ||
649 | |||
650 | return 0; | ||
651 | out: | ||
652 | preempt_disable(); | ||
653 | put_eilvt(offset); | ||
654 | preempt_enable(); | ||
655 | return ret; | ||
656 | } | ||
657 | |||
658 | /* | 442 | /* |
659 | * check and reserve APIC extended interrupt LVT offset for IBS if | 443 | * check and reserve APIC extended interrupt LVT offset for IBS if |
660 | * available | 444 | * available |
@@ -667,17 +451,6 @@ static void init_ibs(void) | |||
667 | if (!ibs_caps) | 451 | if (!ibs_caps) |
668 | return; | 452 | return; |
669 | 453 | ||
670 | if (ibs_eilvt_valid()) | ||
671 | goto out; | ||
672 | |||
673 | if (!force_ibs_eilvt_setup()) | ||
674 | goto out; | ||
675 | |||
676 | /* Failed to setup ibs */ | ||
677 | ibs_caps = 0; | ||
678 | return; | ||
679 | |||
680 | out: | ||
681 | printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); | 454 | printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); |
682 | } | 455 | } |
683 | 456 | ||
@@ -741,9 +514,9 @@ static int op_amd_init(struct oprofile_operations *ops) | |||
741 | ops->create_files = setup_ibs_files; | 514 | ops->create_files = setup_ibs_files; |
742 | 515 | ||
743 | if (boot_cpu_data.x86 == 0x15) { | 516 | if (boot_cpu_data.x86 == 0x15) { |
744 | num_counters = NUM_COUNTERS_F15H; | 517 | num_counters = AMD64_NUM_COUNTERS_F15H; |
745 | } else { | 518 | } else { |
746 | num_counters = NUM_COUNTERS; | 519 | num_counters = AMD64_NUM_COUNTERS; |
747 | } | 520 | } |
748 | 521 | ||
749 | op_amd_spec.num_counters = num_counters; | 522 | op_amd_spec.num_counters = num_counters; |
@@ -760,7 +533,6 @@ struct op_x86_model_spec op_amd_spec = { | |||
760 | .init = op_amd_init, | 533 | .init = op_amd_init, |
761 | .fill_in_addresses = &op_amd_fill_in_addresses, | 534 | .fill_in_addresses = &op_amd_fill_in_addresses, |
762 | .setup_ctrs = &op_amd_setup_ctrs, | 535 | .setup_ctrs = &op_amd_setup_ctrs, |
763 | .cpu_down = &op_amd_cpu_shutdown, | ||
764 | .check_ctrs = &op_amd_check_ctrs, | 536 | .check_ctrs = &op_amd_check_ctrs, |
765 | .start = &op_amd_start, | 537 | .start = &op_amd_start, |
766 | .stop = &op_amd_stop, | 538 | .stop = &op_amd_stop, |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 94b745045e45..d90528ea5412 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -28,7 +28,7 @@ static int counter_width = 32; | |||
28 | 28 | ||
29 | #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) | 29 | #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) |
30 | 30 | ||
31 | static u64 *reset_value; | 31 | static u64 reset_value[OP_MAX_COUNTER]; |
32 | 32 | ||
33 | static void ppro_shutdown(struct op_msrs const * const msrs) | 33 | static void ppro_shutdown(struct op_msrs const * const msrs) |
34 | { | 34 | { |
@@ -40,10 +40,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
40 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | 40 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); |
41 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | 41 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); |
42 | } | 42 | } |
43 | if (reset_value) { | ||
44 | kfree(reset_value); | ||
45 | reset_value = NULL; | ||
46 | } | ||
47 | } | 43 | } |
48 | 44 | ||
49 | static int ppro_fill_in_addresses(struct op_msrs * const msrs) | 45 | static int ppro_fill_in_addresses(struct op_msrs * const msrs) |
@@ -79,13 +75,6 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, | |||
79 | u64 val; | 75 | u64 val; |
80 | int i; | 76 | int i; |
81 | 77 | ||
82 | if (!reset_value) { | ||
83 | reset_value = kzalloc(sizeof(reset_value[0]) * num_counters, | ||
84 | GFP_ATOMIC); | ||
85 | if (!reset_value) | ||
86 | return; | ||
87 | } | ||
88 | |||
89 | if (cpu_has_arch_perfmon) { | 78 | if (cpu_has_arch_perfmon) { |
90 | union cpuid10_eax eax; | 79 | union cpuid10_eax eax; |
91 | eax.full = cpuid_eax(0xa); | 80 | eax.full = cpuid_eax(0xa); |
@@ -141,13 +130,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
141 | u64 val; | 130 | u64 val; |
142 | int i; | 131 | int i; |
143 | 132 | ||
144 | /* | ||
145 | * This can happen if perf counters are in use when | ||
146 | * we steal the die notifier NMI. | ||
147 | */ | ||
148 | if (unlikely(!reset_value)) | ||
149 | goto out; | ||
150 | |||
151 | for (i = 0; i < num_counters; ++i) { | 133 | for (i = 0; i < num_counters; ++i) { |
152 | if (!reset_value[i]) | 134 | if (!reset_value[i]) |
153 | continue; | 135 | continue; |
@@ -158,7 +140,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
158 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | 140 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
159 | } | 141 | } |
160 | 142 | ||
161 | out: | ||
162 | /* Only P6 based Pentium M need to re-unmask the apic vector but it | 143 | /* Only P6 based Pentium M need to re-unmask the apic vector but it |
163 | * doesn't hurt other P6 variant */ | 144 | * doesn't hurt other P6 variant */ |
164 | apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); | 145 | apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); |
@@ -179,8 +160,6 @@ static void ppro_start(struct op_msrs const * const msrs) | |||
179 | u64 val; | 160 | u64 val; |
180 | int i; | 161 | int i; |
181 | 162 | ||
182 | if (!reset_value) | ||
183 | return; | ||
184 | for (i = 0; i < num_counters; ++i) { | 163 | for (i = 0; i < num_counters; ++i) { |
185 | if (reset_value[i]) { | 164 | if (reset_value[i]) { |
186 | rdmsrl(msrs->controls[i].addr, val); | 165 | rdmsrl(msrs->controls[i].addr, val); |
@@ -196,8 +175,6 @@ static void ppro_stop(struct op_msrs const * const msrs) | |||
196 | u64 val; | 175 | u64 val; |
197 | int i; | 176 | int i; |
198 | 177 | ||
199 | if (!reset_value) | ||
200 | return; | ||
201 | for (i = 0; i < num_counters; ++i) { | 178 | for (i = 0; i < num_counters; ++i) { |
202 | if (!reset_value[i]) | 179 | if (!reset_value[i]) |
203 | continue; | 180 | continue; |
@@ -242,7 +219,7 @@ static void arch_perfmon_setup_counters(void) | |||
242 | eax.split.bit_width = 40; | 219 | eax.split.bit_width = 40; |
243 | } | 220 | } |
244 | 221 | ||
245 | num_counters = eax.split.num_counters; | 222 | num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER); |
246 | 223 | ||
247 | op_arch_perfmon_spec.num_counters = num_counters; | 224 | op_arch_perfmon_spec.num_counters = num_counters; |
248 | op_arch_perfmon_spec.num_controls = num_counters; | 225 | op_arch_perfmon_spec.num_controls = num_counters; |
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 89017fa1fd63..71e8a67337e2 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h | |||
@@ -43,7 +43,6 @@ struct op_x86_model_spec { | |||
43 | int (*fill_in_addresses)(struct op_msrs * const msrs); | 43 | int (*fill_in_addresses)(struct op_msrs * const msrs); |
44 | void (*setup_ctrs)(struct op_x86_model_spec const *model, | 44 | void (*setup_ctrs)(struct op_x86_model_spec const *model, |
45 | struct op_msrs const * const msrs); | 45 | struct op_msrs const * const msrs); |
46 | void (*cpu_down)(void); | ||
47 | int (*check_ctrs)(struct pt_regs * const regs, | 46 | int (*check_ctrs)(struct pt_regs * const regs, |
48 | struct op_msrs const * const msrs); | 47 | struct op_msrs const * const msrs); |
49 | void (*start)(struct op_msrs const * const msrs); | 48 | void (*start)(struct op_msrs const * const msrs); |