author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:03:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:03:38 -0400
commit		7115e3fcf45514db7525a05365b10454ff7f345e (patch)
tree		17450e6337d559cc35dae6a7a73abab01ac63f00 /arch/x86/include
parent		1f6e05171bb5cc32a4d6437ab2269fc21d169ca7 (diff)
parent		c752d04066a36ae30b29795f3fa3f536292c1f8c (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (121 commits)
perf symbols: Increase symbol KSYM_NAME_LEN size
perf hists browser: Refuse 'a' hotkey on non symbolic views
perf ui browser: Use libslang to read keys
perf tools: Fix tracing info recording
perf hists browser: Elide DSO column when it is set to just one DSO, ditto for threads
perf hists: Don't consider filtered entries when calculating column widths
perf hists: Don't decay total_period for filtered entries
perf hists browser: Honour symbol_conf.show_{nr_samples,total_period}
perf hists browser: Do not exit on tab key with single event
perf annotate browser: Don't change selection line when returning from callq
perf tools: handle endianness of feature bitmap
perf tools: Add prelink suggestion to dso update message
perf script: Fix unknown feature comment
perf hists browser: Apply the dso and thread filters when merging new batches
perf hists: Move the dso and thread filters from hist_browser
perf ui browser: Honour the xterm colors
perf top tui: Give color hints just on the percentage, like on --stdio
perf ui browser: Make the colors configurable and change the defaults
perf tui: Remove unneeded call to newtCls on startup
perf hists: Don't format the percentage on hist_entry__snprintf
...
Fix up conflicts in arch/x86/kernel/kprobes.c manually.
Ingo's tree did the insane "add volatile to const array", which just
doesn't make sense ("volatile const"?). But we could remove the const
*and* make the array volatile to make doubly sure that gcc doesn't
optimize it away..
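
A minimal sketch of the two qualifier choices being weighed here (the array name is hypothetical, not the actual kprobes.c symbol):

/* as merged in Ingo's tree: both qualifiers on the same array */
static volatile const u32 demo_table_tip[8];

/* as resolved in the merge: const dropped, volatile kept, so that
 * gcc cannot optimize accesses to the table away */
static volatile u32 demo_table_resolved[8];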
Also fix up kernel/trace/ring_buffer.c non-data-conflicts manually: the
reader_lock has been turned into a raw lock by the core locking merge,
and there was a new user of it introduced in this perf core merge. Make
sure that new use also uses the raw accessor functions.
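
A sketch of what that means for a new call site, assuming a structure shaped like the ring buffer's per-CPU state (the names are illustrative, not the actual kernel/trace/ring_buffer.c code):

#include <linux/spinlock.h>

struct demo_ring_buffer_per_cpu {
	raw_spinlock_t	reader_lock;	/* raw_spinlock_t after the core locking merge */
};

static void demo_read_side(struct demo_ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long flags;

	/* new users must take the raw accessors, not spin_lock_irqsave() */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	/* ... reader-side work ... */
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}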
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/nmi.h		37
-rw-r--r--	arch/x86/include/asm/perf_event.h	55
-rw-r--r--	arch/x86/include/asm/reboot.h		 2
3 files changed, 71 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 4886a68f267e..fd3f9f18cf3f 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -22,27 +22,26 @@ void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
-/*
- * Define some priorities for the nmi notifier call chain.
- *
- * Create a local nmi bit that has a higher priority than
- * external nmis, because the local ones are more frequent.
- *
- * Also setup some default high/normal/low settings for
- * subsystems to registers with. Using 4 bits to separate
- * the priorities. This can go a lot higher if needed be.
- */
-
-#define NMI_LOCAL_SHIFT		16	/* randomly picked */
-#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
-#define NMI_HIGH_PRIOR		(1ULL << 8)
-#define NMI_NORMAL_PRIOR	(1ULL << 4)
-#define NMI_LOW_PRIOR		(1ULL << 0)
-#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
-#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
-#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+#define NMI_FLAG_FIRST	1
+
+enum {
+	NMI_LOCAL=0,
+	NMI_UNKNOWN,
+	NMI_MAX
+};
+
+#define NMI_DONE	0
+#define NMI_HANDLED	1
+
+typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
+
+int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long,
+			 const char *);
+
+void unregister_nmi_handler(unsigned int, const char *);
 
 void stop_nmi(void);
 void restart_nmi(void);
+void local_touch_nmi(void);
 
 #endif /* _ASM_X86_NMI_H */
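
For context, a minimal sketch of how a subsystem would use the interface declared in this hunk (the handler and the "demo" name are made up; only the prototypes and constants come from the diff):

#include <asm/nmi.h>

static int demo_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	/* return NMI_HANDLED if this NMI was ours, NMI_DONE otherwise */
	return NMI_DONE;
}

static int demo_register(void)
{
	/* third argument is a flags word (e.g. NMI_FLAG_FIRST); 0 here */
	return register_nmi_handler(NMI_LOCAL, demo_nmi_handler, 0, "demo");
}

static void demo_unregister(void)
{
	unregister_nmi_handler(NMI_LOCAL, "demo");
}

Handlers on the NMI_LOCAL chain see local NMIs first; anything left unclaimed falls through to the NMI_UNKNOWN chain.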
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 094fb30817ab..f61c62f7d5d8 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,6 +29,9 @@
 #define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
 
+#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
+#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)
+
 #define AMD64_EVENTSEL_EVENT	\
 	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
 #define INTEL_ARCH_EVENT_MASK	\
@@ -43,14 +46,17 @@
 #define AMD64_RAW_EVENT_MASK		\
 	(X86_RAW_EVENT_MASK          |  \
 	 AMD64_EVENTSEL_EVENT)
+#define AMD64_NUM_COUNTERS		4
+#define AMD64_NUM_COUNTERS_F15H		6
+#define AMD64_NUM_COUNTERS_MAX		AMD64_NUM_COUNTERS_F15H
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
 		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
 
 #define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
 
 /*
  * Intel "Architectural Performance Monitoring" CPUID
@@ -110,6 +116,35 @@ union cpuid10_edx {
  */
 #define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
 
+/*
+ * IBS cpuid feature detection
+ */
+
+#define IBS_CPUID_FEATURES		0x8000001b
+
+/*
+ * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
+ * bit 0 is used to indicate the existence of IBS.
+ */
+#define IBS_CAPS_AVAIL			(1U<<0)
+#define IBS_CAPS_FETCHSAM		(1U<<1)
+#define IBS_CAPS_OPSAM			(1U<<2)
+#define IBS_CAPS_RDWROPCNT		(1U<<3)
+#define IBS_CAPS_OPCNT			(1U<<4)
+#define IBS_CAPS_BRNTRGT		(1U<<5)
+#define IBS_CAPS_OPCNTEXT		(1U<<6)
+
+#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
+					 | IBS_CAPS_FETCHSAM	\
+					 | IBS_CAPS_OPSAM)
+
+/*
+ * IBS APIC setup
+ */
+#define IBSCTL				0x1cc
+#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
+#define IBSCTL_LVT_OFFSET_MASK		0x0F
+
 /* IbsFetchCtl bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
@@ -124,6 +159,8 @@ union cpuid10_edx {
 #define IBS_OP_MAX_CNT		0x0000FFFFULL
 #define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
 
+extern u32 get_ibs_caps(void);
+
 #ifdef CONFIG_PERF_EVENTS
 extern void perf_events_lapic_init(void);
 
@@ -159,7 +196,19 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	); \
 }
 
+struct perf_guest_switch_msr {
+	unsigned msr;
+	u64 host, guest;
+};
+
+extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
 #else
+static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+	*nr = 0;
+	return NULL;
+}
+
 static inline void perf_events_lapic_init(void)	{ }
 #endif
 
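
A sketch of how a hypervisor-side consumer might use the guest-switch list declared above (KVM does something along these lines; the function and loop below are illustrative, not the actual KVM code):

#include <asm/msr.h>
#include <asm/perf_event.h>

static void demo_switch_to_guest_pmu(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);	/* nr is 0 when perf events are disabled */
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);	/* write .host back on VM-exit */
}

The IBS side is simpler: get_ibs_caps() returns 0 when IBS is absent, otherwise a capability word to test against the IBS_CAPS_* bits above.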
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 3250e3d605d9..92f297069e87 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
 #define MRR_BIOS	0
 #define MRR_APM		1
 
-typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 
 #endif /* _ASM_X86_REBOOT_H */
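
And a sketch of a callback matching the updated nmi_shootdown_cb prototype (the bodies are made up; only the signature and nmi_shootdown_cpus() come from the hunk above):

#include <asm/reboot.h>

static void demo_shootdown_callback(int cpu, struct pt_regs *regs)
{
	/* per-CPU teardown before the crash/reboot path proceeds */
}

static void demo_crash_path(void)
{
	/* NMIs the other CPUs and runs the callback on each of them */
	nmi_shootdown_cpus(demo_shootdown_callback);
}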