author     Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 22:40:37 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 22:40:37 -0500
commit     f8ef15d6b9d8e38729cd740a43919adf88468119 (patch)
tree       f950b0342b8ae7ad0a11477cdf669015fc85366e /arch/x86
parent     6515925b8259549b7f2187e25d3260306e3e85e5 (diff)
parent     ff1fb5f6b4925a536ffb8171e5f2dbd01ccfeb97 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar.

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add Intel IvyBridge event scheduling constraints
  ftrace: Call ftrace cleanup module notifier after all other notifiers
  tracing/syscalls: Allow archs to ignore tracing compat syscalls
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/ftrace.h            24
-rw-r--r--   arch/x86/include/asm/thread_info.h        1
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel.c   23
3 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 86cb51e1ca96..0525a8bdf65d 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -72,4 +72,28 @@ int ftrace_int3_handler(struct pt_regs *regs);
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+
+#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
+#include <asm/compat.h>
+
+/*
+ * Because ia32 syscalls do not map to x86_64 syscall numbers,
+ * this screws up the trace output when tracing an ia32 task.
+ * Instead of reporting bogus syscalls, just do not trace them.
+ *
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+	if (is_compat_task())
+		return true;
+	return false;
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
+#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
+
 #endif /* _ASM_X86_FTRACE_H */
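
The block added above gives the generic syscall-tracing code an opt-out hook: when ARCH_TRACE_IGNORE_COMPAT_SYSCALLS is defined, the tracer can ask arch_trace_is_compat_syscall() whether the current task is an ia32 task and drop the event, instead of reporting an x86_64 syscall number that does not correspond to what the task actually invoked. A minimal sketch of how a caller on the generic side might consult the hook; the helper name example_trace_get_syscall_nr() is an illustrative assumption, not the exact code added under kernel/trace:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>
#include <asm/ftrace.h>

/*
 * Illustrative helper (name is an assumption): resolve the syscall number
 * to trace, or return -1 so the caller skips the event entirely.
 */
static inline int example_trace_get_syscall_nr(struct task_struct *task,
					       struct pt_regs *regs)
{
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
	/* ia32 syscall numbers do not match the x86_64 table; do not trace. */
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;
#endif
	return syscall_get_nr(task, regs);
}

Returning -1 lets the tracepoint handler bail out before it looks up syscall metadata, which matches the intent stated in the comment above: rather than emit bogus syscall names, emit nothing. Users who still want these events can use the raw syscall tracepoints with filtering.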
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2d946e63ee82..2cd056e3ada3 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,7 +20,6 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
-#include <asm/ftrace.h>
 #include <linux/atomic.h>
 
 struct thread_info {
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 4914e94ad6e8..529c8931fc02 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -107,6 +107,27 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 	EVENT_CONSTRAINT_END
 };
 
+static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
+{
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
+	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
+	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+	EVENT_CONSTRAINT_END
+};
+
 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
 {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
@@ -2095,7 +2116,7 @@ __init int intel_pmu_init(void)
 
 	intel_pmu_lbr_init_snb();
 
-	x86_pmu.event_constraints = intel_snb_event_constraints;
+	x86_pmu.event_constraints = intel_ivb_event_constraints;
 	x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
 	x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
 	x86_pmu.extra_regs = intel_snb_extra_regs;
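
In the constraint table above, the second argument of each entry is a bitmask of the general-purpose counters the event is allowed to run on: 0xf permits counters 0-3, 0x4 pins the event to counter 2, and 0x2 to counter 1 (the FIXED_EVENT_CONSTRAINT entries map to the fixed counters instead). Before this table existed, the IvyBridge init path reused intel_snb_event_constraints, as the second hunk shows, so IVB-specific events could be scheduled on counters that cannot count them. A standalone user-space sketch (not kernel code) of how such a mask decodes into allowed counters:

#include <stdio.h>

/* Decode a perf event constraint counter mask into the GP counters it allows. */
static void print_allowed_counters(const char *event, unsigned int cntmask)
{
	printf("%-45s ->", event);
	for (unsigned int i = 0; i < 4; i++)	/* the masks above are 4 bits wide */
		if (cntmask & (1u << i))
			printf(" counter %u", i);
	printf("\n");
}

int main(void)
{
	print_allowed_counters("CYCLE_ACTIVITY.STALLS_L1D_PENDING (0x4)", 0x4);
	print_allowed_counters("INST_RETIRED.PREC_DIST (0x2)", 0x2);
	print_allowed_counters("MEM_UOPS_RETIRED.* (0xf)", 0xf);
	return 0;
}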