author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-02-28 13:20:25 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-02-28 13:20:25 -0500
commit    | 6556a6743549defc32e5f90ee2cb1ecd833a44c3 (patch)
tree      | 622306583d4a3c13235a8bfc012854c125c597f1 /arch
parent    | e0d272429a34ff143bfa04ee8e29dd4eed2964c7 (diff)
parent    | 1dd2980d990068e20045b90c424518cc7f3657ff (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (172 commits)
perf_event, amd: Fix spinlock initialization
perf_event: Fix preempt warning in perf_clock()
perf tools: Flush maps on COMM events
perf_events, x86: Split PMU definitions into separate files
perf annotate: Handle samples not at objdump output addr boundaries
perf_events, x86: Remove superflous MSR writes
perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
perf_events, x86: AMD event scheduling
perf_events: Add new start/stop PMU callbacks
perf_events: Report the MMAP pgoff value in bytes
perf annotate: Defer allocating sym_priv->hist array
perf symbols: Improve debugging information about symtab origins
perf top: Use a macro instead of a constant variable
perf symbols: Check the right return variable
perf/scripts: Tag syscall_name helper as not yet available
perf/scripts: Add perf-trace-python Documentation
perf/scripts: Remove unnecessary PyTuple resizes
perf/scripts: Add syscall tracing scripts
perf/scripts: Add Python scripting engine
perf/scripts: Remove check-perf-trace from listed scripts
...
Fix trivial conflict in tools/perf/util/probe-event.c
Diffstat (limited to 'arch')
23 files changed, 2143 insertions(+), 1395 deletions(-)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 9adac441ac9b..7026b29e277a 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
870 | return 1; | 870 | return 1; |
871 | 871 | ||
872 | ss_probe: | 872 | ss_probe: |
873 | #if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) | 873 | #if !defined(CONFIG_PREEMPT) |
874 | if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { | 874 | if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { |
875 | /* Boost up -- we can execute copied instructions directly */ | 875 | /* Boost up -- we can execute copied instructions directly */ |
876 | ia64_psr(regs)->ri = p->ainsn.slot; | 876 | ia64_psr(regs)->ri = p->ainsn.slot; |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index a3c11cac3d71..95ad9dad298e 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -495,9 +495,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
495 | 495 | ||
496 | entry->nr = 0; | 496 | entry->nr = 0; |
497 | 497 | ||
498 | if (current->pid == 0) /* idle task? */ | ||
499 | return entry; | ||
500 | |||
501 | if (!user_mode(regs)) { | 498 | if (!user_mode(regs)) { |
502 | perf_callchain_kernel(regs, entry); | 499 | perf_callchain_kernel(regs, entry); |
503 | if (current->mm) | 500 | if (current->mm) |
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 1eb85fbf53a5..b6cf8f1f4d35 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
718 | return n; | 718 | return n; |
719 | } | 719 | } |
720 | 720 | ||
721 | static void event_sched_in(struct perf_event *event, int cpu) | 721 | static void event_sched_in(struct perf_event *event) |
722 | { | 722 | { |
723 | event->state = PERF_EVENT_STATE_ACTIVE; | 723 | event->state = PERF_EVENT_STATE_ACTIVE; |
724 | event->oncpu = cpu; | 724 | event->oncpu = smp_processor_id(); |
725 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; | 725 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; |
726 | if (is_software_event(event)) | 726 | if (is_software_event(event)) |
727 | event->pmu->enable(event); | 727 | event->pmu->enable(event); |
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
735 | */ | 735 | */ |
736 | int hw_perf_group_sched_in(struct perf_event *group_leader, | 736 | int hw_perf_group_sched_in(struct perf_event *group_leader, |
737 | struct perf_cpu_context *cpuctx, | 737 | struct perf_cpu_context *cpuctx, |
738 | struct perf_event_context *ctx, int cpu) | 738 | struct perf_event_context *ctx) |
739 | { | 739 | { |
740 | struct cpu_hw_events *cpuhw; | 740 | struct cpu_hw_events *cpuhw; |
741 | long i, n, n0; | 741 | long i, n, n0; |
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
766 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | 766 | cpuhw->event[i]->hw.config = cpuhw->events[i]; |
767 | cpuctx->active_oncpu += n; | 767 | cpuctx->active_oncpu += n; |
768 | n = 1; | 768 | n = 1; |
769 | event_sched_in(group_leader, cpu); | 769 | event_sched_in(group_leader); |
770 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { | 770 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { |
771 | if (sub->state != PERF_EVENT_STATE_OFF) { | 771 | if (sub->state != PERF_EVENT_STATE_OFF) { |
772 | event_sched_in(sub, cpu); | 772 | event_sched_in(sub); |
773 | ++n; | 773 | ++n; |
774 | } | 774 | } |
775 | } | 775 | } |
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index 24ea837eac5b..a9dd3abde28e 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -68,9 +68,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
68 | 68 | ||
69 | is_user = user_mode(regs); | 69 | is_user = user_mode(regs); |
70 | 70 | ||
71 | if (!current || current->pid == 0) | ||
72 | return; | ||
73 | |||
74 | if (is_user && current->state != TASK_RUNNING) | 71 | if (is_user && current->state != TASK_RUNNING) |
75 | return; | 72 | return; |
76 | 73 | ||
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e856456ec02f..9f2b2bac8b2b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
980 | return n; | 980 | return n; |
981 | } | 981 | } |
982 | 982 | ||
983 | static void event_sched_in(struct perf_event *event, int cpu) | 983 | static void event_sched_in(struct perf_event *event) |
984 | { | 984 | { |
985 | event->state = PERF_EVENT_STATE_ACTIVE; | 985 | event->state = PERF_EVENT_STATE_ACTIVE; |
986 | event->oncpu = cpu; | 986 | event->oncpu = smp_processor_id(); |
987 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; | 987 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; |
988 | if (is_software_event(event)) | 988 | if (is_software_event(event)) |
989 | event->pmu->enable(event); | 989 | event->pmu->enable(event); |
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
991 | 991 | ||
992 | int hw_perf_group_sched_in(struct perf_event *group_leader, | 992 | int hw_perf_group_sched_in(struct perf_event *group_leader, |
993 | struct perf_cpu_context *cpuctx, | 993 | struct perf_cpu_context *cpuctx, |
994 | struct perf_event_context *ctx, int cpu) | 994 | struct perf_event_context *ctx) |
995 | { | 995 | { |
996 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 996 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
997 | struct perf_event *sub; | 997 | struct perf_event *sub; |
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
1015 | 1015 | ||
1016 | cpuctx->active_oncpu += n; | 1016 | cpuctx->active_oncpu += n; |
1017 | n = 1; | 1017 | n = 1; |
1018 | event_sched_in(group_leader, cpu); | 1018 | event_sched_in(group_leader); |
1019 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { | 1019 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { |
1020 | if (sub->state != PERF_EVENT_STATE_OFF) { | 1020 | if (sub->state != PERF_EVENT_STATE_OFF) { |
1021 | event_sched_in(sub, cpu); | 1021 | event_sched_in(sub); |
1022 | n++; | 1022 | n++; |
1023 | } | 1023 | } |
1024 | } | 1024 | } |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 69b74a7b877f..ac80b7d70014 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -65,12 +65,17 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
65 | void *text, void *text_end); | 65 | void *text, void *text_end); |
66 | extern void alternatives_smp_module_del(struct module *mod); | 66 | extern void alternatives_smp_module_del(struct module *mod); |
67 | extern void alternatives_smp_switch(int smp); | 67 | extern void alternatives_smp_switch(int smp); |
68 | extern int alternatives_text_reserved(void *start, void *end); | ||
68 | #else | 69 | #else |
69 | static inline void alternatives_smp_module_add(struct module *mod, char *name, | 70 | static inline void alternatives_smp_module_add(struct module *mod, char *name, |
70 | void *locks, void *locks_end, | 71 | void *locks, void *locks_end, |
71 | void *text, void *text_end) {} | 72 | void *text, void *text_end) {} |
72 | static inline void alternatives_smp_module_del(struct module *mod) {} | 73 | static inline void alternatives_smp_module_del(struct module *mod) {} |
73 | static inline void alternatives_smp_switch(int smp) {} | 74 | static inline void alternatives_smp_switch(int smp) {} |
75 | static inline int alternatives_text_reserved(void *start, void *end) | ||
76 | { | ||
77 | return 0; | ||
78 | } | ||
74 | #endif /* CONFIG_SMP */ | 79 | #endif /* CONFIG_SMP */ |
75 | 80 | ||
76 | /* alternative assembly primitive: */ | 81 | /* alternative assembly primitive: */ |
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 8240f76b531e..b81002f23614 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -14,6 +14,9 @@
14 | which debugging register was responsible for the trap. The other bits | 14 | which debugging register was responsible for the trap. The other bits |
15 | are either reserved or not of interest to us. */ | 15 | are either reserved or not of interest to us. */ |
16 | 16 | ||
17 | /* Define reserved bits in DR6 which are always set to 1 */ | ||
18 | #define DR6_RESERVED (0xFFFF0FF0) | ||
19 | |||
17 | #define DR_TRAP0 (0x1) /* db0 */ | 20 | #define DR_TRAP0 (0x1) /* db0 */ |
18 | #define DR_TRAP1 (0x2) /* db1 */ | 21 | #define DR_TRAP1 (0x2) /* db1 */ |
19 | #define DR_TRAP2 (0x4) /* db2 */ | 22 | #define DR_TRAP2 (0x4) /* db2 */ |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 139d4c1a33a7..93da9c3f3341 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -19,7 +19,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
19 | extern int check_nmi_watchdog(void); | 19 | extern int check_nmi_watchdog(void); |
20 | extern int nmi_watchdog_enabled; | 20 | extern int nmi_watchdog_enabled; |
21 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | 21 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); |
22 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
23 | extern int reserve_perfctr_nmi(unsigned int); | 22 | extern int reserve_perfctr_nmi(unsigned int); |
24 | extern void release_perfctr_nmi(unsigned int); | 23 | extern void release_perfctr_nmi(unsigned int); |
25 | extern int reserve_evntsel_nmi(unsigned int); | 24 | extern int reserve_evntsel_nmi(unsigned int); |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1380367dabd9..befd172c82ad 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -27,7 +27,14 @@
27 | /* | 27 | /* |
28 | * Includes eventsel and unit mask as well: | 28 | * Includes eventsel and unit mask as well: |
29 | */ | 29 | */ |
30 | #define ARCH_PERFMON_EVENT_MASK 0xffff | 30 | |
31 | |||
32 | #define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL | ||
33 | #define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL | ||
34 | #define INTEL_ARCH_EDGE_MASK 0x00040000ULL | ||
35 | #define INTEL_ARCH_INV_MASK 0x00800000ULL | ||
36 | #define INTEL_ARCH_CNT_MASK 0xFF000000ULL | ||
37 | #define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK) | ||
31 | 38 | ||
32 | /* | 39 | /* |
33 | * filter mask to validate fixed counter events. | 40 | * filter mask to validate fixed counter events. |
@@ -38,7 +45,12 @@
38 | * The other filters are supported by fixed counters. | 45 | * The other filters are supported by fixed counters. |
39 | * The any-thread option is supported starting with v3. | 46 | * The any-thread option is supported starting with v3. |
40 | */ | 47 | */ |
41 | #define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 | 48 | #define INTEL_ARCH_FIXED_MASK \ |
49 | (INTEL_ARCH_CNT_MASK| \ | ||
50 | INTEL_ARCH_INV_MASK| \ | ||
51 | INTEL_ARCH_EDGE_MASK|\ | ||
52 | INTEL_ARCH_UNIT_MASK|\ | ||
53 | INTEL_ARCH_EVTSEL_MASK) | ||
42 | 54 | ||
43 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 55 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
44 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 56 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 9d369f680321..20102808b191 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -274,10 +274,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
274 | return 0; | 274 | return 0; |
275 | } | 275 | } |
276 | 276 | ||
277 | /* Get Nth argument at function call */ | ||
278 | extern unsigned long regs_get_argument_nth(struct pt_regs *regs, | ||
279 | unsigned int n); | ||
280 | |||
281 | /* | 277 | /* |
282 | * These are defined as per linux/ptrace.h, which see. | 278 | * These are defined as per linux/ptrace.h, which see. |
283 | */ | 279 | */ |
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 35e89122a42f..4dab78edbad9 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -3,8 +3,6 @@
3 | 3 | ||
4 | extern int kstack_depth_to_print; | 4 | extern int kstack_depth_to_print; |
5 | 5 | ||
6 | int x86_is_stack_id(int id, char *name); | ||
7 | |||
8 | struct thread_info; | 6 | struct thread_info; |
9 | struct stacktrace_ops; | 7 | struct stacktrace_ops; |
10 | 8 | ||
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index de7353c0ce9c..e63b80e5861c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -390,6 +390,24 @@ void alternatives_smp_switch(int smp)
390 | mutex_unlock(&smp_alt); | 390 | mutex_unlock(&smp_alt); |
391 | } | 391 | } |
392 | 392 | ||
393 | /* Return 1 if the address range is reserved for smp-alternatives */ | ||
394 | int alternatives_text_reserved(void *start, void *end) | ||
395 | { | ||
396 | struct smp_alt_module *mod; | ||
397 | u8 **ptr; | ||
398 | u8 *text_start = start; | ||
399 | u8 *text_end = end; | ||
400 | |||
401 | list_for_each_entry(mod, &smp_alt_modules, next) { | ||
402 | if (mod->text > text_end || mod->text_end < text_start) | ||
403 | continue; | ||
404 | for (ptr = mod->locks; ptr < mod->locks_end; ptr++) | ||
405 | if (text_start <= *ptr && text_end >= *ptr) | ||
406 | return 1; | ||
407 | } | ||
408 | |||
409 | return 0; | ||
410 | } | ||
393 | #endif | 411 | #endif |
394 | 412 | ||
395 | #ifdef CONFIG_PARAVIRT | 413 | #ifdef CONFIG_PARAVIRT |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8c1c07073ccc..641ccb9dddbc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -7,6 +7,7 @@
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | 7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter |
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | 9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> |
10 | * Copyright (C) 2009 Google, Inc., Stephane Eranian | ||
10 | * | 11 | * |
11 | * For licencing details see kernel-base/COPYING | 12 | * For licencing details see kernel-base/COPYING |
12 | */ | 13 | */ |
@@ -22,6 +23,7 @@
22 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
23 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
24 | #include <linux/cpu.h> | 25 | #include <linux/cpu.h> |
26 | #include <linux/bitops.h> | ||
25 | 27 | ||
26 | #include <asm/apic.h> | 28 | #include <asm/apic.h> |
27 | #include <asm/stacktrace.h> | 29 | #include <asm/stacktrace.h> |
@@ -68,26 +70,59 @@ struct debug_store {
68 | u64 pebs_event_reset[MAX_PEBS_EVENTS]; | 70 | u64 pebs_event_reset[MAX_PEBS_EVENTS]; |
69 | }; | 71 | }; |
70 | 72 | ||
73 | struct event_constraint { | ||
74 | union { | ||
75 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
76 | u64 idxmsk64[1]; | ||
77 | }; | ||
78 | int code; | ||
79 | int cmask; | ||
80 | int weight; | ||
81 | }; | ||
82 | |||
83 | struct amd_nb { | ||
84 | int nb_id; /* NorthBridge id */ | ||
85 | int refcnt; /* reference count */ | ||
86 | struct perf_event *owners[X86_PMC_IDX_MAX]; | ||
87 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; | ||
88 | }; | ||
89 | |||
71 | struct cpu_hw_events { | 90 | struct cpu_hw_events { |
72 | struct perf_event *events[X86_PMC_IDX_MAX]; | 91 | struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ |
73 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
74 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 92 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
75 | unsigned long interrupts; | 93 | unsigned long interrupts; |
76 | int enabled; | 94 | int enabled; |
77 | struct debug_store *ds; | 95 | struct debug_store *ds; |
78 | }; | ||
79 | 96 | ||
80 | struct event_constraint { | 97 | int n_events; |
81 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 98 | int n_added; |
82 | int code; | 99 | int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ |
100 | u64 tags[X86_PMC_IDX_MAX]; | ||
101 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ | ||
102 | struct amd_nb *amd_nb; | ||
83 | }; | 103 | }; |
84 | 104 | ||
85 | #define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) } | 105 | #define __EVENT_CONSTRAINT(c, n, m, w) {\ |
86 | #define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 } | 106 | { .idxmsk64[0] = (n) }, \ |
107 | .code = (c), \ | ||
108 | .cmask = (m), \ | ||
109 | .weight = (w), \ | ||
110 | } | ||
111 | |||
112 | #define EVENT_CONSTRAINT(c, n, m) \ | ||
113 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) | ||
87 | 114 | ||
88 | #define for_each_event_constraint(e, c) \ | 115 | #define INTEL_EVENT_CONSTRAINT(c, n) \ |
89 | for ((e) = (c); (e)->idxmsk[0]; (e)++) | 116 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK) |
90 | 117 | ||
118 | #define FIXED_EVENT_CONSTRAINT(c, n) \ | ||
119 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) | ||
120 | |||
121 | #define EVENT_CONSTRAINT_END \ | ||
122 | EVENT_CONSTRAINT(0, 0, 0) | ||
123 | |||
124 | #define for_each_event_constraint(e, c) \ | ||
125 | for ((e) = (c); (e)->cmask; (e)++) | ||
91 | 126 | ||
92 | /* | 127 | /* |
93 | * struct x86_pmu - generic x86 pmu | 128 | * struct x86_pmu - generic x86 pmu |
@@ -114,8 +149,14 @@ struct x86_pmu {
114 | u64 intel_ctrl; | 149 | u64 intel_ctrl; |
115 | void (*enable_bts)(u64 config); | 150 | void (*enable_bts)(u64 config); |
116 | void (*disable_bts)(void); | 151 | void (*disable_bts)(void); |
117 | int (*get_event_idx)(struct cpu_hw_events *cpuc, | 152 | |
118 | struct hw_perf_event *hwc); | 153 | struct event_constraint * |
154 | (*get_event_constraints)(struct cpu_hw_events *cpuc, | ||
155 | struct perf_event *event); | ||
156 | |||
157 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | ||
158 | struct perf_event *event); | ||
159 | struct event_constraint *event_constraints; | ||
119 | }; | 160 | }; |
120 | 161 | ||
121 | static struct x86_pmu x86_pmu __read_mostly; | 162 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -124,111 +165,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
124 | .enabled = 1, | 165 | .enabled = 1, |
125 | }; | 166 | }; |
126 | 167 | ||
127 | static const struct event_constraint *event_constraints; | 168 | static int x86_perf_event_set_period(struct perf_event *event, |
128 | 169 | struct hw_perf_event *hwc, int idx); | |
129 | /* | ||
130 | * Not sure about some of these | ||
131 | */ | ||
132 | static const u64 p6_perfmon_event_map[] = | ||
133 | { | ||
134 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, | ||
135 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
136 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, | ||
137 | [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, | ||
138 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
139 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
140 | [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, | ||
141 | }; | ||
142 | |||
143 | static u64 p6_pmu_event_map(int hw_event) | ||
144 | { | ||
145 | return p6_perfmon_event_map[hw_event]; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Event setting that is specified not to count anything. | ||
150 | * We use this to effectively disable a counter. | ||
151 | * | ||
152 | * L2_RQSTS with 0 MESI unit mask. | ||
153 | */ | ||
154 | #define P6_NOP_EVENT 0x0000002EULL | ||
155 | |||
156 | static u64 p6_pmu_raw_event(u64 hw_event) | ||
157 | { | ||
158 | #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL | ||
159 | #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL | ||
160 | #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL | ||
161 | #define P6_EVNTSEL_INV_MASK 0x00800000ULL | ||
162 | #define P6_EVNTSEL_REG_MASK 0xFF000000ULL | ||
163 | |||
164 | #define P6_EVNTSEL_MASK \ | ||
165 | (P6_EVNTSEL_EVENT_MASK | \ | ||
166 | P6_EVNTSEL_UNIT_MASK | \ | ||
167 | P6_EVNTSEL_EDGE_MASK | \ | ||
168 | P6_EVNTSEL_INV_MASK | \ | ||
169 | P6_EVNTSEL_REG_MASK) | ||
170 | |||
171 | return hw_event & P6_EVNTSEL_MASK; | ||
172 | } | ||
173 | |||
174 | static const struct event_constraint intel_p6_event_constraints[] = | ||
175 | { | ||
176 | EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ | ||
177 | EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | ||
178 | EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ | ||
179 | EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | ||
180 | EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | ||
181 | EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | ||
182 | EVENT_CONSTRAINT_END | ||
183 | }; | ||
184 | |||
185 | /* | ||
186 | * Intel PerfMon v3. Used on Core2 and later. | ||
187 | */ | ||
188 | static const u64 intel_perfmon_event_map[] = | ||
189 | { | ||
190 | [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, | ||
191 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
192 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, | ||
193 | [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, | ||
194 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
195 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
196 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, | ||
197 | }; | ||
198 | |||
199 | static const struct event_constraint intel_core_event_constraints[] = | ||
200 | { | ||
201 | EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | ||
202 | EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | ||
203 | EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | ||
204 | EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | ||
205 | EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | ||
206 | EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ | ||
207 | EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ | ||
208 | EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ | ||
209 | EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ | ||
210 | EVENT_CONSTRAINT_END | ||
211 | }; | ||
212 | |||
213 | static const struct event_constraint intel_nehalem_event_constraints[] = | ||
214 | { | ||
215 | EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ | ||
216 | EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ | ||
217 | EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ | ||
218 | EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ | ||
219 | EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ | ||
220 | EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ | ||
221 | EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | ||
222 | EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ | ||
223 | EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ | ||
224 | EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ | ||
225 | EVENT_CONSTRAINT_END | ||
226 | }; | ||
227 | |||
228 | static u64 intel_pmu_event_map(int hw_event) | ||
229 | { | ||
230 | return intel_perfmon_event_map[hw_event]; | ||
231 | } | ||
232 | 170 | ||
233 | /* | 171 | /* |
234 | * Generalized hw caching related hw_event table, filled | 172 | * Generalized hw caching related hw_event table, filled |
@@ -245,424 +183,6 @@ static u64 __read_mostly hw_cache_event_ids
245 | [PERF_COUNT_HW_CACHE_OP_MAX] | 183 | [PERF_COUNT_HW_CACHE_OP_MAX] |
246 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | 184 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
247 | 185 | ||
248 | static __initconst u64 nehalem_hw_cache_event_ids | ||
249 | [PERF_COUNT_HW_CACHE_MAX] | ||
250 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
251 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
252 | { | ||
253 | [ C(L1D) ] = { | ||
254 | [ C(OP_READ) ] = { | ||
255 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ | ||
256 | [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ | ||
257 | }, | ||
258 | [ C(OP_WRITE) ] = { | ||
259 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ | ||
260 | [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ | ||
261 | }, | ||
262 | [ C(OP_PREFETCH) ] = { | ||
263 | [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ | ||
264 | [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ | ||
265 | }, | ||
266 | }, | ||
267 | [ C(L1I ) ] = { | ||
268 | [ C(OP_READ) ] = { | ||
269 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ | ||
270 | [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ | ||
271 | }, | ||
272 | [ C(OP_WRITE) ] = { | ||
273 | [ C(RESULT_ACCESS) ] = -1, | ||
274 | [ C(RESULT_MISS) ] = -1, | ||
275 | }, | ||
276 | [ C(OP_PREFETCH) ] = { | ||
277 | [ C(RESULT_ACCESS) ] = 0x0, | ||
278 | [ C(RESULT_MISS) ] = 0x0, | ||
279 | }, | ||
280 | }, | ||
281 | [ C(LL ) ] = { | ||
282 | [ C(OP_READ) ] = { | ||
283 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | ||
284 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | ||
285 | }, | ||
286 | [ C(OP_WRITE) ] = { | ||
287 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | ||
288 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | ||
289 | }, | ||
290 | [ C(OP_PREFETCH) ] = { | ||
291 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | ||
292 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | ||
293 | }, | ||
294 | }, | ||
295 | [ C(DTLB) ] = { | ||
296 | [ C(OP_READ) ] = { | ||
297 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ | ||
298 | [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ | ||
299 | }, | ||
300 | [ C(OP_WRITE) ] = { | ||
301 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ | ||
302 | [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ | ||
303 | }, | ||
304 | [ C(OP_PREFETCH) ] = { | ||
305 | [ C(RESULT_ACCESS) ] = 0x0, | ||
306 | [ C(RESULT_MISS) ] = 0x0, | ||
307 | }, | ||
308 | }, | ||
309 | [ C(ITLB) ] = { | ||
310 | [ C(OP_READ) ] = { | ||
311 | [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ | ||
312 | [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ | ||
313 | }, | ||
314 | [ C(OP_WRITE) ] = { | ||
315 | [ C(RESULT_ACCESS) ] = -1, | ||
316 | [ C(RESULT_MISS) ] = -1, | ||
317 | }, | ||
318 | [ C(OP_PREFETCH) ] = { | ||
319 | [ C(RESULT_ACCESS) ] = -1, | ||
320 | [ C(RESULT_MISS) ] = -1, | ||
321 | }, | ||
322 | }, | ||
323 | [ C(BPU ) ] = { | ||
324 | [ C(OP_READ) ] = { | ||
325 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
326 | [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ | ||
327 | }, | ||
328 | [ C(OP_WRITE) ] = { | ||
329 | [ C(RESULT_ACCESS) ] = -1, | ||
330 | [ C(RESULT_MISS) ] = -1, | ||
331 | }, | ||
332 | [ C(OP_PREFETCH) ] = { | ||
333 | [ C(RESULT_ACCESS) ] = -1, | ||
334 | [ C(RESULT_MISS) ] = -1, | ||
335 | }, | ||
336 | }, | ||
337 | }; | ||
338 | |||
339 | static __initconst u64 core2_hw_cache_event_ids | ||
340 | [PERF_COUNT_HW_CACHE_MAX] | ||
341 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
342 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
343 | { | ||
344 | [ C(L1D) ] = { | ||
345 | [ C(OP_READ) ] = { | ||
346 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ | ||
347 | [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ | ||
348 | }, | ||
349 | [ C(OP_WRITE) ] = { | ||
350 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ | ||
351 | [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ | ||
352 | }, | ||
353 | [ C(OP_PREFETCH) ] = { | ||
354 | [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ | ||
355 | [ C(RESULT_MISS) ] = 0, | ||
356 | }, | ||
357 | }, | ||
358 | [ C(L1I ) ] = { | ||
359 | [ C(OP_READ) ] = { | ||
360 | [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ | ||
361 | [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ | ||
362 | }, | ||
363 | [ C(OP_WRITE) ] = { | ||
364 | [ C(RESULT_ACCESS) ] = -1, | ||
365 | [ C(RESULT_MISS) ] = -1, | ||
366 | }, | ||
367 | [ C(OP_PREFETCH) ] = { | ||
368 | [ C(RESULT_ACCESS) ] = 0, | ||
369 | [ C(RESULT_MISS) ] = 0, | ||
370 | }, | ||
371 | }, | ||
372 | [ C(LL ) ] = { | ||
373 | [ C(OP_READ) ] = { | ||
374 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ | ||
375 | [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ | ||
376 | }, | ||
377 | [ C(OP_WRITE) ] = { | ||
378 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ | ||
379 | [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ | ||
380 | }, | ||
381 | [ C(OP_PREFETCH) ] = { | ||
382 | [ C(RESULT_ACCESS) ] = 0, | ||
383 | [ C(RESULT_MISS) ] = 0, | ||
384 | }, | ||
385 | }, | ||
386 | [ C(DTLB) ] = { | ||
387 | [ C(OP_READ) ] = { | ||
388 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ | ||
389 | [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */ | ||
390 | }, | ||
391 | [ C(OP_WRITE) ] = { | ||
392 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ | ||
393 | [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ | ||
394 | }, | ||
395 | [ C(OP_PREFETCH) ] = { | ||
396 | [ C(RESULT_ACCESS) ] = 0, | ||
397 | [ C(RESULT_MISS) ] = 0, | ||
398 | }, | ||
399 | }, | ||
400 | [ C(ITLB) ] = { | ||
401 | [ C(OP_READ) ] = { | ||
402 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | ||
403 | [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ | ||
404 | }, | ||
405 | [ C(OP_WRITE) ] = { | ||
406 | [ C(RESULT_ACCESS) ] = -1, | ||
407 | [ C(RESULT_MISS) ] = -1, | ||
408 | }, | ||
409 | [ C(OP_PREFETCH) ] = { | ||
410 | [ C(RESULT_ACCESS) ] = -1, | ||
411 | [ C(RESULT_MISS) ] = -1, | ||
412 | }, | ||
413 | }, | ||
414 | [ C(BPU ) ] = { | ||
415 | [ C(OP_READ) ] = { | ||
416 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | ||
417 | [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | ||
418 | }, | ||
419 | [ C(OP_WRITE) ] = { | ||
420 | [ C(RESULT_ACCESS) ] = -1, | ||
421 | [ C(RESULT_MISS) ] = -1, | ||
422 | }, | ||
423 | [ C(OP_PREFETCH) ] = { | ||
424 | [ C(RESULT_ACCESS) ] = -1, | ||
425 | [ C(RESULT_MISS) ] = -1, | ||
426 | }, | ||
427 | }, | ||
428 | }; | ||
429 | |||
430 | static __initconst u64 atom_hw_cache_event_ids | ||
431 | [PERF_COUNT_HW_CACHE_MAX] | ||
432 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
433 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
434 | { | ||
435 | [ C(L1D) ] = { | ||
436 | [ C(OP_READ) ] = { | ||
437 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ | ||
438 | [ C(RESULT_MISS) ] = 0, | ||
439 | }, | ||
440 | [ C(OP_WRITE) ] = { | ||
441 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ | ||
442 | [ C(RESULT_MISS) ] = 0, | ||
443 | }, | ||
444 | [ C(OP_PREFETCH) ] = { | ||
445 | [ C(RESULT_ACCESS) ] = 0x0, | ||
446 | [ C(RESULT_MISS) ] = 0, | ||
447 | }, | ||
448 | }, | ||
449 | [ C(L1I ) ] = { | ||
450 | [ C(OP_READ) ] = { | ||
451 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ | ||
452 | [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ | ||
453 | }, | ||
454 | [ C(OP_WRITE) ] = { | ||
455 | [ C(RESULT_ACCESS) ] = -1, | ||
456 | [ C(RESULT_MISS) ] = -1, | ||
457 | }, | ||
458 | [ C(OP_PREFETCH) ] = { | ||
459 | [ C(RESULT_ACCESS) ] = 0, | ||
460 | [ C(RESULT_MISS) ] = 0, | ||
461 | }, | ||
462 | }, | ||
463 | [ C(LL ) ] = { | ||
464 | [ C(OP_READ) ] = { | ||
465 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ | ||
466 | [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ | ||
467 | }, | ||
468 | [ C(OP_WRITE) ] = { | ||
469 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ | ||
470 | [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ | ||
471 | }, | ||
472 | [ C(OP_PREFETCH) ] = { | ||
473 | [ C(RESULT_ACCESS) ] = 0, | ||
474 | [ C(RESULT_MISS) ] = 0, | ||
475 | }, | ||
476 | }, | ||
477 | [ C(DTLB) ] = { | ||
478 | [ C(OP_READ) ] = { | ||
479 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ | ||
480 | [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ | ||
481 | }, | ||
482 | [ C(OP_WRITE) ] = { | ||
483 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ | ||
484 | [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ | ||
485 | }, | ||
486 | [ C(OP_PREFETCH) ] = { | ||
487 | [ C(RESULT_ACCESS) ] = 0, | ||
488 | [ C(RESULT_MISS) ] = 0, | ||
489 | }, | ||
490 | }, | ||
491 | [ C(ITLB) ] = { | ||
492 | [ C(OP_READ) ] = { | ||
493 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | ||
494 | [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ | ||
495 | }, | ||
496 | [ C(OP_WRITE) ] = { | ||
497 | [ C(RESULT_ACCESS) ] = -1, | ||
498 | [ C(RESULT_MISS) ] = -1, | ||
499 | }, | ||
500 | [ C(OP_PREFETCH) ] = { | ||
501 | [ C(RESULT_ACCESS) ] = -1, | ||
502 | [ C(RESULT_MISS) ] = -1, | ||
503 | }, | ||
504 | }, | ||
505 | [ C(BPU ) ] = { | ||
506 | [ C(OP_READ) ] = { | ||
507 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | ||
508 | [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | ||
509 | }, | ||
510 | [ C(OP_WRITE) ] = { | ||
511 | [ C(RESULT_ACCESS) ] = -1, | ||
512 | [ C(RESULT_MISS) ] = -1, | ||
513 | }, | ||
514 | [ C(OP_PREFETCH) ] = { | ||
515 | [ C(RESULT_ACCESS) ] = -1, | ||
516 | [ C(RESULT_MISS) ] = -1, | ||
517 | }, | ||
518 | }, | ||
519 | }; | ||
520 | |||
521 | static u64 intel_pmu_raw_event(u64 hw_event) | ||
522 | { | ||
523 | #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL | ||
524 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL | ||
525 | #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL | ||
526 | #define CORE_EVNTSEL_INV_MASK 0x00800000ULL | ||
527 | #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL | ||
528 | |||
529 | #define CORE_EVNTSEL_MASK \ | ||
530 | (CORE_EVNTSEL_EVENT_MASK | \ | ||
531 | CORE_EVNTSEL_UNIT_MASK | \ | ||
532 | CORE_EVNTSEL_EDGE_MASK | \ | ||
533 | CORE_EVNTSEL_INV_MASK | \ | ||
534 | CORE_EVNTSEL_REG_MASK) | ||
535 | |||
536 | return hw_event & CORE_EVNTSEL_MASK; | ||
537 | } | ||
538 | |||
539 | static __initconst u64 amd_hw_cache_event_ids | ||
540 | [PERF_COUNT_HW_CACHE_MAX] | ||
541 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
542 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
543 | { | ||
544 | [ C(L1D) ] = { | ||
545 | [ C(OP_READ) ] = { | ||
546 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ | ||
547 | [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ | ||
548 | }, | ||
549 | [ C(OP_WRITE) ] = { | ||
550 | [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ | ||
551 | [ C(RESULT_MISS) ] = 0, | ||
552 | }, | ||
553 | [ C(OP_PREFETCH) ] = { | ||
554 | [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */ | ||
555 | [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */ | ||
556 | }, | ||
557 | }, | ||
558 | [ C(L1I ) ] = { | ||
559 | [ C(OP_READ) ] = { | ||
560 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */ | ||
561 | [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */ | ||
562 | }, | ||
563 | [ C(OP_WRITE) ] = { | ||
564 | [ C(RESULT_ACCESS) ] = -1, | ||
565 | [ C(RESULT_MISS) ] = -1, | ||
566 | }, | ||
567 | [ C(OP_PREFETCH) ] = { | ||
568 | [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */ | ||
569 | [ C(RESULT_MISS) ] = 0, | ||
570 | }, | ||
571 | }, | ||
572 | [ C(LL ) ] = { | ||
573 | [ C(OP_READ) ] = { | ||
574 | [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */ | ||
575 | [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */ | ||
576 | }, | ||
577 | [ C(OP_WRITE) ] = { | ||
578 | [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */ | ||
579 | [ C(RESULT_MISS) ] = 0, | ||
580 | }, | ||
581 | [ C(OP_PREFETCH) ] = { | ||
582 | [ C(RESULT_ACCESS) ] = 0, | ||
583 | [ C(RESULT_MISS) ] = 0, | ||
584 | }, | ||
585 | }, | ||
586 | [ C(DTLB) ] = { | ||
587 | [ C(OP_READ) ] = { | ||
588 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ | ||
589 | [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */ | ||
590 | }, | ||
591 | [ C(OP_WRITE) ] = { | ||
592 | [ C(RESULT_ACCESS) ] = 0, | ||
593 | [ C(RESULT_MISS) ] = 0, | ||
594 | }, | ||
595 | [ C(OP_PREFETCH) ] = { | ||
596 | [ C(RESULT_ACCESS) ] = 0, | ||
597 | [ C(RESULT_MISS) ] = 0, | ||
598 | }, | ||
599 | }, | ||
600 | [ C(ITLB) ] = { | ||
601 | [ C(OP_READ) ] = { | ||
602 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */ | ||
603 | [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ | ||
604 | }, | ||
605 | [ C(OP_WRITE) ] = { | ||
606 | [ C(RESULT_ACCESS) ] = -1, | ||
607 | [ C(RESULT_MISS) ] = -1, | ||
608 | }, | ||
609 | [ C(OP_PREFETCH) ] = { | ||
610 | [ C(RESULT_ACCESS) ] = -1, | ||
611 | [ C(RESULT_MISS) ] = -1, | ||
612 | }, | ||
613 | }, | ||
614 | [ C(BPU ) ] = { | ||
615 | [ C(OP_READ) ] = { | ||
616 | [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */ | ||
617 | [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */ | ||
618 | }, | ||
619 | [ C(OP_WRITE) ] = { | ||
620 | [ C(RESULT_ACCESS) ] = -1, | ||
621 | [ C(RESULT_MISS) ] = -1, | ||
622 | }, | ||
623 | [ C(OP_PREFETCH) ] = { | ||
624 | [ C(RESULT_ACCESS) ] = -1, | ||
625 | [ C(RESULT_MISS) ] = -1, | ||
626 | }, | ||
627 | }, | ||
628 | }; | ||
629 | |||
630 | /* | ||
631 | * AMD Performance Monitor K7 and later. | ||
632 | */ | ||
633 | static const u64 amd_perfmon_event_map[] = | ||
634 | { | ||
635 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, | ||
636 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
637 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, | ||
638 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, | ||
639 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
640 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
641 | }; | ||
642 | |||
643 | static u64 amd_pmu_event_map(int hw_event) | ||
644 | { | ||
645 | return amd_perfmon_event_map[hw_event]; | ||
646 | } | ||
647 | |||
648 | static u64 amd_pmu_raw_event(u64 hw_event) | ||
649 | { | ||
650 | #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL | ||
651 | #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL | ||
652 | #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL | ||
653 | #define K7_EVNTSEL_INV_MASK 0x000800000ULL | ||
654 | #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL | ||
655 | |||
656 | #define K7_EVNTSEL_MASK \ | ||
657 | (K7_EVNTSEL_EVENT_MASK | \ | ||
658 | K7_EVNTSEL_UNIT_MASK | \ | ||
659 | K7_EVNTSEL_EDGE_MASK | \ | ||
660 | K7_EVNTSEL_INV_MASK | \ | ||
661 | K7_EVNTSEL_REG_MASK) | ||
662 | |||
663 | return hw_event & K7_EVNTSEL_MASK; | ||
664 | } | ||
665 | |||
666 | /* | 186 | /* |
667 | * Propagate event elapsed time into the generic event. | 187 | * Propagate event elapsed time into the generic event. |
668 | * Can only be executed on the CPU where the event is active. | 188 | * Can only be executed on the CPU where the event is active. |
@@ -914,42 +434,6 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
914 | return 0; | 434 | return 0; |
915 | } | 435 | } |
916 | 436 | ||
917 | static void intel_pmu_enable_bts(u64 config) | ||
918 | { | ||
919 | unsigned long debugctlmsr; | ||
920 | |||
921 | debugctlmsr = get_debugctlmsr(); | ||
922 | |||
923 | debugctlmsr |= X86_DEBUGCTL_TR; | ||
924 | debugctlmsr |= X86_DEBUGCTL_BTS; | ||
925 | debugctlmsr |= X86_DEBUGCTL_BTINT; | ||
926 | |||
927 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) | ||
928 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; | ||
929 | |||
930 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) | ||
931 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; | ||
932 | |||
933 | update_debugctlmsr(debugctlmsr); | ||
934 | } | ||
935 | |||
936 | static void intel_pmu_disable_bts(void) | ||
937 | { | ||
938 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
939 | unsigned long debugctlmsr; | ||
940 | |||
941 | if (!cpuc->ds) | ||
942 | return; | ||
943 | |||
944 | debugctlmsr = get_debugctlmsr(); | ||
945 | |||
946 | debugctlmsr &= | ||
947 | ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | | ||
948 | X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); | ||
949 | |||
950 | update_debugctlmsr(debugctlmsr); | ||
951 | } | ||
952 | |||
953 | /* | 437 | /* |
954 | * Setup the hardware configuration for a given attr_type | 438 | * Setup the hardware configuration for a given attr_type |
955 | */ | 439 | */ |
@@ -988,6 +472,8 @@ static int __hw_perf_event_init(struct perf_event *event)
988 | hwc->config = ARCH_PERFMON_EVENTSEL_INT; | 472 | hwc->config = ARCH_PERFMON_EVENTSEL_INT; |
989 | 473 | ||
990 | hwc->idx = -1; | 474 | hwc->idx = -1; |
475 | hwc->last_cpu = -1; | ||
476 | hwc->last_tag = ~0ULL; | ||
991 | 477 | ||
992 | /* | 478 | /* |
993 | * Count user and OS events unless requested not to. | 479 | * Count user and OS events unless requested not to. |
@@ -1056,216 +542,323 @@ static int __hw_perf_event_init(struct perf_event *event)
1056 | return 0; | 542 | return 0; |
1057 | } | 543 | } |
1058 | 544 | ||
1059 | static void p6_pmu_disable_all(void) | 545 | static void x86_pmu_disable_all(void) |
1060 | { | 546 | { |
1061 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 547 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1062 | u64 val; | 548 | int idx; |
1063 | |||
1064 | if (!cpuc->enabled) | ||
1065 | return; | ||
1066 | 549 | ||
1067 | cpuc->enabled = 0; | 550 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
1068 | barrier(); | 551 | u64 val; |
1069 | 552 | ||
1070 | /* p6 only has one enable register */ | 553 | if (!test_bit(idx, cpuc->active_mask)) |
1071 | rdmsrl(MSR_P6_EVNTSEL0, val); | 554 | continue; |
1072 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | 555 | rdmsrl(x86_pmu.eventsel + idx, val); |
1073 | wrmsrl(MSR_P6_EVNTSEL0, val); | 556 | if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) |
557 | continue; | ||
558 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
559 | wrmsrl(x86_pmu.eventsel + idx, val); | ||
560 | } | ||
1074 | } | 561 | } |
1075 | 562 | ||
1076 | static void intel_pmu_disable_all(void) | 563 | void hw_perf_disable(void) |
1077 | { | 564 | { |
1078 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 565 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1079 | 566 | ||
567 | if (!x86_pmu_initialized()) | ||
568 | return; | ||
569 | |||
1080 | if (!cpuc->enabled) | 570 | if (!cpuc->enabled) |
1081 | return; | 571 | return; |
1082 | 572 | ||
573 | cpuc->n_added = 0; | ||
1083 | cpuc->enabled = 0; | 574 | cpuc->enabled = 0; |
1084 | barrier(); | 575 | barrier(); |
1085 | 576 | ||
1086 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 577 | x86_pmu.disable_all(); |
1087 | |||
1088 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) | ||
1089 | intel_pmu_disable_bts(); | ||
1090 | } | 578 | } |
1091 | 579 | ||
1092 | static void amd_pmu_disable_all(void) | 580 | static void x86_pmu_enable_all(void) |
1093 | { | 581 | { |
1094 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 582 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1095 | int idx; | 583 | int idx; |
1096 | 584 | ||
1097 | if (!cpuc->enabled) | ||
1098 | return; | ||
1099 | |||
1100 | cpuc->enabled = 0; | ||
1101 | /* | ||
1102 | * ensure we write the disable before we start disabling the | ||
1103 | * events proper, so that amd_pmu_enable_event() does the | ||
1104 | * right thing. | ||
1105 | */ | ||
1106 | barrier(); | ||
1107 | |||
1108 | for (idx = 0; idx < x86_pmu.num_events; idx++) { | 585 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
586 | struct perf_event *event = cpuc->events[idx]; | ||
1109 | u64 val; | 587 | u64 val; |
1110 | 588 | ||
1111 | if (!test_bit(idx, cpuc->active_mask)) | 589 | if (!test_bit(idx, cpuc->active_mask)) |
1112 | continue; | 590 | continue; |
1113 | rdmsrl(MSR_K7_EVNTSEL0 + idx, val); | 591 | |
1114 | if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) | 592 | val = event->hw.config; |
1115 | continue; | 593 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
1116 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | 594 | wrmsrl(x86_pmu.eventsel + idx, val); |
1117 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); | ||
1118 | } | 595 | } |
1119 | } | 596 | } |
1120 | 597 | ||
1121 | void hw_perf_disable(void) | 598 | static const struct pmu pmu; |
599 | |||
600 | static inline int is_x86_event(struct perf_event *event) | ||
1122 | { | 601 | { |
1123 | if (!x86_pmu_initialized()) | 602 | return event->pmu == &pmu; |
1124 | return; | ||
1125 | return x86_pmu.disable_all(); | ||
1126 | } | 603 | } |
1127 | 604 | ||
1128 | static void p6_pmu_enable_all(void) | 605 | static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) |
1129 | { | 606 | { |
1130 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 607 | struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; |
1131 | unsigned long val; | 608 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
609 | int i, j, w, wmax, num = 0; | ||
610 | struct hw_perf_event *hwc; | ||
1132 | 611 | ||
1133 | if (cpuc->enabled) | 612 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); |
1134 | return; | ||
1135 | 613 | ||
1136 | cpuc->enabled = 1; | 614 | for (i = 0; i < n; i++) { |
1137 | barrier(); | 615 | constraints[i] = |
616 | x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); | ||
617 | } | ||
1138 | 618 | ||
1139 | /* p6 only has one enable register */ | 619 | /* |
1140 | rdmsrl(MSR_P6_EVNTSEL0, val); | 620 | * fastpath, try to reuse previous register |
1141 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 621 | */ |
1142 | wrmsrl(MSR_P6_EVNTSEL0, val); | 622 | for (i = 0; i < n; i++) { |
1143 | } | 623 | hwc = &cpuc->event_list[i]->hw; |
624 | c = constraints[i]; | ||
1144 | 625 | ||
1145 | static void intel_pmu_enable_all(void) | 626 | /* never assigned */ |
1146 | { | 627 | if (hwc->idx == -1) |
1147 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 628 | break; |
1148 | 629 | ||
1149 | if (cpuc->enabled) | 630 | /* constraint still honored */ |
1150 | return; | 631 | if (!test_bit(hwc->idx, c->idxmsk)) |
632 | break; | ||
1151 | 633 | ||
1152 | cpuc->enabled = 1; | 634 | /* not already used */ |
1153 | barrier(); | 635 | if (test_bit(hwc->idx, used_mask)) |
636 | break; | ||
637 | |||
638 | set_bit(hwc->idx, used_mask); | ||
639 | if (assign) | ||
640 | assign[i] = hwc->idx; | ||
641 | } | ||
642 | if (i == n) | ||
643 | goto done; | ||
644 | |||
645 | /* | ||
646 | * begin slow path | ||
647 | */ | ||
648 | |||
649 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); | ||
650 | |||
651 | /* | ||
652 | * weight = number of possible counters | ||
653 | * | ||
654 | * 1 = most constrained, only works on one counter | ||
655 | * wmax = least constrained, works on any counter | ||
656 | * | ||
657 | * assign events to counters starting with most | ||
658 | * constrained events. | ||
659 | */ | ||
660 | wmax = x86_pmu.num_events; | ||
661 | |||
662 | /* | ||
663 | * when fixed event counters are present, | ||
664 | * wmax is incremented by 1 to account | ||
665 | * for one more choice | ||
666 | */ | ||
667 | if (x86_pmu.num_events_fixed) | ||
668 | wmax++; | ||
1154 | 669 | ||
1155 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 670 | for (w = 1, num = n; num && w <= wmax; w++) { |
671 | /* for each event */ | ||
672 | for (i = 0; num && i < n; i++) { | ||
673 | c = constraints[i]; | ||
674 | hwc = &cpuc->event_list[i]->hw; | ||
1156 | 675 | ||
1157 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | 676 | if (c->weight != w) |
1158 | struct perf_event *event = | 677 | continue; |
1159 | cpuc->events[X86_PMC_IDX_FIXED_BTS]; | ||
1160 | 678 | ||
1161 | if (WARN_ON_ONCE(!event)) | 679 | for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { |
1162 | return; | 680 | if (!test_bit(j, used_mask)) |
681 | break; | ||
682 | } | ||
683 | |||
684 | if (j == X86_PMC_IDX_MAX) | ||
685 | break; | ||
686 | |||
687 | set_bit(j, used_mask); | ||
1163 | 688 | ||
1164 | intel_pmu_enable_bts(event->hw.config); | 689 | if (assign) |
690 | assign[i] = j; | ||
691 | num--; | ||
692 | } | ||
693 | } | ||
694 | done: | ||
695 | /* | ||
696 | * scheduling failed or is just a simulation, | ||
697 | * free resources if necessary | ||
698 | */ | ||
699 | if (!assign || num) { | ||
700 | for (i = 0; i < n; i++) { | ||
701 | if (x86_pmu.put_event_constraints) | ||
702 | x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); | ||
703 | } | ||
1165 | } | 704 | } |
705 | return num ? -ENOSPC : 0; | ||
1166 | } | 706 | } |
1167 | 707 | ||
1168 | static void amd_pmu_enable_all(void) | 708 | /* |
709 | * dogrp: true if must collect siblings events (group) | ||
710 | * returns total number of events and error code | ||
711 | */ | ||
712 | static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) | ||
1169 | { | 713 | { |
1170 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 714 | struct perf_event *event; |
1171 | int idx; | 715 | int n, max_count; |
1172 | 716 | ||
1173 | if (cpuc->enabled) | 717 | max_count = x86_pmu.num_events + x86_pmu.num_events_fixed; |
1174 | return; | ||
1175 | 718 | ||
1176 | cpuc->enabled = 1; | 719 | /* current number of events already accepted */ |
1177 | barrier(); | 720 | n = cpuc->n_events; |
1178 | 721 | ||
1179 | for (idx = 0; idx < x86_pmu.num_events; idx++) { | 722 | if (is_x86_event(leader)) { |
1180 | struct perf_event *event = cpuc->events[idx]; | 723 | if (n >= max_count) |
1181 | u64 val; | 724 | return -ENOSPC; |
725 | cpuc->event_list[n] = leader; | ||
726 | n++; | ||
727 | } | ||
728 | if (!dogrp) | ||
729 | return n; | ||
1182 | 730 | ||
1183 | if (!test_bit(idx, cpuc->active_mask)) | 731 | list_for_each_entry(event, &leader->sibling_list, group_entry) { |
732 | if (!is_x86_event(event) || | ||
733 | event->state <= PERF_EVENT_STATE_OFF) | ||
1184 | continue; | 734 | continue; |
1185 | 735 | ||
1186 | val = event->hw.config; | 736 | if (n >= max_count) |
1187 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 737 | return -ENOSPC; |
1188 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); | ||
1189 | } | ||
1190 | } | ||
1191 | 738 | ||
1192 | void hw_perf_enable(void) | 739 | cpuc->event_list[n] = event; |
1193 | { | 740 | n++; |
1194 | if (!x86_pmu_initialized()) | 741 | } |
1195 | return; | 742 | return n; |
1196 | x86_pmu.enable_all(); | ||
1197 | } | 743 | } |
1198 | 744 | ||
1199 | static inline u64 intel_pmu_get_status(void) | 745 | static inline void x86_assign_hw_event(struct perf_event *event, |
746 | struct cpu_hw_events *cpuc, int i) | ||
1200 | { | 747 | { |
1201 | u64 status; | 748 | struct hw_perf_event *hwc = &event->hw; |
1202 | 749 | ||
1203 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | 750 | hwc->idx = cpuc->assign[i]; |
751 | hwc->last_cpu = smp_processor_id(); | ||
752 | hwc->last_tag = ++cpuc->tags[i]; | ||
1204 | 753 | ||
1205 | return status; | 754 | if (hwc->idx == X86_PMC_IDX_FIXED_BTS) { |
755 | hwc->config_base = 0; | ||
756 | hwc->event_base = 0; | ||
757 | } else if (hwc->idx >= X86_PMC_IDX_FIXED) { | ||
758 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; | ||
759 | /* | ||
760 | * We set it so that event_base + idx in wrmsr/rdmsr maps to | ||
761 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: | ||
762 | */ | ||
763 | hwc->event_base = | ||
764 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; | ||
765 | } else { | ||
766 | hwc->config_base = x86_pmu.eventsel; | ||
767 | hwc->event_base = x86_pmu.perfctr; | ||
768 | } | ||
1206 | } | 769 | } |
1207 | 770 | ||
1208 | static inline void intel_pmu_ack_status(u64 ack) | 771 | static inline int match_prev_assignment(struct hw_perf_event *hwc, |
772 | struct cpu_hw_events *cpuc, | ||
773 | int i) | ||
1209 | { | 774 | { |
1210 | wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); | 775 | return hwc->idx == cpuc->assign[i] && |
776 | hwc->last_cpu == smp_processor_id() && | ||
777 | hwc->last_tag == cpuc->tags[i]; | ||
1211 | } | 778 | } |
1212 | 779 | ||
1213 | static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 780 | static void x86_pmu_stop(struct perf_event *event); |
1214 | { | ||
1215 | (void)checking_wrmsrl(hwc->config_base + idx, | ||
1216 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); | ||
1217 | } | ||
1218 | 781 | ||
1219 | static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) | 782 | void hw_perf_enable(void) |
1220 | { | 783 | { |
1221 | (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); | 784 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1222 | } | 785 | struct perf_event *event; |
786 | struct hw_perf_event *hwc; | ||
787 | int i; | ||
1223 | 788 | ||
1224 | static inline void | 789 | if (!x86_pmu_initialized()) |
1225 | intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) | 790 | return; |
1226 | { | ||
1227 | int idx = __idx - X86_PMC_IDX_FIXED; | ||
1228 | u64 ctrl_val, mask; | ||
1229 | 791 | ||
1230 | mask = 0xfULL << (idx * 4); | 792 | if (cpuc->enabled) |
793 | return; | ||
1231 | 794 | ||
1232 | rdmsrl(hwc->config_base, ctrl_val); | 795 | if (cpuc->n_added) { |
1233 | ctrl_val &= ~mask; | 796 | /* |
1234 | (void)checking_wrmsrl(hwc->config_base, ctrl_val); | 797 | * apply assignment obtained either from |
1235 | } | 798 | * hw_perf_group_sched_in() or x86_pmu_enable() |
799 | * | ||
800 | * step1: save events moving to new counters | ||
801 | * step2: reprogram moved events into new counters | ||
802 | */ | ||
803 | for (i = 0; i < cpuc->n_events; i++) { | ||
1236 | 804 | ||
1237 | static inline void | 805 | event = cpuc->event_list[i]; |
1238 | p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) | 806 | hwc = &event->hw; |
1239 | { | ||
1240 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1241 | u64 val = P6_NOP_EVENT; | ||
1242 | 807 | ||
1243 | if (cpuc->enabled) | 808 | /* |
1244 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 809 | * we can avoid reprogramming counter if: |
810 | * - assigned same counter as last time | ||
811 | * - running on same CPU as last time | ||
812 | * - no other event has used the counter since | ||
813 | */ | ||
814 | if (hwc->idx == -1 || | ||
815 | match_prev_assignment(hwc, cpuc, i)) | ||
816 | continue; | ||
1245 | 817 | ||
1246 | (void)checking_wrmsrl(hwc->config_base + idx, val); | 818 | x86_pmu_stop(event); |
1247 | } | ||
1248 | 819 | ||
1249 | static inline void | 820 | hwc->idx = -1; |
1250 | intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) | 821 | } |
1251 | { | ||
1252 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1253 | intel_pmu_disable_bts(); | ||
1254 | return; | ||
1255 | } | ||
1256 | 822 | ||
1257 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 823 | for (i = 0; i < cpuc->n_events; i++) { |
1258 | intel_pmu_disable_fixed(hwc, idx); | 824 | |
1259 | return; | 825 | event = cpuc->event_list[i]; |
826 | hwc = &event->hw; | ||
827 | |||
828 | if (hwc->idx == -1) { | ||
829 | x86_assign_hw_event(event, cpuc, i); | ||
830 | x86_perf_event_set_period(event, hwc, hwc->idx); | ||
831 | } | ||
832 | /* | ||
833 | * need to mark as active because x86_pmu_disable() | ||
834 | * clear active_mask and events[] yet it preserves | ||
835 | * idx | ||
836 | */ | ||
837 | set_bit(hwc->idx, cpuc->active_mask); | ||
838 | cpuc->events[hwc->idx] = event; | ||
839 | |||
840 | x86_pmu.enable(hwc, hwc->idx); | ||
841 | perf_event_update_userpage(event); | ||
842 | } | ||
843 | cpuc->n_added = 0; | ||
844 | perf_events_lapic_init(); | ||
1260 | } | 845 | } |
1261 | 846 | ||
1262 | x86_pmu_disable_event(hwc, idx); | 847 | cpuc->enabled = 1; |
848 | barrier(); | ||
849 | |||
850 | x86_pmu.enable_all(); | ||
851 | } | ||
852 | |||
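For illustration, here is a minimal userspace model of the "avoid reprogramming" test that hw_perf_enable() applies above; the hw_event/cpu_events structures and the sample values are invented stand-ins for the kernel's hw_perf_event and cpu_hw_events, and only the three-way comparison mirrors match_prev_assignment().

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for the kernel structures (illustrative only) */
struct hw_event { int idx; int last_cpu; unsigned long long last_tag; };
struct cpu_events { int assign[4]; unsigned long long tags[4]; };

/* same counter, same CPU, same tag -> the old programming is still valid */
static bool match_prev_assignment(struct hw_event *hwc, struct cpu_events *c,
                                  int i, int this_cpu)
{
        return hwc->idx == c->assign[i] &&
               hwc->last_cpu == this_cpu &&
               hwc->last_tag == c->tags[i];
}

int main(void)
{
        struct cpu_events c = { .assign = { 0, 2 }, .tags = { 7, 9 } };
        struct hw_event ev = { .idx = 2, .last_cpu = 1, .last_tag = 9 };
        int this_cpu = 1;

        if (ev.idx == -1 || match_prev_assignment(&ev, &c, 1, this_cpu))
                printf("no reprogramming needed\n");
        else
                printf("stop event, reprogram onto counter %d\n", c.assign[1]);
        return 0;
}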
853 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
854 | { | ||
855 | (void)checking_wrmsrl(hwc->config_base + idx, | ||
856 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); | ||
1263 | } | 857 | } |
1264 | 858 | ||
1265 | static inline void | 859 | static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) |
1266 | amd_pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
1267 | { | 860 | { |
1268 | x86_pmu_disable_event(hwc, idx); | 861 | (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); |
1269 | } | 862 | } |
1270 | 863 | ||
1271 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | 864 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
@@ -1326,220 +919,60 @@ x86_perf_event_set_period(struct perf_event *event, | |||
1326 | return ret; | 919 | return ret; |
1327 | } | 920 | } |
1328 | 921 | ||
1329 | static inline void | 922 | static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) |
1330 | intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) | ||
1331 | { | ||
1332 | int idx = __idx - X86_PMC_IDX_FIXED; | ||
1333 | u64 ctrl_val, bits, mask; | ||
1334 | int err; | ||
1335 | |||
1336 | /* | ||
1337 | * Enable IRQ generation (0x8), | ||
1338 | * and enable ring-3 counting (0x2) and ring-0 counting (0x1) | ||
1339 | * if requested: | ||
1340 | */ | ||
1341 | bits = 0x8ULL; | ||
1342 | if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) | ||
1343 | bits |= 0x2; | ||
1344 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | ||
1345 | bits |= 0x1; | ||
1346 | |||
1347 | /* | ||
1348 | * ANY bit is supported in v3 and up | ||
1349 | */ | ||
1350 | if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) | ||
1351 | bits |= 0x4; | ||
1352 | |||
1353 | bits <<= (idx * 4); | ||
1354 | mask = 0xfULL << (idx * 4); | ||
1355 | |||
1356 | rdmsrl(hwc->config_base, ctrl_val); | ||
1357 | ctrl_val &= ~mask; | ||
1358 | ctrl_val |= bits; | ||
1359 | err = checking_wrmsrl(hwc->config_base, ctrl_val); | ||
1360 | } | ||
1361 | |||
1362 | static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
1363 | { | 923 | { |
1364 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 924 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1365 | u64 val; | ||
1366 | |||
1367 | val = hwc->config; | ||
1368 | if (cpuc->enabled) | 925 | if (cpuc->enabled) |
1369 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 926 | __x86_pmu_enable_event(hwc, idx); |
1370 | |||
1371 | (void)checking_wrmsrl(hwc->config_base + idx, val); | ||
1372 | } | 927 | } |
1373 | 928 | ||
1374 | 929 | /* | |
1375 | static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 930 | * activate a single event |
1376 | { | 931 | * |
1377 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | 932 | * The event is added to the group of enabled events |
1378 | if (!__get_cpu_var(cpu_hw_events).enabled) | 933 | * but only if it can be scheduled with existing events. |
1379 | return; | 934 | * |
1380 | 935 | * Called with PMU disabled. If successful and return value 1, | |
1381 | intel_pmu_enable_bts(hwc->config); | 936 | * then guaranteed to call perf_enable() and hw_perf_enable() |
1382 | return; | 937 | */ |
1383 | } | 938 | static int x86_pmu_enable(struct perf_event *event) |
1384 | |||
1385 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | ||
1386 | intel_pmu_enable_fixed(hwc, idx); | ||
1387 | return; | ||
1388 | } | ||
1389 | |||
1390 | x86_pmu_enable_event(hwc, idx); | ||
1391 | } | ||
1392 | |||
1393 | static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
1394 | { | 939 | { |
1395 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 940 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
941 | struct hw_perf_event *hwc; | ||
942 | int assign[X86_PMC_IDX_MAX]; | ||
943 | int n, n0, ret; | ||
1396 | 944 | ||
1397 | if (cpuc->enabled) | 945 | hwc = &event->hw; |
1398 | x86_pmu_enable_event(hwc, idx); | ||
1399 | } | ||
1400 | |||
1401 | static int fixed_mode_idx(struct hw_perf_event *hwc) | ||
1402 | { | ||
1403 | unsigned int hw_event; | ||
1404 | |||
1405 | hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1406 | |||
1407 | if (unlikely((hw_event == | ||
1408 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && | ||
1409 | (hwc->sample_period == 1))) | ||
1410 | return X86_PMC_IDX_FIXED_BTS; | ||
1411 | 946 | ||
1412 | if (!x86_pmu.num_events_fixed) | 947 | n0 = cpuc->n_events; |
1413 | return -1; | 948 | n = collect_events(cpuc, event, false); |
949 | if (n < 0) | ||
950 | return n; | ||
1414 | 951 | ||
952 | ret = x86_schedule_events(cpuc, n, assign); | ||
953 | if (ret) | ||
954 | return ret; | ||
1415 | /* | 955 | /* |
1416 | * fixed counters do not take all possible filters | 956 | * copy new assignment, now we know it is possible |
957 | * will be used by hw_perf_enable() | ||
1417 | */ | 958 | */ |
1418 | if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK) | 959 | memcpy(cpuc->assign, assign, n*sizeof(int)); |
1419 | return -1; | ||
1420 | 960 | ||
1421 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 961 | cpuc->n_events = n; |
1422 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | 962 | cpuc->n_added = n - n0; |
1423 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | ||
1424 | return X86_PMC_IDX_FIXED_CPU_CYCLES; | ||
1425 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) | ||
1426 | return X86_PMC_IDX_FIXED_BUS_CYCLES; | ||
1427 | 963 | ||
1428 | return -1; | 964 | return 0; |
1429 | } | ||
1430 | |||
1431 | /* | ||
1432 | * generic counter allocator: get next free counter | ||
1433 | */ | ||
1434 | static int | ||
1435 | gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) | ||
1436 | { | ||
1437 | int idx; | ||
1438 | |||
1439 | idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); | ||
1440 | return idx == x86_pmu.num_events ? -1 : idx; | ||
1441 | } | ||
1442 | |||
1443 | /* | ||
1444 | * intel-specific counter allocator: check event constraints | ||
1445 | */ | ||
1446 | static int | ||
1447 | intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) | ||
1448 | { | ||
1449 | const struct event_constraint *event_constraint; | ||
1450 | int i, code; | ||
1451 | |||
1452 | if (!event_constraints) | ||
1453 | goto skip; | ||
1454 | |||
1455 | code = hwc->config & CORE_EVNTSEL_EVENT_MASK; | ||
1456 | |||
1457 | for_each_event_constraint(event_constraint, event_constraints) { | ||
1458 | if (code == event_constraint->code) { | ||
1459 | for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { | ||
1460 | if (!test_and_set_bit(i, cpuc->used_mask)) | ||
1461 | return i; | ||
1462 | } | ||
1463 | return -1; | ||
1464 | } | ||
1465 | } | ||
1466 | skip: | ||
1467 | return gen_get_event_idx(cpuc, hwc); | ||
1468 | } | ||
1469 | |||
1470 | static int | ||
1471 | x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) | ||
1472 | { | ||
1473 | int idx; | ||
1474 | |||
1475 | idx = fixed_mode_idx(hwc); | ||
1476 | if (idx == X86_PMC_IDX_FIXED_BTS) { | ||
1477 | /* BTS is already occupied. */ | ||
1478 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
1479 | return -EAGAIN; | ||
1480 | |||
1481 | hwc->config_base = 0; | ||
1482 | hwc->event_base = 0; | ||
1483 | hwc->idx = idx; | ||
1484 | } else if (idx >= 0) { | ||
1485 | /* | ||
1486 | * Try to get the fixed event, if that is already taken | ||
1487 | * then try to get a generic event: | ||
1488 | */ | ||
1489 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
1490 | goto try_generic; | ||
1491 | |||
1492 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; | ||
1493 | /* | ||
1494 | * We set it so that event_base + idx in wrmsr/rdmsr maps to | ||
1495 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: | ||
1496 | */ | ||
1497 | hwc->event_base = | ||
1498 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; | ||
1499 | hwc->idx = idx; | ||
1500 | } else { | ||
1501 | idx = hwc->idx; | ||
1502 | /* Try to get the previous generic event again */ | ||
1503 | if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { | ||
1504 | try_generic: | ||
1505 | idx = x86_pmu.get_event_idx(cpuc, hwc); | ||
1506 | if (idx == -1) | ||
1507 | return -EAGAIN; | ||
1508 | |||
1509 | set_bit(idx, cpuc->used_mask); | ||
1510 | hwc->idx = idx; | ||
1511 | } | ||
1512 | hwc->config_base = x86_pmu.eventsel; | ||
1513 | hwc->event_base = x86_pmu.perfctr; | ||
1514 | } | ||
1515 | |||
1516 | return idx; | ||
1517 | } | 965 | } |
1518 | 966 | ||
1519 | /* | 967 | static int x86_pmu_start(struct perf_event *event) |
1520 | * Find a PMC slot for the freshly enabled / scheduled in event: | ||
1521 | */ | ||
1522 | static int x86_pmu_enable(struct perf_event *event) | ||
1523 | { | 968 | { |
1524 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1525 | struct hw_perf_event *hwc = &event->hw; | 969 | struct hw_perf_event *hwc = &event->hw; |
1526 | int idx; | ||
1527 | |||
1528 | idx = x86_schedule_event(cpuc, hwc); | ||
1529 | if (idx < 0) | ||
1530 | return idx; | ||
1531 | 970 | ||
1532 | perf_events_lapic_init(); | 971 | if (hwc->idx == -1) |
1533 | 972 | return -EAGAIN; | |
1534 | x86_pmu.disable(hwc, idx); | ||
1535 | 973 | ||
1536 | cpuc->events[idx] = event; | 974 | x86_perf_event_set_period(event, hwc, hwc->idx); |
1537 | set_bit(idx, cpuc->active_mask); | 975 | x86_pmu.enable(hwc, hwc->idx); |
1538 | |||
1539 | x86_perf_event_set_period(event, hwc, idx); | ||
1540 | x86_pmu.enable(hwc, idx); | ||
1541 | |||
1542 | perf_event_update_userpage(event); | ||
1543 | 976 | ||
1544 | return 0; | 977 | return 0; |
1545 | } | 978 | } |
@@ -1583,7 +1016,7 @@ void perf_event_print_debug(void) | |||
1583 | pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); | 1016 | pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); |
1584 | pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); | 1017 | pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); |
1585 | } | 1018 | } |
1586 | pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); | 1019 | pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); |
1587 | 1020 | ||
1588 | for (idx = 0; idx < x86_pmu.num_events; idx++) { | 1021 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
1589 | rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); | 1022 | rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); |
@@ -1607,67 +1040,7 @@ void perf_event_print_debug(void) | |||
1607 | local_irq_restore(flags); | 1040 | local_irq_restore(flags); |
1608 | } | 1041 | } |
1609 | 1042 | ||
1610 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) | 1043 | static void x86_pmu_stop(struct perf_event *event) |
1611 | { | ||
1612 | struct debug_store *ds = cpuc->ds; | ||
1613 | struct bts_record { | ||
1614 | u64 from; | ||
1615 | u64 to; | ||
1616 | u64 flags; | ||
1617 | }; | ||
1618 | struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; | ||
1619 | struct bts_record *at, *top; | ||
1620 | struct perf_output_handle handle; | ||
1621 | struct perf_event_header header; | ||
1622 | struct perf_sample_data data; | ||
1623 | struct pt_regs regs; | ||
1624 | |||
1625 | if (!event) | ||
1626 | return; | ||
1627 | |||
1628 | if (!ds) | ||
1629 | return; | ||
1630 | |||
1631 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | ||
1632 | top = (struct bts_record *)(unsigned long)ds->bts_index; | ||
1633 | |||
1634 | if (top <= at) | ||
1635 | return; | ||
1636 | |||
1637 | ds->bts_index = ds->bts_buffer_base; | ||
1638 | |||
1639 | |||
1640 | data.period = event->hw.last_period; | ||
1641 | data.addr = 0; | ||
1642 | data.raw = NULL; | ||
1643 | regs.ip = 0; | ||
1644 | |||
1645 | /* | ||
1646 | * Prepare a generic sample, i.e. fill in the invariant fields. | ||
1647 | * We will overwrite the from and to address before we output | ||
1648 | * the sample. | ||
1649 | */ | ||
1650 | perf_prepare_sample(&header, &data, event, ®s); | ||
1651 | |||
1652 | if (perf_output_begin(&handle, event, | ||
1653 | header.size * (top - at), 1, 1)) | ||
1654 | return; | ||
1655 | |||
1656 | for (; at < top; at++) { | ||
1657 | data.ip = at->from; | ||
1658 | data.addr = at->to; | ||
1659 | |||
1660 | perf_output_sample(&handle, &header, &data, event); | ||
1661 | } | ||
1662 | |||
1663 | perf_output_end(&handle); | ||
1664 | |||
1665 | /* There's new data available. */ | ||
1666 | event->hw.interrupts++; | ||
1667 | event->pending_kill = POLL_IN; | ||
1668 | } | ||
1669 | |||
1670 | static void x86_pmu_disable(struct perf_event *event) | ||
1671 | { | 1044 | { |
1672 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1045 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1673 | struct hw_perf_event *hwc = &event->hw; | 1046 | struct hw_perf_event *hwc = &event->hw; |
@@ -1681,183 +1054,38 @@ static void x86_pmu_disable(struct perf_event *event) | |||
1681 | x86_pmu.disable(hwc, idx); | 1054 | x86_pmu.disable(hwc, idx); |
1682 | 1055 | ||
1683 | /* | 1056 | /* |
1684 | * Make sure the cleared pointer becomes visible before we | ||
1685 | * (potentially) free the event: | ||
1686 | */ | ||
1687 | barrier(); | ||
1688 | |||
1689 | /* | ||
1690 | * Drain the remaining delta count out of an event | 1057 | * Drain the remaining delta count out of an event |
1691 | * that we are disabling: | 1058 | * that we are disabling: |
1692 | */ | 1059 | */ |
1693 | x86_perf_event_update(event, hwc, idx); | 1060 | x86_perf_event_update(event, hwc, idx); |
1694 | 1061 | ||
1695 | /* Drain the remaining BTS records. */ | ||
1696 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) | ||
1697 | intel_pmu_drain_bts_buffer(cpuc); | ||
1698 | |||
1699 | cpuc->events[idx] = NULL; | 1062 | cpuc->events[idx] = NULL; |
1700 | clear_bit(idx, cpuc->used_mask); | ||
1701 | |||
1702 | perf_event_update_userpage(event); | ||
1703 | } | ||
1704 | |||
1705 | /* | ||
1706 | * Save and restart an expired event. Called by NMI contexts, | ||
1707 | * so it has to be careful about preempting normal event ops: | ||
1708 | */ | ||
1709 | static int intel_pmu_save_and_restart(struct perf_event *event) | ||
1710 | { | ||
1711 | struct hw_perf_event *hwc = &event->hw; | ||
1712 | int idx = hwc->idx; | ||
1713 | int ret; | ||
1714 | |||
1715 | x86_perf_event_update(event, hwc, idx); | ||
1716 | ret = x86_perf_event_set_period(event, hwc, idx); | ||
1717 | |||
1718 | if (event->state == PERF_EVENT_STATE_ACTIVE) | ||
1719 | intel_pmu_enable_event(hwc, idx); | ||
1720 | |||
1721 | return ret; | ||
1722 | } | 1063 | } |
1723 | 1064 | ||
1724 | static void intel_pmu_reset(void) | 1065 | static void x86_pmu_disable(struct perf_event *event) |
1725 | { | ||
1726 | struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; | ||
1727 | unsigned long flags; | ||
1728 | int idx; | ||
1729 | |||
1730 | if (!x86_pmu.num_events) | ||
1731 | return; | ||
1732 | |||
1733 | local_irq_save(flags); | ||
1734 | |||
1735 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); | ||
1736 | |||
1737 | for (idx = 0; idx < x86_pmu.num_events; idx++) { | ||
1738 | checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); | ||
1739 | checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); | ||
1740 | } | ||
1741 | for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { | ||
1742 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | ||
1743 | } | ||
1744 | if (ds) | ||
1745 | ds->bts_index = ds->bts_buffer_base; | ||
1746 | |||
1747 | local_irq_restore(flags); | ||
1748 | } | ||
1749 | |||
1750 | static int p6_pmu_handle_irq(struct pt_regs *regs) | ||
1751 | { | ||
1752 | struct perf_sample_data data; | ||
1753 | struct cpu_hw_events *cpuc; | ||
1754 | struct perf_event *event; | ||
1755 | struct hw_perf_event *hwc; | ||
1756 | int idx, handled = 0; | ||
1757 | u64 val; | ||
1758 | |||
1759 | data.addr = 0; | ||
1760 | data.raw = NULL; | ||
1761 | |||
1762 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
1763 | |||
1764 | for (idx = 0; idx < x86_pmu.num_events; idx++) { | ||
1765 | if (!test_bit(idx, cpuc->active_mask)) | ||
1766 | continue; | ||
1767 | |||
1768 | event = cpuc->events[idx]; | ||
1769 | hwc = &event->hw; | ||
1770 | |||
1771 | val = x86_perf_event_update(event, hwc, idx); | ||
1772 | if (val & (1ULL << (x86_pmu.event_bits - 1))) | ||
1773 | continue; | ||
1774 | |||
1775 | /* | ||
1776 | * event overflow | ||
1777 | */ | ||
1778 | handled = 1; | ||
1779 | data.period = event->hw.last_period; | ||
1780 | |||
1781 | if (!x86_perf_event_set_period(event, hwc, idx)) | ||
1782 | continue; | ||
1783 | |||
1784 | if (perf_event_overflow(event, 1, &data, regs)) | ||
1785 | p6_pmu_disable_event(hwc, idx); | ||
1786 | } | ||
1787 | |||
1788 | if (handled) | ||
1789 | inc_irq_stat(apic_perf_irqs); | ||
1790 | |||
1791 | return handled; | ||
1792 | } | ||
1793 | |||
1794 | /* | ||
1795 | * This handler is triggered by the local APIC, so the APIC IRQ handling | ||
1796 | * rules apply: | ||
1797 | */ | ||
1798 | static int intel_pmu_handle_irq(struct pt_regs *regs) | ||
1799 | { | 1066 | { |
1800 | struct perf_sample_data data; | 1067 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1801 | struct cpu_hw_events *cpuc; | 1068 | int i; |
1802 | int bit, loops; | ||
1803 | u64 ack, status; | ||
1804 | |||
1805 | data.addr = 0; | ||
1806 | data.raw = NULL; | ||
1807 | |||
1808 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
1809 | |||
1810 | perf_disable(); | ||
1811 | intel_pmu_drain_bts_buffer(cpuc); | ||
1812 | status = intel_pmu_get_status(); | ||
1813 | if (!status) { | ||
1814 | perf_enable(); | ||
1815 | return 0; | ||
1816 | } | ||
1817 | |||
1818 | loops = 0; | ||
1819 | again: | ||
1820 | if (++loops > 100) { | ||
1821 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); | ||
1822 | perf_event_print_debug(); | ||
1823 | intel_pmu_reset(); | ||
1824 | perf_enable(); | ||
1825 | return 1; | ||
1826 | } | ||
1827 | 1069 | ||
1828 | inc_irq_stat(apic_perf_irqs); | 1070 | x86_pmu_stop(event); |
1829 | ack = status; | ||
1830 | for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | ||
1831 | struct perf_event *event = cpuc->events[bit]; | ||
1832 | 1071 | ||
1833 | clear_bit(bit, (unsigned long *) &status); | 1072 | for (i = 0; i < cpuc->n_events; i++) { |
1834 | if (!test_bit(bit, cpuc->active_mask)) | 1073 | if (event == cpuc->event_list[i]) { |
1835 | continue; | ||
1836 | 1074 | ||
1837 | if (!intel_pmu_save_and_restart(event)) | 1075 | if (x86_pmu.put_event_constraints) |
1838 | continue; | 1076 | x86_pmu.put_event_constraints(cpuc, event); |
1839 | 1077 | ||
1840 | data.period = event->hw.last_period; | 1078 | while (++i < cpuc->n_events) |
1079 | cpuc->event_list[i-1] = cpuc->event_list[i]; | ||
1841 | 1080 | ||
1842 | if (perf_event_overflow(event, 1, &data, regs)) | 1081 | --cpuc->n_events; |
1843 | intel_pmu_disable_event(&event->hw, bit); | 1082 | break; |
1083 | } | ||
1844 | } | 1084 | } |
1845 | 1085 | perf_event_update_userpage(event); | |
1846 | intel_pmu_ack_status(ack); | ||
1847 | |||
1848 | /* | ||
1849 | * Repeat if there is more work to be done: | ||
1850 | */ | ||
1851 | status = intel_pmu_get_status(); | ||
1852 | if (status) | ||
1853 | goto again; | ||
1854 | |||
1855 | perf_enable(); | ||
1856 | |||
1857 | return 1; | ||
1858 | } | 1086 | } |
1859 | 1087 | ||
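x86_pmu_disable() above takes the event out of the software list by shifting the tail of event_list down one slot; a small self-contained model of that removal loop (the integer array is a stand-in for the kernel's perf_event pointers):

#include <stdio.h>

static int n_events = 4;
static int event_list[8] = { 10, 20, 30, 40 };

/* remove 'ev' and close the gap, preserving the order of the rest */
static void list_remove(int ev)
{
        for (int i = 0; i < n_events; i++) {
                if (event_list[i] == ev) {
                        while (++i < n_events)
                                event_list[i - 1] = event_list[i];
                        --n_events;
                        break;
                }
        }
}

int main(void)
{
        list_remove(20);
        for (int i = 0; i < n_events; i++)
                printf("%d ", event_list[i]);
        printf("\n");           /* prints: 10 30 40 */
        return 0;
}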
1860 | static int amd_pmu_handle_irq(struct pt_regs *regs) | 1088 | static int x86_pmu_handle_irq(struct pt_regs *regs) |
1861 | { | 1089 | { |
1862 | struct perf_sample_data data; | 1090 | struct perf_sample_data data; |
1863 | struct cpu_hw_events *cpuc; | 1091 | struct cpu_hw_events *cpuc; |
@@ -1892,7 +1120,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) | |||
1892 | continue; | 1120 | continue; |
1893 | 1121 | ||
1894 | if (perf_event_overflow(event, 1, &data, regs)) | 1122 | if (perf_event_overflow(event, 1, &data, regs)) |
1895 | amd_pmu_disable_event(hwc, idx); | 1123 | x86_pmu.disable(hwc, idx); |
1896 | } | 1124 | } |
1897 | 1125 | ||
1898 | if (handled) | 1126 | if (handled) |
@@ -1975,194 +1203,137 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { | |||
1975 | .priority = 1 | 1203 | .priority = 1 |
1976 | }; | 1204 | }; |
1977 | 1205 | ||
1978 | static __initconst struct x86_pmu p6_pmu = { | 1206 | static struct event_constraint unconstrained; |
1979 | .name = "p6", | 1207 | static struct event_constraint emptyconstraint; |
1980 | .handle_irq = p6_pmu_handle_irq, | ||
1981 | .disable_all = p6_pmu_disable_all, | ||
1982 | .enable_all = p6_pmu_enable_all, | ||
1983 | .enable = p6_pmu_enable_event, | ||
1984 | .disable = p6_pmu_disable_event, | ||
1985 | .eventsel = MSR_P6_EVNTSEL0, | ||
1986 | .perfctr = MSR_P6_PERFCTR0, | ||
1987 | .event_map = p6_pmu_event_map, | ||
1988 | .raw_event = p6_pmu_raw_event, | ||
1989 | .max_events = ARRAY_SIZE(p6_perfmon_event_map), | ||
1990 | .apic = 1, | ||
1991 | .max_period = (1ULL << 31) - 1, | ||
1992 | .version = 0, | ||
1993 | .num_events = 2, | ||
1994 | /* | ||
1995 | * Events have 40 bits implemented. However they are designed such | ||
1996 | * that bits [32-39] are sign extensions of bit 31. As such the | ||
1997 | * effective width of a event for P6-like PMU is 32 bits only. | ||
1998 | * | ||
1999 | * See IA-32 Intel Architecture Software developer manual Vol 3B | ||
2000 | */ | ||
2001 | .event_bits = 32, | ||
2002 | .event_mask = (1ULL << 32) - 1, | ||
2003 | .get_event_idx = intel_get_event_idx, | ||
2004 | }; | ||
2005 | 1208 | ||
2006 | static __initconst struct x86_pmu intel_pmu = { | 1209 | static struct event_constraint * |
2007 | .name = "Intel", | 1210 | x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) |
2008 | .handle_irq = intel_pmu_handle_irq, | 1211 | { |
2009 | .disable_all = intel_pmu_disable_all, | 1212 | struct event_constraint *c; |
2010 | .enable_all = intel_pmu_enable_all, | ||
2011 | .enable = intel_pmu_enable_event, | ||
2012 | .disable = intel_pmu_disable_event, | ||
2013 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
2014 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
2015 | .event_map = intel_pmu_event_map, | ||
2016 | .raw_event = intel_pmu_raw_event, | ||
2017 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), | ||
2018 | .apic = 1, | ||
2019 | /* | ||
2020 | * Intel PMCs cannot be accessed sanely above 32 bit width, | ||
2021 | * so we install an artificial 1<<31 period regardless of | ||
2022 | * the generic event period: | ||
2023 | */ | ||
2024 | .max_period = (1ULL << 31) - 1, | ||
2025 | .enable_bts = intel_pmu_enable_bts, | ||
2026 | .disable_bts = intel_pmu_disable_bts, | ||
2027 | .get_event_idx = intel_get_event_idx, | ||
2028 | }; | ||
2029 | 1213 | ||
2030 | static __initconst struct x86_pmu amd_pmu = { | 1214 | if (x86_pmu.event_constraints) { |
2031 | .name = "AMD", | 1215 | for_each_event_constraint(c, x86_pmu.event_constraints) { |
2032 | .handle_irq = amd_pmu_handle_irq, | 1216 | if ((event->hw.config & c->cmask) == c->code) |
2033 | .disable_all = amd_pmu_disable_all, | 1217 | return c; |
2034 | .enable_all = amd_pmu_enable_all, | 1218 | } |
2035 | .enable = amd_pmu_enable_event, | 1219 | } |
2036 | .disable = amd_pmu_disable_event, | 1220 | |
2037 | .eventsel = MSR_K7_EVNTSEL0, | 1221 | return &unconstrained; |
2038 | .perfctr = MSR_K7_PERFCTR0, | 1222 | } |
2039 | .event_map = amd_pmu_event_map, | ||
2040 | .raw_event = amd_pmu_raw_event, | ||
2041 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | ||
2042 | .num_events = 4, | ||
2043 | .event_bits = 48, | ||
2044 | .event_mask = (1ULL << 48) - 1, | ||
2045 | .apic = 1, | ||
2046 | /* use highest bit to detect overflow */ | ||
2047 | .max_period = (1ULL << 47) - 1, | ||
2048 | .get_event_idx = gen_get_event_idx, | ||
2049 | }; | ||
2050 | 1223 | ||
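x86_get_event_constraints(), introduced above, matches an event against the PMU's constraint table by masking the config with the constraint's cmask and comparing to code, falling back to the catch-all unconstrained entry. A rough standalone illustration of that lookup (the table contents and counter masks are invented for the example):

#include <stdio.h>

struct constraint {
        unsigned long long code;        /* value expected after masking */
        unsigned long long cmask;       /* which config bits participate */
        unsigned long long idxmsk;      /* allowed counters, as a bitmask */
};

static const struct constraint table[] = {
        { 0x11, 0xff, 0x2 },            /* example: event 0x11 -> counter 1 only */
        { 0x12, 0xff, 0x2 },
        { 0, 0, 0 }                     /* end marker: cmask == 0 */
};

static const struct constraint unconstrained = { 0, 0, 0xf };   /* any of 4 counters */

static const struct constraint *get_constraints(unsigned long long config)
{
        for (const struct constraint *c = table; c->cmask; c++) {
                if ((config & c->cmask) == c->code)
                        return c;
        }
        return &unconstrained;
}

int main(void)
{
        printf("config 0x530011 -> idxmsk 0x%llx\n", get_constraints(0x530011ULL)->idxmsk);
        printf("config 0x5300c0 -> idxmsk 0x%llx\n", get_constraints(0x5300c0ULL)->idxmsk);
        return 0;
}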
2051 | static __init int p6_pmu_init(void) | 1224 | static int x86_event_sched_in(struct perf_event *event, |
1225 | struct perf_cpu_context *cpuctx) | ||
2052 | { | 1226 | { |
2053 | switch (boot_cpu_data.x86_model) { | 1227 | int ret = 0; |
2054 | case 1: | ||
2055 | case 3: /* Pentium Pro */ | ||
2056 | case 5: | ||
2057 | case 6: /* Pentium II */ | ||
2058 | case 7: | ||
2059 | case 8: | ||
2060 | case 11: /* Pentium III */ | ||
2061 | event_constraints = intel_p6_event_constraints; | ||
2062 | break; | ||
2063 | case 9: | ||
2064 | case 13: | ||
2065 | /* Pentium M */ | ||
2066 | event_constraints = intel_p6_event_constraints; | ||
2067 | break; | ||
2068 | default: | ||
2069 | pr_cont("unsupported p6 CPU model %d ", | ||
2070 | boot_cpu_data.x86_model); | ||
2071 | return -ENODEV; | ||
2072 | } | ||
2073 | 1228 | ||
2074 | x86_pmu = p6_pmu; | 1229 | event->state = PERF_EVENT_STATE_ACTIVE; |
1230 | event->oncpu = smp_processor_id(); | ||
1231 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; | ||
2075 | 1232 | ||
2076 | return 0; | 1233 | if (!is_x86_event(event)) |
1234 | ret = event->pmu->enable(event); | ||
1235 | |||
1236 | if (!ret && !is_software_event(event)) | ||
1237 | cpuctx->active_oncpu++; | ||
1238 | |||
1239 | if (!ret && event->attr.exclusive) | ||
1240 | cpuctx->exclusive = 1; | ||
1241 | |||
1242 | return ret; | ||
2077 | } | 1243 | } |
2078 | 1244 | ||
2079 | static __init int intel_pmu_init(void) | 1245 | static void x86_event_sched_out(struct perf_event *event, |
1246 | struct perf_cpu_context *cpuctx) | ||
2080 | { | 1247 | { |
2081 | union cpuid10_edx edx; | 1248 | event->state = PERF_EVENT_STATE_INACTIVE; |
2082 | union cpuid10_eax eax; | 1249 | event->oncpu = -1; |
2083 | unsigned int unused; | ||
2084 | unsigned int ebx; | ||
2085 | int version; | ||
2086 | |||
2087 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
2088 | /* check for P6 processor family */ | ||
2089 | if (boot_cpu_data.x86 == 6) { | ||
2090 | return p6_pmu_init(); | ||
2091 | } else { | ||
2092 | return -ENODEV; | ||
2093 | } | ||
2094 | } | ||
2095 | 1250 | ||
2096 | /* | 1251 | if (!is_x86_event(event)) |
2097 | * Check whether the Architectural PerfMon supports | 1252 | event->pmu->disable(event); |
2098 | * Branch Misses Retired hw_event or not. | ||
2099 | */ | ||
2100 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); | ||
2101 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) | ||
2102 | return -ENODEV; | ||
2103 | 1253 | ||
2104 | version = eax.split.version_id; | 1254 | event->tstamp_running -= event->ctx->time - event->tstamp_stopped; |
2105 | if (version < 2) | ||
2106 | return -ENODEV; | ||
2107 | 1255 | ||
2108 | x86_pmu = intel_pmu; | 1256 | if (!is_software_event(event)) |
2109 | x86_pmu.version = version; | 1257 | cpuctx->active_oncpu--; |
2110 | x86_pmu.num_events = eax.split.num_events; | ||
2111 | x86_pmu.event_bits = eax.split.bit_width; | ||
2112 | x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1; | ||
2113 | 1258 | ||
2114 | /* | 1259 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
2115 | * Quirk: v2 perfmon does not report fixed-purpose events, so | 1260 | cpuctx->exclusive = 0; |
2116 | * assume at least 3 events: | 1261 | } |
2117 | */ | ||
2118 | x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); | ||
2119 | 1262 | ||
1263 | /* | ||
1264 | * Called to enable a whole group of events. | ||
1265 | * Returns 1 if the group was enabled, or -EAGAIN if it could not be. | ||
1266 | * Assumes the caller has disabled interrupts and has | ||
1267 | * frozen the PMU with hw_perf_save_disable. | ||
1268 | * | ||
1269 | * called with PMU disabled. If successful and return value 1, | ||
1270 | * then guaranteed to call perf_enable() and hw_perf_enable() | ||
1271 | */ | ||
1272 | int hw_perf_group_sched_in(struct perf_event *leader, | ||
1273 | struct perf_cpu_context *cpuctx, | ||
1274 | struct perf_event_context *ctx) | ||
1275 | { | ||
1276 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1277 | struct perf_event *sub; | ||
1278 | int assign[X86_PMC_IDX_MAX]; | ||
1279 | int n0, n1, ret; | ||
1280 | |||
1281 | /* n0 = total number of events */ | ||
1282 | n0 = collect_events(cpuc, leader, true); | ||
1283 | if (n0 < 0) | ||
1284 | return n0; | ||
1285 | |||
1286 | ret = x86_schedule_events(cpuc, n0, assign); | ||
1287 | if (ret) | ||
1288 | return ret; | ||
1289 | |||
1290 | ret = x86_event_sched_in(leader, cpuctx); | ||
1291 | if (ret) | ||
1292 | return ret; | ||
1293 | |||
1294 | n1 = 1; | ||
1295 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | ||
1296 | if (sub->state > PERF_EVENT_STATE_OFF) { | ||
1297 | ret = x86_event_sched_in(sub, cpuctx); | ||
1298 | if (ret) | ||
1299 | goto undo; | ||
1300 | ++n1; | ||
1301 | } | ||
1302 | } | ||
2120 | /* | 1303 | /* |
2121 | * Install the hw-cache-events table: | 1304 | * copy new assignment, now we know it is possible |
1305 | * will be used by hw_perf_enable() | ||
2122 | */ | 1306 | */ |
2123 | switch (boot_cpu_data.x86_model) { | 1307 | memcpy(cpuc->assign, assign, n0*sizeof(int)); |
2124 | case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | ||
2125 | case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ | ||
2126 | case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ | ||
2127 | case 29: /* six-core 45 nm xeon "Dunnington" */ | ||
2128 | memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, | ||
2129 | sizeof(hw_cache_event_ids)); | ||
2130 | |||
2131 | pr_cont("Core2 events, "); | ||
2132 | event_constraints = intel_core_event_constraints; | ||
2133 | break; | ||
2134 | default: | ||
2135 | case 26: | ||
2136 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | ||
2137 | sizeof(hw_cache_event_ids)); | ||
2138 | 1308 | ||
2139 | event_constraints = intel_nehalem_event_constraints; | 1309 | cpuc->n_events = n0; |
2140 | pr_cont("Nehalem/Corei7 events, "); | 1310 | cpuc->n_added = n1; |
2141 | break; | 1311 | ctx->nr_active += n1; |
2142 | case 28: | ||
2143 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, | ||
2144 | sizeof(hw_cache_event_ids)); | ||
2145 | 1312 | ||
2146 | pr_cont("Atom events, "); | 1313 | /* |
2147 | break; | 1314 | * 1 means successful and events are active |
1315 | * This is not quite true because we defer | ||
1316 | * actual activation until hw_perf_enable() but | ||
1317 | * this way we ensure caller won't try to enable | ||
1318 | * individual events | ||
1319 | */ | ||
1320 | return 1; | ||
1321 | undo: | ||
1322 | x86_event_sched_out(leader, cpuctx); | ||
1323 | n0 = 1; | ||
1324 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | ||
1325 | if (sub->state == PERF_EVENT_STATE_ACTIVE) { | ||
1326 | x86_event_sched_out(sub, cpuctx); | ||
1327 | if (++n0 == n1) | ||
1328 | break; | ||
1329 | } | ||
2148 | } | 1330 | } |
2149 | return 0; | 1331 | return ret; |
2150 | } | 1332 | } |
2151 | 1333 | ||
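hw_perf_group_sched_in() above is all-or-nothing: after the dry-run schedule, the leader and each sibling are activated one by one, and any failure unwinds everything already activated through the undo: path. A minimal model of that activate-or-roll-back loop (the boolean "events" replace the perf structures purely for the demo):

#include <stdbool.h>
#include <stdio.h>

#define NGRP 4

/* true = this member can be activated, false = it will fail (for the demo) */
static const bool can_activate[NGRP] = { true, true, false, true };
static bool active[NGRP];

static bool sched_in(int i)
{
        if (!can_activate[i])
                return false;
        active[i] = true;
        return true;
}

static void sched_out(int i)
{
        active[i] = false;
}

/* activate every member of the group, or none of them */
static int group_sched_in(void)
{
        int i;

        for (i = 0; i < NGRP; i++) {
                if (!sched_in(i))
                        goto undo;
        }
        return 1;                       /* whole group is now active */
undo:
        while (--i >= 0)
                sched_out(i);
        return -1;                      /* -EAGAIN in the kernel */
}

int main(void)
{
        printf("group_sched_in() = %d\n", group_sched_in());
        for (int i = 0; i < NGRP; i++)
                printf("member %d active=%d\n", i, active[i]);
        return 0;
}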
2152 | static __init int amd_pmu_init(void) | 1334 | #include "perf_event_amd.c" |
2153 | { | 1335 | #include "perf_event_p6.c" |
2154 | /* Performance-monitoring supported from K7 and later: */ | 1336 | #include "perf_event_intel.c" |
2155 | if (boot_cpu_data.x86 < 6) | ||
2156 | return -ENODEV; | ||
2157 | |||
2158 | x86_pmu = amd_pmu; | ||
2159 | |||
2160 | /* Events are common for all AMDs */ | ||
2161 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | ||
2162 | sizeof(hw_cache_event_ids)); | ||
2163 | |||
2164 | return 0; | ||
2165 | } | ||
2166 | 1337 | ||
2167 | static void __init pmu_check_apic(void) | 1338 | static void __init pmu_check_apic(void) |
2168 | { | 1339 | { |
@@ -2220,6 +1391,10 @@ void __init init_hw_perf_events(void) | |||
2220 | perf_events_lapic_init(); | 1391 | perf_events_lapic_init(); |
2221 | register_die_notifier(&perf_event_nmi_notifier); | 1392 | register_die_notifier(&perf_event_nmi_notifier); |
2222 | 1393 | ||
1394 | unconstrained = (struct event_constraint) | ||
1395 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, | ||
1396 | 0, x86_pmu.num_events); | ||
1397 | |||
2223 | pr_info("... version: %d\n", x86_pmu.version); | 1398 | pr_info("... version: %d\n", x86_pmu.version); |
2224 | pr_info("... bit width: %d\n", x86_pmu.event_bits); | 1399 | pr_info("... bit width: %d\n", x86_pmu.event_bits); |
2225 | pr_info("... generic registers: %d\n", x86_pmu.num_events); | 1400 | pr_info("... generic registers: %d\n", x86_pmu.num_events); |
@@ -2237,50 +1412,79 @@ static inline void x86_pmu_read(struct perf_event *event) | |||
2237 | static const struct pmu pmu = { | 1412 | static const struct pmu pmu = { |
2238 | .enable = x86_pmu_enable, | 1413 | .enable = x86_pmu_enable, |
2239 | .disable = x86_pmu_disable, | 1414 | .disable = x86_pmu_disable, |
1415 | .start = x86_pmu_start, | ||
1416 | .stop = x86_pmu_stop, | ||
2240 | .read = x86_pmu_read, | 1417 | .read = x86_pmu_read, |
2241 | .unthrottle = x86_pmu_unthrottle, | 1418 | .unthrottle = x86_pmu_unthrottle, |
2242 | }; | 1419 | }; |
2243 | 1420 | ||
2244 | static int | 1421 | /* |
2245 | validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) | 1422 | * validate a single event group |
2246 | { | 1423 | * |
2247 | struct hw_perf_event fake_event = event->hw; | 1424 | * validation includes: |
2248 | 1425 | * - check events are compatible with each other |
2249 | if (event->pmu && event->pmu != &pmu) | 1426 | * - events do not compete for the same counter |
2250 | return 0; | 1427 | * - number of events <= number of counters |
2251 | 1428 | * | |
2252 | return x86_schedule_event(cpuc, &fake_event) >= 0; | 1429 | * validation ensures the group can be loaded onto the |
2253 | } | 1430 | * PMU if it was the only group available. |
2254 | 1431 | */ | |
2255 | static int validate_group(struct perf_event *event) | 1432 | static int validate_group(struct perf_event *event) |
2256 | { | 1433 | { |
2257 | struct perf_event *sibling, *leader = event->group_leader; | 1434 | struct perf_event *leader = event->group_leader; |
2258 | struct cpu_hw_events fake_pmu; | 1435 | struct cpu_hw_events *fake_cpuc; |
1436 | int ret, n; | ||
2259 | 1437 | ||
2260 | memset(&fake_pmu, 0, sizeof(fake_pmu)); | 1438 | ret = -ENOMEM; |
1439 | fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); | ||
1440 | if (!fake_cpuc) | ||
1441 | goto out; | ||
1442 | |||
1443 | /* | ||
1444 | * the event is not yet connected with its | ||
1445 | * siblings therefore we must first collect | ||
1446 | * existing siblings, then add the new event | ||
1447 | * before we can simulate the scheduling | ||
1448 | */ | ||
1449 | ret = -ENOSPC; | ||
1450 | n = collect_events(fake_cpuc, leader, true); | ||
1451 | if (n < 0) | ||
1452 | goto out_free; | ||
2261 | 1453 | ||
2262 | if (!validate_event(&fake_pmu, leader)) | 1454 | fake_cpuc->n_events = n; |
2263 | return -ENOSPC; | 1455 | n = collect_events(fake_cpuc, event, false); |
1456 | if (n < 0) | ||
1457 | goto out_free; | ||
2264 | 1458 | ||
2265 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | 1459 | fake_cpuc->n_events = n; |
2266 | if (!validate_event(&fake_pmu, sibling)) | ||
2267 | return -ENOSPC; | ||
2268 | } | ||
2269 | 1460 | ||
2270 | if (!validate_event(&fake_pmu, event)) | 1461 | ret = x86_schedule_events(fake_cpuc, n, NULL); |
2271 | return -ENOSPC; | ||
2272 | 1462 | ||
2273 | return 0; | 1463 | out_free: |
1464 | kfree(fake_cpuc); | ||
1465 | out: | ||
1466 | return ret; | ||
2274 | } | 1467 | } |
2275 | 1468 | ||
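validate_group() above never touches live per-cpu state: it allocates a scratch cpu_hw_events, collects the leader's existing siblings plus the new event into it, and runs the scheduler purely to see whether the group could ever fit. A hedged sketch of that shape, with the collect/schedule helpers reduced to simple counting:

#include <stdio.h>
#include <stdlib.h>

#define NUM_COUNTERS 4

struct scratch_cpuc { int n_events; };

/* pretend "collect": just count group members, fail if too many */
static int collect(struct scratch_cpuc *c, int nr_new)
{
        if (c->n_events + nr_new > NUM_COUNTERS)
                return -1;
        c->n_events += nr_new;
        return c->n_events;
}

static int validate_group(int existing_siblings)
{
        struct scratch_cpuc *fake = calloc(1, sizeof(*fake));
        int ret = -1;

        if (!fake)
                return -1;
        if (collect(fake, existing_siblings) < 0)       /* leader + siblings */
                goto out;
        if (collect(fake, 1) < 0)                       /* the new event */
                goto out;
        ret = 0;                                        /* dry run fits */
out:
        free(fake);
        return ret;
}

int main(void)
{
        printf("group of 2 + new event: %s\n", validate_group(2) ? "rejected" : "ok");
        printf("group of 4 + new event: %s\n", validate_group(4) ? "rejected" : "ok");
        return 0;
}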
2276 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1469 | const struct pmu *hw_perf_event_init(struct perf_event *event) |
2277 | { | 1470 | { |
1471 | const struct pmu *tmp; | ||
2278 | int err; | 1472 | int err; |
2279 | 1473 | ||
2280 | err = __hw_perf_event_init(event); | 1474 | err = __hw_perf_event_init(event); |
2281 | if (!err) { | 1475 | if (!err) { |
1476 | /* | ||
1477 | * we temporarily connect event to its pmu | ||
1478 | * such that validate_group() can classify | ||
1479 | * it as an x86 event using is_x86_event() | ||
1480 | */ | ||
1481 | tmp = event->pmu; | ||
1482 | event->pmu = &pmu; | ||
1483 | |||
2282 | if (event->group_leader != event) | 1484 | if (event->group_leader != event) |
2283 | err = validate_group(event); | 1485 | err = validate_group(event); |
1486 | |||
1487 | event->pmu = tmp; | ||
2284 | } | 1488 | } |
2285 | if (err) { | 1489 | if (err) { |
2286 | if (event->destroy) | 1490 | if (event->destroy) |
@@ -2304,7 +1508,6 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip) | |||
2304 | 1508 | ||
2305 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | 1509 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); |
2306 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); | 1510 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); |
2307 | static DEFINE_PER_CPU(int, in_ignored_frame); | ||
2308 | 1511 | ||
2309 | 1512 | ||
2310 | static void | 1513 | static void |
@@ -2320,10 +1523,6 @@ static void backtrace_warning(void *data, char *msg) | |||
2320 | 1523 | ||
2321 | static int backtrace_stack(void *data, char *name) | 1524 | static int backtrace_stack(void *data, char *name) |
2322 | { | 1525 | { |
2323 | per_cpu(in_ignored_frame, smp_processor_id()) = | ||
2324 | x86_is_stack_id(NMI_STACK, name) || | ||
2325 | x86_is_stack_id(DEBUG_STACK, name); | ||
2326 | |||
2327 | return 0; | 1526 | return 0; |
2328 | } | 1527 | } |
2329 | 1528 | ||
@@ -2331,9 +1530,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) | |||
2331 | { | 1530 | { |
2332 | struct perf_callchain_entry *entry = data; | 1531 | struct perf_callchain_entry *entry = data; |
2333 | 1532 | ||
2334 | if (per_cpu(in_ignored_frame, smp_processor_id())) | ||
2335 | return; | ||
2336 | |||
2337 | if (reliable) | 1533 | if (reliable) |
2338 | callchain_store(entry, addr); | 1534 | callchain_store(entry, addr); |
2339 | } | 1535 | } |
@@ -2440,9 +1636,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
2440 | 1636 | ||
2441 | is_user = user_mode(regs); | 1637 | is_user = user_mode(regs); |
2442 | 1638 | ||
2443 | if (!current || current->pid == 0) | ||
2444 | return; | ||
2445 | |||
2446 | if (is_user && current->state != TASK_RUNNING) | 1639 | if (is_user && current->state != TASK_RUNNING) |
2447 | return; | 1640 | return; |
2448 | 1641 | ||
@@ -2472,4 +1665,25 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
2472 | void hw_perf_event_setup_online(int cpu) | 1665 | void hw_perf_event_setup_online(int cpu) |
2473 | { | 1666 | { |
2474 | init_debug_store_on_cpu(cpu); | 1667 | init_debug_store_on_cpu(cpu); |
1668 | |||
1669 | switch (boot_cpu_data.x86_vendor) { | ||
1670 | case X86_VENDOR_AMD: | ||
1671 | amd_pmu_cpu_online(cpu); | ||
1672 | break; | ||
1673 | default: | ||
1674 | return; | ||
1675 | } | ||
1676 | } | ||
1677 | |||
1678 | void hw_perf_event_setup_offline(int cpu) | ||
1679 | { | ||
1680 | init_debug_store_on_cpu(cpu); | ||
1681 | |||
1682 | switch (boot_cpu_data.x86_vendor) { | ||
1683 | case X86_VENDOR_AMD: | ||
1684 | amd_pmu_cpu_offline(cpu); | ||
1685 | break; | ||
1686 | default: | ||
1687 | return; | ||
1688 | } | ||
2475 | } | 1689 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c new file mode 100644 index 000000000000..8f3dbfda3c4f --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -0,0 +1,416 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_AMD | ||
2 | |||
3 | static DEFINE_RAW_SPINLOCK(amd_nb_lock); | ||
4 | |||
5 | static __initconst u64 amd_hw_cache_event_ids | ||
6 | [PERF_COUNT_HW_CACHE_MAX] | ||
7 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
8 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
9 | { | ||
10 | [ C(L1D) ] = { | ||
11 | [ C(OP_READ) ] = { | ||
12 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ | ||
13 | [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ | ||
14 | }, | ||
15 | [ C(OP_WRITE) ] = { | ||
16 | [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ | ||
17 | [ C(RESULT_MISS) ] = 0, | ||
18 | }, | ||
19 | [ C(OP_PREFETCH) ] = { | ||
20 | [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */ | ||
21 | [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */ | ||
22 | }, | ||
23 | }, | ||
24 | [ C(L1I ) ] = { | ||
25 | [ C(OP_READ) ] = { | ||
26 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */ | ||
27 | [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */ | ||
28 | }, | ||
29 | [ C(OP_WRITE) ] = { | ||
30 | [ C(RESULT_ACCESS) ] = -1, | ||
31 | [ C(RESULT_MISS) ] = -1, | ||
32 | }, | ||
33 | [ C(OP_PREFETCH) ] = { | ||
34 | [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */ | ||
35 | [ C(RESULT_MISS) ] = 0, | ||
36 | }, | ||
37 | }, | ||
38 | [ C(LL ) ] = { | ||
39 | [ C(OP_READ) ] = { | ||
40 | [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */ | ||
41 | [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */ | ||
42 | }, | ||
43 | [ C(OP_WRITE) ] = { | ||
44 | [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */ | ||
45 | [ C(RESULT_MISS) ] = 0, | ||
46 | }, | ||
47 | [ C(OP_PREFETCH) ] = { | ||
48 | [ C(RESULT_ACCESS) ] = 0, | ||
49 | [ C(RESULT_MISS) ] = 0, | ||
50 | }, | ||
51 | }, | ||
52 | [ C(DTLB) ] = { | ||
53 | [ C(OP_READ) ] = { | ||
54 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ | ||
55 | [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */ | ||
56 | }, | ||
57 | [ C(OP_WRITE) ] = { | ||
58 | [ C(RESULT_ACCESS) ] = 0, | ||
59 | [ C(RESULT_MISS) ] = 0, | ||
60 | }, | ||
61 | [ C(OP_PREFETCH) ] = { | ||
62 | [ C(RESULT_ACCESS) ] = 0, | ||
63 | [ C(RESULT_MISS) ] = 0, | ||
64 | }, | ||
65 | }, | ||
66 | [ C(ITLB) ] = { | ||
67 | [ C(OP_READ) ] = { | ||
68 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */ | ||
69 | [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ | ||
70 | }, | ||
71 | [ C(OP_WRITE) ] = { | ||
72 | [ C(RESULT_ACCESS) ] = -1, | ||
73 | [ C(RESULT_MISS) ] = -1, | ||
74 | }, | ||
75 | [ C(OP_PREFETCH) ] = { | ||
76 | [ C(RESULT_ACCESS) ] = -1, | ||
77 | [ C(RESULT_MISS) ] = -1, | ||
78 | }, | ||
79 | }, | ||
80 | [ C(BPU ) ] = { | ||
81 | [ C(OP_READ) ] = { | ||
82 | [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */ | ||
83 | [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */ | ||
84 | }, | ||
85 | [ C(OP_WRITE) ] = { | ||
86 | [ C(RESULT_ACCESS) ] = -1, | ||
87 | [ C(RESULT_MISS) ] = -1, | ||
88 | }, | ||
89 | [ C(OP_PREFETCH) ] = { | ||
90 | [ C(RESULT_ACCESS) ] = -1, | ||
91 | [ C(RESULT_MISS) ] = -1, | ||
92 | }, | ||
93 | }, | ||
94 | }; | ||
95 | |||
96 | /* | ||
97 | * AMD Performance Monitor K7 and later. | ||
98 | */ | ||
99 | static const u64 amd_perfmon_event_map[] = | ||
100 | { | ||
101 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, | ||
102 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
103 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, | ||
104 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, | ||
105 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
106 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
107 | }; | ||
108 | |||
109 | static u64 amd_pmu_event_map(int hw_event) | ||
110 | { | ||
111 | return amd_perfmon_event_map[hw_event]; | ||
112 | } | ||
113 | |||
114 | static u64 amd_pmu_raw_event(u64 hw_event) | ||
115 | { | ||
116 | #define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL | ||
117 | #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL | ||
118 | #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL | ||
119 | #define K7_EVNTSEL_INV_MASK 0x000800000ULL | ||
120 | #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL | ||
121 | |||
122 | #define K7_EVNTSEL_MASK \ | ||
123 | (K7_EVNTSEL_EVENT_MASK | \ | ||
124 | K7_EVNTSEL_UNIT_MASK | \ | ||
125 | K7_EVNTSEL_EDGE_MASK | \ | ||
126 | K7_EVNTSEL_INV_MASK | \ | ||
127 | K7_EVNTSEL_REG_MASK) | ||
128 | |||
129 | return hw_event & K7_EVNTSEL_MASK; | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * AMD64 events are detected based on their event codes. | ||
134 | */ | ||
135 | static inline int amd_is_nb_event(struct hw_perf_event *hwc) | ||
136 | { | ||
137 | return (hwc->config & 0xe0) == 0xe0; | ||
138 | } | ||
139 | |||
140 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | ||
141 | struct perf_event *event) | ||
142 | { | ||
143 | struct hw_perf_event *hwc = &event->hw; | ||
144 | struct amd_nb *nb = cpuc->amd_nb; | ||
145 | int i; | ||
146 | |||
147 | /* | ||
148 | * only care about NB events | ||
149 | */ | ||
150 | if (!(nb && amd_is_nb_event(hwc))) | ||
151 | return; | ||
152 | |||
153 | /* | ||
154 | * need to scan whole list because event may not have | ||
155 | * been assigned during scheduling | ||
156 | * | ||
157 | * no race condition possible because event can only | ||
158 | * be removed on one CPU at a time AND PMU is disabled | ||
159 | * when we come here | ||
160 | */ | ||
161 | for (i = 0; i < x86_pmu.num_events; i++) { | ||
162 | if (nb->owners[i] == event) { | ||
163 | cmpxchg(nb->owners+i, event, NULL); | ||
164 | break; | ||
165 | } | ||
166 | } | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * AMD64 NorthBridge events need special treatment because | ||
171 | * counter access needs to be synchronized across all cores | ||
172 | * of a package. Refer to BKDG section 3.12 | ||
173 | * | ||
174 | * NB events are events measuring L3 cache, Hypertransport | ||
175 | * traffic. They are identified by an event code >= 0xe00. | ||
176 | * They measure events on the NorthBridge which is shared | ||
177 | * by all cores on a package. NB events are counted on a | ||
178 | * shared set of counters. When a NB event is programmed | ||
179 | * in a counter, the data actually comes from a shared | ||
180 | * counter. Thus, access to those counters needs to be | ||
181 | * synchronized. | ||
182 | * | ||
183 | * We implement the synchronization such that no two cores | ||
184 | * can be measuring NB events using the same counters. Thus, | ||
185 | * we maintain a per-NB allocation table. The available slot | ||
186 | * is propagated using the event_constraint structure. | ||
187 | * | ||
188 | * We provide only one choice for each NB event based on | ||
189 | * the fact that only NB events have restrictions. Consequently, | ||
190 | * if a counter is available, there is a guarantee the NB event | ||
191 | * will be assigned to it. If no slot is available, an empty | ||
192 | * constraint is returned and scheduling will eventually fail | ||
193 | * for this event. | ||
194 | * | ||
195 | * Note that all cores attached to the same NB compete for the same | ||
196 | * counters to host NB events, this is why we use atomic ops. Some | ||
197 | * multi-chip CPUs may have more than one NB. | ||
198 | * | ||
199 | * Given that resources are allocated (cmpxchg), they must be | ||
200 | * eventually freed for others to use. This is accomplished by | ||
201 | * calling amd_put_event_constraints(). | ||
202 | * | ||
203 | * Non NB events are not impacted by this restriction. | ||
204 | */ | ||
205 | static struct event_constraint * | ||
206 | amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
207 | { | ||
208 | struct hw_perf_event *hwc = &event->hw; | ||
209 | struct amd_nb *nb = cpuc->amd_nb; | ||
210 | struct perf_event *old = NULL; | ||
211 | int max = x86_pmu.num_events; | ||
212 | int i, j, k = -1; | ||
213 | |||
214 | /* | ||
215 | * if not NB event or no NB, then no constraints | ||
216 | */ | ||
217 | if (!(nb && amd_is_nb_event(hwc))) | ||
218 | return &unconstrained; | ||
219 | |||
220 | /* | ||
221 | * detect if already present, if so reuse | ||
222 | * | ||
223 | * cannot merge with actual allocation | ||
224 | * because of possible holes | ||
225 | * | ||
226 | * event can already be present yet not assigned (in hwc->idx) | ||
227 | * because of successive calls to x86_schedule_events() from | ||
228 | * hw_perf_group_sched_in() without hw_perf_enable() | ||
229 | */ | ||
230 | for (i = 0; i < max; i++) { | ||
231 | /* | ||
232 | * keep track of first free slot | ||
233 | */ | ||
234 | if (k == -1 && !nb->owners[i]) | ||
235 | k = i; | ||
236 | |||
237 | /* already present, reuse */ | ||
238 | if (nb->owners[i] == event) | ||
239 | goto done; | ||
240 | } | ||
241 | /* | ||
242 | * not present, so grab a new slot | ||
243 | * starting either at: | ||
244 | */ | ||
245 | if (hwc->idx != -1) { | ||
246 | /* previous assignment */ | ||
247 | i = hwc->idx; | ||
248 | } else if (k != -1) { | ||
249 | /* start from free slot found */ | ||
250 | i = k; | ||
251 | } else { | ||
252 | /* | ||
253 | * event not found, no slot found in | ||
254 | * first pass, try again from the | ||
255 | * beginning | ||
256 | */ | ||
257 | i = 0; | ||
258 | } | ||
259 | j = i; | ||
260 | do { | ||
261 | old = cmpxchg(nb->owners+i, NULL, event); | ||
262 | if (!old) | ||
263 | break; | ||
264 | if (++i == max) | ||
265 | i = 0; | ||
266 | } while (i != j); | ||
267 | done: | ||
268 | if (!old) | ||
269 | return &nb->event_constraints[i]; | ||
270 | |||
271 | return &emptyconstraint; | ||
272 | } | ||
273 | |||
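amd_get_event_constraints() above claims a north-bridge counter slot by scanning nb->owners[] from a starting index and publishing the claim with cmpxchg(), so two cores sharing the same NB can never pick the same counter. A userspace approximation of that ring scan using C11 atomics (the owner array and event tokens are stand-ins, not the kernel's amd_nb, and the "already ours" handling is folded into the CAS loop rather than done in a separate pass):

#include <stdatomic.h>
#include <stdio.h>

#define NB_COUNTERS 4

/* one shared owner slot per north-bridge counter */
static _Atomic(void *) owners[NB_COUNTERS];

/* try to claim a slot for 'event', starting at 'start'; -1 if the NB is full */
static int claim_slot(void *event, int start)
{
        int i = start;

        do {
                void *expected = NULL;
                if (atomic_compare_exchange_strong(&owners[i], &expected, event))
                        return i;               /* we own counter i now */
                if (expected == event)
                        return i;               /* already ours, reuse it */
                if (++i == NB_COUNTERS)
                        i = 0;
        } while (i != start);

        return -1;                              /* emptyconstraint case */
}

int main(void)
{
        int ev_a, ev_b;

        printf("event A got slot %d\n", claim_slot(&ev_a, 0));
        printf("event B got slot %d\n", claim_slot(&ev_b, 0));
        return 0;
}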
274 | static __initconst struct x86_pmu amd_pmu = { | ||
275 | .name = "AMD", | ||
276 | .handle_irq = x86_pmu_handle_irq, | ||
277 | .disable_all = x86_pmu_disable_all, | ||
278 | .enable_all = x86_pmu_enable_all, | ||
279 | .enable = x86_pmu_enable_event, | ||
280 | .disable = x86_pmu_disable_event, | ||
281 | .eventsel = MSR_K7_EVNTSEL0, | ||
282 | .perfctr = MSR_K7_PERFCTR0, | ||
283 | .event_map = amd_pmu_event_map, | ||
284 | .raw_event = amd_pmu_raw_event, | ||
285 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | ||
286 | .num_events = 4, | ||
287 | .event_bits = 48, | ||
288 | .event_mask = (1ULL << 48) - 1, | ||
289 | .apic = 1, | ||
290 | /* use highest bit to detect overflow */ | ||
291 | .max_period = (1ULL << 47) - 1, | ||
292 | .get_event_constraints = amd_get_event_constraints, | ||
293 | .put_event_constraints = amd_put_event_constraints | ||
294 | }; | ||
295 | |||
296 | static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) | ||
297 | { | ||
298 | struct amd_nb *nb; | ||
299 | int i; | ||
300 | |||
301 | nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL); | ||
302 | if (!nb) | ||
303 | return NULL; | ||
304 | |||
305 | memset(nb, 0, sizeof(*nb)); | ||
306 | nb->nb_id = nb_id; | ||
307 | |||
308 | /* | ||
309 | * initialize all possible NB constraints | ||
310 | */ | ||
311 | for (i = 0; i < x86_pmu.num_events; i++) { | ||
312 | set_bit(i, nb->event_constraints[i].idxmsk); | ||
313 | nb->event_constraints[i].weight = 1; | ||
314 | } | ||
315 | return nb; | ||
316 | } | ||
317 | |||
318 | static void amd_pmu_cpu_online(int cpu) | ||
319 | { | ||
320 | struct cpu_hw_events *cpu1, *cpu2; | ||
321 | struct amd_nb *nb = NULL; | ||
322 | int i, nb_id; | ||
323 | |||
324 | if (boot_cpu_data.x86_max_cores < 2) | ||
325 | return; | ||
326 | |||
327 | /* | ||
328 | * function may be called too early in the | ||
329 | * boot process, in which case nb_id is bogus | ||
330 | */ | ||
331 | nb_id = amd_get_nb_id(cpu); | ||
332 | if (nb_id == BAD_APICID) | ||
333 | return; | ||
334 | |||
335 | cpu1 = &per_cpu(cpu_hw_events, cpu); | ||
336 | cpu1->amd_nb = NULL; | ||
337 | |||
338 | raw_spin_lock(&amd_nb_lock); | ||
339 | |||
340 | for_each_online_cpu(i) { | ||
341 | cpu2 = &per_cpu(cpu_hw_events, i); | ||
342 | nb = cpu2->amd_nb; | ||
343 | if (!nb) | ||
344 | continue; | ||
345 | if (nb->nb_id == nb_id) | ||
346 | goto found; | ||
347 | } | ||
348 | |||
349 | nb = amd_alloc_nb(cpu, nb_id); | ||
350 | if (!nb) { | ||
351 | pr_err("perf_events: failed NB allocation for CPU%d\n", cpu); | ||
352 | raw_spin_unlock(&amd_nb_lock); | ||
353 | return; | ||
354 | } | ||
355 | found: | ||
356 | nb->refcnt++; | ||
357 | cpu1->amd_nb = nb; | ||
358 | |||
359 | raw_spin_unlock(&amd_nb_lock); | ||
360 | } | ||
361 | |||
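amd_pmu_cpu_online() above either finds an amd_nb descriptor already created by another core on the same north bridge (same nb_id) and bumps its refcount, or allocates a fresh one. A small model of that share-or-allocate step, ignoring the spinlock (the per-cpu table and nb_id values are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

struct nb  { int nb_id; int refcnt; };
struct cpu { int nb_id; struct nb *nb; };

#define NCPUS 4
static struct cpu cpus[NCPUS] = { {0, 0}, {0, 0}, {1, 0}, {1, 0} };  /* two NBs, two cores each */

static void cpu_online(int cpu)
{
        int id = cpus[cpu].nb_id;

        /* look for an existing descriptor with the same nb_id and share it */
        for (int i = 0; i < NCPUS; i++) {
                if (cpus[i].nb && cpus[i].nb->nb_id == id) {
                        cpus[cpu].nb = cpus[i].nb;
                        cpus[cpu].nb->refcnt++;
                        return;
                }
        }

        /* none found: allocate a new one for this north bridge */
        cpus[cpu].nb = calloc(1, sizeof(struct nb));
        if (!cpus[cpu].nb)
                exit(1);
        cpus[cpu].nb->nb_id = id;
        cpus[cpu].nb->refcnt = 1;
}

int main(void)
{
        for (int c = 0; c < NCPUS; c++)
                cpu_online(c);
        printf("NB0 refcnt=%d, NB1 refcnt=%d\n",
               cpus[0].nb->refcnt, cpus[2].nb->refcnt);         /* prints 2 and 2 */
        return 0;
}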
362 | static void amd_pmu_cpu_offline(int cpu) | ||
363 | { | ||
364 | struct cpu_hw_events *cpuhw; | ||
365 | |||
366 | if (boot_cpu_data.x86_max_cores < 2) | ||
367 | return; | ||
368 | |||
369 | cpuhw = &per_cpu(cpu_hw_events, cpu); | ||
370 | |||
371 | raw_spin_lock(&amd_nb_lock); | ||
372 | |||
373 | if (--cpuhw->amd_nb->refcnt == 0) | ||
374 | kfree(cpuhw->amd_nb); | ||
375 | |||
376 | cpuhw->amd_nb = NULL; | ||
377 | |||
378 | raw_spin_unlock(&amd_nb_lock); | ||
379 | } | ||
380 | |||
381 | static __init int amd_pmu_init(void) | ||
382 | { | ||
383 | /* Performance-monitoring supported from K7 and later: */ | ||
384 | if (boot_cpu_data.x86 < 6) | ||
385 | return -ENODEV; | ||
386 | |||
387 | x86_pmu = amd_pmu; | ||
388 | |||
389 | /* Events are common for all AMDs */ | ||
390 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | ||
391 | sizeof(hw_cache_event_ids)); | ||
392 | |||
393 | /* | ||
394 | * explicitly initialize the boot cpu, other cpus will get | ||
395 | * the cpu hotplug callbacks from smp_init() | ||
396 | */ | ||
397 | amd_pmu_cpu_online(smp_processor_id()); | ||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | #else /* CONFIG_CPU_SUP_AMD */ | ||
402 | |||
403 | static int amd_pmu_init(void) | ||
404 | { | ||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static void amd_pmu_cpu_online(int cpu) | ||
409 | { | ||
410 | } | ||
411 | |||
412 | static void amd_pmu_cpu_offline(int cpu) | ||
413 | { | ||
414 | } | ||
415 | |||
416 | #endif | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c new file mode 100644 index 000000000000..cf6590cf4a5f --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -0,0 +1,971 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | ||
2 | |||
3 | /* | ||
4 | * Intel PerfMon v3. Used on Core2 and later. | ||
5 | */ | ||
6 | static const u64 intel_perfmon_event_map[] = | ||
7 | { | ||
8 | [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, | ||
9 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
10 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, | ||
11 | [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, | ||
12 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
13 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
14 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, | ||
15 | }; | ||
16 | |||
17 | static struct event_constraint intel_core_event_constraints[] = | ||
18 | { | ||
19 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | ||
20 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | ||
21 | INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | ||
22 | INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | ||
23 | INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ | ||
24 | INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */ | ||
25 | EVENT_CONSTRAINT_END | ||
26 | }; | ||
27 | |||
28 | static struct event_constraint intel_core2_event_constraints[] = | ||
29 | { | ||
30 | FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | ||
31 | FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | ||
32 | INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | ||
33 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | ||
34 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | ||
35 | INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | ||
36 | INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | ||
37 | INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ | ||
38 | INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ | ||
39 | INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ | ||
40 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ | ||
41 | EVENT_CONSTRAINT_END | ||
42 | }; | ||
43 | |||
44 | static struct event_constraint intel_nehalem_event_constraints[] = | ||
45 | { | ||
46 | FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | ||
47 | FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | ||
48 | INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ | ||
49 | INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ | ||
50 | INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ | ||
51 | INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ | ||
52 | INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */ | ||
53 | INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ | ||
54 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | ||
55 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ | ||
56 | EVENT_CONSTRAINT_END | ||
57 | }; | ||
58 | |||
59 | static struct event_constraint intel_westmere_event_constraints[] = | ||
60 | { | ||
61 | FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | ||
62 | FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | ||
63 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | ||
64 | INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ | ||
65 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ | ||
66 | EVENT_CONSTRAINT_END | ||
67 | }; | ||
68 | |||
69 | static struct event_constraint intel_gen_event_constraints[] = | ||
70 | { | ||
71 | FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | ||
72 | FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | ||
73 | EVENT_CONSTRAINT_END | ||
74 | }; | ||
75 | |||
76 | static u64 intel_pmu_event_map(int hw_event) | ||
77 | { | ||
78 | return intel_perfmon_event_map[hw_event]; | ||
79 | } | ||
80 | |||
81 | static __initconst u64 westmere_hw_cache_event_ids | ||
82 | [PERF_COUNT_HW_CACHE_MAX] | ||
83 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
84 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
85 | { | ||
86 | [ C(L1D) ] = { | ||
87 | [ C(OP_READ) ] = { | ||
88 | [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ | ||
89 | [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ | ||
90 | }, | ||
91 | [ C(OP_WRITE) ] = { | ||
92 | [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */ | ||
93 | [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ | ||
94 | }, | ||
95 | [ C(OP_PREFETCH) ] = { | ||
96 | [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ | ||
97 | [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ | ||
98 | }, | ||
99 | }, | ||
100 | [ C(L1I ) ] = { | ||
101 | [ C(OP_READ) ] = { | ||
102 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ | ||
103 | [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ | ||
104 | }, | ||
105 | [ C(OP_WRITE) ] = { | ||
106 | [ C(RESULT_ACCESS) ] = -1, | ||
107 | [ C(RESULT_MISS) ] = -1, | ||
108 | }, | ||
109 | [ C(OP_PREFETCH) ] = { | ||
110 | [ C(RESULT_ACCESS) ] = 0x0, | ||
111 | [ C(RESULT_MISS) ] = 0x0, | ||
112 | }, | ||
113 | }, | ||
114 | [ C(LL ) ] = { | ||
115 | [ C(OP_READ) ] = { | ||
116 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | ||
117 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | ||
118 | }, | ||
119 | [ C(OP_WRITE) ] = { | ||
120 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | ||
121 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | ||
122 | }, | ||
123 | [ C(OP_PREFETCH) ] = { | ||
124 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | ||
125 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | ||
126 | }, | ||
127 | }, | ||
128 | [ C(DTLB) ] = { | ||
129 | [ C(OP_READ) ] = { | ||
130 | [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ | ||
131 | [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ | ||
132 | }, | ||
133 | [ C(OP_WRITE) ] = { | ||
134 | [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */ | ||
135 | [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ | ||
136 | }, | ||
137 | [ C(OP_PREFETCH) ] = { | ||
138 | [ C(RESULT_ACCESS) ] = 0x0, | ||
139 | [ C(RESULT_MISS) ] = 0x0, | ||
140 | }, | ||
141 | }, | ||
142 | [ C(ITLB) ] = { | ||
143 | [ C(OP_READ) ] = { | ||
144 | [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ | ||
145 | [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */ | ||
146 | }, | ||
147 | [ C(OP_WRITE) ] = { | ||
148 | [ C(RESULT_ACCESS) ] = -1, | ||
149 | [ C(RESULT_MISS) ] = -1, | ||
150 | }, | ||
151 | [ C(OP_PREFETCH) ] = { | ||
152 | [ C(RESULT_ACCESS) ] = -1, | ||
153 | [ C(RESULT_MISS) ] = -1, | ||
154 | }, | ||
155 | }, | ||
156 | [ C(BPU ) ] = { | ||
157 | [ C(OP_READ) ] = { | ||
158 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
159 | [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ | ||
160 | }, | ||
161 | [ C(OP_WRITE) ] = { | ||
162 | [ C(RESULT_ACCESS) ] = -1, | ||
163 | [ C(RESULT_MISS) ] = -1, | ||
164 | }, | ||
165 | [ C(OP_PREFETCH) ] = { | ||
166 | [ C(RESULT_ACCESS) ] = -1, | ||
167 | [ C(RESULT_MISS) ] = -1, | ||
168 | }, | ||
169 | }, | ||
170 | }; | ||
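Note: all four hw-cache tables in this file are indexed the same way. The generic cache event config packs cache id, operation and result into one value; the lookup returns the model-specific raw encoding, where 0 is treated by the generic code as "not counted on this model" and -1 as "invalid combination". A rough sketch of the lookup, assuming the decomposition done by the generic x86 code in perf_event.c (not shown here):

	u64 cache_type   =  attr_config        & 0xff;	/* C(L1D), C(LL), ...      */
	u64 cache_op     = (attr_config >>  8) & 0xff;	/* C(OP_READ), C(OP_WRITE) */
	u64 cache_result = (attr_config >> 16) & 0xff;	/* C(RESULT_ACCESS/MISS)   */

	u64 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
	/* val == 0: not available on this model; val == (u64)-1: rejected */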
171 | |||
172 | static __initconst u64 nehalem_hw_cache_event_ids | ||
173 | [PERF_COUNT_HW_CACHE_MAX] | ||
174 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
175 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
176 | { | ||
177 | [ C(L1D) ] = { | ||
178 | [ C(OP_READ) ] = { | ||
179 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ | ||
180 | [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ | ||
181 | }, | ||
182 | [ C(OP_WRITE) ] = { | ||
183 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ | ||
184 | [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ | ||
185 | }, | ||
186 | [ C(OP_PREFETCH) ] = { | ||
187 | [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ | ||
188 | [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ | ||
189 | }, | ||
190 | }, | ||
191 | [ C(L1I ) ] = { | ||
192 | [ C(OP_READ) ] = { | ||
193 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ | ||
194 | [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ | ||
195 | }, | ||
196 | [ C(OP_WRITE) ] = { | ||
197 | [ C(RESULT_ACCESS) ] = -1, | ||
198 | [ C(RESULT_MISS) ] = -1, | ||
199 | }, | ||
200 | [ C(OP_PREFETCH) ] = { | ||
201 | [ C(RESULT_ACCESS) ] = 0x0, | ||
202 | [ C(RESULT_MISS) ] = 0x0, | ||
203 | }, | ||
204 | }, | ||
205 | [ C(LL ) ] = { | ||
206 | [ C(OP_READ) ] = { | ||
207 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | ||
208 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | ||
209 | }, | ||
210 | [ C(OP_WRITE) ] = { | ||
211 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | ||
212 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | ||
213 | }, | ||
214 | [ C(OP_PREFETCH) ] = { | ||
215 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | ||
216 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | ||
217 | }, | ||
218 | }, | ||
219 | [ C(DTLB) ] = { | ||
220 | [ C(OP_READ) ] = { | ||
221 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ | ||
222 | [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ | ||
223 | }, | ||
224 | [ C(OP_WRITE) ] = { | ||
225 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ | ||
226 | [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ | ||
227 | }, | ||
228 | [ C(OP_PREFETCH) ] = { | ||
229 | [ C(RESULT_ACCESS) ] = 0x0, | ||
230 | [ C(RESULT_MISS) ] = 0x0, | ||
231 | }, | ||
232 | }, | ||
233 | [ C(ITLB) ] = { | ||
234 | [ C(OP_READ) ] = { | ||
235 | [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ | ||
236 | [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ | ||
237 | }, | ||
238 | [ C(OP_WRITE) ] = { | ||
239 | [ C(RESULT_ACCESS) ] = -1, | ||
240 | [ C(RESULT_MISS) ] = -1, | ||
241 | }, | ||
242 | [ C(OP_PREFETCH) ] = { | ||
243 | [ C(RESULT_ACCESS) ] = -1, | ||
244 | [ C(RESULT_MISS) ] = -1, | ||
245 | }, | ||
246 | }, | ||
247 | [ C(BPU ) ] = { | ||
248 | [ C(OP_READ) ] = { | ||
249 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
250 | [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ | ||
251 | }, | ||
252 | [ C(OP_WRITE) ] = { | ||
253 | [ C(RESULT_ACCESS) ] = -1, | ||
254 | [ C(RESULT_MISS) ] = -1, | ||
255 | }, | ||
256 | [ C(OP_PREFETCH) ] = { | ||
257 | [ C(RESULT_ACCESS) ] = -1, | ||
258 | [ C(RESULT_MISS) ] = -1, | ||
259 | }, | ||
260 | }, | ||
261 | }; | ||
262 | |||
263 | static __initconst u64 core2_hw_cache_event_ids | ||
264 | [PERF_COUNT_HW_CACHE_MAX] | ||
265 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
266 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
267 | { | ||
268 | [ C(L1D) ] = { | ||
269 | [ C(OP_READ) ] = { | ||
270 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ | ||
271 | [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ | ||
272 | }, | ||
273 | [ C(OP_WRITE) ] = { | ||
274 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ | ||
275 | [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ | ||
276 | }, | ||
277 | [ C(OP_PREFETCH) ] = { | ||
278 | [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ | ||
279 | [ C(RESULT_MISS) ] = 0, | ||
280 | }, | ||
281 | }, | ||
282 | [ C(L1I ) ] = { | ||
283 | [ C(OP_READ) ] = { | ||
284 | [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ | ||
285 | [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ | ||
286 | }, | ||
287 | [ C(OP_WRITE) ] = { | ||
288 | [ C(RESULT_ACCESS) ] = -1, | ||
289 | [ C(RESULT_MISS) ] = -1, | ||
290 | }, | ||
291 | [ C(OP_PREFETCH) ] = { | ||
292 | [ C(RESULT_ACCESS) ] = 0, | ||
293 | [ C(RESULT_MISS) ] = 0, | ||
294 | }, | ||
295 | }, | ||
296 | [ C(LL ) ] = { | ||
297 | [ C(OP_READ) ] = { | ||
298 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ | ||
299 | [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ | ||
300 | }, | ||
301 | [ C(OP_WRITE) ] = { | ||
302 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ | ||
303 | [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ | ||
304 | }, | ||
305 | [ C(OP_PREFETCH) ] = { | ||
306 | [ C(RESULT_ACCESS) ] = 0, | ||
307 | [ C(RESULT_MISS) ] = 0, | ||
308 | }, | ||
309 | }, | ||
310 | [ C(DTLB) ] = { | ||
311 | [ C(OP_READ) ] = { | ||
312 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ | ||
313 | [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */ | ||
314 | }, | ||
315 | [ C(OP_WRITE) ] = { | ||
316 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ | ||
317 | [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ | ||
318 | }, | ||
319 | [ C(OP_PREFETCH) ] = { | ||
320 | [ C(RESULT_ACCESS) ] = 0, | ||
321 | [ C(RESULT_MISS) ] = 0, | ||
322 | }, | ||
323 | }, | ||
324 | [ C(ITLB) ] = { | ||
325 | [ C(OP_READ) ] = { | ||
326 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | ||
327 | [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ | ||
328 | }, | ||
329 | [ C(OP_WRITE) ] = { | ||
330 | [ C(RESULT_ACCESS) ] = -1, | ||
331 | [ C(RESULT_MISS) ] = -1, | ||
332 | }, | ||
333 | [ C(OP_PREFETCH) ] = { | ||
334 | [ C(RESULT_ACCESS) ] = -1, | ||
335 | [ C(RESULT_MISS) ] = -1, | ||
336 | }, | ||
337 | }, | ||
338 | [ C(BPU ) ] = { | ||
339 | [ C(OP_READ) ] = { | ||
340 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | ||
341 | [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | ||
342 | }, | ||
343 | [ C(OP_WRITE) ] = { | ||
344 | [ C(RESULT_ACCESS) ] = -1, | ||
345 | [ C(RESULT_MISS) ] = -1, | ||
346 | }, | ||
347 | [ C(OP_PREFETCH) ] = { | ||
348 | [ C(RESULT_ACCESS) ] = -1, | ||
349 | [ C(RESULT_MISS) ] = -1, | ||
350 | }, | ||
351 | }, | ||
352 | }; | ||
353 | |||
354 | static __initconst u64 atom_hw_cache_event_ids | ||
355 | [PERF_COUNT_HW_CACHE_MAX] | ||
356 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
357 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
358 | { | ||
359 | [ C(L1D) ] = { | ||
360 | [ C(OP_READ) ] = { | ||
361 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ | ||
362 | [ C(RESULT_MISS) ] = 0, | ||
363 | }, | ||
364 | [ C(OP_WRITE) ] = { | ||
365 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ | ||
366 | [ C(RESULT_MISS) ] = 0, | ||
367 | }, | ||
368 | [ C(OP_PREFETCH) ] = { | ||
369 | [ C(RESULT_ACCESS) ] = 0x0, | ||
370 | [ C(RESULT_MISS) ] = 0, | ||
371 | }, | ||
372 | }, | ||
373 | [ C(L1I ) ] = { | ||
374 | [ C(OP_READ) ] = { | ||
375 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ | ||
376 | [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ | ||
377 | }, | ||
378 | [ C(OP_WRITE) ] = { | ||
379 | [ C(RESULT_ACCESS) ] = -1, | ||
380 | [ C(RESULT_MISS) ] = -1, | ||
381 | }, | ||
382 | [ C(OP_PREFETCH) ] = { | ||
383 | [ C(RESULT_ACCESS) ] = 0, | ||
384 | [ C(RESULT_MISS) ] = 0, | ||
385 | }, | ||
386 | }, | ||
387 | [ C(LL ) ] = { | ||
388 | [ C(OP_READ) ] = { | ||
389 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ | ||
390 | [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ | ||
391 | }, | ||
392 | [ C(OP_WRITE) ] = { | ||
393 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ | ||
394 | [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ | ||
395 | }, | ||
396 | [ C(OP_PREFETCH) ] = { | ||
397 | [ C(RESULT_ACCESS) ] = 0, | ||
398 | [ C(RESULT_MISS) ] = 0, | ||
399 | }, | ||
400 | }, | ||
401 | [ C(DTLB) ] = { | ||
402 | [ C(OP_READ) ] = { | ||
403 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ | ||
404 | [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ | ||
405 | }, | ||
406 | [ C(OP_WRITE) ] = { | ||
407 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ | ||
408 | [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ | ||
409 | }, | ||
410 | [ C(OP_PREFETCH) ] = { | ||
411 | [ C(RESULT_ACCESS) ] = 0, | ||
412 | [ C(RESULT_MISS) ] = 0, | ||
413 | }, | ||
414 | }, | ||
415 | [ C(ITLB) ] = { | ||
416 | [ C(OP_READ) ] = { | ||
417 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | ||
418 | [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ | ||
419 | }, | ||
420 | [ C(OP_WRITE) ] = { | ||
421 | [ C(RESULT_ACCESS) ] = -1, | ||
422 | [ C(RESULT_MISS) ] = -1, | ||
423 | }, | ||
424 | [ C(OP_PREFETCH) ] = { | ||
425 | [ C(RESULT_ACCESS) ] = -1, | ||
426 | [ C(RESULT_MISS) ] = -1, | ||
427 | }, | ||
428 | }, | ||
429 | [ C(BPU ) ] = { | ||
430 | [ C(OP_READ) ] = { | ||
431 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | ||
432 | [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | ||
433 | }, | ||
434 | [ C(OP_WRITE) ] = { | ||
435 | [ C(RESULT_ACCESS) ] = -1, | ||
436 | [ C(RESULT_MISS) ] = -1, | ||
437 | }, | ||
438 | [ C(OP_PREFETCH) ] = { | ||
439 | [ C(RESULT_ACCESS) ] = -1, | ||
440 | [ C(RESULT_MISS) ] = -1, | ||
441 | }, | ||
442 | }, | ||
443 | }; | ||
444 | |||
445 | static u64 intel_pmu_raw_event(u64 hw_event) | ||
446 | { | ||
447 | #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL | ||
448 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL | ||
449 | #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL | ||
450 | #define CORE_EVNTSEL_INV_MASK 0x00800000ULL | ||
451 | #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL | ||
452 | |||
453 | #define CORE_EVNTSEL_MASK \ | ||
454 | (INTEL_ARCH_EVTSEL_MASK | \ | ||
455 | INTEL_ARCH_UNIT_MASK | \ | ||
456 | INTEL_ARCH_EDGE_MASK | \ | ||
457 | INTEL_ARCH_INV_MASK | \ | ||
458 | INTEL_ARCH_CNT_MASK) | ||
459 | |||
460 | return hw_event & CORE_EVNTSEL_MASK; | ||
461 | } | ||
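Note: assuming the INTEL_ARCH_* masks match the CORE_EVNTSEL_* values defined locally above, the combined mask keeps only the event select, unit mask, edge, invert and counter-mask fields. A worked example:

	/*
	 * raw config 0x004101c0: event 0xc0, umask 0x01, plus bits 16 and 22 set
	 *
	 *   0x004101c0 & CORE_EVNTSEL_MASK (== 0xFF84FFFF)
	 *	== 0x000001c0
	 *
	 * The stripped bits (USR/OS/INT/ENABLE) are re-added by the generic
	 * code according to perf_event_attr, so a raw event cannot force them.
	 */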
462 | |||
463 | static void intel_pmu_enable_bts(u64 config) | ||
464 | { | ||
465 | unsigned long debugctlmsr; | ||
466 | |||
467 | debugctlmsr = get_debugctlmsr(); | ||
468 | |||
469 | debugctlmsr |= X86_DEBUGCTL_TR; | ||
470 | debugctlmsr |= X86_DEBUGCTL_BTS; | ||
471 | debugctlmsr |= X86_DEBUGCTL_BTINT; | ||
472 | |||
473 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) | ||
474 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; | ||
475 | |||
476 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) | ||
477 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; | ||
478 | |||
479 | update_debugctlmsr(debugctlmsr); | ||
480 | } | ||
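Note: the DEBUGCTL bits set here are the branch-trace controls of the Intel DS mechanism: TR emits branch trace messages, BTS stores them into the DS buffer, BTINT raises an interrupt at the buffer threshold, and the two OFF bits suppress recording in ring 0 / ring 3. A sketch of the attr-to-config mapping assumed by the two privilege checks (performed by the generic x86 setup code, not in this file):

	/*
	 *   !attr.exclude_user   ->  config |= ARCH_PERFMON_EVENTSEL_USR
	 *   !attr.exclude_kernel ->  config |= ARCH_PERFMON_EVENTSEL_OS
	 *
	 * so an event created with exclude_kernel arrives here without
	 * EVENTSEL_OS and gets X86_DEBUGCTL_BTS_OFF_OS, i.e. ring-0
	 * branches are not recorded.
	 */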
481 | |||
482 | static void intel_pmu_disable_bts(void) | ||
483 | { | ||
484 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
485 | unsigned long debugctlmsr; | ||
486 | |||
487 | if (!cpuc->ds) | ||
488 | return; | ||
489 | |||
490 | debugctlmsr = get_debugctlmsr(); | ||
491 | |||
492 | debugctlmsr &= | ||
493 | ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | | ||
494 | X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); | ||
495 | |||
496 | update_debugctlmsr(debugctlmsr); | ||
497 | } | ||
498 | |||
499 | static void intel_pmu_disable_all(void) | ||
500 | { | ||
501 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
502 | |||
503 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | ||
504 | |||
505 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) | ||
506 | intel_pmu_disable_bts(); | ||
507 | } | ||
508 | |||
509 | static void intel_pmu_enable_all(void) | ||
510 | { | ||
511 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
512 | |||
513 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | ||
514 | |||
515 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | ||
516 | struct perf_event *event = | ||
517 | cpuc->events[X86_PMC_IDX_FIXED_BTS]; | ||
518 | |||
519 | if (WARN_ON_ONCE(!event)) | ||
520 | return; | ||
521 | |||
522 | intel_pmu_enable_bts(event->hw.config); | ||
523 | } | ||
524 | } | ||
525 | |||
526 | static inline u64 intel_pmu_get_status(void) | ||
527 | { | ||
528 | u64 status; | ||
529 | |||
530 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | ||
531 | |||
532 | return status; | ||
533 | } | ||
534 | |||
535 | static inline void intel_pmu_ack_status(u64 ack) | ||
536 | { | ||
537 | wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); | ||
538 | } | ||
539 | |||
540 | static inline void | ||
541 | intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) | ||
542 | { | ||
543 | int idx = __idx - X86_PMC_IDX_FIXED; | ||
544 | u64 ctrl_val, mask; | ||
545 | |||
546 | mask = 0xfULL << (idx * 4); | ||
547 | |||
548 | rdmsrl(hwc->config_base, ctrl_val); | ||
549 | ctrl_val &= ~mask; | ||
550 | (void)checking_wrmsrl(hwc->config_base, ctrl_val); | ||
551 | } | ||
552 | |||
553 | static void intel_pmu_drain_bts_buffer(void) | ||
554 | { | ||
555 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
556 | struct debug_store *ds = cpuc->ds; | ||
557 | struct bts_record { | ||
558 | u64 from; | ||
559 | u64 to; | ||
560 | u64 flags; | ||
561 | }; | ||
562 | struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; | ||
563 | struct bts_record *at, *top; | ||
564 | struct perf_output_handle handle; | ||
565 | struct perf_event_header header; | ||
566 | struct perf_sample_data data; | ||
567 | struct pt_regs regs; | ||
568 | |||
569 | if (!event) | ||
570 | return; | ||
571 | |||
572 | if (!ds) | ||
573 | return; | ||
574 | |||
575 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | ||
576 | top = (struct bts_record *)(unsigned long)ds->bts_index; | ||
577 | |||
578 | if (top <= at) | ||
579 | return; | ||
580 | |||
581 | ds->bts_index = ds->bts_buffer_base; | ||
582 | |||
583 | |||
584 | data.period = event->hw.last_period; | ||
585 | data.addr = 0; | ||
586 | data.raw = NULL; | ||
587 | regs.ip = 0; | ||
588 | |||
589 | /* | ||
590 | * Prepare a generic sample, i.e. fill in the invariant fields. | ||
591 | * We will overwrite the from and to address before we output | ||
592 | * the sample. | ||
593 | */ | ||
594 | perf_prepare_sample(&header, &data, event, ®s); | ||
595 | |||
596 | if (perf_output_begin(&handle, event, | ||
597 | header.size * (top - at), 1, 1)) | ||
598 | return; | ||
599 | |||
600 | for (; at < top; at++) { | ||
601 | data.ip = at->from; | ||
602 | data.addr = at->to; | ||
603 | |||
604 | perf_output_sample(&handle, &header, &data, event); | ||
605 | } | ||
606 | |||
607 | perf_output_end(&handle); | ||
608 | |||
609 | /* There's new data available. */ | ||
610 | event->hw.interrupts++; | ||
611 | event->pending_kill = POLL_IN; | ||
612 | } | ||
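Note: a bts_record is three u64s (24 bytes), so the pointer difference top - at is the number of pending records. A small worked example under that assumption:

	/*
	 * bts_index - bts_buffer_base == 72 bytes  ->  top - at == 3 records;
	 * perf_output_begin() reserves 3 * header.size bytes up front, and the
	 * loop then emits one sample per record with ip = from, addr = to.
	 */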
613 | |||
614 | static inline void | ||
615 | intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
616 | { | ||
617 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
618 | intel_pmu_disable_bts(); | ||
619 | intel_pmu_drain_bts_buffer(); | ||
620 | return; | ||
621 | } | ||
622 | |||
623 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | ||
624 | intel_pmu_disable_fixed(hwc, idx); | ||
625 | return; | ||
626 | } | ||
627 | |||
628 | x86_pmu_disable_event(hwc, idx); | ||
629 | } | ||
630 | |||
631 | static inline void | ||
632 | intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) | ||
633 | { | ||
634 | int idx = __idx - X86_PMC_IDX_FIXED; | ||
635 | u64 ctrl_val, bits, mask; | ||
636 | int err; | ||
637 | |||
638 | /* | ||
639 | * Enable IRQ generation (0x8), | ||
640 | * and enable ring-3 counting (0x2) and ring-0 counting (0x1) | ||
641 | * if requested: | ||
642 | */ | ||
643 | bits = 0x8ULL; | ||
644 | if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) | ||
645 | bits |= 0x2; | ||
646 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | ||
647 | bits |= 0x1; | ||
648 | |||
649 | /* | ||
650 | * ANY bit is supported in v3 and up | ||
651 | */ | ||
652 | if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) | ||
653 | bits |= 0x4; | ||
654 | |||
655 | bits <<= (idx * 4); | ||
656 | mask = 0xfULL << (idx * 4); | ||
657 | |||
658 | rdmsrl(hwc->config_base, ctrl_val); | ||
659 | ctrl_val &= ~mask; | ||
660 | ctrl_val |= bits; | ||
661 | err = checking_wrmsrl(hwc->config_base, ctrl_val); | ||
662 | } | ||
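Worked example of the fixed-counter encoding above, for fixed counter 1 (__idx = X86_PMC_IDX_FIXED + 1) counting in both rings with PMI enabled:

	/*
	 *   bits = 0x8 | 0x2 | 0x1 = 0xb,   idx = 1
	 *   bits <<= 4   ->  0xb0,   mask = 0xf0
	 *
	 * so field [7:4] of MSR_ARCH_PERFMON_FIXED_CTR_CTRL is set to 0xb:
	 * interrupt on overflow, count in ring 3 and ring 0.
	 */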
663 | |||
664 | static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
665 | { | ||
666 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
667 | if (!__get_cpu_var(cpu_hw_events).enabled) | ||
668 | return; | ||
669 | |||
670 | intel_pmu_enable_bts(hwc->config); | ||
671 | return; | ||
672 | } | ||
673 | |||
674 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | ||
675 | intel_pmu_enable_fixed(hwc, idx); | ||
676 | return; | ||
677 | } | ||
678 | |||
679 | __x86_pmu_enable_event(hwc, idx); | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * Save and restart an expired event. Called by NMI contexts, | ||
684 | * so it has to be careful about preempting normal event ops: | ||
685 | */ | ||
686 | static int intel_pmu_save_and_restart(struct perf_event *event) | ||
687 | { | ||
688 | struct hw_perf_event *hwc = &event->hw; | ||
689 | int idx = hwc->idx; | ||
690 | int ret; | ||
691 | |||
692 | x86_perf_event_update(event, hwc, idx); | ||
693 | ret = x86_perf_event_set_period(event, hwc, idx); | ||
694 | |||
695 | return ret; | ||
696 | } | ||
697 | |||
698 | static void intel_pmu_reset(void) | ||
699 | { | ||
700 | struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; | ||
701 | unsigned long flags; | ||
702 | int idx; | ||
703 | |||
704 | if (!x86_pmu.num_events) | ||
705 | return; | ||
706 | |||
707 | local_irq_save(flags); | ||
708 | |||
709 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); | ||
710 | |||
711 | for (idx = 0; idx < x86_pmu.num_events; idx++) { | ||
712 | checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); | ||
713 | checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); | ||
714 | } | ||
715 | for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { | ||
716 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | ||
717 | } | ||
718 | if (ds) | ||
719 | ds->bts_index = ds->bts_buffer_base; | ||
720 | |||
721 | local_irq_restore(flags); | ||
722 | } | ||
723 | |||
724 | /* | ||
725 | * This handler is triggered by the local APIC, so the APIC IRQ handling | ||
726 | * rules apply: | ||
727 | */ | ||
728 | static int intel_pmu_handle_irq(struct pt_regs *regs) | ||
729 | { | ||
730 | struct perf_sample_data data; | ||
731 | struct cpu_hw_events *cpuc; | ||
732 | int bit, loops; | ||
733 | u64 ack, status; | ||
734 | |||
735 | data.addr = 0; | ||
736 | data.raw = NULL; | ||
737 | |||
738 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
739 | |||
740 | perf_disable(); | ||
741 | intel_pmu_drain_bts_buffer(); | ||
742 | status = intel_pmu_get_status(); | ||
743 | if (!status) { | ||
744 | perf_enable(); | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | loops = 0; | ||
749 | again: | ||
750 | if (++loops > 100) { | ||
751 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); | ||
752 | perf_event_print_debug(); | ||
753 | intel_pmu_reset(); | ||
754 | perf_enable(); | ||
755 | return 1; | ||
756 | } | ||
757 | |||
758 | inc_irq_stat(apic_perf_irqs); | ||
759 | ack = status; | ||
760 | for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | ||
761 | struct perf_event *event = cpuc->events[bit]; | ||
762 | |||
763 | clear_bit(bit, (unsigned long *) &status); | ||
764 | if (!test_bit(bit, cpuc->active_mask)) | ||
765 | continue; | ||
766 | |||
767 | if (!intel_pmu_save_and_restart(event)) | ||
768 | continue; | ||
769 | |||
770 | data.period = event->hw.last_period; | ||
771 | |||
772 | if (perf_event_overflow(event, 1, &data, regs)) | ||
773 | intel_pmu_disable_event(&event->hw, bit); | ||
774 | } | ||
775 | |||
776 | intel_pmu_ack_status(ack); | ||
777 | |||
778 | /* | ||
779 | * Repeat if there is more work to be done: | ||
780 | */ | ||
781 | status = intel_pmu_get_status(); | ||
782 | if (status) | ||
783 | goto again; | ||
784 | |||
785 | perf_enable(); | ||
786 | |||
787 | return 1; | ||
788 | } | ||
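Note: the status word from MSR_CORE_PERF_GLOBAL_STATUS is indexed the same way as cpuc->events[]: low bits for the general-purpose counters and, from X86_PMC_IDX_FIXED upwards, the fixed counters, which is why the loop scans up to X86_PMC_IDX_MAX instead of just num_events. A sketch of the assumed layout (bit positions taken from asm/perf_event.h):

	/*
	 *   bit 0 .. num_events-1      general-purpose counters (PERFCTR0, ...)
	 *   bit 32 ..                  fixed counters (X86_PMC_IDX_FIXED)
	 *
	 * BTS has no overflow bit here; its buffer is drained explicitly via
	 * intel_pmu_drain_bts_buffer() before the status is read.
	 */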
789 | |||
790 | static struct event_constraint bts_constraint = | ||
791 | EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); | ||
792 | |||
793 | static struct event_constraint * | ||
794 | intel_special_constraints(struct perf_event *event) | ||
795 | { | ||
796 | unsigned int hw_event; | ||
797 | |||
798 | hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK; | ||
799 | |||
800 | if (unlikely((hw_event == | ||
801 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && | ||
802 | (event->hw.sample_period == 1))) { | ||
803 | |||
804 | return &bts_constraint; | ||
805 | } | ||
806 | return NULL; | ||
807 | } | ||
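In other words, a branch-instructions event sampled with period 1 is steered onto the BTS pseudo-counter rather than a general-purpose counter. A hypothetical userspace attr that would take this path (sketch only):

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
		.sample_period	= 1,	/* every branch -> BTS candidate */
		.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_ADDR,
	};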
808 | |||
809 | static struct event_constraint * | ||
810 | intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
811 | { | ||
812 | struct event_constraint *c; | ||
813 | |||
814 | c = intel_special_constraints(event); | ||
815 | if (c) | ||
816 | return c; | ||
817 | |||
818 | return x86_get_event_constraints(cpuc, event); | ||
819 | } | ||
820 | |||
821 | static __initconst struct x86_pmu core_pmu = { | ||
822 | .name = "core", | ||
823 | .handle_irq = x86_pmu_handle_irq, | ||
824 | .disable_all = x86_pmu_disable_all, | ||
825 | .enable_all = x86_pmu_enable_all, | ||
826 | .enable = x86_pmu_enable_event, | ||
827 | .disable = x86_pmu_disable_event, | ||
828 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
829 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
830 | .event_map = intel_pmu_event_map, | ||
831 | .raw_event = intel_pmu_raw_event, | ||
832 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), | ||
833 | .apic = 1, | ||
834 | /* | ||
835 | * Intel PMCs cannot be accessed sanely above 32 bit width, | ||
836 | * so we install an artificial 1<<31 period regardless of | ||
837 | * the generic event period: | ||
838 | */ | ||
839 | .max_period = (1ULL << 31) - 1, | ||
840 | .get_event_constraints = intel_get_event_constraints, | ||
841 | .event_constraints = intel_core_event_constraints, | ||
842 | }; | ||
843 | |||
844 | static __initconst struct x86_pmu intel_pmu = { | ||
845 | .name = "Intel", | ||
846 | .handle_irq = intel_pmu_handle_irq, | ||
847 | .disable_all = intel_pmu_disable_all, | ||
848 | .enable_all = intel_pmu_enable_all, | ||
849 | .enable = intel_pmu_enable_event, | ||
850 | .disable = intel_pmu_disable_event, | ||
851 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
852 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
853 | .event_map = intel_pmu_event_map, | ||
854 | .raw_event = intel_pmu_raw_event, | ||
855 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), | ||
856 | .apic = 1, | ||
857 | /* | ||
858 | * Intel PMCs cannot be accessed sanely above 32 bit width, | ||
859 | * so we install an artificial 1<<31 period regardless of | ||
860 | * the generic event period: | ||
861 | */ | ||
862 | .max_period = (1ULL << 31) - 1, | ||
863 | .enable_bts = intel_pmu_enable_bts, | ||
864 | .disable_bts = intel_pmu_disable_bts, | ||
865 | .get_event_constraints = intel_get_event_constraints | ||
866 | }; | ||
867 | |||
868 | static __init int intel_pmu_init(void) | ||
869 | { | ||
870 | union cpuid10_edx edx; | ||
871 | union cpuid10_eax eax; | ||
872 | unsigned int unused; | ||
873 | unsigned int ebx; | ||
874 | int version; | ||
875 | |||
876 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
877 | /* check for P6 processor family */ | ||
878 | if (boot_cpu_data.x86 == 6) { | ||
879 | return p6_pmu_init(); | ||
880 | } else { | ||
881 | return -ENODEV; | ||
882 | } | ||
883 | } | ||
884 | |||
885 | /* | ||
886 | * Check whether the Architectural PerfMon supports | ||
887 | * Branch Misses Retired hw_event or not. | ||
888 | */ | ||
889 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); | ||
890 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) | ||
891 | return -ENODEV; | ||
892 | |||
893 | version = eax.split.version_id; | ||
894 | if (version < 2) | ||
895 | x86_pmu = core_pmu; | ||
896 | else | ||
897 | x86_pmu = intel_pmu; | ||
898 | |||
899 | x86_pmu.version = version; | ||
900 | x86_pmu.num_events = eax.split.num_events; | ||
901 | x86_pmu.event_bits = eax.split.bit_width; | ||
902 | x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1; | ||
903 | |||
904 | /* | ||
905 | * Quirk: v2 perfmon does not report fixed-purpose events, so | ||
906 | * assume at least 3 events: | ||
907 | */ | ||
908 | if (version > 1) | ||
909 | x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); | ||
910 | |||
911 | /* | ||
912 | * Install the hw-cache-events table: | ||
913 | */ | ||
914 | switch (boot_cpu_data.x86_model) { | ||
915 | case 14: /* 65 nm core solo/duo, "Yonah" */ | ||
916 | pr_cont("Core events, "); | ||
917 | break; | ||
918 | |||
919 | case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | ||
920 | case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ | ||
921 | case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ | ||
922 | case 29: /* six-core 45 nm xeon "Dunnington" */ | ||
923 | memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, | ||
924 | sizeof(hw_cache_event_ids)); | ||
925 | |||
926 | x86_pmu.event_constraints = intel_core2_event_constraints; | ||
927 | pr_cont("Core2 events, "); | ||
928 | break; | ||
929 | |||
930 | case 26: /* 45 nm nehalem, "Bloomfield" */ | ||
931 | case 30: /* 45 nm nehalem, "Lynnfield" */ | ||
932 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | ||
933 | sizeof(hw_cache_event_ids)); | ||
934 | |||
935 | x86_pmu.event_constraints = intel_nehalem_event_constraints; | ||
936 | pr_cont("Nehalem/Corei7 events, "); | ||
937 | break; | ||
938 | case 28: | ||
939 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, | ||
940 | sizeof(hw_cache_event_ids)); | ||
941 | |||
942 | x86_pmu.event_constraints = intel_gen_event_constraints; | ||
943 | pr_cont("Atom events, "); | ||
944 | break; | ||
945 | |||
946 | case 37: /* 32 nm nehalem, "Clarkdale" */ | ||
947 | case 44: /* 32 nm nehalem, "Gulftown" */ | ||
948 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, | ||
949 | sizeof(hw_cache_event_ids)); | ||
950 | |||
951 | x86_pmu.event_constraints = intel_westmere_event_constraints; | ||
952 | pr_cont("Westmere events, "); | ||
953 | break; | ||
954 | default: | ||
955 | /* | ||
956 | * default constraints for v2 and up | ||
957 | */ | ||
958 | x86_pmu.event_constraints = intel_gen_event_constraints; | ||
959 | pr_cont("generic architected perfmon, "); | ||
960 | } | ||
961 | return 0; | ||
962 | } | ||
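Note: the cpuid10_eax/cpuid10_edx unions above decode CPUID leaf 0xA. Roughly, assuming the bitfield layout in asm/perf_event.h (not shown here):

	/*
	 *   eax[7:0]    version_id        ->  x86_pmu.version
	 *   eax[15:8]   num_events        ->  general-purpose counters
	 *   eax[23:16]  bit_width         ->  x86_pmu.event_bits
	 *   eax[31:24]  mask_length       ->  length of the ebx availability mask
	 *   edx[4:0]    num_events_fixed  ->  fixed counters (v2+, see quirk above)
	 */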
963 | |||
964 | #else /* CONFIG_CPU_SUP_INTEL */ | ||
965 | |||
966 | static int intel_pmu_init(void) | ||
967 | { | ||
968 | return 0; | ||
969 | } | ||
970 | |||
971 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c new file mode 100644 index 000000000000..1ca5ba078afd --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_p6.c | |||
@@ -0,0 +1,157 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | ||
2 | |||
3 | /* | ||
4 | * Not sure about some of these | ||
5 | */ | ||
6 | static const u64 p6_perfmon_event_map[] = | ||
7 | { | ||
8 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, | ||
9 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
10 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, | ||
11 | [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, | ||
12 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
13 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
14 | [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, | ||
15 | }; | ||
16 | |||
17 | static u64 p6_pmu_event_map(int hw_event) | ||
18 | { | ||
19 | return p6_perfmon_event_map[hw_event]; | ||
20 | } | ||
21 | |||
22 | /* | ||
23 | * Event setting that is specified not to count anything. | ||
24 | * We use this to effectively disable a counter. | ||
25 | * | ||
26 | * L2_RQSTS with 0 MESI unit mask. | ||
27 | */ | ||
28 | #define P6_NOP_EVENT 0x0000002EULL | ||
29 | |||
30 | static u64 p6_pmu_raw_event(u64 hw_event) | ||
31 | { | ||
32 | #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL | ||
33 | #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL | ||
34 | #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL | ||
35 | #define P6_EVNTSEL_INV_MASK 0x00800000ULL | ||
36 | #define P6_EVNTSEL_REG_MASK 0xFF000000ULL | ||
37 | |||
38 | #define P6_EVNTSEL_MASK \ | ||
39 | (P6_EVNTSEL_EVENT_MASK | \ | ||
40 | P6_EVNTSEL_UNIT_MASK | \ | ||
41 | P6_EVNTSEL_EDGE_MASK | \ | ||
42 | P6_EVNTSEL_INV_MASK | \ | ||
43 | P6_EVNTSEL_REG_MASK) | ||
44 | |||
45 | return hw_event & P6_EVNTSEL_MASK; | ||
46 | } | ||
47 | |||
48 | static struct event_constraint p6_event_constraints[] = | ||
49 | { | ||
50 | INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ | ||
51 | INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | ||
52 | INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ | ||
53 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | ||
54 | INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ | ||
55 | INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ | ||
56 | EVENT_CONSTRAINT_END | ||
57 | }; | ||
58 | |||
59 | static void p6_pmu_disable_all(void) | ||
60 | { | ||
61 | u64 val; | ||
62 | |||
63 | /* p6 only has one enable register */ | ||
64 | rdmsrl(MSR_P6_EVNTSEL0, val); | ||
65 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
66 | wrmsrl(MSR_P6_EVNTSEL0, val); | ||
67 | } | ||
68 | |||
69 | static void p6_pmu_enable_all(void) | ||
70 | { | ||
71 | unsigned long val; | ||
72 | |||
73 | /* p6 only has one enable register */ | ||
74 | rdmsrl(MSR_P6_EVNTSEL0, val); | ||
75 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
76 | wrmsrl(MSR_P6_EVNTSEL0, val); | ||
77 | } | ||
78 | |||
79 | static inline void | ||
80 | p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
81 | { | ||
82 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
83 | u64 val = P6_NOP_EVENT; | ||
84 | |||
85 | if (cpuc->enabled) | ||
86 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
87 | |||
88 | (void)checking_wrmsrl(hwc->config_base + idx, val); | ||
89 | } | ||
90 | |||
91 | static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
92 | { | ||
93 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
94 | u64 val; | ||
95 | |||
96 | val = hwc->config; | ||
97 | if (cpuc->enabled) | ||
98 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
99 | |||
100 | (void)checking_wrmsrl(hwc->config_base + idx, val); | ||
101 | } | ||
102 | |||
103 | static __initconst struct x86_pmu p6_pmu = { | ||
104 | .name = "p6", | ||
105 | .handle_irq = x86_pmu_handle_irq, | ||
106 | .disable_all = p6_pmu_disable_all, | ||
107 | .enable_all = p6_pmu_enable_all, | ||
108 | .enable = p6_pmu_enable_event, | ||
109 | .disable = p6_pmu_disable_event, | ||
110 | .eventsel = MSR_P6_EVNTSEL0, | ||
111 | .perfctr = MSR_P6_PERFCTR0, | ||
112 | .event_map = p6_pmu_event_map, | ||
113 | .raw_event = p6_pmu_raw_event, | ||
114 | .max_events = ARRAY_SIZE(p6_perfmon_event_map), | ||
115 | .apic = 1, | ||
116 | .max_period = (1ULL << 31) - 1, | ||
117 | .version = 0, | ||
118 | .num_events = 2, | ||
119 | /* | ||
120 | * The counters have 40 bits implemented. However they are designed such | ||
121 | * that bits [32-39] are sign extensions of bit 31. As such the | ||
122 | * effective width of an event for a P6-like PMU is 32 bits only. | ||
123 | * | ||
124 | * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B | ||
125 | */ | ||
126 | .event_bits = 32, | ||
127 | .event_mask = (1ULL << 32) - 1, | ||
128 | .get_event_constraints = x86_get_event_constraints, | ||
129 | .event_constraints = p6_event_constraints, | ||
130 | }; | ||
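Note: with event_bits = 32, the generic set-period helper (x86_perf_event_set_period(), not in this file) presets the counter to the negated period truncated to event_mask, and the sign extension described in the comment makes the 40-bit counter behave accordingly. A worked example under that assumption:

	/*
	 *   sample_period = 100000
	 *   preset        = (u64)(-100000) & ((1ULL << 32) - 1) = 0xfffe7960
	 *
	 * bits [39:32] are sign-extended from bit 31, so the counter overflows
	 * after 100000 events and raises the PMI.
	 */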
131 | |||
132 | static __init int p6_pmu_init(void) | ||
133 | { | ||
134 | switch (boot_cpu_data.x86_model) { | ||
135 | case 1: | ||
136 | case 3: /* Pentium Pro */ | ||
137 | case 5: | ||
138 | case 6: /* Pentium II */ | ||
139 | case 7: | ||
140 | case 8: | ||
141 | case 11: /* Pentium III */ | ||
142 | case 9: | ||
143 | case 13: | ||
144 | /* Pentium M */ | ||
145 | break; | ||
146 | default: | ||
147 | pr_cont("unsupported p6 CPU model %d ", | ||
148 | boot_cpu_data.x86_model); | ||
149 | return -ENODEV; | ||
150 | } | ||
151 | |||
152 | x86_pmu = p6_pmu; | ||
153 | |||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | #endif /* CONFIG_CPU_SUP_INTEL */ | ||
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 898df9719afb..74f4e85a5727 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -115,17 +115,6 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | |||
115 | 115 | ||
116 | return !test_bit(counter, perfctr_nmi_owner); | 116 | return !test_bit(counter, perfctr_nmi_owner); |
117 | } | 117 | } |
118 | |||
119 | /* checks the an msr for availability */ | ||
120 | int avail_to_resrv_perfctr_nmi(unsigned int msr) | ||
121 | { | ||
122 | unsigned int counter; | ||
123 | |||
124 | counter = nmi_perfctr_msr_to_bit(msr); | ||
125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
126 | |||
127 | return !test_bit(counter, perfctr_nmi_owner); | ||
128 | } | ||
129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | 118 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); |
130 | 119 | ||
131 | int reserve_perfctr_nmi(unsigned int msr) | 120 | int reserve_perfctr_nmi(unsigned int msr) |
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index ae775ca47b25..11540a189d93 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c | |||
@@ -18,11 +18,6 @@ | |||
18 | 18 | ||
19 | #include "dumpstack.h" | 19 | #include "dumpstack.h" |
20 | 20 | ||
21 | /* Just a stub for now */ | ||
22 | int x86_is_stack_id(int id, char *name) | ||
23 | { | ||
24 | return 0; | ||
25 | } | ||
26 | 21 | ||
27 | void dump_trace(struct task_struct *task, struct pt_regs *regs, | 22 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
28 | unsigned long *stack, unsigned long bp, | 23 | unsigned long *stack, unsigned long bp, |
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 907a90e2901c..dce99abb4496 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
@@ -33,11 +33,6 @@ static char x86_stack_ids[][8] = { | |||
33 | #endif | 33 | #endif |
34 | }; | 34 | }; |
35 | 35 | ||
36 | int x86_is_stack_id(int id, char *name) | ||
37 | { | ||
38 | return x86_stack_ids[id - 1] == name; | ||
39 | } | ||
40 | |||
41 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | 36 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, |
42 | unsigned *usedp, char **idp) | 37 | unsigned *usedp, char **idp) |
43 | { | 38 | { |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index bb6006e3e295..dca2802c666f 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -486,8 +486,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
486 | rcu_read_lock(); | 486 | rcu_read_lock(); |
487 | 487 | ||
488 | bp = per_cpu(bp_per_reg[i], cpu); | 488 | bp = per_cpu(bp_per_reg[i], cpu); |
489 | if (bp) | ||
490 | rc = NOTIFY_DONE; | ||
491 | /* | 489 | /* |
492 | * Reset the 'i'th TRAP bit in dr6 to denote completion of | 490 | * Reset the 'i'th TRAP bit in dr6 to denote completion of |
493 | * exception handling | 491 | * exception handling |
@@ -506,7 +504,13 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
506 | 504 | ||
507 | rcu_read_unlock(); | 505 | rcu_read_unlock(); |
508 | } | 506 | } |
509 | if (dr6 & (~DR_TRAP_BITS)) | 507 | /* |
508 | * Further processing in do_debug() is needed for a) user-space | ||
509 | * breakpoints (to generate signals) and b) when the system has | ||
510 | * taken exception due to multiple causes | ||
511 | */ | ||
512 | if ((current->thread.debugreg6 & DR_TRAP_BITS) || | ||
513 | (dr6 & (~DR_TRAP_BITS))) | ||
510 | rc = NOTIFY_DONE; | 514 | rc = NOTIFY_DONE; |
511 | 515 | ||
512 | set_debugreg(dr7, 7); | 516 | set_debugreg(dr7, 7); |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 5b8c7505b3bc..5de9f4a9c3fd 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -337,6 +337,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) | |||
337 | 337 | ||
338 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 338 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
339 | { | 339 | { |
340 | if (alternatives_text_reserved(p->addr, p->addr)) | ||
341 | return -EINVAL; | ||
342 | |||
340 | if (!can_probe((unsigned long)p->addr)) | 343 | if (!can_probe((unsigned long)p->addr)) |
341 | return -EILSEQ; | 344 | return -EILSEQ; |
342 | /* insn: must be on special executable page on x86. */ | 345 | /* insn: must be on special executable page on x86. */ |
@@ -429,7 +432,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | |||
429 | static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, | 432 | static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, |
430 | struct kprobe_ctlblk *kcb) | 433 | struct kprobe_ctlblk *kcb) |
431 | { | 434 | { |
432 | #if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) | 435 | #if !defined(CONFIG_PREEMPT) |
433 | if (p->ainsn.boostable == 1 && !p->post_handler) { | 436 | if (p->ainsn.boostable == 1 && !p->post_handler) { |
434 | /* Boost up -- we can execute copied instructions directly */ | 437 | /* Boost up -- we can execute copied instructions directly */ |
435 | reset_current_kprobe(); | 438 | reset_current_kprobe(); |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 0c1033d61e59..d03146f71b2f 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -140,30 +140,6 @@ static const int arg_offs_table[] = { | |||
140 | #endif | 140 | #endif |
141 | }; | 141 | }; |
142 | 142 | ||
143 | /** | ||
144 | * regs_get_argument_nth() - get Nth argument at function call | ||
145 | * @regs: pt_regs which contains registers at function entry. | ||
146 | * @n: argument number. | ||
147 | * | ||
148 | * regs_get_argument_nth() returns @n th argument of a function call. | ||
149 | * Since usually the kernel stack will be changed right after function entry, | ||
150 | * you must use this at function entry. If the @n th entry is NOT in the | ||
151 | * kernel stack or pt_regs, this returns 0. | ||
152 | */ | ||
153 | unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n) | ||
154 | { | ||
155 | if (n < ARRAY_SIZE(arg_offs_table)) | ||
156 | return *(unsigned long *)((char *)regs + arg_offs_table[n]); | ||
157 | else { | ||
158 | /* | ||
159 | * The typical case: arg n is on the stack. | ||
160 | * (Note: stack[0] = return address, so skip it) | ||
161 | */ | ||
162 | n -= ARRAY_SIZE(arg_offs_table); | ||
163 | return regs_get_kernel_stack_nth(regs, 1 + n); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | /* | 143 | /* |
168 | * does not yet catch signals sent when the child dies. | 144 | * does not yet catch signals sent when the child dies. |
169 | * in exit.c or in signal.c. | 145 | * in exit.c or in signal.c. |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 33399176512a..1168e4454188 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -534,6 +534,9 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) | |||
534 | 534 | ||
535 | get_debugreg(dr6, 6); | 535 | get_debugreg(dr6, 6); |
536 | 536 | ||
537 | /* Filter out all the reserved bits which are preset to 1 */ | ||
538 | dr6 &= ~DR6_RESERVED; | ||
539 | |||
537 | /* Catch kmemcheck conditions first of all! */ | 540 | /* Catch kmemcheck conditions first of all! */ |
538 | if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) | 541 | if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) |
539 | return; | 542 | return; |