author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-13 17:39:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-13 17:39:42 -0500
commit		9fdfbc2bff587f454dd95e2caa6d147c9abe39e4 (patch)
tree		2feaee47cbcfb57dd0d5cf23509e22011541e717 /arch/x86/kernel
parent		8cea4eb642890a1de58980e7e1617d1765ef8f7c (diff)
parent		dc1d628a67a8f042e711ea5accc0beedc3ef0092 (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf: Provide generic perf_sample_data initialization
MAINTAINERS: Add Arnaldo as tools/perf/ co-maintainer
perf trace: Don't use pager if scripting
perf trace/scripting: Remove extraneous header read
perf, ARM: Modify kuser rmb() call to compile for Thumb-2
x86/stacktrace: Don't dereference bad frame pointers
perf archive: Don't try to collect files without a build-id
perf_events, x86: Fixup fixed counter constraints
perf, x86: Restrict the ANY flag
perf, x86: rename macro in ARCH_PERFMON_EVENTSEL_ENABLE
perf, x86: add some IBS macros to perf_event.h
perf, x86: make IBS macros available in perf_event.h
hw-breakpoints: Remove stub unthrottle callback
x86/hw-breakpoints: Remove the name field
perf: Remove pointless breakpoint union
perf lock: Drop the buffers multiplexing dependency
perf lock: Fix and add misc documentally things
percpu: Add __percpu sparse annotations to hw_breakpoint
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	| 39
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	| 37
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p6.c	|  8
-rw-r--r--	arch/x86/kernel/cpu/perfctr-watchdog.c	|  2
-rw-r--r--	arch/x86/kernel/dumpstack_64.c	| 10
-rw-r--r--	arch/x86/kernel/hw_breakpoint.c	| 12
6 files changed, 62 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b1fbdeecf6c9..42aafd11e170 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -73,10 +73,10 @@ struct debug_store {
 struct event_constraint {
 	union {
 		unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-		u64 idxmsk64[1];
+		u64 idxmsk64;
 	};
-	int code;
-	int cmask;
+	u64 code;
+	u64 cmask;
 	int weight;
 };
 
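Widening code and cmask to u64 matters because constraint matching compares them directly against the 64-bit event configuration, and fixed-counter constraints now carry mask bits that do not fit in an int. A rough sketch of how the scheduler consumes these fields (simplified from the constraint lookup in this file, not a verbatim copy):

	struct event_constraint *c;

	for_each_event_constraint(c, x86_pmu.event_constraints) {
		/* a constraint applies when the masked config matches its code */
		if ((event->hw.config & c->cmask) == c->code)
			return c;
	}
	return &unconstrained;	/* fall back to "any generic counter" */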
@@ -103,7 +103,7 @@ struct cpu_hw_events {
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w) {\
-	{ .idxmsk64[0] = (n) },		\
+	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = (w),			\
@@ -116,7 +116,7 @@ struct cpu_hw_events {
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
 
 #define EVENT_CONSTRAINT_END		\
 	EVENT_CONSTRAINT(0, 0, 0)
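With the new FIXED_EVENT_CONSTRAINT, callers pass the fixed-counter index and the macro computes the index-mask bit itself; fixed counters occupy bits 32 and up of idxmsk64. A worked expansion, assuming that layout:

	/* FIXED_EVENT_CONSTRAINT(0x003c, 1) now expands to: */
	EVENT_CONSTRAINT(0x003c, (1ULL << 33), INTEL_ARCH_FIXED_MASK)
	/*
	 * i.e. idxmsk64 = 0x0000000200000000: fixed counter 1 counts
	 * CPU_CLK_UNHALTED.CORE. The old callers had to hand-encode
	 * (0x3|(1ULL<<33)) themselves, mixing generic and fixed bits.
	 */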
@@ -503,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
+		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
+		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return 0;
 	}
 
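The added check guards the EVENTSEL "AnyThread" bit: on HyperThreaded parts a counter programmed with ANY set counts events from both hardware threads of the core, which would let an unprivileged user observe sibling-thread activity. A sketch of the policy those three lines enforce (the bit position below follows the SDM's AnyThread field; treat the helper as illustrative, not kernel API):

	/* AnyThread is bit 21 of IA32_PERFEVTSELx */
	#define ARCH_PERFMON_EVENTSEL_ANY	(1ULL << 21)

	static bool anythread_allowed(u64 config)
	{
		if (!(config & ARCH_PERFMON_EVENTSEL_ANY))
			return true;		/* bit not requested */
		/* cross-thread counting needs privilege under paranoia */
		return !perf_paranoid_cpu() || capable(CAP_SYS_ADMIN);
	}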
@@ -553,9 +556,9 @@ static void x86_pmu_disable_all(void)
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(x86_pmu.eventsel + idx, val);
-		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -590,7 +593,7 @@ static void x86_pmu_enable_all(void)
 			continue;
 
 		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -612,8 +615,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
 	for (i = 0; i < n; i++) {
-		constraints[i] =
-		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		constraints[i] = c;
 	}
 
 	/*
@@ -853,7 +856,7 @@ void hw_perf_enable(void)
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -1094,8 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
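perf_sample_data_init() is the generic initializer introduced by the "perf: Provide generic perf_sample_data initialization" commit in this merge; centralizing the assignments means a future perf_sample_data field only needs zeroing in one place rather than at every call site. It is roughly:

	static inline void perf_sample_data_init(struct perf_sample_data *data,
						 u64 addr)
	{
		data->addr = addr;
		data->raw  = NULL;
	}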
@@ -1347,6 +1349,7 @@ static void __init pmu_check_apic(void)
 
 void __init init_hw_perf_events(void)
 {
+	struct event_constraint *c;
 	int err;
 
 	pr_info("Performance Events: ");
@@ -1395,6 +1398,16 @@ void __init init_hw_perf_events(void)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
 				   0, x86_pmu.num_events);
 
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+				continue;
+
+			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
+			c->weight += x86_pmu.num_events;
+		}
+	}
+
 	pr_info("... version:                %d\n",     x86_pmu.version);
 	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
 	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
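The loop added above relaxes the fixed-counter constraints once the number of generic counters is known: an event that can live on a fixed counter can normally also be scheduled on a generic counter, so the generic-counter bits are OR-ed into the index mask and the weight (usable-counter count) grows to match. A worked example, assuming 4 generic counters (num_events == 4) and weight tracking the popcount of the mask:

	/* before: INST_RETIRED.ANY pinned to fixed counter 0 */
	c->idxmsk64 == (1ULL << 32);		/* weight == 1 */

	/* after: fixed counter 0, or any of generic counters 0-3 */
	c->idxmsk64 == ((1ULL << 32) | 0xf);	/* weight == 5 */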
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 977e7544738c..44b60c852107 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,7 +1,7 @@
 #ifdef CONFIG_CPU_SUP_INTEL
 
 /*
- * Intel PerfMon v3. Used on Core2 and later.
+ * Intel PerfMon, used on Core and later.
  */
 static const u64 intel_perfmon_event_map[] =
 {
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] =
 
 static struct event_constraint intel_core2_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/*
+	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
+	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
+	 * ratio between these counters.
+	 */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] =
 	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
 	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
 	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
 	EVENT_CONSTRAINT_END
 };
 
 static struct event_constraint intel_nehalem_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
 	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
 	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
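For the INTEL_EVENT_CONSTRAINT entries above, the second argument is a bitmask of the generic counters the event is allowed to use (bit 0 = generic counter 0, bit 1 = generic counter 1, and so on). Reading a few of the rows as an example:

	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE: counter 0 only */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL: counter 1 only */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3),	/* L1D_CACHE_LD: counter 0 or 1 */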
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] =
 
 static struct event_constraint intel_westmere_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] =
 
 static struct event_constraint intel_gen_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
 
@@ -580,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void)
 
 	ds->bts_index = ds->bts_buffer_base;
 
+	perf_sample_data_init(&data, 0);
 
 	data.period = event->hw.last_period;
-	data.addr = 0;
-	data.raw = NULL;
 	regs.ip = 0;
 
 	/*
@@ -732,8 +741,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	int bit, loops;
 	u64 ack, status;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -935,7 +943,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
 		pr_cont("Nehalem/Corei7 events, ");
 		break;
-	case 28:
+	case 28: /* Atom */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -951,6 +959,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_westmere_event_constraints;
 		pr_cont("Westmere events, ");
 		break;
+
 	default:
 		/*
 		 * default constraints for v2 and up
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 1ca5ba078afd..a4e67b99d91c 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -72,7 +72,7 @@ static void p6_pmu_enable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -83,7 +83,7 @@ p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
@@ -95,7 +95,7 @@ static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	val = hwc->config;
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 74f4e85a5727..fb329e9f8494 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	cpu_nmi_set_wd_enabled();
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
 	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index dce99abb4496..d5e2a2ebb627 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -120,9 +120,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
 {
 #ifdef CONFIG_FRAME_POINTER
 	struct stack_frame *frame = (struct stack_frame *)bp;
+	unsigned long next;
 
-	if (!in_irq_stack(stack, irq_stack, irq_stack_end))
-		return (unsigned long)frame->next_frame;
+	if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
+		if (!probe_kernel_address(&frame->next_frame, next))
+			return next;
+		else
+			WARN_ONCE(1, "Perf: bad frame pointer = %p in "
+				  "callchain\n", &frame->next_frame);
+	}
 #endif
 	return bp;
 }
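probe_kernel_address() copies sizeof(next) bytes from a possibly bogus kernel pointer with a page-fault fixup armed, returning 0 on success and -EFAULT on a bad address; that is what lets the unwinder above warn once about a corrupt frame pointer instead of faulting inside an NMI callchain walk. The calling pattern, as a standalone sketch:

	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long next;

	if (probe_kernel_address(&frame->next_frame, next) == 0)
		bp = next;	/* safe to follow the saved frame pointer */
	else
		WARN_ONCE(1, "bad frame pointer = %p\n", &frame->next_frame);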
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index dca2802c666f..d6cc065f519f 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -344,13 +344,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
 	}
 
 	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-			kallsyms_lookup_name(info->name);
-	/*
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.
 	 */
@@ -535,8 +528,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
 {
 	/* TODO */
 }
-
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
-	/* TODO */
-}