author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-13 17:39:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-13 17:39:42 -0500
commit		9fdfbc2bff587f454dd95e2caa6d147c9abe39e4 (patch)
tree		2feaee47cbcfb57dd0d5cf23509e22011541e717 /arch
parent		8cea4eb642890a1de58980e7e1617d1765ef8f7c (diff)
parent		dc1d628a67a8f042e711ea5accc0beedc3ef0092 (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Provide generic perf_sample_data initialization
  MAINTAINERS: Add Arnaldo as tools/perf/ co-maintainer
  perf trace: Don't use pager if scripting
  perf trace/scripting: Remove extraneous header read
  perf, ARM: Modify kuser rmb() call to compile for Thumb-2
  x86/stacktrace: Don't dereference bad frame pointers
  perf archive: Don't try to collect files without a build-id
  perf_events, x86: Fixup fixed counter constraints
  perf, x86: Restrict the ANY flag
  perf, x86: rename macro in ARCH_PERFMON_EVENTSEL_ENABLE
  perf, x86: add some IBS macros to perf_event.h
  perf, x86: make IBS macros available in perf_event.h
  hw-breakpoints: Remove stub unthrottle callback
  x86/hw-breakpoints: Remove the name field
  perf: Remove pointless breakpoint union
  perf lock: Drop the buffers multiplexing dependency
  perf lock: Fix and add misc documentally things
  percpu: Add __percpu sparse annotations to hw_breakpoint
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/perf_event.c		4
-rw-r--r--	arch/powerpc/kernel/perf_event.c	8
-rw-r--r--	arch/sparc/kernel/perf_event.c		2
-rw-r--r--	arch/x86/include/asm/hw_breakpoint.h	1
-rw-r--r--	arch/x86/include/asm/perf_event.h	16
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	39
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	37
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p6.c	8
-rw-r--r--	arch/x86/kernel/cpu/perfctr-watchdog.c	2
-rw-r--r--	arch/x86/kernel/dumpstack_64.c		10
-rw-r--r--	arch/x86/kernel/hw_breakpoint.c		12
-rw-r--r--	arch/x86/oprofile/op_model_amd.c	23
-rw-r--r--	arch/x86/oprofile/op_model_ppro.c	6
13 files changed, 92 insertions, 76 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c54ceb3d1f97..3875d99cc40f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -965,7 +965,7 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	data.addr = 0;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx <= armpmu->num_events; ++idx) {
@@ -1945,7 +1945,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();
 
-	data.addr = 0;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx <= armpmu->num_events; ++idx) {
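
Note on the new helper: perf_sample_data_init() replaces the open-coded
initialization of struct perf_sample_data at every call site touched by this
merge. A minimal sketch of what it consolidates, inferred from the
assignments it replaces here and in the x86 hunks below (the real definition
lands in include/linux/perf_event.h and may differ in detail):

	static inline void
	perf_sample_data_init(struct perf_sample_data *data, u64 addr)
	{
		/* the two fields previously zeroed by hand */
		data->addr = addr;
		data->raw  = NULL;
	}

Call sites that need more than the defaults, such as the powerpc hunk below,
still assign fields like data.period after the init call.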
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index b6cf8f1f4d35..5120bd44f69a 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1164,10 +1164,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	 * Finally record data if requested.
 	 */
 	if (record) {
-		struct perf_sample_data data = {
-			.addr = ~0ULL,
-			.period = event->hw.last_period,
-		};
+		struct perf_sample_data data;
+
+		perf_sample_data_init(&data, ~0ULL);
+		data.period = event->hw.last_period;
 
 		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
 			perf_get_data_addr(regs, &data.addr);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b867ab3353b4..68cb9b42088f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1189,7 +1189,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
-	data.addr = 0;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 0675a7c4c20e..2a1bd8f4f23a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
  * (display/resolving)
  */
 struct arch_hw_breakpoint {
-	char		*name; /* Contains name of the symbol to set bkpt */
 	unsigned long	address;
 	u8		len;
 	u8		type;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index befd172c82ad..db6109a885a7 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -18,7 +18,7 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0	0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1	0x187
 
-#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ENABLE	(1 << 22)
 #define ARCH_PERFMON_EVENTSEL_ANY	(1 << 21)
 #define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
 #define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
@@ -50,7 +50,7 @@
 	 INTEL_ARCH_INV_MASK| \
 	 INTEL_ARCH_EDGE_MASK|\
 	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVTSEL_MASK)
+	 INTEL_ARCH_EVENT_MASK)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
@@ -117,6 +117,18 @@ union cpuid10_edx {
  */
 #define X86_PMC_IDX_FIXED_BTS	(X86_PMC_IDX_FIXED + 16)
 
+/* IbsFetchCtl bits/masks */
+#define IBS_FETCH_RAND_EN	(1ULL<<57)
+#define IBS_FETCH_VAL		(1ULL<<49)
+#define IBS_FETCH_ENABLE	(1ULL<<48)
+#define IBS_FETCH_CNT		0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT	0x0000FFFFULL
+
+/* IbsOpCtl bits */
+#define IBS_OP_CNT_CTL		(1ULL<<19)
+#define IBS_OP_VAL		(1ULL<<18)
+#define IBS_OP_ENABLE		(1ULL<<17)
+#define IBS_OP_MAX_CNT		0x0000FFFFULL
 
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
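
Note: these IBS definitions are hoisted into the shared header so that both
perf and oprofile can use them; the op_model_amd.c hunks below drop the
local copies and rename IBS_FETCH_CNT_MASK to IBS_FETCH_CNT. A condensed
sketch of the fetch-sampling re-arm sequence, using only names visible in
this diff (the surrounding driver context is assumed):

	/* after consuming a fetch sample: clear the valid bit and the
	 * running count, keep sampling enabled */
	u64 ctl;
	rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
	ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
	ctl |= IBS_FETCH_ENABLE;
	wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);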
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b1fbdeecf6c9..42aafd11e170 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -73,10 +73,10 @@ struct debug_store {
 struct event_constraint {
 	union {
 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-		u64		idxmsk64[1];
+		u64		idxmsk64;
 	};
-	int	code;
-	int	cmask;
+	u64	code;
+	u64	cmask;
 	int	weight;
 };
@@ -103,7 +103,7 @@ struct cpu_hw_events {
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w) {\
-	{ .idxmsk64[0] = (n) },		\
+	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = (w),			\
@@ -116,7 +116,7 @@ struct cpu_hw_events {
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
 
 #define EVENT_CONSTRAINT_END		\
 	EVENT_CONSTRAINT(0, 0, 0)
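
Note: the second argument of FIXED_EVENT_CONSTRAINT() changes meaning here.
Callers now pass the fixed-counter index n and the macro derives the counter
mask, since fixed counter n sits at PMC index 32+n. Worked example, taken
from the constraint tables updated later in this merge:

	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
	/* now expands to */
	EVENT_CONSTRAINT(0x00c0, 1ULL << 32, INTEL_ARCH_FIXED_MASK)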
@@ -503,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
+		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
+		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return 0;
 	}
 
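
Note: ARCH_PERFMON_EVENTSEL_ANY (bit 21) lets a counter count across both
hardware threads of a core, so raw events that set it are now refused for
unprivileged users when perf_paranoid_cpu() is in effect. A hypothetical
userspace request that would now fail with EACCES without CAP_SYS_ADMIN:

	struct perf_event_attr attr = {
		.type   = PERF_TYPE_RAW,
		.config = 0x003c | (1ULL << 21),	/* ANY=1 */
	};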
@@ -553,9 +556,9 @@ static void x86_pmu_disable_all(void)
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(x86_pmu.eventsel + idx, val);
-		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -590,7 +593,7 @@ static void x86_pmu_enable_all(void)
 			continue;
 
 		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -612,8 +615,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
 	for (i = 0; i < n; i++) {
-		constraints[i] =
-		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		constraints[i] = c;
 	}
 
 	/*
@@ -853,7 +856,7 @@ void hw_perf_enable(void)
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -1094,8 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1347,6 +1349,7 @@ static void __init pmu_check_apic(void)
 
 void __init init_hw_perf_events(void)
 {
+	struct event_constraint *c;
 	int err;
 
 	pr_info("Performance Events: ");
@@ -1395,6 +1398,16 @@ void __init init_hw_perf_events(void)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
 				   0, x86_pmu.num_events);
 
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+				continue;
+
+			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
+			c->weight += x86_pmu.num_events;
+		}
+	}
+
 	pr_info("... version:             %d\n", x86_pmu.version);
 	pr_info("... bit width:           %d\n", x86_pmu.event_bits);
 	pr_info("... generic registers:   %d\n", x86_pmu.num_events);
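
Note: this fixup widens each fixed-counter constraint so the event may also
fall back to a generic counter. Worked through with an assumed
x86_pmu.num_events of 4 (illustration only):

	c->idxmsk64  = 1ULL << 32;		/* before: fixed counter 0 only */
	c->idxmsk64 |= (1ULL << 4) - 1;		/* after:  0x10000000f */
	c->weight   += 4;			/* 1 -> 5 schedulable counters */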
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 977e7544738c..44b60c852107 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,7 +1,7 @@
 #ifdef CONFIG_CPU_SUP_INTEL
 
 /*
- * Intel PerfMon v3. Used on Core2 and later.
+ * Intel PerfMon, used on Core and later.
  */
 static const u64 intel_perfmon_event_map[] =
 {
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] =
 
 static struct event_constraint intel_core2_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/*
+	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
+	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
+	 * ratio between these counters.
+	 */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] =
 	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
 	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
 	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
 	EVENT_CONSTRAINT_END
 };
 
 static struct event_constraint intel_nehalem_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
 	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
 	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] =
 
 static struct event_constraint intel_westmere_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] =
 
 static struct event_constraint intel_gen_event_constraints[] =
 {
-	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
 
@@ -580,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void)
 
 	ds->bts_index = ds->bts_buffer_base;
 
+	perf_sample_data_init(&data, 0);
 
 	data.period = event->hw.last_period;
-	data.addr = 0;
-	data.raw = NULL;
 	regs.ip = 0;
 
 	/*
@@ -732,8 +741,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	int bit, loops;
 	u64 ack, status;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -935,7 +943,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
 		pr_cont("Nehalem/Corei7 events, ");
 		break;
-	case 28:
+	case 28: /* Atom */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -951,6 +959,7 @@ static __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_westmere_event_constraints;
 		pr_cont("Westmere events, ");
 		break;
+
 	default:
 		/*
 		 * default constraints for v2 and up
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 1ca5ba078afd..a4e67b99d91c 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -72,7 +72,7 @@ static void p6_pmu_enable_all(void)
 
 	/* p6 only has one enable register */
 	rdmsrl(MSR_P6_EVNTSEL0, val);
-	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsrl(MSR_P6_EVNTSEL0, val);
 }
 
@@ -83,7 +83,7 @@ p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
@@ -95,7 +95,7 @@ static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	val = hwc->config;
 	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 74f4e85a5727..fb329e9f8494 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	cpu_nmi_set_wd_enabled();
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
 	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index dce99abb4496..d5e2a2ebb627 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -120,9 +120,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
 {
 #ifdef CONFIG_FRAME_POINTER
 	struct stack_frame *frame = (struct stack_frame *)bp;
+	unsigned long next;
 
-	if (!in_irq_stack(stack, irq_stack, irq_stack_end))
-		return (unsigned long)frame->next_frame;
+	if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
+		if (!probe_kernel_address(&frame->next_frame, next))
+			return next;
+		else
+			WARN_ONCE(1, "Perf: bad frame pointer = %p in "
+				"callchain\n", &frame->next_frame);
+	}
 #endif
 	return bp;
 }
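
Note: probe_kernel_address(addr, retval) copies *addr with page faults
handled and returns 0 on success, so a corrupt frame pointer now degrades
into the WARN_ONCE above instead of a bad dereference during the callchain
walk. Minimal usage sketch mirroring the hunk:

	unsigned long next;
	if (!probe_kernel_address(&frame->next_frame, next))
		return next;	/* safely-read next frame pointer */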
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index dca2802c666f..d6cc065f519f 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -344,13 +344,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
 	}
 
 	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-			kallsyms_lookup_name(info->name);
-	/*
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.
 	 */
@@ -535,8 +528,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
 {
 	/* TODO */
 }
-
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
-	/* TODO */
-}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 6a58256dce9f..090cbbec7dbd 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -46,17 +46,6 @@
 
 static unsigned long reset_value[NUM_VIRT_COUNTERS];
 
-/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN	(1ULL<<57)
-#define IBS_FETCH_VAL		(1ULL<<49)
-#define IBS_FETCH_ENABLE	(1ULL<<48)
-#define IBS_FETCH_CNT_MASK	0xFFFF0000ULL
-
-/* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL		(1ULL<<19)
-#define IBS_OP_VAL		(1ULL<<18)
-#define IBS_OP_ENABLE		(1ULL<<17)
-
 #define IBS_FETCH_SIZE	6
 #define IBS_OP_SIZE	12
 
@@ -182,7 +171,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 			continue;
 		}
 		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
@@ -290,7 +279,7 @@ op_amd_handle_ibs(struct pt_regs * const regs,
 		oprofile_write_commit(&entry);
 
 		/* reenable the IRQ */
-		ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
+		ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
 		ctl |= IBS_FETCH_ENABLE;
 		wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
 	}
@@ -330,7 +319,7 @@ static inline void op_amd_start_ibs(void)
 		return;
 
 	if (ibs_config.fetch_enabled) {
-		val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
+		val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
 		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
 		val |= IBS_FETCH_ENABLE;
 		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
@@ -352,7 +341,7 @@ static inline void op_amd_start_ibs(void)
 		 * avoid underflows.
 		 */
 		ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
-				 0xFFFFULL);
+				 IBS_OP_MAX_CNT);
 	}
 	if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
 		ibs_op_ctl |= IBS_OP_CNT_CTL;
@@ -409,7 +398,7 @@ static void op_amd_start(struct op_msrs const * const msrs)
 		if (!reset_value[op_x86_phys_to_virt(i)])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 
@@ -429,7 +418,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 		if (!reset_value[op_x86_phys_to_virt(i)])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 5d1727ba409e..2bf90fafa7b5 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -88,7 +88,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 			continue;
 		}
 		rdmsrl(msrs->controls[i].addr, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
 		val &= model->reserved;
 		wrmsrl(msrs->controls[i].addr, val);
@@ -166,7 +166,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			rdmsrl(msrs->controls[i].addr, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+			val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 			wrmsrl(msrs->controls[i].addr, val);
 		}
 	}
@@ -184,7 +184,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
 		if (!reset_value[i])
 			continue;
 		rdmsrl(msrs->controls[i].addr, val);
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(msrs->controls[i].addr, val);
 	}
 }