Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 39 ++++++++++++++++++++++++++-------------
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b1fbdeecf6c9..42aafd11e170 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -73,10 +73,10 @@ struct debug_store {
 struct event_constraint {
 	union {
 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-		u64		idxmsk64[1];
+		u64		idxmsk64;
 	};
-	int	code;
-	int	cmask;
+	u64	code;
+	u64	cmask;
 	int	weight;
 };
 
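The hunk above replaces the one-element array idxmsk64[1] with a plain u64 that aliases the low word of the idxmsk bitmap, and widens code and cmask to u64 so configuration bits above bit 31 (such as fixed-counter mask bits) fit. Below is a minimal user-space sketch of the union aliasing; BITS_TO_LONGS and X86_PMC_IDX_MAX are re-declared here purely for illustration, assuming a 64-bit unsigned long:

    /* Stand-alone sketch of the union aliasing (not the kernel code);
     * assumes a 64-bit unsigned long (LP64). */
    #include <stdio.h>

    #define X86_PMC_IDX_MAX   64
    #define BITS_PER_LONG     64
    #define BITS_TO_LONGS(n)  (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct event_constraint {
            union {
                    unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                    unsigned long long idxmsk64; /* aliases the low 64 bits */
            };
            unsigned long long code;
            unsigned long long cmask;
            int weight;
    };

    int main(void)
    {
            struct event_constraint c = { .idxmsk64 = 0xFULL }; /* counters 0-3 */

            /* Both union members view the same storage. */
            printf("bit 3 set in idxmsk: %d\n", !!(c.idxmsk[0] & (1UL << 3)));
            return 0;
    }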
@@ -103,7 +103,7 @@ struct cpu_hw_events {
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w) {\
-	{ .idxmsk64[0] = (n) },		\
+	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = (w),			\
@@ -116,7 +116,7 @@ struct cpu_hw_events {
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
 
 #define EVENT_CONSTRAINT_END \
 	EVENT_CONSTRAINT(0, 0, 0)
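FIXED_EVENT_CONSTRAINT now derives the index mask from the counter number: fixed-purpose counter n occupies bit 32+n of the constraint bitmap, since the kernel places fixed counters at index X86_PMC_IDX_FIXED (32), after the generic counters. A small, hypothetical illustration of the masks the macro now produces:

    #include <stdio.h>

    /* Fixed counter n maps to constraint bit (32 + n); illustration only. */
    int main(void)
    {
            for (int n = 0; n < 3; n++)
                    printf("fixed counter %d -> bit %2d, mask 0x%016llx\n",
                           n, 32 + n, 1ULL << (32 + n));
            return 0;
    }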
@@ -503,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event)
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
+		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
+		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return 0;
 	}
 
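The added check gates the AnyThread bit: when ARCH_PERFMON_EVENTSEL_ANY (bit 21 of IA32_PERFEVTSELx) is set, the counter also counts events from the sibling hyper-thread, which would let an unprivileged user observe another context. The sketch below mimics the gate in user space; perf_paranoid_cpu() and capable() are replaced by hypothetical stand-ins:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define ARCH_PERFMON_EVENTSEL_ANY	(1ULL << 21)	/* AnyThread bit */

    /* Stand-ins for the kernel helpers: paranoid sysctl, unprivileged caller. */
    static bool perf_paranoid_cpu(void)  { return true; }
    static bool capable_sys_admin(void)  { return false; }

    static int check_any_bit(unsigned long long config)
    {
            if ((config & ARCH_PERFMON_EVENTSEL_ANY) &&
                perf_paranoid_cpu() && !capable_sys_admin())
                    return -EACCES;	/* refused: -13 */
            return 0;
    }

    int main(void)
    {
            printf("plain event: %d\n", check_any_bit(0));
            printf("ANY event:   %d\n", check_any_bit(ARCH_PERFMON_EVENTSEL_ANY));
            return 0;
    }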
@@ -553,9 +556,9 @@ static void x86_pmu_disable_all(void)
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(x86_pmu.eventsel + idx, val);
-		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
-		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
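This hunk and the three that follow are a rename: ARCH_PERFMON_EVENTSEL0_ENABLE becomes ARCH_PERFMON_EVENTSEL_ENABLE, since the enable bit (bit 22) is identical in every IA32_PERFEVTSELx MSR, not just PERFEVTSEL0. A tiny sketch of the per-counter disable step, with the rdmsrl/wrmsrl MSR access replaced by a plain variable:

    #include <stdio.h>

    #define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)	/* EN bit */

    int main(void)
    {
            unsigned long long val = 0x5300c0ULL;	/* hypothetical eventsel value */

            if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
                    val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;	/* clear enable bit */
                    printf("disabled: 0x%llx\n", val);	/* 0x1300c0 */
            }
            return 0;
    }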
@@ -590,7 +593,7 @@ static void x86_pmu_enable_all(void)
 			continue;
 
 		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 		wrmsrl(x86_pmu.eventsel + idx, val);
 	}
 }
@@ -612,8 +615,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
 	for (i = 0; i < n; i++) {
-		constraints[i] =
-		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+		constraints[i] = c;
 	}
 
 	/*
@@ -853,7 +856,7 @@ void hw_perf_enable(void)
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	(void)checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -1094,8 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
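perf_sample_data_init() centralizes the initialization the removed assignments performed by hand, so fields added to struct perf_sample_data later get initialized in one place for every overflow handler. A runnable sketch under the assumption that the helper simply sets addr and clears raw, with a minimal stand-in for the struct:

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned long long u64;

    /* Minimal stand-in for struct perf_sample_data; the real one has
     * more fields. */
    struct perf_sample_data {
            u64   addr;
            void *raw;
    };

    /* Assumed shape of the helper this hunk switches to. */
    static inline void perf_sample_data_init(struct perf_sample_data *data,
                                             u64 addr)
    {
            data->addr = addr;
            data->raw  = NULL;
    }

    int main(void)
    {
            struct perf_sample_data data;

            perf_sample_data_init(&data, 0);
            printf("addr=%llu raw=%p\n", data.addr, data.raw);
            return 0;
    }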
@@ -1347,6 +1349,7 @@ static void __init pmu_check_apic(void)
 
 void __init init_hw_perf_events(void)
 {
+	struct event_constraint *c;
 	int err;
 
 	pr_info("Performance Events: ");
@@ -1395,6 +1398,16 @@ void __init init_hw_perf_events(void)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
 				   0, x86_pmu.num_events);
 
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if (c->cmask != INTEL_ARCH_FIXED_MASK)
+				continue;
+
+			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
+			c->weight += x86_pmu.num_events;
+		}
+	}
+
 	pr_info("... version:                %d\n", x86_pmu.version);
 	pr_info("... bit width:              %d\n", x86_pmu.event_bits);
 	pr_info("... generic registers:      %d\n", x86_pmu.num_events);
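The new loop widens each fixed-counter constraint so the event can fall back to any generic counter: the low num_events bits are OR-ed into the index mask and the weight grows to match. A worked example, assuming 4 generic counters and a constraint pinned to fixed counter 0 (bit 32):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long idxmsk64 = 1ULL << 32;	/* fixed counter 0 */
            int weight = 1, num_events = 4;

            idxmsk64 |= (1ULL << num_events) - 1;	/* add generic counters 0-3 */
            weight   += num_events;

            printf("idxmsk64 = 0x%llx, weight = %d\n", idxmsk64, weight);
            /* prints: idxmsk64 = 0x10000000f, weight = 5 */
            return 0;
    }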