-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        | 35
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c    | 17
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  | 30
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c     | 40
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c     |  3
5 files changed, 74 insertions(+), 51 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1dd42c18f1cb..65e9c5efb618 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -196,12 +196,11 @@ struct x86_pmu {
 	void		(*enable_all)(int added);
 	void		(*enable)(struct perf_event *);
 	void		(*disable)(struct perf_event *);
-	int		(*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
+	int		(*hw_config)(struct perf_event *event);
 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
-	u64		(*raw_event)(u64);
 	int		max_events;
 	int		num_counters;
 	int		num_counters_fixed;
@@ -426,28 +425,26 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
 	return 0;
 }
 
-static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+static int x86_pmu_hw_config(struct perf_event *event)
 {
 	/*
 	 * Generate PMC IRQs:
 	 * (keep 'enabled' bit clear for now)
 	 */
-	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
 
 	/*
 	 * Count user and OS events unless requested not to
 	 */
-	if (!attr->exclude_user)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-	if (!attr->exclude_kernel)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+	if (!event->attr.exclude_user)
+		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
+	if (!event->attr.exclude_kernel)
+		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
-	return 0;
-}
+	if (event->attr.type == PERF_TYPE_RAW)
+		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
-static u64 x86_pmu_raw_event(u64 hw_event)
-{
-	return hw_event & X86_RAW_EVENT_MASK;
+	return 0;
 }
 
 /*
@@ -489,7 +486,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	hwc->last_tag = ~0ULL;
 
 	/* Processor specifics */
-	err = x86_pmu.hw_config(attr, hwc);
+	err = x86_pmu.hw_config(event);
 	if (err)
 		return err;
 
@@ -508,16 +505,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 		return -EOPNOTSUPP;
 	}
 
-	/*
-	 * Raw hw_event type provide the config in the hw_event structure
-	 */
-	if (attr->type == PERF_TYPE_RAW) {
-		hwc->config |= x86_pmu.raw_event(attr->config);
-		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
-		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-			return -EACCES;
+	if (attr->type == PERF_TYPE_RAW)
 		return 0;
-	}
 
 	if (attr->type == PERF_TYPE_HW_CACHE)
 		return set_ext_hw_attr(hwc, attr);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 37e9517729df..bbd7339f08a9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -111,9 +111,19 @@ static u64 amd_pmu_event_map(int hw_event)
 	return amd_perfmon_event_map[hw_event];
 }
 
-static u64 amd_pmu_raw_event(u64 hw_event)
+static int amd_pmu_hw_config(struct perf_event *event)
 {
-	return hw_event & AMD64_RAW_EVENT_MASK;
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.type != PERF_TYPE_RAW)
+		return 0;
+
+	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+	return 0;
 }
 
 /*
@@ -365,12 +375,11 @@ static __initconst struct x86_pmu amd_pmu = {
 	.enable_all		= x86_pmu_enable_all,
 	.enable			= x86_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
-	.hw_config		= x86_hw_config,
+	.hw_config		= amd_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_K7_EVNTSEL0,
 	.perfctr		= MSR_K7_PERFCTR0,
 	.event_map		= amd_pmu_event_map,
-	.raw_event		= amd_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
 	.num_counters		= 4,
 	.cntval_bits		= 48,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index dfdd6f90fc8e..30bf10c55f1e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -758,6 +758,30 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 	return x86_get_event_constraints(cpuc, event);
 }
 
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.type != PERF_TYPE_RAW)
+		return 0;
+
+	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
+		return 0;
+
+	if (x86_pmu.version < 3)
+		return -EINVAL;
+
+	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
+
+	return 0;
+}
+
 static __initconst struct x86_pmu core_pmu = {
 	.name			= "core",
 	.handle_irq		= x86_pmu_handle_irq,
@@ -765,12 +789,11 @@ static __initconst struct x86_pmu core_pmu = {
 	.enable_all		= x86_pmu_enable_all,
 	.enable			= x86_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
-	.hw_config		= x86_hw_config,
+	.hw_config		= x86_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
@@ -804,12 +827,11 @@ static __initconst struct x86_pmu intel_pmu = {
 	.enable_all		= intel_pmu_enable_all,
 	.enable			= intel_pmu_enable_event,
 	.disable		= intel_pmu_disable_event,
-	.hw_config		= x86_hw_config,
+	.hw_config		= intel_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map		= intel_pmu_event_map,
-	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
 	/*
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 4139100404e8..acd237d29f11 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -419,20 +419,7 @@ static u64 p4_pmu_event_map(int hw_event)
 	return config;
 }
 
-/*
- * We don't control raw events so it's up to the caller
- * to pass sane values (and we don't count the thread number
- * on HT machine but allow HT-compatible specifics to be
- * passed on)
- */
-static u64 p4_pmu_raw_event(u64 hw_event)
-{
-	return hw_event &
-		(p4_config_pack_escr(P4_ESCR_MASK_HT) |
-		 p4_config_pack_cccr(P4_CCCR_MASK_HT));
-}
-
-static int p4_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+static int p4_hw_config(struct perf_event *event)
 {
 	int cpu = raw_smp_processor_id();
 	u32 escr, cccr;
@@ -444,11 +431,29 @@ static int p4_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
 	 */
 
 	cccr = p4_default_cccr_conf(cpu);
-	escr = p4_default_escr_conf(cpu, attr->exclude_kernel, attr->exclude_user);
-	hwc->config = p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);
+	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
+					event->attr.exclude_user);
+	event->hw.config = p4_config_pack_escr(escr) |
+			   p4_config_pack_cccr(cccr);
 
 	if (p4_ht_active() && p4_ht_thread(cpu))
-		hwc->config = p4_set_ht_bit(hwc->config);
+		event->hw.config = p4_set_ht_bit(event->hw.config);
+
+	if (event->attr.type != PERF_TYPE_RAW)
+		return 0;
+
+	/*
+	 * We don't control raw events so it's up to the caller
+	 * to pass sane values (and we don't count the thread number
+	 * on HT machine but allow HT-compatible specifics to be
+	 * passed on)
+	 *
+	 * XXX: HT wide things should check perf_paranoid_cpu() &&
+	 *      CAP_SYS_ADMIN
+	 */
+	event->hw.config |= event->attr.config &
+		(p4_config_pack_escr(P4_ESCR_MASK_HT) |
+		 p4_config_pack_cccr(P4_CCCR_MASK_HT));
 
 	return 0;
 }
@@ -785,7 +790,6 @@ static __initconst struct x86_pmu p4_pmu = {
 	.eventsel		= MSR_P4_BPU_CCCR0,
 	.perfctr		= MSR_P4_BPU_PERFCTR0,
 	.event_map		= p4_pmu_event_map,
-	.raw_event		= p4_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p4_general_events),
 	.get_event_constraints	= x86_get_event_constraints,
 	/*
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 03c139a67baa..9123e8ec9958 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -91,12 +91,11 @@ static __initconst struct x86_pmu p6_pmu = {
 	.enable_all		= p6_pmu_enable_all,
 	.enable			= p6_pmu_enable_event,
 	.disable		= p6_pmu_disable_event,
-	.hw_config		= x86_hw_config,
+	.hw_config		= x86_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_P6_EVNTSEL0,
 	.perfctr		= MSR_P6_PERFCTR0,
 	.event_map		= p6_pmu_event_map,
-	.raw_event		= x86_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
 	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,
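
Illustrative note (not part of the patch): the intel_pmu_hw_config() added above gates the ARCH_PERFMON_EVENTSEL_ANY (any-thread, bit 21) raw-config bit behind a privilege check instead of silently passing it through. Below is a minimal userspace sketch of the path this guards, assuming a kernel with this change applied; the raw encoding 0x3c (unhalted core cycles) and the expected errno values are illustrative only, and perf_paranoid_cpu() corresponds to the perf_event_paranoid sysctl.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_RAW;
	/* event 0x3c, umask 0x00, plus the ANY bit (bit 21) */
	attr.config = 0x003c | (1ULL << 21);

	/* pid = 0, cpu = -1: count this task on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");	/* EACCES unprivileged, EINVAL if PMU version < 3 */
	else
		close(fd);

	return 0;
}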