author		Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:51:03 -0400
commit		4aeb0b4239bb3b67ed402cb9cef3e000c892cadf
tree		0a025a30fa5de3b40ab1ea156a3f86ee2d000839
parent		527e26af3741a2168986d8b82653ffe173891324
perfcounters: rename struct hw_perf_counter_ops into struct pmu
This patch renames struct hw_perf_counter_ops into struct pmu. It
introduces a structure to describe a cpu specific pmu (performance
monitoring unit). It may contain ops and data. The new name of the
structure fits better, is shorter, and thus easier to handle. Where
appropriate, function and variable names have been changed too.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-7-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
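For illustration, a minimal sketch of the interface after this rename:
an architecture fills in a struct pmu with its callbacks and returns it
from hw_perf_counter_init(); the core then dispatches through
counter->pmu. Only the struct pmu layout and the dispatch pattern come
from this patch; the example_* names and stub bodies below are
hypothetical.

	struct perf_counter;

	/* ops layout as introduced in include/linux/perf_counter.h */
	struct pmu {
		int  (*enable)  (struct perf_counter *counter);
		void (*disable) (struct perf_counter *counter);
		void (*read)    (struct perf_counter *counter);
	};

	/* A hypothetical backend provides its callbacks ... */
	static int  example_pmu_enable(struct perf_counter *counter)  { return 0; }
	static void example_pmu_disable(struct perf_counter *counter) { }
	static void example_pmu_read(struct perf_counter *counter)    { }

	static const struct pmu example_pmu = {
		.enable  = example_pmu_enable,
		.disable = example_pmu_disable,
		.read    = example_pmu_read,
	};

	/* ... and hands it to the core; generic code then calls
	 * counter->pmu->enable()/->disable()/->read() instead of
	 * going through the old counter->hw_ops pointer.
	 */
	const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
	{
		return &example_pmu;
	}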
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	25
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	37
-rw-r--r--	include/linux/perf_counter.h	9
-rw-r--r--	kernel/perf_counter.c	68
4 files changed, 66 insertions(+), 73 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bd76d0fa2c35..d9bbe5efc649 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -256,7 +256,7 @@ static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new)
 	return 0;
 }
 
-static void power_perf_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_counter *counter)
 {
 	long val, delta, prev;
 
@@ -405,7 +405,7 @@ void hw_perf_restore(u64 disable)
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
 		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
-			power_perf_read(counter);
+			power_pmu_read(counter);
 			write_pmc(counter->hw.idx, 0);
 			counter->hw.idx = 0;
 		}
@@ -477,7 +477,7 @@ static void counter_sched_in(struct perf_counter *counter, int cpu)
 	counter->oncpu = cpu;
 	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
 	if (is_software_counter(counter))
-		counter->hw_ops->enable(counter);
+		counter->pmu->enable(counter);
 }
 
 /*
@@ -533,7 +533,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
  * re-enable the PMU in order to get hw_perf_restore to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_perf_enable(struct perf_counter *counter)
+static int power_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	unsigned long flags;
@@ -573,7 +573,7 @@ static int power_perf_enable(struct perf_counter *counter)
 /*
  * Remove a counter from the PMU.
  */
-static void power_perf_disable(struct perf_counter *counter)
+static void power_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	long i;
@@ -583,7 +583,7 @@ static void power_perf_disable(struct perf_counter *counter)
 	local_irq_save(flags);
 	pmudis = hw_perf_save_disable();
 
-	power_perf_read(counter);
+	power_pmu_read(counter);
 
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	for (i = 0; i < cpuhw->n_counters; ++i) {
@@ -607,10 +607,10 @@ static void power_perf_disable(struct perf_counter *counter)
 	local_irq_restore(flags);
 }
 
-struct hw_perf_counter_ops power_perf_ops = {
-	.enable = power_perf_enable,
-	.disable = power_perf_disable,
-	.read = power_perf_read
+struct pmu power_pmu = {
+	.enable = power_pmu_enable,
+	.disable = power_pmu_disable,
+	.read = power_pmu_read,
 };
 
 /* Number of perf_counters counting hardware events */
@@ -631,8 +631,7 @@ static void hw_perf_counter_destroy(struct perf_counter *counter)
 	}
 }
 
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	unsigned long ev;
 	struct perf_counter *ctrs[MAX_HWCOUNTERS];
@@ -705,7 +704,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 
 	if (err)
 		return ERR_PTR(err);
-	return &power_perf_ops;
+	return &power_pmu;
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index ad663d5ad2d9..95de980c74a0 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -515,8 +515,8 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__pmc_generic_disable(struct perf_counter *counter,
-		      struct hw_perf_counter *hwc, unsigned int idx)
+__x86_pmu_disable(struct perf_counter *counter,
+		  struct hw_perf_counter *hwc, unsigned int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_disable(counter, hwc, idx);
@@ -591,8 +591,8 @@ __pmc_fixed_enable(struct perf_counter *counter,
 }
 
 static void
-__pmc_generic_enable(struct perf_counter *counter,
-		     struct hw_perf_counter *hwc, int idx)
+__x86_pmu_enable(struct perf_counter *counter,
+		 struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_enable(counter, hwc, idx);
@@ -626,7 +626,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static int pmc_generic_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -667,7 +667,7 @@ try_generic:
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	/*
@@ -676,7 +676,7 @@ try_generic:
 	barrier();
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
-	__pmc_generic_enable(counter, hwc, idx);
+	__x86_pmu_enable(counter, hwc, idx);
 
 	return 0;
 }
@@ -731,13 +731,13 @@ void perf_counter_print_debug(void)
 	local_irq_enable();
 }
 
-static void pmc_generic_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	clear_bit(idx, cpuc->used);
 	cpuc->counters[idx] = NULL;
@@ -767,7 +767,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__hw_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-		__pmc_generic_enable(counter, hwc, idx);
+		__x86_pmu_enable(counter, hwc, idx);
 }
 
 /*
@@ -805,7 +805,7 @@ again:
 
 		perf_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__pmc_generic_disable(counter, &counter->hw, bit);
+			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
 
 	hw_perf_ack_status(ack);
@@ -1034,19 +1034,18 @@ void __init init_hw_perf_counters(void)
 	register_die_notifier(&perf_counter_nmi_notifier);
 }
 
-static void pmc_generic_read(struct perf_counter *counter)
+static void x86_pmu_read(struct perf_counter *counter)
 {
 	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
 }
 
-static const struct hw_perf_counter_ops x86_perf_counter_ops = {
-	.enable = pmc_generic_enable,
-	.disable = pmc_generic_disable,
-	.read = pmc_generic_read,
+static const struct pmu pmu = {
+	.enable = x86_pmu_enable,
+	.disable = x86_pmu_disable,
+	.read = x86_pmu_read,
 };
 
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	int err;
 
@@ -1054,7 +1053,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 	if (err)
 		return ERR_PTR(err);
 
-	return &x86_perf_counter_ops;
+	return &pmu;
 }
 
 /*
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index be10b3ffe320..c3db52dc876a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -334,9 +334,9 @@ struct hw_perf_counter {
 struct perf_counter;
 
 /**
- * struct hw_perf_counter_ops - performance counter hw ops
+ * struct pmu - generic performance monitoring unit
  */
-struct hw_perf_counter_ops {
+struct pmu {
 	int (*enable) (struct perf_counter *counter);
 	void (*disable) (struct perf_counter *counter);
 	void (*read) (struct perf_counter *counter);
@@ -381,7 +381,7 @@ struct perf_counter {
 	struct list_head sibling_list;
 	int nr_siblings;
 	struct perf_counter *group_leader;
-	const struct hw_perf_counter_ops *hw_ops;
+	const struct pmu *pmu;
 
 	enum perf_counter_active_state state;
 	enum perf_counter_active_state prev_state;
@@ -519,8 +519,7 @@ struct perf_cpu_context {
  */
 extern int perf_max_counters;
 
-extern const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter);
+extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
 
 extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
 extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 09396098dd0d..582108addefa 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -52,8 +52,7 @@ static DEFINE_MUTEX(perf_resource_mutex);
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	return NULL;
 }
@@ -124,7 +123,7 @@ counter_sched_out(struct perf_counter *counter,
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	counter->tstamp_stopped = ctx->time;
-	counter->hw_ops->disable(counter);
+	counter->pmu->disable(counter);
 	counter->oncpu = -1;
 
 	if (!is_software_counter(counter))
@@ -417,7 +416,7 @@ counter_sched_in(struct perf_counter *counter,
 	 */
 	smp_wmb();
 
-	if (counter->hw_ops->enable(counter)) {
+	if (counter->pmu->enable(counter)) {
 		counter->state = PERF_COUNTER_STATE_INACTIVE;
 		counter->oncpu = -1;
 		return -EAGAIN;
@@ -1096,7 +1095,7 @@ static void __read(void *info)
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
-	counter->hw_ops->read(counter);
+	counter->pmu->read(counter);
 	update_counter_times(counter);
 	local_irq_restore(flags);
 }
@@ -1922,7 +1921,7 @@ static void perf_counter_output(struct perf_counter *counter,
 	leader = counter->group_leader;
 	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
 		if (sub != counter)
-			sub->hw_ops->read(sub);
+			sub->pmu->read(sub);
 
 		group_entry.event = sub->hw_event.config;
 		group_entry.counter = atomic64_read(&sub->count);
@@ -2264,7 +2263,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 	struct pt_regs *regs;
 
 	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-	counter->hw_ops->read(counter);
+	counter->pmu->read(counter);
 
 	regs = get_irq_regs();
 	/*
@@ -2410,7 +2409,7 @@ static void perf_swcounter_disable(struct perf_counter *counter)
 	perf_swcounter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_generic = {
+static const struct pmu perf_ops_generic = {
 	.enable = perf_swcounter_enable,
 	.disable = perf_swcounter_disable,
 	.read = perf_swcounter_read,
@@ -2460,7 +2459,7 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter)
 	cpu_clock_perf_counter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+static const struct pmu perf_ops_cpu_clock = {
 	.enable = cpu_clock_perf_counter_enable,
 	.disable = cpu_clock_perf_counter_disable,
 	.read = cpu_clock_perf_counter_read,
@@ -2522,7 +2521,7 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
 	task_clock_perf_counter_update(counter, time);
 }
 
-static const struct hw_perf_counter_ops perf_ops_task_clock = {
+static const struct pmu perf_ops_task_clock = {
 	.enable = task_clock_perf_counter_enable,
 	.disable = task_clock_perf_counter_disable,
 	.read = task_clock_perf_counter_read,
@@ -2574,7 +2573,7 @@ static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
 	cpu_migrations_perf_counter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+static const struct pmu perf_ops_cpu_migrations = {
 	.enable = cpu_migrations_perf_counter_enable,
 	.disable = cpu_migrations_perf_counter_disable,
 	.read = cpu_migrations_perf_counter_read,
@@ -2600,8 +2599,7 @@ static void tp_perf_counter_destroy(struct perf_counter *counter)
 	ftrace_profile_disable(perf_event_id(&counter->hw_event));
 }
 
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
 	int event_id = perf_event_id(&counter->hw_event);
 	int ret;
@@ -2616,18 +2614,16 @@ tp_perf_counter_init(struct perf_counter *counter)
 	return &perf_ops_generic;
 }
 #else
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
 	return NULL;
 }
 #endif
 
-static const struct hw_perf_counter_ops *
-sw_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_hw_event *hw_event = &counter->hw_event;
-	const struct hw_perf_counter_ops *hw_ops = NULL;
+	const struct pmu *pmu = NULL;
 	struct hw_perf_counter *hwc = &counter->hw;
 
 	/*
@@ -2639,7 +2635,7 @@ sw_perf_counter_init(struct perf_counter *counter)
 	 */
 	switch (perf_event_id(&counter->hw_event)) {
 	case PERF_COUNT_CPU_CLOCK:
-		hw_ops = &perf_ops_cpu_clock;
+		pmu = &perf_ops_cpu_clock;
 
 		if (hw_event->irq_period && hw_event->irq_period < 10000)
 			hw_event->irq_period = 10000;
@@ -2650,9 +2646,9 @@ sw_perf_counter_init(struct perf_counter *counter)
 		 * use the cpu_clock counter instead.
 		 */
 		if (counter->ctx->task)
-			hw_ops = &perf_ops_task_clock;
+			pmu = &perf_ops_task_clock;
 		else
-			hw_ops = &perf_ops_cpu_clock;
+			pmu = &perf_ops_cpu_clock;
 
 		if (hw_event->irq_period && hw_event->irq_period < 10000)
 			hw_event->irq_period = 10000;
@@ -2661,18 +2657,18 @@ sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		hw_ops = &perf_ops_generic;
+		pmu = &perf_ops_generic;
 		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
 		if (!counter->hw_event.exclude_kernel)
-			hw_ops = &perf_ops_cpu_migrations;
+			pmu = &perf_ops_cpu_migrations;
 		break;
 	}
 
-	if (hw_ops)
+	if (pmu)
 		hwc->irq_period = hw_event->irq_period;
 
-	return hw_ops;
+	return pmu;
 }
 
 /*
@@ -2685,7 +2681,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 		   struct perf_counter *group_leader,
 		   gfp_t gfpflags)
 {
-	const struct hw_perf_counter_ops *hw_ops;
+	const struct pmu *pmu;
 	struct perf_counter *counter;
 	long err;
 
@@ -2713,46 +2709,46 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	counter->cpu = cpu;
 	counter->hw_event = *hw_event;
 	counter->group_leader = group_leader;
-	counter->hw_ops = NULL;
+	counter->pmu = NULL;
 	counter->ctx = ctx;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	if (hw_event->disabled)
 		counter->state = PERF_COUNTER_STATE_OFF;
 
-	hw_ops = NULL;
+	pmu = NULL;
 
 	if (perf_event_raw(hw_event)) {
-		hw_ops = hw_perf_counter_init(counter);
+		pmu = hw_perf_counter_init(counter);
 		goto done;
 	}
 
 	switch (perf_event_type(hw_event)) {
 	case PERF_TYPE_HARDWARE:
-		hw_ops = hw_perf_counter_init(counter);
+		pmu = hw_perf_counter_init(counter);
 		break;
 
 	case PERF_TYPE_SOFTWARE:
-		hw_ops = sw_perf_counter_init(counter);
+		pmu = sw_perf_counter_init(counter);
 		break;
 
 	case PERF_TYPE_TRACEPOINT:
-		hw_ops = tp_perf_counter_init(counter);
+		pmu = tp_perf_counter_init(counter);
 		break;
 	}
 done:
 	err = 0;
-	if (!hw_ops)
+	if (!pmu)
 		err = -EINVAL;
-	else if (IS_ERR(hw_ops))
-		err = PTR_ERR(hw_ops);
+	else if (IS_ERR(pmu))
+		err = PTR_ERR(pmu);
 
 	if (err) {
 		kfree(counter);
 		return ERR_PTR(err);
 	}
 
-	counter->hw_ops = hw_ops;
+	counter->pmu = pmu;
 
 	if (counter->hw_event.mmap)
 		atomic_inc(&nr_mmap_tracking);