author     Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:03 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-29 08:51:03 -0400
commit     4aeb0b4239bb3b67ed402cb9cef3e000c892cadf
tree       0a025a30fa5de3b40ab1ea156a3f86ee2d000839 /kernel/perf_counter.c
parent     527e26af3741a2168986d8b82653ffe173891324
perfcounters: rename struct hw_perf_counter_ops into struct pmu
This patch renames struct hw_perf_counter_ops to struct pmu. It
introduces a structure describing a CPU-specific PMU (performance
monitoring unit), which may contain both ops and data. The new name
fits the structure better, is shorter, and is therefore easier to
handle. Where appropriate, function and variable names have been
changed as well.
[ Impact: cleanup ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-7-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
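Note: the diffstat below is limited to kernel/perf_counter.c, so the
definition of the renamed structure itself (which lives in
include/linux/perf_counter.h) is not shown on this page. Judging only
from the callbacks assigned in the hunks that follow (.enable returns
int and is checked, .disable and .read are called as statements), the
renamed structure plausibly looks like this sketch; the exact member
list in the header may differ:

	struct pmu {
		int	(*enable)  (struct perf_counter *counter);	/* 0 on success */
		void	(*disable) (struct perf_counter *counter);
		void	(*read)    (struct perf_counter *counter);	/* update counter->count */
	};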
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 68 ++++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 32 insertions(+), 36 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 09396098dd0d..582108addefa 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -52,8 +52,7 @@ static DEFINE_MUTEX(perf_resource_mutex);
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	return NULL;
 }
@@ -124,7 +123,7 @@ counter_sched_out(struct perf_counter *counter,
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	counter->tstamp_stopped = ctx->time;
-	counter->hw_ops->disable(counter);
+	counter->pmu->disable(counter);
 	counter->oncpu = -1;
 
 	if (!is_software_counter(counter))
@@ -417,7 +416,7 @@ counter_sched_in(struct perf_counter *counter,
 	 */
 	smp_wmb();
 
-	if (counter->hw_ops->enable(counter)) {
+	if (counter->pmu->enable(counter)) {
 		counter->state = PERF_COUNTER_STATE_INACTIVE;
 		counter->oncpu = -1;
 		return -EAGAIN;
@@ -1096,7 +1095,7 @@ static void __read(void *info)
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
-	counter->hw_ops->read(counter);
+	counter->pmu->read(counter);
 	update_counter_times(counter);
 	local_irq_restore(flags);
 }
@@ -1922,7 +1921,7 @@ static void perf_counter_output(struct perf_counter *counter,
 		leader = counter->group_leader;
 		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
 			if (sub != counter)
-				sub->hw_ops->read(sub);
+				sub->pmu->read(sub);
 
 			group_entry.event = sub->hw_event.config;
 			group_entry.counter = atomic64_read(&sub->count);
@@ -2264,7 +2263,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 	struct pt_regs *regs;
 
 	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-	counter->hw_ops->read(counter);
+	counter->pmu->read(counter);
 
 	regs = get_irq_regs();
 	/*
@@ -2410,7 +2409,7 @@ static void perf_swcounter_disable(struct perf_counter *counter)
 	perf_swcounter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_generic = {
+static const struct pmu perf_ops_generic = {
 	.enable		= perf_swcounter_enable,
 	.disable	= perf_swcounter_disable,
 	.read		= perf_swcounter_read,
@@ -2460,7 +2459,7 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter)
 	cpu_clock_perf_counter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+static const struct pmu perf_ops_cpu_clock = {
 	.enable		= cpu_clock_perf_counter_enable,
 	.disable	= cpu_clock_perf_counter_disable,
 	.read		= cpu_clock_perf_counter_read,
@@ -2522,7 +2521,7 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
 	task_clock_perf_counter_update(counter, time);
 }
 
-static const struct hw_perf_counter_ops perf_ops_task_clock = {
+static const struct pmu perf_ops_task_clock = {
 	.enable		= task_clock_perf_counter_enable,
 	.disable	= task_clock_perf_counter_disable,
 	.read		= task_clock_perf_counter_read,
@@ -2574,7 +2573,7 @@ static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
 	cpu_migrations_perf_counter_update(counter);
 }
 
-static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+static const struct pmu perf_ops_cpu_migrations = {
 	.enable		= cpu_migrations_perf_counter_enable,
 	.disable	= cpu_migrations_perf_counter_disable,
 	.read		= cpu_migrations_perf_counter_read,
@@ -2600,8 +2599,7 @@ static void tp_perf_counter_destroy(struct perf_counter *counter)
 	ftrace_profile_disable(perf_event_id(&counter->hw_event));
 }
 
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
 	int event_id = perf_event_id(&counter->hw_event);
 	int ret;
@@ -2616,18 +2614,16 @@ tp_perf_counter_init(struct perf_counter *counter)
 	return &perf_ops_generic;
 }
 #else
-static const struct hw_perf_counter_ops *
-tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
 	return NULL;
 }
 #endif
 
-static const struct hw_perf_counter_ops *
-sw_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_hw_event *hw_event = &counter->hw_event;
-	const struct hw_perf_counter_ops *hw_ops = NULL;
+	const struct pmu *pmu = NULL;
 	struct hw_perf_counter *hwc = &counter->hw;
 
 	/*
@@ -2639,7 +2635,7 @@ sw_perf_counter_init(struct perf_counter *counter)
 	 */
 	switch (perf_event_id(&counter->hw_event)) {
 	case PERF_COUNT_CPU_CLOCK:
-		hw_ops = &perf_ops_cpu_clock;
+		pmu = &perf_ops_cpu_clock;
 
 		if (hw_event->irq_period && hw_event->irq_period < 10000)
 			hw_event->irq_period = 10000;
@@ -2650,9 +2646,9 @@ sw_perf_counter_init(struct perf_counter *counter)
 		 * use the cpu_clock counter instead.
 		 */
 		if (counter->ctx->task)
-			hw_ops = &perf_ops_task_clock;
+			pmu = &perf_ops_task_clock;
 		else
-			hw_ops = &perf_ops_cpu_clock;
+			pmu = &perf_ops_cpu_clock;
 
 		if (hw_event->irq_period && hw_event->irq_period < 10000)
 			hw_event->irq_period = 10000;
@@ -2661,18 +2657,18 @@ sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		hw_ops = &perf_ops_generic;
+		pmu = &perf_ops_generic;
 		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
 		if (!counter->hw_event.exclude_kernel)
-			hw_ops = &perf_ops_cpu_migrations;
+			pmu = &perf_ops_cpu_migrations;
 		break;
 	}
 
-	if (hw_ops)
+	if (pmu)
 		hwc->irq_period = hw_event->irq_period;
 
-	return hw_ops;
+	return pmu;
 }
 
 /*
@@ -2685,7 +2681,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 		   struct perf_counter *group_leader,
 		   gfp_t gfpflags)
 {
-	const struct hw_perf_counter_ops *hw_ops;
+	const struct pmu *pmu;
 	struct perf_counter *counter;
 	long err;
 
@@ -2713,46 +2709,46 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	counter->cpu = cpu;
 	counter->hw_event = *hw_event;
 	counter->group_leader = group_leader;
-	counter->hw_ops = NULL;
+	counter->pmu = NULL;
 	counter->ctx = ctx;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	if (hw_event->disabled)
 		counter->state = PERF_COUNTER_STATE_OFF;
 
-	hw_ops = NULL;
+	pmu = NULL;
 
 	if (perf_event_raw(hw_event)) {
-		hw_ops = hw_perf_counter_init(counter);
+		pmu = hw_perf_counter_init(counter);
 		goto done;
 	}
 
 	switch (perf_event_type(hw_event)) {
 	case PERF_TYPE_HARDWARE:
-		hw_ops = hw_perf_counter_init(counter);
+		pmu = hw_perf_counter_init(counter);
 		break;
 
 	case PERF_TYPE_SOFTWARE:
-		hw_ops = sw_perf_counter_init(counter);
+		pmu = sw_perf_counter_init(counter);
 		break;
 
 	case PERF_TYPE_TRACEPOINT:
-		hw_ops = tp_perf_counter_init(counter);
+		pmu = tp_perf_counter_init(counter);
 		break;
 	}
 done:
 	err = 0;
-	if (!hw_ops)
+	if (!pmu)
 		err = -EINVAL;
-	else if (IS_ERR(hw_ops))
-		err = PTR_ERR(hw_ops);
+	else if (IS_ERR(pmu))
+		err = PTR_ERR(pmu);
 
 	if (err) {
 		kfree(counter);
 		return ERR_PTR(err);
 	}
 
-	counter->hw_ops = hw_ops;
+	counter->pmu = pmu;
 
 	if (counter->hw_event.mmap)
 		atomic_inc(&nr_mmap_tracking);
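For context on the first hunk: hw_perf_counter_init() is declared as a
weak alias that returns NULL, which an architecture backend overrides
to supply its hardware PMU. A minimal sketch of what such an override
would look like after this rename, with hypothetical symbol names not
taken from this patch:

	/* illustrative arch backend, e.g. an arch's perf_counter.c */
	static const struct pmu arch_pmu = {
		.enable		= arch_pmu_enable,	/* hypothetical arch callbacks */
		.disable	= arch_pmu_disable,
		.read		= arch_pmu_read,
	};

	/* overrides the weak NULL-returning alias in kernel/perf_counter.c */
	const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
	{
		/* validate counter->hw_event, set up hardware state, ... */
		return &arch_pmu;
	}

perf_counter_alloc() then stores this pointer in counter->pmu, and the
core calls counter->pmu->enable()/disable()/read() exactly as the hunks
above show.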