author		Stephane Eranian <eranian@google.com>	2014-02-11 10:20:11 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2014-02-21 15:49:07 -0500
commit		001e413f7e7a4a68dc1c3231f72b5be173939c8f
tree		c61451dc5fe0733e9f8eec377b85f52f67afabed
parent		79859cce5a551c3bcd8d36a6287f9d83568ccedb
perf/x86/uncore: move uncore_event_to_box() and uncore_pmu_to_box()
Move a couple of functions around to avoid forward declarations
when we add code later on.
Cc: mingo@elte.hu
Cc: acme@redhat.com
Cc: ak@linux.intel.com
Cc: zheng.z.yan@intel.com
Cc: peterz@infradead.org
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1392132015-14521-6-git-send-email-eranian@google.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
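Background on the motivation (not part of the patch): in C, a call to a static function that is defined later in the same file needs a forward declaration; moving the definition above its first caller lets that declaration be dropped. A minimal sketch with hypothetical names:

/*
 * Hypothetical example, not from this patch: helper() is defined
 * before caller(), so no separate forward declaration is needed.
 * With the definitions in the opposite order, the file would need
 * "static int helper(int x);" near the top.
 */
static int helper(int x)
{
	return x * 2;
}

static int caller(int x)
{
	return helper(x) + 1;
}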
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.c	72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index ea823b8fd592..acbbdde5751c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -66,6 +66,42 @@ DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
 
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+	struct intel_uncore_box *box;
+
+	box = *per_cpu_ptr(pmu->box, cpu);
+	if (box)
+		return box;
+
+	raw_spin_lock(&uncore_box_lock);
+	list_for_each_entry(box, &pmu->box_list, list) {
+		if (box->phys_id == topology_physical_package_id(cpu)) {
+			atomic_inc(&box->refcnt);
+			*per_cpu_ptr(pmu->box, cpu) = box;
+			break;
+		}
+	}
+	raw_spin_unlock(&uncore_box_lock);
+
+	return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+	/*
+	 * perf core schedules event on the basis of cpu, uncore events are
+	 * collected by one of the cpus inside a physical package.
+	 */
+	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+}
+
 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 {
 	u64 count;
@@ -2845,42 +2881,6 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
 	return box;
 }
 
-static struct intel_uncore_box *
-uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
-{
-	struct intel_uncore_box *box;
-
-	box = *per_cpu_ptr(pmu->box, cpu);
-	if (box)
-		return box;
-
-	raw_spin_lock(&uncore_box_lock);
-	list_for_each_entry(box, &pmu->box_list, list) {
-		if (box->phys_id == topology_physical_package_id(cpu)) {
-			atomic_inc(&box->refcnt);
-			*per_cpu_ptr(pmu->box, cpu) = box;
-			break;
-		}
-	}
-	raw_spin_unlock(&uncore_box_lock);
-
-	return *per_cpu_ptr(pmu->box, cpu);
-}
-
-static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
-{
-	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
-}
-
-static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
-{
-	/*
-	 * perf core schedules event on the basis of cpu, uncore events are
-	 * collected by one of the cpus inside a physical package.
-	 */
-	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
-}
-
 static int
 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
 {
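Design note: the moved uncore_pmu_to_box() takes uncore_box_lock only on the first lookup for a given CPU; after that, the per-cpu slot in pmu->box serves as a lock-free cache. The same lock-on-miss pattern in miniature, as a standalone user-space sketch (hypothetical names and topology, a pthread mutex standing in for the kernel raw spinlock):

#include <pthread.h>
#include <stddef.h>

#define NR_CPUS 8

struct box {
	int pkg;		/* physical package this box belongs to */
	struct box *next;
};

static struct box *cpu_cache[NR_CPUS];	/* per-cpu cached pointer, NULL initially */
static struct box *box_list;		/* global list of all boxes */
static pthread_mutex_t box_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for topology_physical_package_id(): 4 CPUs per package. */
static int cpu_to_pkg(int cpu)
{
	return cpu / 4;
}

static struct box *cpu_to_box(int cpu)
{
	struct box *b = cpu_cache[cpu];

	if (b)				/* fast path: already cached, no lock */
		return b;

	pthread_mutex_lock(&box_lock);	/* slow path: first lookup on this CPU */
	for (b = box_list; b; b = b->next) {
		if (b->pkg == cpu_to_pkg(cpu)) {
			cpu_cache[cpu] = b;	/* cache for subsequent calls */
			break;
		}
	}
	pthread_mutex_unlock(&box_lock);

	return cpu_cache[cpu];		/* NULL if no box matched */
}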