author		David Daney <david.daney@cavium.com>	2011-09-23 20:29:55 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2011-10-24 18:34:26 -0400
commit		4409af37b83587097e6d0f675a4ed0bb2ca0ee59 (patch)
tree		a5da11a64ed8c611de3266375f472191e3888b67 /arch/mips
parent		4d36f59d876d431c3d7b98dc8a1164d70273da55 (diff)
MIPS: perf: Cleanup formatting in arch/mips/kernel/perf_event.c
Get rid of a bunch of useless inline declarations, and join a bunch of
improperly split lines.
Signed-off-by: David Daney <david.daney@cavium.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2793/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
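
As an illustration of the two cleanups named in the message above, here is a minimal before/after sketch on a hypothetical helper (example_encode is an invented name, not code from this patch; the 8/16/8-bit split only mirrors the encoding comment visible in perf_event.c below). Only one of the two forms would exist in a real file; the actual changes are in the diff that follows.

/* Before: a needless 'inline' on a file-local helper, and a declaration
 * wrapped onto new lines well short of the 80-column limit. */
static inline unsigned int
example_encode(unsigned int range, unsigned int cntr_mask,
		unsigned int event)
{
	return (range << 24) | (cntr_mask << 8) | event;
}

/* After: the 'inline' is dropped (the compiler already inlines small
 * static functions where it is worthwhile) and the declaration is
 * joined, with continuation arguments aligned to the open parenthesis. */
static unsigned int example_encode(unsigned int range, unsigned int cntr_mask,
				   unsigned int event)
{
	return (range << 24) | (cntr_mask << 8) | event;
}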
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/kernel/perf_event.c		26
-rw-r--r--	arch/mips/kernel/perf_event_mipsxx.c	68
2 files changed, 37 insertions, 57 deletions
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 0aee944ac380..f3d6e99ab575 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -118,10 +118,9 @@ struct mips_pmu {
 
 static const struct mips_pmu *mipspmu;
 
-static int
-mipspmu_event_set_period(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx)
+static int mipspmu_event_set_period(struct perf_event *event,
+				    struct hw_perf_event *hwc,
+				    int idx)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	s64 left = local64_read(&hwc->period_left);
@@ -162,8 +161,8 @@ mipspmu_event_set_period(struct perf_event *event,
 }
 
 static void mipspmu_event_update(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx)
+				 struct hw_perf_event *hwc,
+				 int idx)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	unsigned long flags;
@@ -420,8 +419,7 @@ static struct pmu pmu = {
 	.read = mipspmu_read,
 };
 
-static inline unsigned int
-mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
 	/*
 	 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
@@ -437,8 +435,7 @@ mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 #endif
 }
 
-static const struct mips_perf_event *
-mipspmu_map_general_event(int idx)
+static const struct mips_perf_event *mipspmu_map_general_event(int idx)
 {
 	const struct mips_perf_event *pev;
 
@@ -449,8 +446,7 @@ mipspmu_map_general_event(int idx)
 	return pev;
 }
 
-static const struct mips_perf_event *
-mipspmu_map_cache_event(u64 config)
+static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result;
 	const struct mips_perf_event *pev;
@@ -513,9 +509,9 @@ static int validate_group(struct perf_event *event)
 }
 
 /* This is needed by specific irq handlers in perf_event_*.c */
-static void
-handle_associated_event(struct cpu_hw_events *cpuc,
-	int idx, struct perf_sample_data *data, struct pt_regs *regs)
+static void handle_associated_event(struct cpu_hw_events *cpuc,
+				    int idx, struct perf_sample_data *data,
+				    struct pt_regs *regs)
 {
 	struct perf_event *event = cpuc->events[idx];
 	struct hw_perf_event *hwc = &event->hw;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index e5ad09a9baf7..a5925b5c792b 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -49,37 +49,32 @@ static int cpu_has_mipsmt_pertccounters;
 #endif
 
 /* Copied from op_model_mipsxx.c */
-static inline unsigned int vpe_shift(void)
+static unsigned int vpe_shift(void)
 {
 	if (num_possible_cpus() > 1)
 		return 1;
 
 	return 0;
 }
-#else /* !CONFIG_MIPS_MT_SMP */
-#define vpe_id() 0
-
-static inline unsigned int vpe_shift(void)
-{
-	return 0;
-}
-#endif /* CONFIG_MIPS_MT_SMP */
 
-static inline unsigned int
-counters_total_to_per_cpu(unsigned int counters)
+static unsigned int counters_total_to_per_cpu(unsigned int counters)
 {
 	return counters >> vpe_shift();
 }
 
-static inline unsigned int
-counters_per_cpu_to_total(unsigned int counters)
+static unsigned int counters_per_cpu_to_total(unsigned int counters)
 {
 	return counters << vpe_shift();
 }
 
+#else /* !CONFIG_MIPS_MT_SMP */
+#define vpe_id() 0
+
+#endif /* CONFIG_MIPS_MT_SMP */
+
 #define __define_perf_accessors(r, n, np) \
 \
-static inline unsigned int r_c0_ ## r ## n(void) \
+static unsigned int r_c0_ ## r ## n(void) \
 { \
 	unsigned int cpu = vpe_id(); \
 \
@@ -94,7 +89,7 @@ static inline unsigned int r_c0_ ## r ## n(void) \
 	return 0; \
 } \
 \
-static inline void w_c0_ ## r ## n(unsigned int value) \
+static void w_c0_ ## r ## n(unsigned int value) \
 { \
 	unsigned int cpu = vpe_id(); \
 \
@@ -121,7 +116,7 @@ __define_perf_accessors(perfctrl, 1, 3)
 __define_perf_accessors(perfctrl, 2, 0)
 __define_perf_accessors(perfctrl, 3, 1)
 
-static inline int __n_counters(void)
+static int __n_counters(void)
 {
 	if (!(read_c0_config1() & M_CONFIG1_PC))
 		return 0;
@@ -135,7 +130,7 @@ static inline int __n_counters(void)
 	return 4;
 }
 
-static inline int n_counters(void)
+static int n_counters(void)
 {
 	int counters;
 
@@ -175,8 +170,7 @@ static void reset_counters(void *arg)
 	}
 }
 
-static inline u64
-mipsxx_pmu_read_counter(unsigned int idx)
+static u64 mipsxx_pmu_read_counter(unsigned int idx)
 {
 	switch (idx) {
 	case 0:
@@ -193,8 +187,7 @@ mipsxx_pmu_read_counter(unsigned int idx)
 	}
 }
 
-static inline void
-mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
 {
 	switch (idx) {
 	case 0:
@@ -212,8 +205,7 @@ mipsxx_pmu_write_counter(unsigned int idx, u64 val)
 	}
 }
 
-static inline unsigned int
-mipsxx_pmu_read_control(unsigned int idx)
+static unsigned int mipsxx_pmu_read_control(unsigned int idx)
 {
 	switch (idx) {
 	case 0:
@@ -230,8 +222,7 @@ mipsxx_pmu_read_control(unsigned int idx)
 	}
 }
 
-static inline void
-mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
 {
 	switch (idx) {
 	case 0:
@@ -511,9 +502,8 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
 };
 
 #ifdef CONFIG_MIPS_MT_SMP
-static void
-check_and_calc_range(struct perf_event *event,
-		     const struct mips_perf_event *pev)
+static void check_and_calc_range(struct perf_event *event,
+				 const struct mips_perf_event *pev)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
@@ -536,9 +526,8 @@ check_and_calc_range(struct perf_event *event,
 		hwc->config_base |= M_TC_EN_ALL;
 }
 #else
-static void
-check_and_calc_range(struct perf_event *event,
-		     const struct mips_perf_event *pev)
+static void check_and_calc_range(struct perf_event *event,
+				 const struct mips_perf_event *pev)
 {
 }
 #endif
@@ -733,8 +722,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
 	return handled;
 }
 
-static irqreturn_t
-mipsxx_pmu_handle_irq(int irq, void *dev)
+static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 {
 	return mipsxx_pmu_handle_shared_irq();
 }
@@ -766,9 +754,8 @@ static void mipsxx_pmu_stop(void)
 #endif
 }
 
-static int
-mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
-			 struct hw_perf_event *hwc)
+static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+				    struct hw_perf_event *hwc)
 {
 	int i;
 
@@ -797,8 +784,7 @@ mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
 	return -EAGAIN;
 }
 
-static void
-mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	unsigned long flags;
@@ -816,8 +802,7 @@ mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 	local_irq_restore(flags);
 }
 
-static void
-mipsxx_pmu_disable_event(int idx)
+static void mipsxx_pmu_disable_event(int idx)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	unsigned long flags;
@@ -892,8 +877,7 @@ mipsxx_pmu_disable_event(int idx)
  * then 128 needs to be added to 15 as the input for the event config,
  * i.e., 143 (0x8F) to be used.
  */
-static const struct mips_perf_event *
-mipsxx_pmu_map_raw_event(u64 config)
+static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 {
 	unsigned int raw_id = config & 0xff;
 	unsigned int base_id = raw_id & 0x7f;