diff options
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-06-11 07:35:57 -0400
committer Ingo Molnar <mingo@elte.hu>              2010-09-09 14:46:27 -0400
commit    51b0fe39549a04858001922919ab355dee9bdfcf (patch)
tree      024768dd0c87e890edf76e129820ea0cdf16a257 /arch
parent    2aa61274efb9f532deaebc9812675a27af1994cb (diff)
perf: Deconstify struct pmu
sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"`
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/perf_event.c            |  4
-rw-r--r--  arch/arm/kernel/perf_event.c              |  2
-rw-r--r--  arch/powerpc/kernel/perf_event.c          |  8
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c  |  2
-rw-r--r--  arch/sh/kernel/perf_event.c               |  4
-rw-r--r--  arch/sparc/kernel/perf_event.c            | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c          | 14
7 files changed, 22 insertions, 22 deletions
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 51c39fa41693..56fa41590381 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c | |||
@@ -642,7 +642,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
642 | return 0; | 642 | return 0; |
643 | } | 643 | } |
644 | 644 | ||
645 | static const struct pmu pmu = { | 645 | static struct pmu pmu = { |
646 | .enable = alpha_pmu_enable, | 646 | .enable = alpha_pmu_enable, |
647 | .disable = alpha_pmu_disable, | 647 | .disable = alpha_pmu_disable, |
648 | .read = alpha_pmu_read, | 648 | .read = alpha_pmu_read, |
@@ -653,7 +653,7 @@ static const struct pmu pmu = { | |||
653 | /* | 653 | /* |
654 | * Main entry point to initialise a HW performance event. | 654 | * Main entry point to initialise a HW performance event. |
655 | */ | 655 | */ |
656 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 656 | struct pmu *hw_perf_event_init(struct perf_event *event) |
657 | { | 657 | { |
658 | int err; | 658 | int err; |
659 | 659 | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 64ca8c3ab94b..0671e92c5111 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -491,7 +491,7 @@ __hw_perf_event_init(struct perf_event *event) | |||
491 | return err; | 491 | return err; |
492 | } | 492 | } |
493 | 493 | ||
494 | const struct pmu * | 494 | struct pmu * |
495 | hw_perf_event_init(struct perf_event *event) | 495 | hw_perf_event_init(struct perf_event *event) |
496 | { | 496 | { |
497 | int err = 0; | 497 | int err = 0; |
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index d301a30445e0..5f78681ad902 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -857,7 +857,7 @@ static void power_pmu_unthrottle(struct perf_event *event) | |||
857 | * Set the flag to make pmu::enable() not perform the | 857 | * Set the flag to make pmu::enable() not perform the |
858 | * schedulability test, it will be performed at commit time | 858 | * schedulability test, it will be performed at commit time |
859 | */ | 859 | */ |
860 | void power_pmu_start_txn(const struct pmu *pmu) | 860 | void power_pmu_start_txn(struct pmu *pmu) |
861 | { | 861 | { |
862 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 862 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
863 | 863 | ||
@@ -870,7 +870,7 @@ void power_pmu_start_txn(const struct pmu *pmu) | |||
870 | * Clear the flag and pmu::enable() will perform the | 870 | * Clear the flag and pmu::enable() will perform the |
871 | * schedulability test. | 871 | * schedulability test. |
872 | */ | 872 | */ |
873 | void power_pmu_cancel_txn(const struct pmu *pmu) | 873 | void power_pmu_cancel_txn(struct pmu *pmu) |
874 | { | 874 | { |
875 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 875 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
876 | 876 | ||
@@ -882,7 +882,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu) | |||
882 | * Perform the group schedulability test as a whole | 882 | * Perform the group schedulability test as a whole |
883 | * Return 0 if success | 883 | * Return 0 if success |
884 | */ | 884 | */ |
885 | int power_pmu_commit_txn(const struct pmu *pmu) | 885 | int power_pmu_commit_txn(struct pmu *pmu) |
886 | { | 886 | { |
887 | struct cpu_hw_events *cpuhw; | 887 | struct cpu_hw_events *cpuhw; |
888 | long i, n; | 888 | long i, n; |
@@ -1014,7 +1014,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
1014 | return 0; | 1014 | return 0; |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1017 | struct pmu *hw_perf_event_init(struct perf_event *event) |
1018 | { | 1018 | { |
1019 | u64 ev; | 1019 | u64 ev; |
1020 | unsigned long flags; | 1020 | unsigned long flags; |
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 1ba45471ae43..d7619b5e7a6e 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c | |||
@@ -428,7 +428,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
428 | return 0; | 428 | return 0; |
429 | } | 429 | } |
430 | 430 | ||
431 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 431 | struct pmu *hw_perf_event_init(struct perf_event *event) |
432 | { | 432 | { |
433 | u64 ev; | 433 | u64 ev; |
434 | struct perf_event *events[MAX_HWEVENTS]; | 434 | struct perf_event *events[MAX_HWEVENTS]; |
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 7a3dc3567258..395572c94c6a 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c | |||
@@ -257,13 +257,13 @@ static void sh_pmu_read(struct perf_event *event) | |||
257 | sh_perf_event_update(event, &event->hw, event->hw.idx); | 257 | sh_perf_event_update(event, &event->hw, event->hw.idx); |
258 | } | 258 | } |
259 | 259 | ||
260 | static const struct pmu pmu = { | 260 | static struct pmu pmu = { |
261 | .enable = sh_pmu_enable, | 261 | .enable = sh_pmu_enable, |
262 | .disable = sh_pmu_disable, | 262 | .disable = sh_pmu_disable, |
263 | .read = sh_pmu_read, | 263 | .read = sh_pmu_read, |
264 | }; | 264 | }; |
265 | 265 | ||
266 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 266 | struct pmu *hw_perf_event_init(struct perf_event *event) |
267 | { | 267 | { |
268 | int err = __hw_perf_event_init(event); | 268 | int err = __hw_perf_event_init(event); |
269 | if (unlikely(err)) { | 269 | if (unlikely(err)) { |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 4bc402938575..481b894a5018 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -1099,7 +1099,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1099 | * Set the flag to make pmu::enable() not perform the | 1099 | * Set the flag to make pmu::enable() not perform the |
1100 | * schedulability test, it will be performed at commit time | 1100 | * schedulability test, it will be performed at commit time |
1101 | */ | 1101 | */ |
1102 | static void sparc_pmu_start_txn(const struct pmu *pmu) | 1102 | static void sparc_pmu_start_txn(struct pmu *pmu) |
1103 | { | 1103 | { |
1104 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1104 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1105 | 1105 | ||
@@ -1111,7 +1111,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu) | |||
1111 | * Clear the flag and pmu::enable() will perform the | 1111 | * Clear the flag and pmu::enable() will perform the |
1112 | * schedulability test. | 1112 | * schedulability test. |
1113 | */ | 1113 | */ |
1114 | static void sparc_pmu_cancel_txn(const struct pmu *pmu) | 1114 | static void sparc_pmu_cancel_txn(struct pmu *pmu) |
1115 | { | 1115 | { |
1116 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1116 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1117 | 1117 | ||
@@ -1123,7 +1123,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu) | |||
1123 | * Perform the group schedulability test as a whole | 1123 | * Perform the group schedulability test as a whole |
1124 | * Return 0 if success | 1124 | * Return 0 if success |
1125 | */ | 1125 | */ |
1126 | static int sparc_pmu_commit_txn(const struct pmu *pmu) | 1126 | static int sparc_pmu_commit_txn(struct pmu *pmu) |
1127 | { | 1127 | { |
1128 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1128 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1129 | int n; | 1129 | int n; |
@@ -1142,7 +1142,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu) | |||
1142 | return 0; | 1142 | return 0; |
1143 | } | 1143 | } |
1144 | 1144 | ||
1145 | static const struct pmu pmu = { | 1145 | static struct pmu pmu = { |
1146 | .enable = sparc_pmu_enable, | 1146 | .enable = sparc_pmu_enable, |
1147 | .disable = sparc_pmu_disable, | 1147 | .disable = sparc_pmu_disable, |
1148 | .read = sparc_pmu_read, | 1148 | .read = sparc_pmu_read, |
@@ -1152,7 +1152,7 @@ static const struct pmu pmu = { | |||
1152 | .commit_txn = sparc_pmu_commit_txn, | 1152 | .commit_txn = sparc_pmu_commit_txn, |
1153 | }; | 1153 | }; |
1154 | 1154 | ||
1155 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1155 | struct pmu *hw_perf_event_init(struct perf_event *event) |
1156 | { | 1156 | { |
1157 | int err = __hw_perf_event_init(event); | 1157 | int err = __hw_perf_event_init(event); |
1158 | 1158 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index de6569c04cd0..fdd97f2e9961 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -618,7 +618,7 @@ static void x86_pmu_enable_all(int added) | |||
618 | } | 618 | } |
619 | } | 619 | } |
620 | 620 | ||
621 | static const struct pmu pmu; | 621 | static struct pmu pmu; |
622 | 622 | ||
623 | static inline int is_x86_event(struct perf_event *event) | 623 | static inline int is_x86_event(struct perf_event *event) |
624 | { | 624 | { |
@@ -1427,7 +1427,7 @@ static inline void x86_pmu_read(struct perf_event *event) | |||
1427 | * Set the flag to make pmu::enable() not perform the | 1427 | * Set the flag to make pmu::enable() not perform the |
1428 | * schedulability test, it will be performed at commit time | 1428 | * schedulability test, it will be performed at commit time |
1429 | */ | 1429 | */ |
1430 | static void x86_pmu_start_txn(const struct pmu *pmu) | 1430 | static void x86_pmu_start_txn(struct pmu *pmu) |
1431 | { | 1431 | { |
1432 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1432 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1433 | 1433 | ||
@@ -1440,7 +1440,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu) | |||
1440 | * Clear the flag and pmu::enable() will perform the | 1440 | * Clear the flag and pmu::enable() will perform the |
1441 | * schedulability test. | 1441 | * schedulability test. |
1442 | */ | 1442 | */ |
1443 | static void x86_pmu_cancel_txn(const struct pmu *pmu) | 1443 | static void x86_pmu_cancel_txn(struct pmu *pmu) |
1444 | { | 1444 | { |
1445 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1445 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1446 | 1446 | ||
@@ -1457,7 +1457,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu) | |||
1457 | * Perform the group schedulability test as a whole | 1457 | * Perform the group schedulability test as a whole |
1458 | * Return 0 if success | 1458 | * Return 0 if success |
1459 | */ | 1459 | */ |
1460 | static int x86_pmu_commit_txn(const struct pmu *pmu) | 1460 | static int x86_pmu_commit_txn(struct pmu *pmu) |
1461 | { | 1461 | { |
1462 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1462 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1463 | int assign[X86_PMC_IDX_MAX]; | 1463 | int assign[X86_PMC_IDX_MAX]; |
@@ -1483,7 +1483,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu) | |||
1483 | return 0; | 1483 | return 0; |
1484 | } | 1484 | } |
1485 | 1485 | ||
1486 | static const struct pmu pmu = { | 1486 | static struct pmu pmu = { |
1487 | .enable = x86_pmu_enable, | 1487 | .enable = x86_pmu_enable, |
1488 | .disable = x86_pmu_disable, | 1488 | .disable = x86_pmu_disable, |
1489 | .start = x86_pmu_start, | 1489 | .start = x86_pmu_start, |
@@ -1569,9 +1569,9 @@ out: | |||
1569 | return ret; | 1569 | return ret; |
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1572 | struct pmu *hw_perf_event_init(struct perf_event *event) |
1573 | { | 1573 | { |
1574 | const struct pmu *tmp; | 1574 | struct pmu *tmp; |
1575 | int err; | 1575 | int err; |
1576 | 1576 | ||
1577 | err = __hw_perf_event_init(event); | 1577 | err = __hw_perf_event_init(event); |