author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-06-11 07:35:57 -0400
committer Ingo Molnar <mingo@elte.hu>              2010-09-09 14:46:27 -0400
commit    51b0fe39549a04858001922919ab355dee9bdfcf (patch)
tree      024768dd0c87e890edf76e129820ea0cdf16a257
parent    2aa61274efb9f532deaebc9812675a27af1994cb (diff)
perf: Deconstify struct pmu
sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"`

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
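The change is purely mechanical (the \> in the sed pattern is a word boundary, so identifiers that merely start with "pmu" are left alone), but its intent is easier to see on a skeleton driver. The following sketch is not part of this patch: the my_pmu_* names and stub bodies are invented, and struct pmu is trimmed to two members. It shows the shape the sed rewrite produces, where dropping const lets later patches keep mutable per-pmu state behind the struct pmu * that callbacks receive.

struct perf_event;	/* opaque here; defined in <linux/perf_event.h> */

struct pmu {
	int  (*enable)(struct perf_event *event);
	void (*disable)(struct perf_event *event);
};

static int  my_pmu_enable(struct perf_event *event)  { return 0; }
static void my_pmu_disable(struct perf_event *event) { }

/*
 * Before this patch:
 *	static const struct pmu pmu = { ... };
 *	const struct pmu *hw_perf_event_init(struct perf_event *event);
 * After the sed rewrite:
 */
static struct pmu pmu = {
	.enable  = my_pmu_enable,
	.disable = my_pmu_disable,
};

struct pmu *hw_perf_event_init(struct perf_event *event)
{
	return &pmu;	/* callers may now modify state through this pointer */
}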
-rw-r--r--  arch/alpha/kernel/perf_event.c            4
-rw-r--r--  arch/arm/kernel/perf_event.c              2
-rw-r--r--  arch/powerpc/kernel/perf_event.c          8
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c  2
-rw-r--r--  arch/sh/kernel/perf_event.c               4
-rw-r--r--  arch/sparc/kernel/perf_event.c           10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c         14
-rw-r--r--  include/linux/perf_event.h               10
-rw-r--r--  kernel/perf_event.c                      26
9 files changed, 40 insertions, 40 deletions
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 51c39fa41693..56fa41590381 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -642,7 +642,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu pmu = {
+static struct pmu pmu = {
 	.enable = alpha_pmu_enable,
 	.disable = alpha_pmu_disable,
 	.read = alpha_pmu_read,
@@ -653,7 +653,7 @@ static const struct pmu pmu = {
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	int err;
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 64ca8c3ab94b..0671e92c5111 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -491,7 +491,7 @@ __hw_perf_event_init(struct perf_event *event)
 	return err;
 }
 
-const struct pmu *
+struct pmu *
 hw_perf_event_init(struct perf_event *event)
 {
 	int err = 0;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index d301a30445e0..5f78681ad902 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -857,7 +857,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
@@ -870,7 +870,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
@@ -882,7 +882,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	long i, n;
@@ -1014,7 +1014,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	u64 ev;
 	unsigned long flags;
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 1ba45471ae43..d7619b5e7a6e 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -428,7 +428,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	u64 ev;
 	struct perf_event *events[MAX_HWEVENTS];
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7a3dc3567258..395572c94c6a 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -257,13 +257,13 @@ static void sh_pmu_read(struct perf_event *event)
 	sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static const struct pmu pmu = {
+static struct pmu pmu = {
 	.enable = sh_pmu_enable,
 	.disable = sh_pmu_disable,
 	.read = sh_pmu_read,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	int err = __hw_perf_event_init(event);
 	if (unlikely(err)) {
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 4bc402938575..481b894a5018 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1099,7 +1099,7 @@ static int __hw_perf_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
@@ -1111,7 +1111,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
@@ -1123,7 +1123,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n;
@@ -1142,7 +1142,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 	return 0;
 }
 
-static const struct pmu pmu = {
+static struct pmu pmu = {
 	.enable = sparc_pmu_enable,
 	.disable = sparc_pmu_disable,
 	.read = sparc_pmu_read,
@@ -1152,7 +1152,7 @@ static const struct pmu pmu = {
 	.commit_txn = sparc_pmu_commit_txn,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	int err = __hw_perf_event_init(event);
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index de6569c04cd0..fdd97f2e9961 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -618,7 +618,7 @@ static void x86_pmu_enable_all(int added)
 	}
 }
 
-static const struct pmu pmu;
+static struct pmu pmu;
 
 static inline int is_x86_event(struct perf_event *event)
 {
@@ -1427,7 +1427,7 @@ static inline void x86_pmu_read(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void x86_pmu_start_txn(const struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1440,7 +1440,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void x86_pmu_cancel_txn(const struct pmu *pmu)
+static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1457,7 +1457,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int x86_pmu_commit_txn(const struct pmu *pmu)
+static int x86_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int assign[X86_PMC_IDX_MAX];
@@ -1483,7 +1483,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	return 0;
 }
 
-static const struct pmu pmu = {
+static struct pmu pmu = {
 	.enable = x86_pmu_enable,
 	.disable = x86_pmu_disable,
 	.start = x86_pmu_start,
@@ -1569,9 +1569,9 @@ out:
 	return ret;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
 {
-	const struct pmu *tmp;
+	struct pmu *tmp;
 	int err;
 
 	err = __hw_perf_event_init(event);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 000610c4de71..09d048b52115 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -578,19 +578,19 @@ struct pmu {
 	 * Start the transaction, after this ->enable() doesn't need
 	 * to do schedulability tests.
 	 */
-	void (*start_txn)	(const struct pmu *pmu);
+	void (*start_txn)	(struct pmu *pmu);
 	/*
 	 * If ->start_txn() disabled the ->enable() schedulability test
 	 * then ->commit_txn() is required to perform one. On success
 	 * the transaction is closed. On error the transaction is kept
 	 * open until ->cancel_txn() is called.
 	 */
-	int  (*commit_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)	(struct pmu *pmu);
 	/*
 	 * Will cancel the transaction, assumes ->disable() is called for
 	 * each successfull ->enable() during the transaction.
 	 */
-	void (*cancel_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)	(struct pmu *pmu);
 };
 
 /**
@@ -669,7 +669,7 @@ struct perf_event {
 	int			nr_siblings;
 	int			group_flags;
 	struct perf_event	*group_leader;
-	const struct pmu	*pmu;
+	struct pmu		*pmu;
 
 	enum perf_event_active_state	state;
 	unsigned int		attach_state;
@@ -849,7 +849,7 @@ struct perf_output_handle {
  */
 extern int perf_max_events;
 
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+extern struct pmu *hw_perf_event_init(struct perf_event *event);
 
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
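For context on what these now non-const callbacks do with their pmu argument: the transaction protocol lets a whole event group be added speculatively and validated once at commit time, instead of re-running the schedulability test on every ->enable(). Below is a minimal sketch of one plausible implementation, loosely modeled on the x86/sparc code in this patch. It is not kernel code: cpu_hw_events is trimmed down, the counter limit is invented, and in the kernel the state is per-CPU rather than a single global (the PERF_EVENT_TXN flag name follows this kernel series).

#define PERF_EVENT_TXN	0x1

struct pmu;	/* callbacks take it non-const after this patch */

struct cpu_hw_events {
	unsigned int group_flag;	/* PERF_EVENT_TXN while a txn is open */
	int n_events;			/* events tentatively added */
};

static struct cpu_hw_events cpu_hw_events;	/* per-CPU in the kernel */

/* ->start_txn(): tell ->enable() to skip the schedulability test. */
static void my_pmu_start_txn(struct pmu *pmu)
{
	cpu_hw_events.group_flag |= PERF_EVENT_TXN;
}

/* ->cancel_txn(): abort; ->disable() undoes each successful ->enable(). */
static void my_pmu_cancel_txn(struct pmu *pmu)
{
	cpu_hw_events.group_flag &= ~PERF_EVENT_TXN;
}

/*
 * ->commit_txn(): run the deferred schedulability test over the whole
 * group; close the transaction on success, keep it open on failure so
 * the caller can ->cancel_txn().
 */
static int my_pmu_commit_txn(struct pmu *pmu)
{
	if (cpu_hw_events.n_events > 4)	/* invented counter limit */
		return -1;
	cpu_hw_events.group_flag &= ~PERF_EVENT_TXN;
	return 0;
}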
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2d74f31220ad..fb46fd13f31f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -75,7 +75,7 @@ static DEFINE_SPINLOCK(perf_resource_lock);
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
+extern __weak struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	return NULL;
 }
@@ -691,7 +691,7 @@ group_sched_in(struct perf_event *group_event,
 		  struct perf_event_context *ctx)
 {
 	struct perf_event *event, *partial_group = NULL;
-	const struct pmu *pmu = group_event->pmu;
+	struct pmu *pmu = group_event->pmu;
 	bool txn = false;
 
 	if (group_event->state == PERF_EVENT_STATE_OFF)
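The hunk above is the consumer side of the transaction: group_sched_in() opens a transaction, enables the leader and each sibling, and only then asks the pmu whether the whole group fits. A simplified sketch of that flow follows; the types are mocks, ->enable()/->disable() stand in for event_sched_in()/event_sched_out(), and locking, time accounting, and the sibling loop are elided.

#include <stdbool.h>

struct pmu;

struct perf_event {
	struct pmu *pmu;
};

struct pmu {
	int  (*enable)(struct perf_event *event);
	void (*disable)(struct perf_event *event);
	void (*start_txn)(struct pmu *pmu);
	int  (*commit_txn)(struct pmu *pmu);
	void (*cancel_txn)(struct pmu *pmu);
};

/* Simplified shape of group_sched_in() after this patch: the pmu
 * pointer is plain "struct pmu *", so the txn callbacks may mutate
 * per-pmu state. */
static int group_sched_in_sketch(struct perf_event *group_event)
{
	struct pmu *pmu = group_event->pmu;
	bool txn = false;

	if (pmu->start_txn) {
		pmu->start_txn(pmu);	/* defer the schedulability test */
		txn = true;
	}

	if (pmu->enable(group_event)) {
		if (txn)
			pmu->cancel_txn(pmu);
		return -1;
	}

	/* ... enable each sibling, unwinding with ->disable() and
	 * ->cancel_txn() if one of them fails ... */

	if (txn && pmu->commit_txn(pmu)) {
		pmu->disable(group_event);	/* group doesn't fit as a whole */
		pmu->cancel_txn(pmu);
		return -1;
	}
	return 0;	/* the group is scheduled as a unit */
}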
@@ -4501,7 +4501,7 @@ static int perf_swevent_int(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu perf_ops_generic = {
+static struct pmu perf_ops_generic = {
 	.enable = perf_swevent_enable,
 	.disable = perf_swevent_disable,
 	.start = perf_swevent_int,
@@ -4614,7 +4614,7 @@ static void cpu_clock_perf_event_read(struct perf_event *event)
 	cpu_clock_perf_event_update(event);
 }
 
-static const struct pmu perf_ops_cpu_clock = {
+static struct pmu perf_ops_cpu_clock = {
 	.enable = cpu_clock_perf_event_enable,
 	.disable = cpu_clock_perf_event_disable,
 	.read = cpu_clock_perf_event_read,
@@ -4671,7 +4671,7 @@ static void task_clock_perf_event_read(struct perf_event *event)
 	task_clock_perf_event_update(event, time);
 }
 
-static const struct pmu perf_ops_task_clock = {
+static struct pmu perf_ops_task_clock = {
 	.enable = task_clock_perf_event_enable,
 	.disable = task_clock_perf_event_disable,
 	.read = task_clock_perf_event_read,
@@ -4785,7 +4785,7 @@ static int swevent_hlist_get(struct perf_event *event)
 
 #ifdef CONFIG_EVENT_TRACING
 
-static const struct pmu perf_ops_tracepoint = {
+static struct pmu perf_ops_tracepoint = {
 	.enable = perf_trace_enable,
 	.disable = perf_trace_disable,
 	.start = perf_swevent_int,
@@ -4849,7 +4849,7 @@ static void tp_perf_event_destroy(struct perf_event *event)
 	perf_trace_destroy(event);
 }
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static struct pmu *tp_perf_event_init(struct perf_event *event)
 {
 	int err;
 
@@ -4896,7 +4896,7 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #else
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static struct pmu *tp_perf_event_init(struct perf_event *event)
 {
 	return NULL;
 }
@@ -4918,7 +4918,7 @@ static void bp_perf_event_destroy(struct perf_event *event)
 	release_bp_slot(event);
 }
 
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+static struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
 	int err;
 
@@ -4942,7 +4942,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
 	perf_swevent_add(bp, 1, 1, &sample, regs);
 }
 #else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+static struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
 	return NULL;
 }
@@ -4964,9 +4964,9 @@ static void sw_perf_event_destroy(struct perf_event *event)
 	swevent_hlist_put(event);
 }
 
-static const struct pmu *sw_perf_event_init(struct perf_event *event)
+static struct pmu *sw_perf_event_init(struct perf_event *event)
 {
-	const struct pmu *pmu = NULL;
+	struct pmu *pmu = NULL;
 	u64 event_id = event->attr.config;
 
 	/*
@@ -5028,7 +5028,7 @@ perf_event_alloc(struct perf_event_attr *attr,
 		  perf_overflow_handler_t overflow_handler,
 		  gfp_t gfpflags)
 {
-	const struct pmu *pmu;
+	struct pmu *pmu;
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	long err;