author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-06-15 06:22:39 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-09-09 14:46:30 -0400
commit     ad5133b7030d04ce7701aa7cbe98f561347c79c2 (patch)
tree       b0593f2cdc40432ad2d91c5eaa9485df328ab97b /kernel/perf_event.c
parent     33696fc0d141bbbcb12f75b69608ea83282e3117 (diff)
perf: Default PMU ops
Provide default implementations for the pmu txn methods; this allows us to
remove some conditional code.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
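The pattern the patch applies -- install no-op defaults once at registration
time so every call site can invoke the method pointers unconditionally -- is
worth seeing in isolation. Below is a minimal standalone C sketch of the same
idea; the widget_* names are hypothetical illustrations, not kernel API:

#include <stdio.h>

/* A driver-style ops table; any method may be left NULL by the provider. */
struct widget_ops {
	void (*start)(struct widget_ops *ops);
	int  (*commit)(struct widget_ops *ops);
};

/* Default stubs for methods the provider did not supply. */
static void widget_nop_void(struct widget_ops *ops) { }
static int  widget_nop_int(struct widget_ops *ops) { return 0; }

/* Registration fills the gaps once, instead of NULL-checking at every call. */
static void widget_register(struct widget_ops *ops)
{
	if (!ops->start)
		ops->start = widget_nop_void;
	if (!ops->commit)
		ops->commit = widget_nop_int;
}

int main(void)
{
	struct widget_ops ops = { 0 };	/* provider supplies no methods */

	widget_register(&ops);

	/* Branch-free call sites, like the simplified group_sched_in() below. */
	ops.start(&ops);
	if (!ops.commit(&ops))
		puts("commit ok");
	return 0;
}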
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c | 64
1 file changed, 52 insertions(+), 12 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 5ed0c06765bb..8ef4ba3bcb1f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,21 +674,14 @@ group_sched_in(struct perf_event *group_event,
 {
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
-	bool txn = false;
 
	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;
 
-	/* Check if group transaction availabe */
-	if (pmu->start_txn)
-		txn = true;
-
-	if (txn)
-		pmu->start_txn(pmu);
+	pmu->start_txn(pmu);
 
	if (event_sched_in(group_event, cpuctx, ctx)) {
-		if (txn)
-			pmu->cancel_txn(pmu);
+		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}
 
@@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event,
		}
	}
 
-	if (!txn || !pmu->commit_txn(pmu))
+	if (!pmu->commit_txn(pmu))
		return 0;
 
 group_error:
@@ -717,8 +710,7 @@ group_error:
	}
	event_sched_out(group_event, cpuctx, ctx);
 
-	if (txn)
-		pmu->cancel_txn(pmu);
+	pmu->cancel_txn(pmu);
 
	return -EAGAIN;
 }
@@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
 
+static void perf_pmu_nop_void(struct pmu *pmu)
+{
+}
+
+static int perf_pmu_nop_int(struct pmu *pmu)
+{
+	return 0;
+}
+
+static void perf_pmu_start_txn(struct pmu *pmu)
+{
+	perf_pmu_disable(pmu);
+}
+
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+	perf_pmu_enable(pmu);
+	return 0;
+}
+
+static void perf_pmu_cancel_txn(struct pmu *pmu)
+{
+	perf_pmu_enable(pmu);
+}
+
 int perf_pmu_register(struct pmu *pmu)
 {
	int ret;
@@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu)
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;
+
+	if (!pmu->start_txn) {
+		if (pmu->pmu_enable) {
+			/*
+			 * If we have pmu_enable/pmu_disable calls, install
+			 * transaction stubs that use that to try and batch
+			 * hardware accesses.
+			 */
+			pmu->start_txn  = perf_pmu_start_txn;
+			pmu->commit_txn = perf_pmu_commit_txn;
+			pmu->cancel_txn = perf_pmu_cancel_txn;
+		} else {
+			pmu->start_txn  = perf_pmu_nop_void;
+			pmu->commit_txn = perf_pmu_nop_int;
+			pmu->cancel_txn = perf_pmu_nop_void;
+		}
+	}
+
+	if (!pmu->pmu_enable) {
+		pmu->pmu_enable  = perf_pmu_nop_void;
+		pmu->pmu_disable = perf_pmu_nop_void;
+	}
+
	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
 unlock:
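A note on the chosen defaults: for a pmu that supplies pmu_enable()/pmu_disable()
but no transaction methods, the stubs installed above turn a group transaction
into a single disable/enable bracket, so the per-event programming reaches the
hardware as one batch. The following standalone C sketch only illustrates that
bracketing; the sketch_* and hw_* names are invented for the example and are not
kernel API:

#include <stdio.h>

struct pmu_sketch {
	void (*pmu_enable)(struct pmu_sketch *pmu);
	void (*pmu_disable)(struct pmu_sketch *pmu);
};

static void hw_enable(struct pmu_sketch *pmu)  { puts("hw: write state, enable"); }
static void hw_disable(struct pmu_sketch *pmu) { puts("hw: disable"); }

/* Same shape as perf_pmu_start_txn/commit_txn/cancel_txn in the patch. */
static void sketch_start_txn(struct pmu_sketch *pmu)  { pmu->pmu_disable(pmu); }
static int  sketch_commit_txn(struct pmu_sketch *pmu) { pmu->pmu_enable(pmu); return 0; }
static void sketch_cancel_txn(struct pmu_sketch *pmu) { pmu->pmu_enable(pmu); }

int main(void)
{
	struct pmu_sketch pmu = { hw_enable, hw_disable };

	/* Success path: one disable/enable pair brackets the whole group. */
	sketch_start_txn(&pmu);
	puts("program sibling 1");
	puts("program sibling 2");
	if (sketch_commit_txn(&pmu) == 0)
		puts("group committed");

	/* Failure path: cancel also just re-enables the hardware. */
	sketch_start_txn(&pmu);
	puts("program sibling 1 (fails)");
	sketch_cancel_txn(&pmu);
	return 0;
}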