author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-06-15 06:22:39 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-09-09 14:46:30 -0400
commit     ad5133b7030d04ce7701aa7cbe98f561347c79c2
tree       b0593f2cdc40432ad2d91c5eaa9485df328ab97b
parent     33696fc0d141bbbcb12f75b69608ea83282e3117
perf: Default PMU ops
Provide default implementations for the pmu txn methods; this
allows us to remove some conditional code.
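
In outline (condensed from the group_sched_in() hunks below), call
sites previously had to guard every transaction call against the ops
being absent; after this patch they can call them unconditionally:

        /* before: txn ops were optional, so every call was conditional */
        if (pmu->start_txn)
                pmu->start_txn(pmu);
        ...
        if (!txn || !pmu->commit_txn(pmu))
                return 0;

        /* after: perf_pmu_register() installs default ops, so callers
         * simply do */
        pmu->start_txn(pmu);
        ...
        if (!pmu->commit_txn(pmu))
                return 0;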
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/perf_event.h | 10
 kernel/perf_event.c        | 64
 2 files changed, 57 insertions(+), 17 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6abf103fb7f8..bf85733597ec 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -565,8 +565,8 @@ struct pmu {
 
         int                     *pmu_disable_count;
 
-        void (*pmu_enable)      (struct pmu *pmu);
-        void (*pmu_disable)     (struct pmu *pmu);
+        void (*pmu_enable)      (struct pmu *pmu); /* optional */
+        void (*pmu_disable)     (struct pmu *pmu); /* optional */
 
         /*
          * Should return -ENOENT when the @event doesn't match this PMU.
@@ -590,19 +590,19 @@ struct pmu {
          * Start the transaction, after this ->enable() doesn't need to
          * do schedulability tests.
          */
-        void (*start_txn)       (struct pmu *pmu);
+        void (*start_txn)       (struct pmu *pmu); /* optional */
         /*
          * If ->start_txn() disabled the ->enable() schedulability test
          * then ->commit_txn() is required to perform one. On success
          * the transaction is closed. On error the transaction is kept
          * open until ->cancel_txn() is called.
          */
-        int  (*commit_txn)      (struct pmu *pmu);
+        int  (*commit_txn)      (struct pmu *pmu); /* optional */
         /*
          * Will cancel the transaction, assumes ->disable() is called
          * for each successfull ->enable() during the transaction.
          */
-        void (*cancel_txn)      (struct pmu *pmu);
+        void (*cancel_txn)      (struct pmu *pmu); /* optional */
 };
 
 /**
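
As a driver-side illustration of what "optional" means here — a
minimal sketch with hypothetical callbacks (my_pmu, my_event_init and
friends are not part of this commit) — a PMU that needs no transaction
support and no global enable/disable can now leave those members NULL
and let perf_pmu_register() fill in defaults, as the kernel/perf_event.c
hunks below show:

        static struct pmu my_pmu = {
                /* driver callbacks (hypothetical) */
                .event_init     = my_event_init,
                .enable         = my_enable,
                .disable        = my_disable,
                .read           = my_read,
                /*
                 * .pmu_enable/.pmu_disable and .start_txn/.commit_txn/
                 * .cancel_txn are deliberately left NULL; the core
                 * installs no-op or batching defaults at registration.
                 */
        };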
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 5ed0c06765bb..8ef4ba3bcb1f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,21 +674,14 @@ group_sched_in(struct perf_event *group_event,
 {
         struct perf_event *event, *partial_group = NULL;
         struct pmu *pmu = group_event->pmu;
-        bool txn = false;
 
         if (group_event->state == PERF_EVENT_STATE_OFF)
                 return 0;
 
-        /* Check if group transaction availabe */
-        if (pmu->start_txn)
-                txn = true;
-
-        if (txn)
-                pmu->start_txn(pmu);
+        pmu->start_txn(pmu);
 
         if (event_sched_in(group_event, cpuctx, ctx)) {
-                if (txn)
-                        pmu->cancel_txn(pmu);
+                pmu->cancel_txn(pmu);
                 return -EAGAIN;
         }
 
@@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event,
                 }
         }
 
-        if (!txn || !pmu->commit_txn(pmu))
+        if (!pmu->commit_txn(pmu))
                 return 0;
 
 group_error:
@@ -717,8 +710,7 @@ group_error:
         }
         event_sched_out(group_event, cpuctx, ctx);
 
-        if (txn)
-                pmu->cancel_txn(pmu);
+        pmu->cancel_txn(pmu);
 
         return -EAGAIN;
 }
@@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
 
+static void perf_pmu_nop_void(struct pmu *pmu)
+{
+}
+
+static int perf_pmu_nop_int(struct pmu *pmu)
+{
+        return 0;
+}
+
+static void perf_pmu_start_txn(struct pmu *pmu)
+{
+        perf_pmu_disable(pmu);
+}
+
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+        perf_pmu_enable(pmu);
+        return 0;
+}
+
+static void perf_pmu_cancel_txn(struct pmu *pmu)
+{
+        perf_pmu_enable(pmu);
+}
+
 int perf_pmu_register(struct pmu *pmu)
 {
         int ret;
@@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu)
         pmu->pmu_disable_count = alloc_percpu(int);
         if (!pmu->pmu_disable_count)
                 goto unlock;
+
+        if (!pmu->start_txn) {
+                if (pmu->pmu_enable) {
+                        /*
+                         * If we have pmu_enable/pmu_disable calls, install
+                         * transaction stubs that use that to try and batch
+                         * hardware accesses.
+                         */
+                        pmu->start_txn  = perf_pmu_start_txn;
+                        pmu->commit_txn = perf_pmu_commit_txn;
+                        pmu->cancel_txn = perf_pmu_cancel_txn;
+                } else {
+                        pmu->start_txn  = perf_pmu_nop_void;
+                        pmu->commit_txn = perf_pmu_nop_int;
+                        pmu->cancel_txn = perf_pmu_nop_void;
+                }
+        }
+
+        if (!pmu->pmu_enable) {
+                pmu->pmu_enable  = perf_pmu_nop_void;
+                pmu->pmu_disable = perf_pmu_nop_void;
+        }
+
         list_add_rcu(&pmu->entry, &pmus);
         ret = 0;
 unlock:
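
Summarizing the registration logic above: a PMU that supplies
pmu_enable()/pmu_disable() but no transaction ops gets stubs that batch
hardware accesses across the transaction, while a PMU that supplies
neither gets pure no-ops. Condensed from the hunks above:

        /* pmu_enable/pmu_disable present, txn ops absent: batch hardware
         * accesses between start_txn() and commit_txn()/cancel_txn() */
        pmu->start_txn  = perf_pmu_start_txn;   /* perf_pmu_disable(pmu) */
        pmu->commit_txn = perf_pmu_commit_txn;  /* perf_pmu_enable(pmu), 0 */
        pmu->cancel_txn = perf_pmu_cancel_txn;  /* perf_pmu_enable(pmu) */

        /* nothing present: all five ops become no-ops, and commit_txn()
         * always reports success */
        pmu->start_txn  = perf_pmu_nop_void;
        pmu->commit_txn = perf_pmu_nop_int;     /* returns 0 */
        pmu->cancel_txn = perf_pmu_nop_void;

Because perf_pmu_disable()/perf_pmu_enable() nest via the per-cpu
pmu_disable_count allocated just above (introduced by the parent
commit), the batching stubs compose correctly with callers that have
already disabled the PMU.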