diff options
author | Lin Ming <ming.m.lin@intel.com> | 2010-04-23 01:56:00 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-05-07 05:31:02 -0400 |
commit | 6bde9b6ce0127e2a56228a2071536d422be31336 (patch) | |
tree | 1a1f2fe3dbb741d81b0c08f1822ef7c0af01f91d /kernel/perf_event.c | |
parent | ab608344bcbde4f55ec4cd911b686b0ce3eae076 (diff) |
perf: Add group scheduling transactional APIs
Add group scheduling transactional APIs to struct pmu.
These APIs will be implemented in arch code, based on Peter's idea as
below.
> the idea behind hw_perf_group_sched_in() is to not perform
> schedulability tests on each event in the group, but to add the group
> as a whole and then perform one test.
>
> Of course, when that test fails, you'll have to roll-back the whole
> group again.
>
> So start_txn (or a better name) would simply toggle a flag in the pmu
> implementation that will make pmu::enable() not perform the
> schedulability test.
>
> Then commit_txn() will perform the schedulability test (so note the
> method has to have a !void return value.)
>
> This will allow us to use the regular
> kernel/perf_event.c::group_sched_in() and all the rollback code.
> Currently each hw_perf_group_sched_in() implementation duplicates all
> the rollback code (with various bugs).
->start_txn:
Start group events scheduling transaction, set a flag to make
pmu::enable() not perform the schedulability test, it will be performed
at commit time.
->commit_txn:
Commit group events scheduling transaction, perform the group
schedulability test as a whole.
->cancel_txn:
Stop group events scheduling transaction, clear the flag so
pmu::enable() will perform the schedulability test.
Reviewed-by: Stephane Eranian <eranian@google.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1272002160.5707.60.camel@minggr.sh.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r-- | kernel/perf_event.c | 33 |
1 files changed, 20 insertions, 13 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 34659d4085c7..bb06382f98e7 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -83,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
83 | void __weak hw_perf_disable(void) { barrier(); } | 83 | void __weak hw_perf_disable(void) { barrier(); } |
84 | void __weak hw_perf_enable(void) { barrier(); } | 84 | void __weak hw_perf_enable(void) { barrier(); } |
85 | 85 | ||
86 | int __weak | ||
87 | hw_perf_group_sched_in(struct perf_event *group_leader, | ||
88 | struct perf_cpu_context *cpuctx, | ||
89 | struct perf_event_context *ctx) | ||
90 | { | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | void __weak perf_event_print_debug(void) { } | 86 | void __weak perf_event_print_debug(void) { } |
95 | 87 | ||
96 | static DEFINE_PER_CPU(int, perf_disable_count); | 88 | static DEFINE_PER_CPU(int, perf_disable_count); |
@@ -644,15 +636,20 @@ group_sched_in(struct perf_event *group_event, | |||
644 | struct perf_cpu_context *cpuctx, | 636 | struct perf_cpu_context *cpuctx, |
645 | struct perf_event_context *ctx) | 637 | struct perf_event_context *ctx) |
646 | { | 638 | { |
647 | struct perf_event *event, *partial_group; | 639 | struct perf_event *event, *partial_group = NULL; |
640 | const struct pmu *pmu = group_event->pmu; | ||
641 | bool txn = false; | ||
648 | int ret; | 642 | int ret; |
649 | 643 | ||
650 | if (group_event->state == PERF_EVENT_STATE_OFF) | 644 | if (group_event->state == PERF_EVENT_STATE_OFF) |
651 | return 0; | 645 | return 0; |
652 | 646 | ||
653 | ret = hw_perf_group_sched_in(group_event, cpuctx, ctx); | 647 | /* Check if group transaction availabe */ |
654 | if (ret) | 648 | if (pmu->start_txn) |
655 | return ret < 0 ? ret : 0; | 649 | txn = true; |
650 | |||
651 | if (txn) | ||
652 | pmu->start_txn(pmu); | ||
656 | 653 | ||
657 | if (event_sched_in(group_event, cpuctx, ctx)) | 654 | if (event_sched_in(group_event, cpuctx, ctx)) |
658 | return -EAGAIN; | 655 | return -EAGAIN; |
@@ -667,9 +664,19 @@ group_sched_in(struct perf_event *group_event, | |||
667 | } | 664 | } |
668 | } | 665 | } |
669 | 666 | ||
670 | return 0; | 667 | if (txn) { |
668 | ret = pmu->commit_txn(pmu); | ||
669 | if (!ret) { | ||
670 | pmu->cancel_txn(pmu); | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | } | ||
671 | 675 | ||
672 | group_error: | 676 | group_error: |
677 | if (txn) | ||
678 | pmu->cancel_txn(pmu); | ||
679 | |||
673 | /* | 680 | /* |
674 | * Groups can be scheduled in as one unit only, so undo any | 681 | * Groups can be scheduled in as one unit only, so undo any |
675 | * partial group before returning: | 682 | * partial group before returning: |