diff options
author | Lin Ming <ming.m.lin@intel.com> | 2010-04-23 01:56:00 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-05-07 05:31:02 -0400 |
commit | 6bde9b6ce0127e2a56228a2071536d422be31336 (patch) | |
tree | 1a1f2fe3dbb741d81b0c08f1822ef7c0af01f91d | |
parent | ab608344bcbde4f55ec4cd911b686b0ce3eae076 (diff) |
perf: Add group scheduling transactional APIs
Add group scheduling transactional APIs to struct pmu.
These APIs will be implemented in arch code, based on Peter's idea as
below.
> the idea behind hw_perf_group_sched_in() is to not perform
> schedulability tests on each event in the group, but to add the group
> as a whole and then perform one test.
>
> Of course, when that test fails, you'll have to roll-back the whole
> group again.
>
> So start_txn (or a better name) would simply toggle a flag in the pmu
> implementation that will make pmu::enable() not perform the
> schedulablilty test.
>
> Then commit_txn() will perform the schedulability test (so note the
> method has to have a !void return value).
>
> This will allow us to use the regular
> kernel/perf_event.c::group_sched_in() and all the rollback code.
> Currently each hw_perf_group_sched_in() implementation duplicates all
> the rollback code (with various bugs).
->start_txn:
Start group events scheduling transaction, set a flag to make
pmu::enable() not perform the schedulability test, it will be performed
at commit time.
->commit_txn:
Commit group events scheduling transaction, perform the group
schedulability test as a whole
->cancel_txn:
Stop group events scheduling transaction, clear the flag so
pmu::enable() will perform the schedulability test.
Reviewed-by: Stephane Eranian <eranian@google.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1272002160.5707.60.camel@minggr.sh.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | include/linux/perf_event.h | 15 | ||||
-rw-r--r-- | kernel/perf_event.c | 33 |
2 files changed, 32 insertions, 16 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 23cd0057a681..4924c96d7e2d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -547,6 +547,8 @@ struct hw_perf_event { | |||
547 | 547 | ||
548 | struct perf_event; | 548 | struct perf_event; |
549 | 549 | ||
550 | #define PERF_EVENT_TXN_STARTED 1 | ||
551 | |||
550 | /** | 552 | /** |
551 | * struct pmu - generic performance monitoring unit | 553 | * struct pmu - generic performance monitoring unit |
552 | */ | 554 | */ |
@@ -557,6 +559,16 @@ struct pmu { | |||
557 | void (*stop) (struct perf_event *event); | 559 | void (*stop) (struct perf_event *event); |
558 | void (*read) (struct perf_event *event); | 560 | void (*read) (struct perf_event *event); |
559 | void (*unthrottle) (struct perf_event *event); | 561 | void (*unthrottle) (struct perf_event *event); |
562 | |||
563 | /* | ||
564 | * group events scheduling is treated as a transaction, | ||
565 | * add group events as a whole and perform one schedulability test. | ||
566 | * If test fails, roll back the whole group | ||
567 | */ | ||
568 | |||
569 | void (*start_txn) (const struct pmu *pmu); | ||
570 | void (*cancel_txn) (const struct pmu *pmu); | ||
571 | int (*commit_txn) (const struct pmu *pmu); | ||
560 | }; | 572 | }; |
561 | 573 | ||
562 | /** | 574 | /** |
@@ -823,9 +835,6 @@ extern void perf_disable(void); | |||
823 | extern void perf_enable(void); | 835 | extern void perf_enable(void); |
824 | extern int perf_event_task_disable(void); | 836 | extern int perf_event_task_disable(void); |
825 | extern int perf_event_task_enable(void); | 837 | extern int perf_event_task_enable(void); |
826 | extern int hw_perf_group_sched_in(struct perf_event *group_leader, | ||
827 | struct perf_cpu_context *cpuctx, | ||
828 | struct perf_event_context *ctx); | ||
829 | extern void perf_event_update_userpage(struct perf_event *event); | 838 | extern void perf_event_update_userpage(struct perf_event *event); |
830 | extern int perf_event_release_kernel(struct perf_event *event); | 839 | extern int perf_event_release_kernel(struct perf_event *event); |
831 | extern struct perf_event * | 840 | extern struct perf_event * |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 34659d4085c7..bb06382f98e7 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -83,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
83 | void __weak hw_perf_disable(void) { barrier(); } | 83 | void __weak hw_perf_disable(void) { barrier(); } |
84 | void __weak hw_perf_enable(void) { barrier(); } | 84 | void __weak hw_perf_enable(void) { barrier(); } |
85 | 85 | ||
86 | int __weak | ||
87 | hw_perf_group_sched_in(struct perf_event *group_leader, | ||
88 | struct perf_cpu_context *cpuctx, | ||
89 | struct perf_event_context *ctx) | ||
90 | { | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | void __weak perf_event_print_debug(void) { } | 86 | void __weak perf_event_print_debug(void) { } |
95 | 87 | ||
96 | static DEFINE_PER_CPU(int, perf_disable_count); | 88 | static DEFINE_PER_CPU(int, perf_disable_count); |
@@ -644,15 +636,20 @@ group_sched_in(struct perf_event *group_event, | |||
644 | struct perf_cpu_context *cpuctx, | 636 | struct perf_cpu_context *cpuctx, |
645 | struct perf_event_context *ctx) | 637 | struct perf_event_context *ctx) |
646 | { | 638 | { |
647 | struct perf_event *event, *partial_group; | 639 | struct perf_event *event, *partial_group = NULL; |
640 | const struct pmu *pmu = group_event->pmu; | ||
641 | bool txn = false; | ||
648 | int ret; | 642 | int ret; |
649 | 643 | ||
650 | if (group_event->state == PERF_EVENT_STATE_OFF) | 644 | if (group_event->state == PERF_EVENT_STATE_OFF) |
651 | return 0; | 645 | return 0; |
652 | 646 | ||
653 | ret = hw_perf_group_sched_in(group_event, cpuctx, ctx); | 647 | /* Check if group transaction availabe */ |
654 | if (ret) | 648 | if (pmu->start_txn) |
655 | return ret < 0 ? ret : 0; | 649 | txn = true; |
650 | |||
651 | if (txn) | ||
652 | pmu->start_txn(pmu); | ||
656 | 653 | ||
657 | if (event_sched_in(group_event, cpuctx, ctx)) | 654 | if (event_sched_in(group_event, cpuctx, ctx)) |
658 | return -EAGAIN; | 655 | return -EAGAIN; |
@@ -667,9 +664,19 @@ group_sched_in(struct perf_event *group_event, | |||
667 | } | 664 | } |
668 | } | 665 | } |
669 | 666 | ||
670 | return 0; | 667 | if (txn) { |
668 | ret = pmu->commit_txn(pmu); | ||
669 | if (!ret) { | ||
670 | pmu->cancel_txn(pmu); | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | } | ||
671 | 675 | ||
672 | group_error: | 676 | group_error: |
677 | if (txn) | ||
678 | pmu->cancel_txn(pmu); | ||
679 | |||
673 | /* | 680 | /* |
674 | * Groups can be scheduled in as one unit only, so undo any | 681 | * Groups can be scheduled in as one unit only, so undo any |
675 | * partial group before returning: | 682 | * partial group before returning: |