 include/linux/perf_event.h | 15 ++++++++++++---
 kernel/perf_event.c        | 33 ++++++++++++++++++++-------------
 2 files changed, 32 insertions(+), 16 deletions(-)
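
In short, this change retires the weak hw_perf_group_sched_in() hook and moves group scheduling into the core: struct pmu grows three transactional callbacks (start_txn, cancel_txn, commit_txn), and group_sched_in() uses them to add a group's events as one batch with a single schedulability test.
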
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 23cd0057a681..4924c96d7e2d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -547,6 +547,8 @@ struct hw_perf_event {
 
 struct perf_event;
 
+#define PERF_EVENT_TXN_STARTED 1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -557,6 +559,16 @@ struct pmu {
        void (*stop)            (struct perf_event *event);
        void (*read)            (struct perf_event *event);
        void (*unthrottle)      (struct perf_event *event);
+
+       /*
+        * Group event scheduling is treated as a transaction: add the
+        * group's events as a whole and perform one schedulability test.
+        * If the test fails, roll back the whole group.
+        */
+
+       void (*start_txn)       (const struct pmu *pmu);
+       void (*cancel_txn)      (const struct pmu *pmu);
+       int  (*commit_txn)      (const struct pmu *pmu);
 };
 
 /**
@@ -823,9 +835,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-              struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
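
To make the new interface concrete, here is a minimal sketch of a transaction-capable PMU backend. It is illustrative only and not part of this commit: the example_cpu_events per-CPU state, EXAMPLE_NUM_COUNTERS, and example_schedule_events() are hypothetical stand-ins for a real backend's bookkeeping. The shape follows the protocol the comment above describes: start_txn() begins batching, commit_txn() runs one schedulability test for the whole batch, and cancel_txn() ends the transaction, rolling back anything not yet committed.

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/errno.h>

#define EXAMPLE_NUM_COUNTERS 4          /* hypothetical hardware counter budget */

struct example_cpu_events {
        unsigned int group_flag;        /* holds PERF_EVENT_TXN_STARTED */
        int n_events;                   /* events currently accepted */
        int n_added;                    /* events queued since start_txn() */
};

static DEFINE_PER_CPU(struct example_cpu_events, example_cpu_events);

/* Hypothetical test: do all accepted events fit on the hardware? */
static int example_schedule_events(struct example_cpu_events *cpuc)
{
        return cpuc->n_events <= EXAMPLE_NUM_COUNTERS ? 0 : -EAGAIN;
}

static void example_start_txn(const struct pmu *pmu)
{
        struct example_cpu_events *cpuc = this_cpu_ptr(&example_cpu_events);

        /* Batch subsequent event additions; defer the schedulability test. */
        cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
}

static void example_cancel_txn(const struct pmu *pmu)
{
        struct example_cpu_events *cpuc = this_cpu_ptr(&example_cpu_events);

        /* Drop anything still queued since start_txn(), then end the txn. */
        cpuc->n_events -= cpuc->n_added;
        cpuc->n_added = 0;
        cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
}

static int example_commit_txn(const struct pmu *pmu)
{
        struct example_cpu_events *cpuc = this_cpu_ptr(&example_cpu_events);
        int ret;

        /* One schedulability test for the whole queued group. */
        ret = example_schedule_events(cpuc);
        if (ret)
                return ret;

        /* Committed: the queued events are no longer rolled back. */
        cpuc->n_added = 0;
        return 0;
}

Because group_sched_in() (below) calls cancel_txn() even after a successful commit_txn(), the sketch has commit zero n_added, so the trailing cancel only clears PERF_EVENT_TXN_STARTED.
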
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 34659d4085c7..bb06382f98e7 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -83,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)              { barrier(); }
 void __weak hw_perf_enable(void)               { barrier(); }
 
-int __weak
-hw_perf_group_sched_in(struct perf_event *group_leader,
-              struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx)
-{
-       return 0;
-}
-
 void __weak perf_event_print_debug(void)       { }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
@@ -644,15 +636,20 @@ group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx)
 {
-       struct perf_event *event, *partial_group;
+       struct perf_event *event, *partial_group = NULL;
+       const struct pmu *pmu = group_event->pmu;
+       bool txn = false;
        int ret;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
-       ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
-       if (ret)
-               return ret < 0 ? ret : 0;
+       /* Check if a group transaction is available */
+       if (pmu->start_txn)
+               txn = true;
+
+       if (txn)
+               pmu->start_txn(pmu);
 
        if (event_sched_in(group_event, cpuctx, ctx))
                return -EAGAIN;
@@ -667,9 +664,19 @@ group_sched_in(struct perf_event *group_event,
                }
        }
 
-       return 0;
+       if (txn) {
+               ret = pmu->commit_txn(pmu);
+               if (!ret) {
+                       pmu->cancel_txn(pmu);
+
+                       return 0;
+               }
+       }
 
 group_error:
+       if (txn)
+               pmu->cancel_txn(pmu);
+
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
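
With both halves applied, group scheduling reads as a single transaction. Condensed from the hunks above, the control flow of the patched group_sched_in() is roughly the following (a paraphrase, not additional kernel code; the rollback body continues past the truncated hunk):

        if (pmu->start_txn)                     /* transaction-capable PMU? */
                txn = true;

        if (txn)
                pmu->start_txn(pmu);            /* begin batching the group */

        /* event_sched_in() the leader, then each sibling;
         * any failure goes to group_error for rollback */

        if (txn) {
                ret = pmu->commit_txn(pmu);     /* one schedulability test */
                if (!ret) {
                        pmu->cancel_txn(pmu);   /* end the transaction */
                        return 0;               /* whole group scheduled */
                }
        }

group_error:
        if (txn)
                pmu->cancel_txn(pmu);           /* roll back the batched group */
        /* ... undo any partially scheduled events ... */

Note the cancel_txn() on the success path: nothing else ends the transaction there, so it appears to be how a backend learns that the committed batch is final and the TXN_STARTED state can be cleared.
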