 arch/powerpc/kernel/perf_event.c |  7 ++++---
 arch/sparc/kernel/perf_event.c   |  7 ++++---
 arch/x86/kernel/cpu/perf_event.c | 14 +++++---------
 include/linux/perf_event.h       | 27 +++++++++++++++++++++-----
 kernel/perf_event.c              |  9 +--------
 5 files changed, 36 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 43b83c35cf54..ac2a8c2554d9 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -754,7 +754,7 @@ static int power_pmu_enable(struct perf_event *event)
	 * skip the schedulability test here, it will be peformed
	 * at commit time(->commit_txn) as a whole
	 */
-	if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;
 
	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -858,7 +858,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 {
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
 }
 
@@ -871,7 +871,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 {
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -897,6 +897,7 @@ int power_pmu_commit_txn(const struct pmu *pmu)
	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	return 0;
 }
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 0ec92c8861dd..beeb92fa3acd 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1005,7 +1005,7 @@ static int sparc_pmu_enable(struct perf_event *event)
	 * skip the schedulability test here, it will be peformed
	 * at commit time(->commit_txn) as a whole
	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;
 
	if (check_excludes(cpuc->event, n0, 1))
@@ -1102,7 +1102,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1114,7 +1114,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -1137,6 +1137,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;
 
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5db5b7d65a18..af04c6fa59cb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event)
	 * skip the schedulability test here, it will be peformed
	 * at commit time(->commit_txn) as a whole
	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto out;
 
	ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1096,7 +1096,7 @@ static void x86_pmu_disable(struct perf_event *event)
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
		return;
 
	x86_pmu_stop(event);
@@ -1388,7 +1388,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 {
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag |= PERF_EVENT_TXN;
	cpuc->n_txn = 0;
 }
 
@@ -1401,7 +1401,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 {
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
	/*
	 * Truncate the collected events.
	 */
@@ -1435,11 +1435,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));
 
-	/*
-	 * Clear out the txn count so that ->cancel_txn() which gets
-	 * run after ->commit_txn() doesn't undo things.
-	 */
-	cpuc->n_txn = 0;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 
	return 0;
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 36efad90cd43..f1b6ba0770e0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -549,7 +549,10 @@ struct hw_perf_event {
 
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -563,14 +566,28 @@ struct pmu {
	void (*unthrottle)		(struct perf_event *event);
 
	/*
-	 * group events scheduling is treated as a transaction,
-	 * add group events as a whole and perform one schedulability test.
-	 * If test fails, roll back the whole group
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group
	 */
 
+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
	void (*start_txn)	(const struct pmu *pmu);
-	void (*cancel_txn)	(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
	int  (*commit_txn)	(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called for
+	 * each successfull ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };
 
 /**
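
The header comments above pin down the new lifecycle: ->start_txn() opens the transaction so ->enable() can skip its per-event schedulability test, a successful ->commit_txn() performs the one group-wide test and closes the transaction itself, and ->cancel_txn() only runs when the commit (or an enable) failed. Below is a minimal user-space sketch of that driver-side contract; the example_* names, the example_cpu_hw struct, and the fixed four-counter limit are illustrative stand-ins, not part of the patch.

#include <errno.h>

#define PERF_EVENT_TXN 0x1

/* Hypothetical per-CPU scheduling state, mirroring the arch cpu_hw_events. */
struct example_cpu_hw {
	int n_events;		/* events collected so far */
	int n_txn_start;	/* n_events when the transaction opened */
	unsigned int group_flag;
};

/* Stand-in for the arch constraint check; nonzero means the group won't fit. */
static int example_check_constraints(struct example_cpu_hw *hw)
{
	return hw->n_events > 4;	/* pretend the PMU has 4 counters */
}

static void example_start_txn(struct example_cpu_hw *hw)
{
	hw->group_flag |= PERF_EVENT_TXN;	/* ->enable() may now skip its test */
	hw->n_txn_start = hw->n_events;
}

static int example_commit_txn(struct example_cpu_hw *hw)
{
	/* One schedulability test for the whole group. */
	if (example_check_constraints(hw))
		return -EAGAIN;		/* txn stays open for ->cancel_txn() */

	hw->group_flag &= ~PERF_EVENT_TXN;	/* success closes the txn */
	return 0;
}

static void example_cancel_txn(struct example_cpu_hw *hw)
{
	hw->n_events = hw->n_txn_start;		/* drop events added in the txn */
	hw->group_flag &= ~PERF_EVENT_TXN;
}

This is exactly the pattern the powerpc, sparc, and x86 hunks above converge on: both commit (on success) and cancel clear the flag, which is why x86 no longer needs the old "clear n_txn so a later ->cancel_txn() doesn't undo things" workaround.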
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 227ed9c8ec34..6f60920772b3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event,
	struct perf_event *event, *partial_group = NULL;
	const struct pmu *pmu = group_event->pmu;
	bool txn = false;
-	int ret;
 
	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;
@@ -703,15 +702,9 @@ group_sched_in(struct perf_event *group_event,
		}
	}
 
-	if (!txn)
+	if (!txn || !pmu->commit_txn(pmu))
		return 0;
 
-	ret = pmu->commit_txn(pmu);
-	if (!ret) {
-		pmu->cancel_txn(pmu);
-		return 0;
-	}
-
 group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
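
With ->commit_txn() now closing the transaction on success, the core's error handling collapses to a single path: a zero return from the commit means done, and only a failure falls through to group_error, where ->cancel_txn() runs. A condensed sketch of the resulting control flow follows; the example_* names and the enable_group callback are hypothetical simplifications of group_sched_in(), which really iterates over the leader and each sibling.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical txn callbacks, standing in for the const struct pmu ops. */
struct example_pmu {
	void (*start_txn)(void *hw);
	int  (*commit_txn)(void *hw);
	void (*cancel_txn)(void *hw);
};

/* Condensed shape of group_sched_in() after this patch. */
static int example_group_sched_in(struct example_pmu *pmu, void *hw,
				  int (*enable_group)(void *hw))
{
	bool txn = pmu->start_txn != NULL;	/* some PMUs have no txn support */

	if (txn)
		pmu->start_txn(hw);

	if (enable_group(hw))			/* some ->enable() failed */
		goto group_error;

	/* A zero return from ->commit_txn() closed the transaction;
	 * ->cancel_txn() must not run in that case. */
	if (!txn || !pmu->commit_txn(hw))
		return 0;

group_error:
	/* ... undo the partially enabled events here ... */
	if (txn)
		pmu->cancel_txn(hw);
	return -EAGAIN;
}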