author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-05-25 11:49:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-06-09 05:12:34 -0400
commit		8d2cacbbb8deadfae78aa16e4e1ee619bdd7019e (patch)
tree		79fcb319d1c99e348b0a04056e016c85c3666b35 /arch
parent		3af9e859281bda7eb7c20b51879cf43aa788ac2e (diff)
perf: Cleanup {start,commit,cancel}_txn details
Clarify some of the transactional group scheduling API details
and change it so that a successful ->commit_txn also closes
the transaction.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1274803086.5882.1752.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
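
In practice the new contract matters most on the caller side: once
->commit_txn() succeeds, the transaction is already closed, so
->cancel_txn() is needed only on failure paths. The following
stand-alone C sketch illustrates that pattern; the toy_pmu type, its
callbacks, and sched_group() are hypothetical stand-ins for
illustration, not the kernel's struct pmu or group_sched_in().

	#include <stdio.h>

	/* Illustrative stand-in for the transaction hooks on struct pmu. */
	struct toy_pmu {
		int in_txn;	/* plays the role of PERF_EVENT_TXN */
		void (*start_txn)(struct toy_pmu *);
		int  (*commit_txn)(struct toy_pmu *);	/* 0 on success */
		void (*cancel_txn)(struct toy_pmu *);
	};

	static void toy_start(struct toy_pmu *pmu)  { pmu->in_txn = 1; }
	static void toy_cancel(struct toy_pmu *pmu) { pmu->in_txn = 0; }

	/* A successful commit closes the transaction itself (the new contract). */
	static int toy_commit(struct toy_pmu *pmu)
	{
		/* pretend the group schedulability test passed as a whole */
		pmu->in_txn = 0;
		return 0;
	}

	/*
	 * Caller-side pattern: cancel_txn() runs only on the failure
	 * paths, because a successful commit_txn() already closed the
	 * transaction.
	 */
	static int sched_group(struct toy_pmu *pmu)
	{
		pmu->start_txn(pmu);

		if (pmu->commit_txn(pmu)) {
			pmu->cancel_txn(pmu);	/* commit failed: txn still open */
			return -1;
		}
		return 0;			/* txn already closed */
	}

	int main(void)
	{
		struct toy_pmu pmu = {
			.start_txn  = toy_start,
			.commit_txn = toy_commit,
			.cancel_txn = toy_cancel,
		};

		printf("sched_group: %d, in_txn after: %d\n",
		       sched_group(&pmu), pmu.in_txn);
		return 0;
	}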
Diffstat (limited to 'arch')

-rw-r--r--  arch/powerpc/kernel/perf_event.c |  7
-rw-r--r--  arch/sparc/kernel/perf_event.c   |  7
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 14

3 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 43b83c35cf54..ac2a8c2554d9 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -754,7 +754,7 @@ static int power_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuhw->group_flag & PERF_EVENT_TXN)
 		goto nocheck;
 
 	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -858,7 +858,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
 
@@ -871,7 +871,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -897,6 +897,7 @@ int power_pmu_commit_txn(const struct pmu *pmu)
 	for (i = cpuhw->n_txn_start; i < n; ++i)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 0ec92c8861dd..beeb92fa3acd 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1005,7 +1005,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto nocheck;
 
 	if (check_excludes(cpuc->event, n0, 1))
@@ -1102,7 +1102,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1114,7 +1114,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -1137,6 +1137,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
 		return -EAGAIN;
 
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5db5b7d65a18..af04c6fa59cb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto out;
 
 	ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1096,7 +1096,7 @@ static void x86_pmu_disable(struct perf_event *event)
 	 * The events never got scheduled and ->cancel_txn will truncate
 	 * the event_list.
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		return;
 
 	x86_pmu_stop(event);
@@ -1388,7 +1388,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag |= PERF_EVENT_TXN;
 	cpuc->n_txn = 0;
 }
 
@@ -1401,7 +1401,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	/*
 	 * Truncate the collected events.
 	 */
@@ -1435,11 +1435,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	 */
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
-	/*
-	 * Clear out the txn count so that ->cancel_txn() which gets
-	 * run after ->commit_txn() doesn't undo things.
-	 */
-	cpuc->n_txn = 0;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 
 	return 0;
 }
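
Taken together, the three conversions give every driver the same
obligation: set the TXN flag in start_txn(), clear it in cancel_txn(),
and, on success only, clear it in commit_txn() as well. A minimal
self-checking sketch of that invariant follows; the struct, the flag
value, and the schedulable parameter are illustrative assumptions, not
kernel definitions.

	#include <assert.h>
	#include <errno.h>

	#define PERF_EVENT_TXN 1	/* illustrative; mirrors the flag in the diffs */

	struct cpu_hw_events { unsigned int group_flag; };

	static void start_txn(struct cpu_hw_events *c)
	{
		c->group_flag |= PERF_EVENT_TXN;	/* open the transaction */
	}

	static void cancel_txn(struct cpu_hw_events *c)
	{
		c->group_flag &= ~PERF_EVENT_TXN;	/* failure path closes it */
	}

	static int commit_txn(struct cpu_hw_events *c, int schedulable)
	{
		if (!schedulable)
			return -EAGAIN;	/* TXN stays set; caller must cancel_txn() */

		c->group_flag &= ~PERF_EVENT_TXN;	/* success closes the txn */
		return 0;
	}

	int main(void)
	{
		struct cpu_hw_events c = { 0 };

		start_txn(&c);
		assert(commit_txn(&c, 1) == 0 && !(c.group_flag & PERF_EVENT_TXN));

		start_txn(&c);
		assert(commit_txn(&c, 0) != 0 && (c.group_flag & PERF_EVENT_TXN));
		cancel_txn(&c);
		assert(!(c.group_flag & PERF_EVENT_TXN));
		return 0;
	}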