Diffstat (limited to 'arch')

-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	22
1 file changed, 22 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c77586061bcb..5db5b7d65a18 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -106,6 +106,7 @@ struct cpu_hw_events {
 
 	int			n_events;
 	int			n_added;
+	int			n_txn;
 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
 	u64			tags[X86_PMC_IDX_MAX];
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
@@ -983,6 +984,7 @@ static int x86_pmu_enable(struct perf_event *event)
 out:
 	cpuc->n_events = n;
 	cpuc->n_added += n - n0;
+	cpuc->n_txn += n - n0;
 
 	return 0;
 }
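
A worked trace may make the new counter concrete. The numbers below are invented for illustration and are not taken from the patch:

/*
 * Illustrative trace only.  Assume four events are already programmed on
 * the PMU and the core collects a three-event group inside a transaction,
 * calling x86_pmu_enable() once per event:
 *
 *	before ->start_txn():		n_events = 4, n_added = 0
 *	->start_txn():			n_txn = 0
 *	x86_pmu_enable(), three calls:	n_events = 7, n_added = 3, n_txn = 3
 *
 * n_added counts events collected since the hardware was last reprogrammed,
 * n_txn only those collected since ->start_txn(); ->cancel_txn() can
 * subtract n_txn from both to get back to the pre-transaction state.
 */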
@@ -1089,6 +1091,14 @@ static void x86_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int i;
 
+	/*
+	 * If we're called during a txn, we don't need to do anything.
+	 * The events never got scheduled and ->cancel_txn will truncate
+	 * the event_list.
+	 */
+	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+		return;
+
 	x86_pmu_stop(event);
 
 	for (i = 0; i < cpuc->n_events; i++) {
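
The early return above matters on the unwind path. Below is a minimal sketch of that situation, assuming the core tears down a partially scheduled group by calling ->disable() on members that were collected but never programmed; the function name is invented and this is not the core scheduler's code:

/*
 * Illustrative sketch, not kernel code.  A group collected after
 * ->start_txn() that cannot be scheduled sits in the CPU's event_list
 * but was never written to the hardware counters.
 */
static void sketch_unwind_failed_group(struct perf_event **group, int n)
{
	int i;

	for (i = 0; i < n; i++)
		x86_pmu_disable(group[i]);	/* no-op: txn still marked active */

	/*
	 * ->cancel_txn() then drops the whole group at once by rewinding
	 * n_events and n_added, so nothing is stopped or removed twice.
	 */
}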
@@ -1379,6 +1389,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuc->n_txn = 0;
 }
 
 /*
@@ -1391,6 +1402,11 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	/*
+	 * Truncate the collected events.
+	 */
+	cpuc->n_added -= cpuc->n_txn;
+	cpuc->n_events -= cpuc->n_txn;
 }
 
 /*
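
Concretely, "truncate" is just arithmetic on the counters; continuing the invented trace from above:

/*
 * Illustrative only: cancelling a transaction that had collected three
 * events (n_txn = 3) on top of four already-programmed ones:
 *
 *	cpuc->n_added  -= cpuc->n_txn;		3 -> 0
 *	cpuc->n_events -= cpuc->n_txn;		7 -> 4
 *
 * event_list[] itself is not cleared; the per-cpu code only walks entries
 * below n_events, so dropping n_events back to 4 discards the three
 * collected-but-unscheduled entries.
 */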
@@ -1419,6 +1435,12 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	 */
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
+	/*
+	 * Clear out the txn count so that ->cancel_txn() which gets
+	 * run after ->commit_txn() doesn't undo things.
+	 */
+	cpuc->n_txn = 0;
+
 	return 0;
 }
 
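
Taken together, the three hooks give the core an all-or-nothing way to schedule an event group. The sketch below shows one way a caller could drive them; it is an illustration only: the function name and error handling are invented, and the claim that ->cancel_txn() can still run after ->commit_txn() comes from the comment in the hunk above.

/* Illustrative sketch, not the core scheduler's code. */
static int sketch_sched_in_group(const struct pmu *pmu,
				 struct perf_event **ev, int n)
{
	int i;

	pmu->start_txn(pmu);			/* sets the txn flag, n_txn = 0 */

	for (i = 0; i < n; i++)
		if (pmu->enable(ev[i]))		/* collect only; bumps n_txn */
			goto fail;

	if (pmu->commit_txn(pmu))		/* schedule the group all-or-nothing */
		goto fail;

	/*
	 * commit_txn() cleared n_txn, so a ->cancel_txn() issued after this
	 * point (as its comment says can happen) rewinds nothing.
	 */
	return 0;

fail:
	pmu->cancel_txn(pmu);			/* rewind the n_txn collected events */
	return -1;
}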
