| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-06-03 18:45:26 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-06-03 18:45:26 -0400 |
| commit | f150dba6d4a1e275b62ca76572c2786c71b91e85 (patch) | |
| tree | 1fb8ed0a64f69431e8a3304a1b346a19979028b9 /arch/x86 | |
| parent | 636667a545b2d16797f27002a65d688c195c9b60 (diff) | |
| parent | c6df8d5ab87a246942d138321e1721edbb69f6e1 (diff) | |
Merge branch 'perf-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf: Fix crash in swevents
perf buildid-list: Fix --with-hits event processing
perf scripts python: Give field dict to unhandled callback
perf hist: fix objdump output parsing
perf-record: Check correct pid when forking
perf: Do the comm inheritance per thread in event__process_task
perf: Use event__process_task from perf sched
perf: Process comm events by tid
blktrace: Fix new kernel-doc warnings
perf_events: Fix unincremented buffer base on partial copy
perf_events: Fix event scheduling issues introduced by transactional API
perf_events, trace: Fix perf_trace_destroy(), mutex went missing
perf_events, trace: Fix probe unregister race
perf_events: Fix races in group composition
perf_events: Fix races and clean up perf_event and perf_mmap_data interaction
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 22 |
1 files changed, 22 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c77586061bcb..5db5b7d65a18 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -106,6 +106,7 @@ struct cpu_hw_events {
 
 	int n_events;
 	int n_added;
+	int n_txn;
 	int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
 	u64 tags[X86_PMC_IDX_MAX];
 	struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
@@ -983,6 +984,7 @@ static int x86_pmu_enable(struct perf_event *event)
 out:
 	cpuc->n_events = n;
 	cpuc->n_added += n - n0;
+	cpuc->n_txn += n - n0;
 
 	return 0;
 }
@@ -1089,6 +1091,14 @@ static void x86_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int i;
 
+	/*
+	 * If we're called during a txn, we don't need to do anything.
+	 * The events never got scheduled and ->cancel_txn will truncate
+	 * the event_list.
+	 */
+	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+		return;
+
 	x86_pmu_stop(event);
 
 	for (i = 0; i < cpuc->n_events; i++) {
@@ -1379,6 +1389,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuc->n_txn = 0;
 }
 
 /*
@@ -1391,6 +1402,11 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	/*
+	 * Truncate the collected events.
+	 */
+	cpuc->n_added -= cpuc->n_txn;
+	cpuc->n_events -= cpuc->n_txn;
 }
 
 /*
@@ -1419,6 +1435,12 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	 */
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
+	/*
+	 * Clear out the txn count so that ->cancel_txn() which gets
+	 * run after ->commit_txn() doesn't undo things.
+	 */
+	cpuc->n_txn = 0;
+
 	return 0;
 }
 
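The change visible in this diff corresponds to "perf_events: Fix event scheduling issues introduced by transactional API" from the merge summary above: `n_txn` counts how many events the current transaction added, `->cancel_txn()` uses it to roll back `n_added` and `n_events`, and `->commit_txn()` zeroes it so the `->cancel_txn()` that (per the comment in the last hunk) runs after a successful commit cannot undo a committed group. The sketch below is a minimal, standalone userspace model of just that counter bookkeeping, not kernel code: the struct and function names (`model_cpuc`, `start_txn`, `add_event`, `cancel_txn`, `commit_txn`) and the two scenarios in `main()` are invented for illustration; only the three counters and the described call ordering come from the patch.

```c
/*
 * Standalone userspace model of the n_events/n_added/n_txn bookkeeping
 * added by this patch.  Not kernel code: the hardware scheduling step
 * and the real struct cpu_hw_events are omitted for illustration.
 */
#include <assert.h>
#include <stdio.h>

struct model_cpuc {
	int n_events;	/* events currently on the software event list */
	int n_added;	/* events added since the last hw reprogram */
	int n_txn;	/* events collected by the current transaction */
};

/* mirrors the n_txn reset in x86_pmu_start_txn(): a transaction starts empty */
static void start_txn(struct model_cpuc *c)
{
	c->n_txn = 0;
}

/* mirrors the counter updates at the end of x86_pmu_enable() */
static void add_event(struct model_cpuc *c)
{
	c->n_events += 1;
	c->n_added  += 1;
	c->n_txn    += 1;
}

/* mirrors the counter truncation in x86_pmu_cancel_txn() */
static void cancel_txn(struct model_cpuc *c)
{
	c->n_added  -= c->n_txn;
	c->n_events -= c->n_txn;
}

/*
 * mirrors the n_txn reset at the end of x86_pmu_commit_txn(): a later
 * ->cancel_txn() then becomes a no-op on the counters
 */
static void commit_txn(struct model_cpuc *c)
{
	c->n_txn = 0;
}

int main(void)
{
	struct model_cpuc c = { .n_events = 2, .n_added = 0, .n_txn = 0 };

	/* a 3-event group that fails to schedule: cancel rolls it back */
	start_txn(&c);
	add_event(&c); add_event(&c); add_event(&c);
	cancel_txn(&c);
	assert(c.n_events == 2 && c.n_added == 0);

	/* a 2-event group that schedules: commit, then a late cancel */
	start_txn(&c);
	add_event(&c); add_event(&c);
	commit_txn(&c);
	cancel_txn(&c);		/* harmless now that n_txn is 0 */
	assert(c.n_events == 4 && c.n_added == 2);

	printf("n_events=%d n_added=%d n_txn=%d\n",
	       c.n_events, c.n_added, c.n_txn);
	return 0;
}
```

Running the model prints `n_events=4 n_added=2 n_txn=0`: the failed group leaves no residue in the counters, while the committed group's two events stay counted even though a cancel ran after the commit, which is the accounting this patch's n_txn field is meant to guarantee.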