author	Stephane Eranian <eranian@google.com>	2010-02-01 07:50:01 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-02-04 03:59:50 -0500
commit	447a194b393f32699607fd99617a40abd6a95114 (patch)
tree	7d202a6ad8f80c913a4e3d439eedf5ba0abbbf39
parent	fce877e3a429940a986e085a41e8b57f2d922e36 (diff)
perf_events, x86: Fix bug in hw_perf_enable()
We cannot assume that because hwc->idx == assign[i], we can
avoid reprogramming the counter in hw_perf_enable().

The event may have been scheduled out and another event may
have been programmed into this counter. Thus, we need a more
robust way of verifying if the counter still contains
config/data related to an event.

This patch adds a generation number to each counter on each
CPU. Using this mechanism we can reliably verify whether the
content of a counter corresponds to an event.

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <4b66dc67.0b38560a.1635.ffffae18@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	34
-rw-r--r--	include/linux/perf_event.h	2
2 files changed, 30 insertions(+), 6 deletions(-)
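As an aside, the mechanism is easy to demonstrate outside the kernel. Below is a minimal standalone C sketch of the generation-tag idea (hypothetical names; plain userspace code, not the kernel implementation): each counter slot carries a tag that is bumped on every new assignment, so an event's remembered slot index is trusted only while the tag it observed is still current. The kernel version additionally compares smp_processor_id(), because the tags live in the per-CPU cpu_hw_events.

	/*
	 * Sketch of the generation-tag check; names are illustrative only.
	 */
	#include <stdio.h>

	#define NUM_COUNTERS 4

	struct counter_state {
		unsigned long long tags[NUM_COUNTERS]; /* per-slot generation number */
	};

	struct event_state {
		int idx;                     /* counter slot used last time, -1 if none */
		unsigned long long last_tag; /* generation observed at last assignment */
	};

	/* Assign the event to slot idx, bumping that slot's generation. */
	static void assign(struct counter_state *cs, struct event_state *ev, int idx)
	{
		ev->idx = idx;
		ev->last_tag = ++cs->tags[idx];
	}

	/* Reprogramming can be skipped only if nothing reused the slot since. */
	static int still_owns(const struct counter_state *cs,
			      const struct event_state *ev)
	{
		return ev->idx != -1 && ev->last_tag == cs->tags[ev->idx];
	}

	int main(void)
	{
		struct counter_state cs = { {0} };
		struct event_state a = { -1, ~0ULL };
		struct event_state b = { -1, ~0ULL };

		assign(&cs, &a, 2);
		printf("a owns slot 2: %d\n", still_owns(&cs, &a)); /* 1 */

		assign(&cs, &b, 2); /* slot 2 reused by another event */
		printf("a owns slot 2: %d\n", still_owns(&cs, &a)); /* 0: tag moved on */
		return 0;
	}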
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 96cfc1a4fe9f..a920f173a220 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -90,6 +90,7 @@ struct cpu_hw_events {
 	int			n_events;
 	int			n_added;
 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
+	u64			tags[X86_PMC_IDX_MAX];
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 };
 
@@ -1142,6 +1143,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
 
 	hwc->idx = -1;
+	hwc->last_cpu = -1;
+	hwc->last_tag = ~0ULL;
 
 	/*
 	 * Count user and OS events unless requested not to.
@@ -1457,11 +1460,14 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 	return n;
 }
 
-
 static inline void x86_assign_hw_event(struct perf_event *event,
-				struct hw_perf_event *hwc, int idx)
+				struct cpu_hw_events *cpuc, int i)
 {
-	hwc->idx = idx;
+	struct hw_perf_event *hwc = &event->hw;
+
+	hwc->idx = cpuc->assign[i];
+	hwc->last_cpu = smp_processor_id();
+	hwc->last_tag = ++cpuc->tags[i];
 
 	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
 		hwc->config_base = 0;
@@ -1480,6 +1486,15 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 	}
 }
 
+static inline int match_prev_assignment(struct hw_perf_event *hwc,
+					struct cpu_hw_events *cpuc,
+					int i)
+{
+	return hwc->idx == cpuc->assign[i] &&
+		hwc->last_cpu == smp_processor_id() &&
+		hwc->last_tag == cpuc->tags[i];
+}
+
 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
 
 void hw_perf_enable(void)
@@ -1508,7 +1523,14 @@ void hw_perf_enable(void)
 			event = cpuc->event_list[i];
 			hwc = &event->hw;
 
-			if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
+			/*
+			 * we can avoid reprogramming counter if:
+			 * - assigned same counter as last time
+			 * - running on same CPU as last time
+			 * - no other event has used the counter since
+			 */
+			if (hwc->idx == -1 ||
+			    match_prev_assignment(hwc, cpuc, i))
 				continue;
 
 			__x86_pmu_disable(event, cpuc);
@@ -1522,12 +1544,12 @@ void hw_perf_enable(void)
 			hwc = &event->hw;
 
 			if (hwc->idx == -1) {
-				x86_assign_hw_event(event, hwc, cpuc->assign[i]);
+				x86_assign_hw_event(event, cpuc, i);
 				x86_perf_event_set_period(event, hwc, hwc->idx);
 			}
 			/*
 			 * need to mark as active because x86_pmu_disable()
-			 * clear active_mask and eventsp[] yet it preserves
+			 * clear active_mask and events[] yet it preserves
 			 * idx
 			 */
 			set_bit(hwc->idx, cpuc->active_mask);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 556b0f4a668e..071a7db52549 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -478,9 +478,11 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		last_tag;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		idx;
+			int		last_cpu;
 		};
 		struct { /* software */
 			s64		remaining;