diff options
author | Peter Zijlstra <peterz@infradead.org> | 2010-04-08 17:03:20 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-05-07 05:31:02 -0400 |
commit | ab608344bcbde4f55ec4cd911b686b0ce3eae076 (patch) | |
tree | ebd38efabfaab59d6de11a24143d70e1eec36fae /arch/x86/kernel/cpu | |
parent | 2b0b5c6fe9b383f3cf35a0a6371c9d577bd523ff (diff) |
perf, x86: Improve the PEBS ABI
Rename perf_event_attr::precise to perf_event_attr::precise_ip and
widen it to 2 bits. This new field describes the required precision of
the PERF_SAMPLE_IP field:
0 - SAMPLE_IP can have arbitrary skid
1 - SAMPLE_IP must have constant skid
2 - SAMPLE_IP requested to have 0 skid
3 - SAMPLE_IP must have 0 skid
And modify the Intel PEBS code accordingly. The PEBS implementation
now supports up to precise_ip == 2, where we perform the IP fixup.
Also s/PERF_RECORD_MISC_EXACT/&_IP/ to clarify its meaning, this bit
should be set for each PERF_SAMPLE_IP field known to match the actual
instruction triggering the event.
This new scheme allows for a PEBS mode that uses the buffer for more
than a single event.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 17 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel.c | 4 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_ds.c | 12 |
3 files changed, 24 insertions, 9 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 4a3f1f2b9b91..27fa9eeed024 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -488,6 +488,21 @@ static int x86_setup_perfctr(struct perf_event *event) | |||
488 | 488 | ||
489 | static int x86_pmu_hw_config(struct perf_event *event) | 489 | static int x86_pmu_hw_config(struct perf_event *event) |
490 | { | 490 | { |
491 | if (event->attr.precise_ip) { | ||
492 | int precise = 0; | ||
493 | |||
494 | /* Support for constant skid */ | ||
495 | if (x86_pmu.pebs) | ||
496 | precise++; | ||
497 | |||
498 | /* Support for IP fixup */ | ||
499 | if (x86_pmu.lbr_nr) | ||
500 | precise++; | ||
501 | |||
502 | if (event->attr.precise_ip > precise) | ||
503 | return -EOPNOTSUPP; | ||
504 | } | ||
505 | |||
491 | /* | 506 | /* |
492 | * Generate PMC IRQs: | 507 | * Generate PMC IRQs: |
493 | * (keep 'enabled' bit clear for now) | 508 | * (keep 'enabled' bit clear for now) |
@@ -1780,7 +1795,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) | |||
1780 | } | 1795 | } |
1781 | 1796 | ||
1782 | if (regs->flags & PERF_EFLAGS_EXACT) | 1797 | if (regs->flags & PERF_EFLAGS_EXACT) |
1783 | misc |= PERF_RECORD_MISC_EXACT; | 1798 | misc |= PERF_RECORD_MISC_EXACT_IP; |
1784 | 1799 | ||
1785 | return misc; | 1800 | return misc; |
1786 | } | 1801 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a4b56ac425cb..fdbc652d3feb 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -563,7 +563,7 @@ static void intel_pmu_disable_event(struct perf_event *event) | |||
563 | 563 | ||
564 | x86_pmu_disable_event(event); | 564 | x86_pmu_disable_event(event); |
565 | 565 | ||
566 | if (unlikely(event->attr.precise)) | 566 | if (unlikely(event->attr.precise_ip)) |
567 | intel_pmu_pebs_disable(event); | 567 | intel_pmu_pebs_disable(event); |
568 | } | 568 | } |
569 | 569 | ||
@@ -615,7 +615,7 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
615 | return; | 615 | return; |
616 | } | 616 | } |
617 | 617 | ||
618 | if (unlikely(event->attr.precise)) | 618 | if (unlikely(event->attr.precise_ip)) |
619 | intel_pmu_pebs_enable(event); | 619 | intel_pmu_pebs_enable(event); |
620 | 620 | ||
621 | __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); | 621 | __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 35056f715e9e..18018d1311cd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -307,7 +307,7 @@ intel_pebs_constraints(struct perf_event *event) | |||
307 | { | 307 | { |
308 | struct event_constraint *c; | 308 | struct event_constraint *c; |
309 | 309 | ||
310 | if (!event->attr.precise) | 310 | if (!event->attr.precise_ip) |
311 | return NULL; | 311 | return NULL; |
312 | 312 | ||
313 | if (x86_pmu.pebs_constraints) { | 313 | if (x86_pmu.pebs_constraints) { |
@@ -330,7 +330,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event) | |||
330 | cpuc->pebs_enabled |= 1ULL << hwc->idx; | 330 | cpuc->pebs_enabled |= 1ULL << hwc->idx; |
331 | WARN_ON_ONCE(cpuc->enabled); | 331 | WARN_ON_ONCE(cpuc->enabled); |
332 | 332 | ||
333 | if (x86_pmu.intel_cap.pebs_trap) | 333 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) |
334 | intel_pmu_lbr_enable(event); | 334 | intel_pmu_lbr_enable(event); |
335 | } | 335 | } |
336 | 336 | ||
@@ -345,7 +345,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event) | |||
345 | 345 | ||
346 | hwc->config |= ARCH_PERFMON_EVENTSEL_INT; | 346 | hwc->config |= ARCH_PERFMON_EVENTSEL_INT; |
347 | 347 | ||
348 | if (x86_pmu.intel_cap.pebs_trap) | 348 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) |
349 | intel_pmu_lbr_disable(event); | 349 | intel_pmu_lbr_disable(event); |
350 | } | 350 | } |
351 | 351 | ||
@@ -485,7 +485,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
485 | regs.bp = pebs->bp; | 485 | regs.bp = pebs->bp; |
486 | regs.sp = pebs->sp; | 486 | regs.sp = pebs->sp; |
487 | 487 | ||
488 | if (intel_pmu_pebs_fixup_ip(&regs)) | 488 | if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs)) |
489 | regs.flags |= PERF_EFLAGS_EXACT; | 489 | regs.flags |= PERF_EFLAGS_EXACT; |
490 | else | 490 | else |
491 | regs.flags &= ~PERF_EFLAGS_EXACT; | 491 | regs.flags &= ~PERF_EFLAGS_EXACT; |
@@ -518,7 +518,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
518 | 518 | ||
519 | WARN_ON_ONCE(!event); | 519 | WARN_ON_ONCE(!event); |
520 | 520 | ||
521 | if (!event->attr.precise) | 521 | if (!event->attr.precise_ip) |
522 | return; | 522 | return; |
523 | 523 | ||
524 | n = top - at; | 524 | n = top - at; |
@@ -570,7 +570,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
570 | 570 | ||
571 | WARN_ON_ONCE(!event); | 571 | WARN_ON_ONCE(!event); |
572 | 572 | ||
573 | if (!event->attr.precise) | 573 | if (!event->attr.precise_ip) |
574 | continue; | 574 | continue; |
575 | 575 | ||
576 | if (__test_and_set_bit(bit, (unsigned long *)&status)) | 576 | if (__test_and_set_bit(bit, (unsigned long *)&status)) |