diff options
author | Peter Zijlstra <peterz@infradead.org> | 2010-04-08 17:03:20 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-05-07 05:31:02 -0400 |
commit | 2b0b5c6fe9b383f3cf35a0a6371c9d577bd523ff (patch) | |
tree | 673509da6a079615cb021eb5772edc472cbfd694 | |
parent | 1e9a6d8d44cb6dcd2799b36ceb23007e6a423bfe (diff) |
perf, x86: Consolidate some code repetition
Remove some duplicated logic.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_ds.c | 97 |
1 files changed, 44 insertions, 53 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 080b9b065bdd..35056f715e9e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -452,14 +452,54 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) | |||
452 | 452 | ||
453 | static int intel_pmu_save_and_restart(struct perf_event *event); | 453 | static int intel_pmu_save_and_restart(struct perf_event *event); |
454 | 454 | ||
455 | static void __intel_pmu_pebs_event(struct perf_event *event, | ||
456 | struct pt_regs *iregs, void *__pebs) | ||
457 | { | ||
458 | /* | ||
459 | * We cast to pebs_record_core since that is a subset of | ||
460 | * both formats and we don't use the other fields in this | ||
461 | * routine. | ||
462 | */ | ||
463 | struct pebs_record_core *pebs = __pebs; | ||
464 | struct perf_sample_data data; | ||
465 | struct pt_regs regs; | ||
466 | |||
467 | if (!intel_pmu_save_and_restart(event)) | ||
468 | return; | ||
469 | |||
470 | perf_sample_data_init(&data, 0); | ||
471 | data.period = event->hw.last_period; | ||
472 | |||
473 | /* | ||
474 | * We use the interrupt regs as a base because the PEBS record | ||
475 | * does not contain a full regs set, specifically it seems to | ||
476 | * lack segment descriptors, which get used by things like | ||
477 | * user_mode(). | ||
478 | * | ||
479 | * In the simple case fix up only the IP and BP,SP regs, for | ||
480 | * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. | ||
481 | * A possible PERF_SAMPLE_REGS will have to transfer all regs. | ||
482 | */ | ||
483 | regs = *iregs; | ||
484 | regs.ip = pebs->ip; | ||
485 | regs.bp = pebs->bp; | ||
486 | regs.sp = pebs->sp; | ||
487 | |||
488 | if (intel_pmu_pebs_fixup_ip(&regs)) | ||
489 | regs.flags |= PERF_EFLAGS_EXACT; | ||
490 | else | ||
491 | regs.flags &= ~PERF_EFLAGS_EXACT; | ||
492 | |||
493 | if (perf_event_overflow(event, 1, &data, &regs)) | ||
494 | x86_pmu_stop(event); | ||
495 | } | ||
496 | |||
455 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | 497 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) |
456 | { | 498 | { |
457 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 499 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
458 | struct debug_store *ds = cpuc->ds; | 500 | struct debug_store *ds = cpuc->ds; |
459 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ | 501 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ |
460 | struct pebs_record_core *at, *top; | 502 | struct pebs_record_core *at, *top; |
461 | struct perf_sample_data data; | ||
462 | struct pt_regs regs; | ||
463 | int n; | 503 | int n; |
464 | 504 | ||
465 | if (!ds || !x86_pmu.pebs) | 505 | if (!ds || !x86_pmu.pebs) |
@@ -485,9 +525,6 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
485 | if (n <= 0) | 525 | if (n <= 0) |
486 | return; | 526 | return; |
487 | 527 | ||
488 | if (!intel_pmu_save_and_restart(event)) | ||
489 | return; | ||
490 | |||
491 | /* | 528 | /* |
492 | * Should not happen, we program the threshold at 1 and do not | 529 | * Should not happen, we program the threshold at 1 and do not |
493 | * set a reset value. | 530 | * set a reset value. |
@@ -495,31 +532,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
495 | WARN_ON_ONCE(n > 1); | 532 | WARN_ON_ONCE(n > 1); |
496 | at += n - 1; | 533 | at += n - 1; |
497 | 534 | ||
498 | perf_sample_data_init(&data, 0); | 535 | __intel_pmu_pebs_event(event, iregs, at); |
499 | data.period = event->hw.last_period; | ||
500 | |||
501 | /* | ||
502 | * We use the interrupt regs as a base because the PEBS record | ||
503 | * does not contain a full regs set, specifically it seems to | ||
504 | * lack segment descriptors, which get used by things like | ||
505 | * user_mode(). | ||
506 | * | ||
507 | * In the simple case fix up only the IP and BP,SP regs, for | ||
508 | * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. | ||
509 | * A possible PERF_SAMPLE_REGS will have to transfer all regs. | ||
510 | */ | ||
511 | regs = *iregs; | ||
512 | regs.ip = at->ip; | ||
513 | regs.bp = at->bp; | ||
514 | regs.sp = at->sp; | ||
515 | |||
516 | if (intel_pmu_pebs_fixup_ip(&regs)) | ||
517 | regs.flags |= PERF_EFLAGS_EXACT; | ||
518 | else | ||
519 | regs.flags &= ~PERF_EFLAGS_EXACT; | ||
520 | |||
521 | if (perf_event_overflow(event, 1, &data, &regs)) | ||
522 | x86_pmu_stop(event); | ||
523 | } | 536 | } |
524 | 537 | ||
525 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | 538 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) |
@@ -527,9 +540,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
527 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 540 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
528 | struct debug_store *ds = cpuc->ds; | 541 | struct debug_store *ds = cpuc->ds; |
529 | struct pebs_record_nhm *at, *top; | 542 | struct pebs_record_nhm *at, *top; |
530 | struct perf_sample_data data; | ||
531 | struct perf_event *event = NULL; | 543 | struct perf_event *event = NULL; |
532 | struct pt_regs regs; | ||
533 | u64 status = 0; | 544 | u64 status = 0; |
534 | int bit, n; | 545 | int bit, n; |
535 | 546 | ||
@@ -571,27 +582,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | |||
571 | if (!event || bit >= MAX_PEBS_EVENTS) | 582 | if (!event || bit >= MAX_PEBS_EVENTS) |
572 | continue; | 583 | continue; |
573 | 584 | ||
574 | if (!intel_pmu_save_and_restart(event)) | 585 | __intel_pmu_pebs_event(event, iregs, at); |
575 | continue; | ||
576 | |||
577 | perf_sample_data_init(&data, 0); | ||
578 | data.period = event->hw.last_period; | ||
579 | |||
580 | /* | ||
581 | * See the comment in intel_pmu_drain_pebs_core() | ||
582 | */ | ||
583 | regs = *iregs; | ||
584 | regs.ip = at->ip; | ||
585 | regs.bp = at->bp; | ||
586 | regs.sp = at->sp; | ||
587 | |||
588 | if (intel_pmu_pebs_fixup_ip(&regs)) | ||
589 | regs.flags |= PERF_EFLAGS_EXACT; | ||
590 | else | ||
591 | regs.flags &= ~PERF_EFLAGS_EXACT; | ||
592 | |||
593 | if (perf_event_overflow(event, 1, &data, &regs)) | ||
594 | x86_pmu_stop(event); | ||
595 | } | 586 | } |
596 | } | 587 | } |
597 | 588 | ||