Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
 arch/x86/kernel/cpu/perf_counter.c | 53 +-
 1 file changed, 1 insertion(+), 52 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d844ae41d5a3..902282d68b0c 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -674,20 +674,6 @@ static void pmc_generic_disable(struct perf_counter *counter)
 	x86_perf_counter_update(counter, hwc, idx);
 }
 
-static void perf_store_irq_data(struct perf_counter *counter, u64 data)
-{
-	struct perf_data *irqdata = counter->irqdata;
-
-	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
-		irqdata->overrun++;
-	} else {
-		u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
-		*p = data;
-		irqdata->len += sizeof(u64);
-	}
-}
-
 /*
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
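
The removed perf_store_irq_data() was a bare-bones record writer: it appended raw u64 words to a small fixed-size per-counter buffer and, once fewer than sizeof(u64) bytes remained, counted the dropped record in ->overrun instead of writing. A minimal userspace sketch of that append-or-overrun scheme follows; the 1024-byte PERF_DATA_BUFLEN value, the struct layout and the main() driver are assumptions for illustration, not the kernel's definitions.

    /*
     * Userspace model of the removed append-or-overrun scheme. The
     * 1024-byte PERF_DATA_BUFLEN is an illustrative stand-in, not the
     * kernel's definition.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PERF_DATA_BUFLEN 1024

    struct perf_data {
            unsigned int len;       /* bytes used in data[] */
            unsigned int overrun;   /* records dropped while full */
            uint8_t data[PERF_DATA_BUFLEN];
    };

    static void store_irq_data(struct perf_data *irqdata, uint64_t data)
    {
            if (irqdata->len > PERF_DATA_BUFLEN - sizeof(uint64_t)) {
                    irqdata->overrun++;     /* no room: count the drop, never block */
            } else {
                    memcpy(&irqdata->data[irqdata->len], &data, sizeof(data));
                    irqdata->len += sizeof(data);
            }
    }

    int main(void)
    {
            struct perf_data d = { 0 };
            int i;

            for (i = 0; i < 200; i++)       /* 200 * 8 bytes > 1024: forces overrun */
                    store_irq_data(&d, (uint64_t)i);

            printf("len=%u overrun=%u\n", d.len, d.overrun);        /* len=1024 overrun=72 */
            return 0;
    }

Counting overruns rather than blocking is what made the helper safe to call from NMI context: the writer never waits, and a reader can detect lost records from the overrun count.
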
@@ -704,22 +690,6 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__pmc_generic_enable(counter, hwc, idx);
 }
 
-static void
-perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
-{
-	struct perf_counter *counter, *group_leader = sibling->group_leader;
-
-	/*
-	 * Store sibling timestamps (if any):
-	 */
-	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-
-		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
-		perf_store_irq_data(sibling, counter->hw_event.event_config);
-		perf_store_irq_data(sibling, atomic64_read(&counter->count));
-	}
-}
-
 /*
  * Maximum interrupt frequency of 100KHz per CPU
  */
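
The removed perf_handle_group() implemented the PERF_RECORD_GROUP case: when one group member overflowed, it walked the group leader's sibling_list, re-read each sibling's hardware count via x86_perf_counter_update(), and stored an (event_config, count) pair for every sibling into the interrupting counter's buffer. Below is a hedged userspace model of that walk, with an array standing in for the kernel's linked list and an emit() stub standing in for perf_store_irq_data(); the struct layout and the sample event codes are illustrative assumptions.

    /*
     * Userspace model of the removed PERF_RECORD_GROUP walk: for every
     * counter in a group, emit an (event_config, count) pair. The struct
     * layout and the event codes below are illustrative assumptions.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct counter {
            uint64_t event_config;  /* which hardware event this counter measures */
            uint64_t count;         /* current value; the kernel re-read it first */
    };

    /* Stand-in for perf_store_irq_data(): just print each u64 record. */
    static void emit(uint64_t data)
    {
            printf("record: %#llx\n", (unsigned long long)data);
    }

    static void handle_group(const struct counter *siblings, int n)
    {
            int i;

            /* The kernel walked group_leader->sibling_list; an array models it. */
            for (i = 0; i < n; i++) {
                    emit(siblings[i].event_config);
                    emit(siblings[i].count);
            }
    }

    int main(void)
    {
            struct counter group[] = {
                    { .event_config = 0x3c, .count = 123456 },      /* e.g. cycles */
                    { .event_config = 0xc0, .count =  98765 },      /* e.g. instructions */
            };

            handle_group(group, 2);
            return 0;
    }

Pairing each count with its event_config is what lets a reader demultiplex the flat u64 stream back into per-event values.
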
@@ -754,28 +724,7 @@ again:
 			continue;
 
 		perf_save_and_restart(counter);
-
-		switch (counter->hw_event.record_type) {
-		case PERF_RECORD_SIMPLE:
-			continue;
-		case PERF_RECORD_IRQ:
-			perf_store_irq_data(counter, instruction_pointer(regs));
-			break;
-		case PERF_RECORD_GROUP:
-			perf_handle_group(counter, &status, &ack);
-			break;
-		}
-		/*
-		 * From NMI context we cannot call into the scheduler to
-		 * do a task wakeup - but we mark these generic as
-		 * wakeup_pending and initate a wakeup callback:
-		 */
-		if (nmi) {
-			counter->wakeup_pending = 1;
-			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
-		} else {
-			wake_up(&counter->waitq);
-		}
+		perf_counter_output(counter, nmi, regs);
 	}
 
 	hw_perf_ack_status(ack);
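
The removed tail of the overflow handler held both the record-type switch and the wakeup policy, and its comment states the constraint: NMI context cannot call into the scheduler, so the handler could only set ->wakeup_pending plus TIF_PERF_COUNTERS and let the wakeup run later, while ordinary interrupt context could wake_up() directly. The single new perf_counter_output() call now absorbs both duties. A small userspace model of that deferred-wakeup rule follows; overflow_handler(), return_to_user() and wake_up_waiters() are hypothetical names for illustration.

    /*
     * Userspace model of the deferred-wakeup rule: NMI context may only
     * set flags, the actual wakeup runs later on the return-to-user path.
     * All names here are hypothetical stand-ins for the kernel's.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct counter {
            bool wakeup_pending;    /* set from NMI, consumed on the exit path */
    };

    /* Stand-in for wake_up(&counter->waitq). */
    static void wake_up_waiters(struct counter *c)
    {
            printf("waking readers of counter %p\n", (void *)c);
    }

    static void overflow_handler(struct counter *c, bool nmi)
    {
            if (nmi)
                    c->wakeup_pending = true;       /* defer: NMI must not call the scheduler */
            else
                    wake_up_waiters(c);             /* IRQ context: safe to wake directly */
    }

    /* Models the TIF_PERF_COUNTERS work done on the way back to userspace. */
    static void return_to_user(struct counter *c)
    {
            if (c->wakeup_pending) {
                    c->wakeup_pending = false;
                    wake_up_waiters(c);
            }
    }

    int main(void)
    {
            struct counter c = { .wakeup_pending = false };

            overflow_handler(&c, true);     /* NMI path: only marks pending */
            return_to_user(&c);             /* the deferred wakeup happens here */
            return 0;
    }

Judging by the new call's arguments, the same split survives in the unified output path: whether a wakeup may happen immediately still hinges on the nmi flag passed down to perf_counter_output().
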