diff options
| author | Lin Ming <ming.m.lin@intel.com> | 2010-05-08 06:28:41 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-05-11 11:08:24 -0400 |
| commit | 8e6d5573af55435160d329f6ae3fe16a0abbdaec (patch) | |
| tree | 30156432a5589fbb602bbe22e31c9e48aec9c476 | |
| parent | 96c21a460a37880abfbc8445d5b098dbab958a29 (diff) | |
perf, powerpc: Implement group scheduling transactional APIs
[paulus@samba.org: Set cpuhw->event[i]->hw.config in
power_pmu_commit_txn.]
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100508102841.GA10650@brick.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | arch/powerpc/kernel/perf_event.c | 129 |
1 file changed, 68 insertions, 61 deletions
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 08460a2e9f41..43b83c35cf54 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
| @@ -35,6 +35,9 @@ struct cpu_hw_events { | |||
| 35 | u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | 35 | u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; |
| 36 | unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | 36 | unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; |
| 37 | unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | 37 | unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; |
| 38 | |||
| 39 | unsigned int group_flag; | ||
| 40 | int n_txn_start; | ||
| 38 | }; | 41 | }; |
| 39 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | 42 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); |
| 40 | 43 | ||
| @@ -718,66 +721,6 @@ static int collect_events(struct perf_event *group, int max_count, | |||
| 718 | return n; | 721 | return n; |
| 719 | } | 722 | } |
| 720 | 723 | ||
| 721 | static void event_sched_in(struct perf_event *event) | ||
| 722 | { | ||
| 723 | event->state = PERF_EVENT_STATE_ACTIVE; | ||
| 724 | event->oncpu = smp_processor_id(); | ||
| 725 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; | ||
| 726 | if (is_software_event(event)) | ||
| 727 | event->pmu->enable(event); | ||
| 728 | } | ||
| 729 | |||
| 730 | /* | ||
| 731 | * Called to enable a whole group of events. | ||
| 732 | * Returns 1 if the group was enabled, or -EAGAIN if it could not be. | ||
| 733 | * Assumes the caller has disabled interrupts and has | ||
| 734 | * frozen the PMU with hw_perf_save_disable. | ||
| 735 | */ | ||
| 736 | int hw_perf_group_sched_in(struct perf_event *group_leader, | ||
| 737 | struct perf_cpu_context *cpuctx, | ||
| 738 | struct perf_event_context *ctx) | ||
| 739 | { | ||
| 740 | struct cpu_hw_events *cpuhw; | ||
| 741 | long i, n, n0; | ||
| 742 | struct perf_event *sub; | ||
| 743 | |||
| 744 | if (!ppmu) | ||
| 745 | return 0; | ||
| 746 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 747 | n0 = cpuhw->n_events; | ||
| 748 | n = collect_events(group_leader, ppmu->n_counter - n0, | ||
| 749 | &cpuhw->event[n0], &cpuhw->events[n0], | ||
| 750 | &cpuhw->flags[n0]); | ||
| 751 | if (n < 0) | ||
| 752 | return -EAGAIN; | ||
| 753 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, n)) | ||
| 754 | return -EAGAIN; | ||
| 755 | i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); | ||
| 756 | if (i < 0) | ||
| 757 | return -EAGAIN; | ||
| 758 | cpuhw->n_events = n0 + n; | ||
| 759 | cpuhw->n_added += n; | ||
| 760 | |||
| 761 | /* | ||
| 762 | * OK, this group can go on; update event states etc., | ||
| 763 | * and enable any software events | ||
| 764 | */ | ||
| 765 | for (i = n0; i < n0 + n; ++i) | ||
| 766 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | ||
| 767 | cpuctx->active_oncpu += n; | ||
| 768 | n = 1; | ||
| 769 | event_sched_in(group_leader); | ||
| 770 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { | ||
| 771 | if (sub->state != PERF_EVENT_STATE_OFF) { | ||
| 772 | event_sched_in(sub); | ||
| 773 | ++n; | ||
| 774 | } | ||
| 775 | } | ||
| 776 | ctx->nr_active += n; | ||
| 777 | |||
| 778 | return 1; | ||
| 779 | } | ||
| 780 | |||
| 781 | /* | 724 | /* |
| 782 | * Add a event to the PMU. | 725 | * Add a event to the PMU. |
| 783 | * If all events are not already frozen, then we disable and | 726 | * If all events are not already frozen, then we disable and |
| @@ -805,12 +748,22 @@ static int power_pmu_enable(struct perf_event *event) | |||
| 805 | cpuhw->event[n0] = event; | 748 | cpuhw->event[n0] = event; |
| 806 | cpuhw->events[n0] = event->hw.config; | 749 | cpuhw->events[n0] = event->hw.config; |
| 807 | cpuhw->flags[n0] = event->hw.event_base; | 750 | cpuhw->flags[n0] = event->hw.event_base; |
| 751 | |||
| 752 | /* | ||
| 753 | * If group events scheduling transaction was started, | ||
| 754 | * skip the schedulability test here, it will be performed | ||
| 755 | * at commit time (->commit_txn) as a whole | ||
| 756 | */ | ||
| 757 | if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED) | ||
| 758 | goto nocheck; | ||
| 759 | |||
| 808 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) | 760 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) |
| 809 | goto out; | 761 | goto out; |
| 810 | if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) | 762 | if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) |
| 811 | goto out; | 763 | goto out; |
| 812 | |||
| 813 | event->hw.config = cpuhw->events[n0]; | 764 | event->hw.config = cpuhw->events[n0]; |
| 765 | |||
| 766 | nocheck: | ||
| 814 | ++cpuhw->n_events; | 767 | ++cpuhw->n_events; |
| 815 | ++cpuhw->n_added; | 768 | ++cpuhw->n_added; |
| 816 | 769 | ||
| @@ -896,11 +849,65 @@ static void power_pmu_unthrottle(struct perf_event *event) | |||
| 896 | local_irq_restore(flags); | 849 | local_irq_restore(flags); |
| 897 | } | 850 | } |
| 898 | 851 | ||
| 852 | /* | ||
| 853 | * Start group events scheduling transaction | ||
| 854 | * Set the flag to make pmu::enable() not perform the | ||
| 855 | * schedulability test, it will be performed at commit time | ||
| 856 | */ | ||
| 857 | void power_pmu_start_txn(const struct pmu *pmu) | ||
| 858 | { | ||
| 859 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 860 | |||
| 861 | cpuhw->group_flag |= PERF_EVENT_TXN_STARTED; | ||
| 862 | cpuhw->n_txn_start = cpuhw->n_events; | ||
| 863 | } | ||
| 864 | |||
| 865 | /* | ||
| 866 | * Stop group events scheduling transaction | ||
| 867 | * Clear the flag and pmu::enable() will perform the | ||
| 868 | * schedulability test. | ||
| 869 | */ | ||
| 870 | void power_pmu_cancel_txn(const struct pmu *pmu) | ||
| 871 | { | ||
| 872 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 873 | |||
| 874 | cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED; | ||
| 875 | } | ||
| 876 | |||
| 877 | /* | ||
| 878 | * Commit group events scheduling transaction | ||
| 879 | * Perform the group schedulability test as a whole | ||
| 880 | * Return 0 if success | ||
| 881 | */ | ||
| 882 | int power_pmu_commit_txn(const struct pmu *pmu) | ||
| 883 | { | ||
| 884 | struct cpu_hw_events *cpuhw; | ||
| 885 | long i, n; | ||
| 886 | |||
| 887 | if (!ppmu) | ||
| 888 | return -EAGAIN; | ||
| 889 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 890 | n = cpuhw->n_events; | ||
| 891 | if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) | ||
| 892 | return -EAGAIN; | ||
| 893 | i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); | ||
| 894 | if (i < 0) | ||
| 895 | return -EAGAIN; | ||
| 896 | |||
| 897 | for (i = cpuhw->n_txn_start; i < n; ++i) | ||
| 898 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | ||
| 899 | |||
| 900 | return 0; | ||
| 901 | } | ||
| 902 | |||
| 899 | struct pmu power_pmu = { | 903 | struct pmu power_pmu = { |
| 900 | .enable = power_pmu_enable, | 904 | .enable = power_pmu_enable, |
| 901 | .disable = power_pmu_disable, | 905 | .disable = power_pmu_disable, |
| 902 | .read = power_pmu_read, | 906 | .read = power_pmu_read, |
| 903 | .unthrottle = power_pmu_unthrottle, | 907 | .unthrottle = power_pmu_unthrottle, |
| 908 | .start_txn = power_pmu_start_txn, | ||
| 909 | .cancel_txn = power_pmu_cancel_txn, | ||
| 910 | .commit_txn = power_pmu_commit_txn, | ||
| 904 | }; | 911 | }; |
| 905 | 912 | ||
| 906 | /* | 913 | /* |
