| field | value |
|---|---|
| author | Lin Ming <ming.m.lin@intel.com>, 2010-04-23 01:56:33 -0400 |
| committer | Ingo Molnar <mingo@elte.hu>, 2010-05-18 12:59:12 -0400 |
| commit | a13c3afd9b62b6dace80654964cc4ca7d2db8092 |
| tree | b1492c20445ef4eabf288b5e02a04a3136f4fd24 |
| parent | 6d1acfd5c6bfd5231c13a8f2858d7f2afbaa1b62 |
perf, sparc: Implement group scheduling transactional APIs
Convert to the transactional PMU API and remove the duplication of
group_sched_in().
[cross build only]
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Acked-by: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1272002193.5707.65.camel@minggr.sh.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/sparc/kernel/perf_event.c | 108
1 file changed, 61 insertions(+), 47 deletions(-)
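For context before the diff: the core perf code drives the new callbacks from its generic group_sched_in() path, which is what makes the sparc-specific hw_perf_group_sched_in() removed below redundant. The following is a condensed, hedged sketch of that calling sequence, simplified from the kernel/perf_event.c of this era (unwinding of partially scheduled siblings is omitted); it is not the verbatim kernel code.

```c
/*
 * Condensed sketch of the generic group_sched_in() path that drives the
 * start_txn/commit_txn/cancel_txn callbacks (simplified; error unwinding
 * of already-scheduled siblings omitted).
 */
static int group_sched_in(struct perf_event *group_event,
			  struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	struct perf_event *event;
	const struct pmu *pmu = group_event->pmu;
	bool txn = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	/* Open a transaction when the PMU provides one. */
	if (pmu->start_txn) {
		txn = true;
		pmu->start_txn(pmu);
	}

	/*
	 * event_sched_in() calls pmu->enable() for the leader and each
	 * sibling; with a transaction open, sparc_pmu_enable() only
	 * collects the event and skips the per-event schedulability test.
	 */
	if (event_sched_in(group_event, cpuctx, ctx))
		goto group_error;

	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx))
			goto group_error;
	}

	/* Validate the whole group in one shot. */
	if (!txn || !pmu->commit_txn(pmu))
		return 0;

group_error:
	/* Unwind already-scheduled events (omitted), then roll back. */
	if (txn)
		pmu->cancel_txn(pmu);
	return -EAGAIN;
}
```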
```diff
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e2771939341d..cf4ce263ff81 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -91,6 +91,8 @@ struct cpu_hw_events {
 
 	/* Enabled/disable state.  */
 	int enabled;
+
+	unsigned int group_flag;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -980,53 +982,6 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event)
-{
-	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = smp_processor_id();
-	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-	if (is_software_event(event))
-		event->pmu->enable(event);
-}
-
-int hw_perf_group_sched_in(struct perf_event *group_leader,
-			   struct perf_cpu_context *cpuctx,
-			   struct perf_event_context *ctx)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct perf_event *sub;
-	int n0, n;
-
-	if (!sparc_pmu)
-		return 0;
-
-	n0 = cpuc->n_events;
-	n = collect_events(group_leader, perf_max_events - n0,
-			   &cpuc->event[n0], &cpuc->events[n0],
-			   &cpuc->current_idx[n0]);
-	if (n < 0)
-		return -EAGAIN;
-	if (check_excludes(cpuc->event, n0, n))
-		return -EINVAL;
-	if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
-		return -EAGAIN;
-	cpuc->n_events = n0 + n;
-	cpuc->n_added += n;
-
-	cpuctx->active_oncpu += n;
-	n = 1;
-	event_sched_in(group_leader);
-	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
-		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub);
-			n++;
-		}
-	}
-	ctx->nr_active += n;
-
-	return 1;
-}
-
 static int sparc_pmu_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1044,11 +999,20 @@ static int sparc_pmu_enable(struct perf_event *event)
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	/*
+	 * If group events scheduling transaction was started,
+	 * skip the schedulability test here, it will be performed
+	 * at commit time (->commit_txn) as a whole
+	 */
+	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+		goto nocheck;
+
 	if (check_excludes(cpuc->event, n0, 1))
 		goto out;
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
 		goto out;
 
+nocheck:
 	cpuc->n_events++;
 	cpuc->n_added++;
 
@@ -1128,11 +1092,61 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
+/*
+ * Start group events scheduling transaction
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test, it will be performed at commit time
+ */
+static void sparc_pmu_start_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Stop group events scheduling transaction
+ * Clear the flag and pmu::enable() will perform the
+ * schedulability test.
+ */
+static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction
+ * Perform the group schedulability test as a whole
+ * Return 0 if success
+ */
+static int sparc_pmu_commit_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int n;
+
+	if (!sparc_pmu)
+		return -EINVAL;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	n = cpuc->n_events;
+	if (check_excludes(cpuc->event, 0, n))
+		return -EINVAL;
+	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
+		return -EAGAIN;
+
+	return 0;
+}
+
 static const struct pmu pmu = {
 	.enable		= sparc_pmu_enable,
 	.disable	= sparc_pmu_disable,
 	.read		= sparc_pmu_read,
 	.unthrottle	= sparc_pmu_unthrottle,
+	.start_txn	= sparc_pmu_start_txn,
+	.cancel_txn	= sparc_pmu_cancel_txn,
+	.commit_txn	= sparc_pmu_commit_txn,
 };
 
 const struct pmu *hw_perf_event_init(struct perf_event *event)
```
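The defer-then-validate control flow is easy to see in isolation: while the transaction flag is set, adds are accepted optimistically, and the constraint check runs once over the accumulated set at commit. Below is a minimal, self-contained model of that pattern; every name and the toy two-counter limit are invented for illustration, and none of this is kernel code.

```c
/*
 * Minimal self-contained model of the defer-then-validate pattern.
 * All names and the toy two-counter limit are invented; not kernel code.
 */
#include <stdio.h>

#define TXN_STARTED 0x1
#define MAX_EVENTS  4

struct cpu_events {
	int n_events;
	int event[MAX_EVENTS];
	unsigned int group_flag;
};

/* Stand-in for sparc_check_constraints(): allow at most two events. */
static int check_constraints(const int *event, int n)
{
	(void)event;
	return n > 2 ? -1 : 0;
}

/* Mirrors sparc_pmu_enable(): defer the test while a txn is open. */
static int event_add(struct cpu_events *c, int ev)
{
	if (c->n_events >= MAX_EVENTS)
		return -1;
	c->event[c->n_events] = ev;
	if (!(c->group_flag & TXN_STARTED) &&
	    check_constraints(c->event, c->n_events + 1))
		return -1;		/* immediate schedulability test */
	c->n_events++;
	return 0;
}

static void start_txn(struct cpu_events *c)  { c->group_flag |= TXN_STARTED; }
static void cancel_txn(struct cpu_events *c) { c->group_flag &= ~TXN_STARTED; }

/* Mirrors sparc_pmu_commit_txn(): validate the accumulated set at once. */
static int commit_txn(struct cpu_events *c)
{
	return check_constraints(c->event, c->n_events);
}

int main(void)
{
	struct cpu_events grouped = { 0 }, single = { 0 };

	/* Transactional path: adds succeed, the check runs at commit. */
	start_txn(&grouped);
	event_add(&grouped, 1);
	event_add(&grouped, 2);
	event_add(&grouped, 3);	/* accepted for now */
	if (commit_txn(&grouped))
		printf("group rejected as a whole at commit time\n");
	cancel_txn(&grouped);

	/* Non-transactional path: the third add fails immediately. */
	event_add(&single, 1);
	event_add(&single, 2);
	if (event_add(&single, 3))
		printf("third event rejected immediately (no txn)\n");
	return 0;
}
```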
