author:    David S. Miller <davem@davemloft.net>  2010-06-26 13:27:00 -0400
committer: David S. Miller <davem@davemloft.net>  2010-06-26 13:27:00 -0400
commit:    c67dda14389205f0a223c5089307495290939b3b (patch)
tree:      fad0bb26b28703d02a22ebdd44d94eabac4a2ade  /arch/sparc/kernel/perf_event.c
parent:    43bc2db47292a824152145253b1dd2847e7312a3 (diff)
parent:    7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)

    Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')

 arch/sparc/kernel/perf_event.c | 108
 1 file changed, 61 insertions(+), 47 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2e1698332b6d..44faabc3c02c 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -92,6 +92,8 @@ struct cpu_hw_events {
 
 	/* Enabled/disable state.  */
 	int enabled;
+
+	unsigned int group_flag;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -982,53 +984,6 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event)
-{
-	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = smp_processor_id();
-	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-	if (is_software_event(event))
-		event->pmu->enable(event);
-}
-
-int hw_perf_group_sched_in(struct perf_event *group_leader,
-			   struct perf_cpu_context *cpuctx,
-			   struct perf_event_context *ctx)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct perf_event *sub;
-	int n0, n;
-
-	if (!sparc_pmu)
-		return 0;
-
-	n0 = cpuc->n_events;
-	n = collect_events(group_leader, perf_max_events - n0,
-			   &cpuc->event[n0], &cpuc->events[n0],
-			   &cpuc->current_idx[n0]);
-	if (n < 0)
-		return -EAGAIN;
-	if (check_excludes(cpuc->event, n0, n))
-		return -EINVAL;
-	if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
-		return -EAGAIN;
-	cpuc->n_events = n0 + n;
-	cpuc->n_added += n;
-
-	cpuctx->active_oncpu += n;
-	n = 1;
-	event_sched_in(group_leader);
-	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
-		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub);
-			n++;
-		}
-	}
-	ctx->nr_active += n;
-
-	return 1;
-}
-
 static int sparc_pmu_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
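
The removed hw_perf_group_sched_in() was sparc's private copy of group scheduling. With the transaction hooks added further down, the perf core drives the same sequence itself, and only when the pmu provides the hooks. A rough caller-side sketch of that protocol follows; this is an approximation, not the verbatim core code (event_sched_in() and the surrounding unwind logic live in kernel/perf_event.c):

	pmu->start_txn(pmu);			/* defer per-event checks */

	if (event_sched_in(group_leader, cpuctx, ctx))
		goto group_error;		/* leader did not go on */

	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
		if (event_sched_in(sub, cpuctx, ctx))
			goto group_error;
	}

	if (!pmu->commit_txn(pmu))		/* validate group as a whole */
		return 0;			/* success */

group_error:
	/* ...unschedule the events that did make it in, then: */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
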
@@ -1046,11 +1001,20 @@ static int sparc_pmu_enable(struct perf_event *event)
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	/*
+	 * If a group event scheduling transaction was started,
+	 * skip the schedulability test here; it will be performed
+	 * at commit time (->commit_txn) as a whole.
+	 */
+	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+		goto nocheck;
+
 	if (check_excludes(cpuc->event, n0, 1))
 		goto out;
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
 		goto out;
 
+nocheck:
 	cpuc->n_events++;
 	cpuc->n_added++;
 
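
To see the nocheck path in isolation: while a transaction is open, sparc_pmu_enable() only appends the event to the per-cpu arrays, and the exclude/constraint checks run once over the whole set at commit. A minimal, self-contained sketch of this deferred-validation pattern (hypothetical names and a toy capacity constraint, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define TXN_STARTED	0x1
	#define MAX_HW_EVENTS	4	/* toy stand-in for counter limits */

	struct toy_cpu_events {
		unsigned int group_flag;
		int n_events;
		int event[MAX_HW_EVENTS];
	};

	/* Stand-in for sparc_check_constraints(): capacity check only. */
	static bool constraints_ok(const struct toy_cpu_events *c, int n)
	{
		return n <= MAX_HW_EVENTS;
	}

	static int toy_enable(struct toy_cpu_events *c, int ev)
	{
		if (c->n_events >= MAX_HW_EVENTS)
			return -1;
		c->event[c->n_events] = ev;

		/* Transaction open: queue only, validate at commit. */
		if (c->group_flag & TXN_STARTED)
			goto nocheck;

		if (!constraints_ok(c, c->n_events + 1))
			return -1;
	nocheck:
		c->n_events++;
		return 0;
	}

	static int toy_commit_txn(struct toy_cpu_events *c)
	{
		c->group_flag &= ~TXN_STARTED;
		return constraints_ok(c, c->n_events) ? 0 : -1;
	}

	int main(void)
	{
		struct toy_cpu_events c = { .group_flag = TXN_STARTED };

		for (int i = 0; i < 3; i++)
			toy_enable(&c, i);	/* no per-event checks */
		printf("commit: %d\n", toy_commit_txn(&c));
		return 0;
	}
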
@@ -1130,11 +1094,61 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
+/*
+ * Start a group event scheduling transaction.
+ * Set the flag so that pmu::enable() does not perform the
+ * schedulability test; it will be performed at commit time.
+ */
+static void sparc_pmu_start_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Stop a group event scheduling transaction.
+ * Clear the flag so that pmu::enable() performs the
+ * schedulability test again.
+ */
+static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit a group event scheduling transaction.
+ * Perform the group schedulability test as a whole.
+ * Return 0 on success.
+ */
+static int sparc_pmu_commit_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int n;
+
+	if (!sparc_pmu)
+		return -EINVAL;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	n = cpuc->n_events;
+	if (check_excludes(cpuc->event, 0, n))
+		return -EINVAL;
+	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
+		return -EAGAIN;
+
+	return 0;
+}
+
 static const struct pmu pmu = {
 	.enable		= sparc_pmu_enable,
 	.disable	= sparc_pmu_disable,
 	.read		= sparc_pmu_read,
 	.unthrottle	= sparc_pmu_unthrottle,
+	.start_txn	= sparc_pmu_start_txn,
+	.cancel_txn	= sparc_pmu_cancel_txn,
+	.commit_txn	= sparc_pmu_commit_txn,
 };
 
 const struct pmu *hw_perf_event_init(struct perf_event *event)
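
From userspace, the observable effect of this machinery is all-or-nothing scheduling for event groups created through perf_event_open(): a sibling joins its leader via the group_fd argument, and the group counts only when every member fits on the PMU at once. A minimal sketch of building such a group (event choices arbitrary, error handling elided):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	static int perf_open(struct perf_event_attr *attr, int group_fd)
	{
		/* pid = 0 (this task), cpu = -1 (any cpu), flags = 0 */
		return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		int leader, sibling;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.disabled = 1;	/* enable the whole group later */

		leader = perf_open(&attr, -1);		/* group leader */

		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 0;
		sibling = perf_open(&attr, leader);	/* same group */

		printf("leader fd=%d, sibling fd=%d\n", leader, sibling);
		return 0;
	}
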