path: root/arch/sparc/kernel/perf_event.c
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--  arch/sparc/kernel/perf_event.c  125
1 file changed, 77 insertions(+), 48 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 68cb9b42088f..44faabc3c02c 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -14,6 +14,7 @@
 
 #include <linux/perf_event.h>
 #include <linux/kprobes.h>
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/kdebug.h>
 #include <linux/mutex.h>
@@ -91,6 +92,8 @@ struct cpu_hw_events {
 
 	/* Enabled/disable state.  */
 	int enabled;
+
+	unsigned int group_flag;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -654,6 +657,7 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 		cpuc->current_idx[i] = idx;
 
 		enc = perf_event_get_enc(cpuc->events[i]);
+		pcr &= ~mask_for_index(idx);
 		pcr |= event_encoding(enc, idx);
 	}
 out:
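
Note on the one-line fix above: when maybe_change_configuration() moves an event to a different counter index, the PCR bits belonging to that index still hold the previous event's encoding, and OR-ing the new encoding on top merges the two bit patterns. A minimal, self-contained sketch of the failure mode; the 8-bit-per-counter layout and helper bodies below are invented for illustration and are not sparc64's real PCR format:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout: each counter index owns an 8-bit event field.
     * Helpers with these names exist in the kernel, but their real
     * sparc64 bit positions differ; this is only a model. */
    static uint64_t mask_for_index(int idx)
    {
            return (uint64_t)0xff << (idx * 8);
    }

    static uint64_t event_encoding(uint64_t enc, int idx)
    {
            return enc << (idx * 8);
    }

    int main(void)
    {
            uint64_t pcr = event_encoding(0x3c, 0);   /* old event in slot 0 */

            /* Buggy update: OR alone merges old and new bits (0x3c|0x21 = 0x3d). */
            uint64_t buggy = pcr | event_encoding(0x21, 0);

            /* Fixed update, as in the hunk above: clear the slot, then OR. */
            pcr &= ~mask_for_index(0);
            pcr |= event_encoding(0x21, 0);

            printf("buggy=%#llx fixed=%#llx\n",
                   (unsigned long long)buggy, (unsigned long long)pcr);
            return 0;
    }

The same clear-then-set pattern applies to any packed control register shared by several counters.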
@@ -980,53 +984,6 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event)
-{
-	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = smp_processor_id();
-	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-	if (is_software_event(event))
-		event->pmu->enable(event);
-}
-
-int hw_perf_group_sched_in(struct perf_event *group_leader,
-			   struct perf_cpu_context *cpuctx,
-			   struct perf_event_context *ctx)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct perf_event *sub;
-	int n0, n;
-
-	if (!sparc_pmu)
-		return 0;
-
-	n0 = cpuc->n_events;
-	n = collect_events(group_leader, perf_max_events - n0,
-			   &cpuc->event[n0], &cpuc->events[n0],
-			   &cpuc->current_idx[n0]);
-	if (n < 0)
-		return -EAGAIN;
-	if (check_excludes(cpuc->event, n0, n))
-		return -EINVAL;
-	if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
-		return -EAGAIN;
-	cpuc->n_events = n0 + n;
-	cpuc->n_added += n;
-
-	cpuctx->active_oncpu += n;
-	n = 1;
-	event_sched_in(group_leader);
-	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
-		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub);
-			n++;
-		}
-	}
-	ctx->nr_active += n;
-
-	return 1;
-}
-
 static int sparc_pmu_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1044,11 +1001,20 @@ static int sparc_pmu_enable(struct perf_event *event)
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	/*
+	 * If a group event scheduling transaction was started, skip
+	 * the schedulability test here; it will be performed at
+	 * commit time (->commit_txn) as a whole.
+	 */
+	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+		goto nocheck;
+
 	if (check_excludes(cpuc->event, n0, 1))
 		goto out;
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
 		goto out;
 
+nocheck:
 	cpuc->n_events++;
 	cpuc->n_added++;
 
@@ -1128,11 +1094,61 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
+/*
+ * Start group events scheduling transaction.
+ * Set the flag so that pmu::enable() does not perform the
+ * schedulability test; it will be performed at commit time.
+ */
+static void sparc_pmu_start_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Stop group events scheduling transaction.
+ * Clear the flag, so that pmu::enable() performs the
+ * schedulability test again.
+ */
+static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction.
+ * Perform the group schedulability test as a whole.
+ * Return 0 on success.
+ */
+static int sparc_pmu_commit_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuc;
+	int n;
+
+	if (!sparc_pmu)
+		return -EINVAL;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	n = cpuc->n_events;
+	if (check_excludes(cpuc->event, 0, n))
+		return -EINVAL;
+	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
+		return -EAGAIN;
+
+	return 0;
+}
+
 static const struct pmu pmu = {
 	.enable = sparc_pmu_enable,
 	.disable = sparc_pmu_disable,
 	.read = sparc_pmu_read,
 	.unthrottle = sparc_pmu_unthrottle,
+	.start_txn = sparc_pmu_start_txn,
+	.cancel_txn = sparc_pmu_cancel_txn,
+	.commit_txn = sparc_pmu_commit_txn,
 };
 
 const struct pmu *hw_perf_event_init(struct perf_event *event)
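
The new .start_txn/.cancel_txn/.commit_txn callbacks replace the removed hw_perf_group_sched_in(): instead of each architecture scheduling whole groups itself, the perf core opens a transaction, calls ->enable() once per event (which, as in sparc_pmu_enable() above, skips the per-event schedulability test while the flag is set), then validates the batch in a single ->commit_txn() call, or rolls back via ->cancel_txn(). A runnable userspace sketch of that protocol under simplified assumptions; TXN_STARTED, MAX_EVENTS and the "fits in MAX_EVENTS counters" rule are stand-ins, not the kernel's:

    #include <stdio.h>

    /* Invented stand-ins for the kernel's per-cpu state. */
    #define TXN_STARTED 0x1
    #define MAX_EVENTS  4

    static unsigned int group_flag;
    static int n_events;
    static int events[16];

    static void start_txn(void)  { group_flag |= TXN_STARTED; }
    static void cancel_txn(void) { group_flag &= ~TXN_STARTED; }

    static int pmu_enable(int ev)
    {
            /* Outside a transaction, test each event as it is added. */
            if (!(group_flag & TXN_STARTED) && n_events >= MAX_EVENTS)
                    return -1;
            events[n_events++] = ev;   /* inside one, just collect it */
            return 0;
    }

    static int commit_txn(void)
    {
            cancel_txn();                              /* transaction ends either way */
            return n_events <= MAX_EVENTS ? 0 : -1;    /* test the group as a whole */
    }

    int main(void)
    {
            start_txn();
            for (int i = 0; i < 3; i++)
                    pmu_enable(i);             /* no per-event test while flag is set */
            printf("commit: %d\n", commit_txn());      /* prints 0: the group fits */
            return 0;
    }

Deferring the constraint check to commit time is what lets the core add a whole group atomically: either every member fits the counters, or none of them is scheduled.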
@@ -1276,6 +1292,9 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 				       struct perf_callchain_entry *entry)
 {
 	unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	int graph = 0;
+#endif
 
 	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	callchain_store(entry, regs->tpc);
@@ -1303,6 +1322,16 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 			fp = (unsigned long)sf->fp + STACK_BIAS;
 		}
 		callchain_store(entry, pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+			int index = current->curr_ret_stack;
+			if (current->ret_stack && index >= graph) {
+				pc = current->ret_stack[index - graph].ret;
+				callchain_store(entry, pc);
+				graph++;
+			}
+		}
+#endif
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
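Background on the CONFIG_FUNCTION_GRAPH_TRACER block above: the graph tracer patches return addresses on the stack to point at the return_to_handler trampoline, so a naive unwind would record the trampoline instead of the real caller. On sparc64 the saved address is the call instruction itself and control resumes at pc + 8 (past the delay slot), hence the (pc + 8UL) == &return_to_handler test; each hit is then mapped back through the real addresses saved in current->ret_stack. A runnable userspace model of that substitution, with invented addresses and a flat array standing in for the kernel's ret_stack entries:

    #include <stdio.h>

    #define TRAMPOLINE 0xdeadUL    /* stand-in for &return_to_handler */

    /* Invented shadow stack: the real return addresses the tracer
     * saved, most recent entry at curr_ret_stack. */
    static unsigned long ret_stack[] = { 0x1000, 0x2000 };
    static int curr_ret_stack = 1;

    static unsigned long fixup(unsigned long pc, int *graph)
    {
            if (pc == TRAMPOLINE && curr_ret_stack >= *graph) {
                    /* The i-th trampoline hit (counting from the top of
                     * the stack) maps to the i-th most recent saved
                     * address, mirroring "index - graph" above. */
                    pc = ret_stack[curr_ret_stack - (*graph)++];
            }
            return pc;
    }

    int main(void)
    {
            int graph = 0;
            unsigned long frames[] = { 0x500, TRAMPOLINE, TRAMPOLINE, 0x900 };

            for (int i = 0; i < 4; i++)
                    printf("%#lx\n", fixup(frames[i], &graph));
            /* prints 0x500, 0x2000, 0x1000, 0x900 */
            return 0;
    }
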
@@ -1337,7 +1366,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->tpc);
 
-	ufp = regs->u_regs[UREG_I6];
+	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
 		struct sparc_stackf32 *usf, sf;
 		unsigned long pc;
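
The final hunk masks the 32-bit task's saved frame pointer: u_regs[] holds full 64-bit register values, and for a compat task the upper half of %i6 may carry stale or sign-extended bits, which would turn the user stack walk's copy-from-user addresses into garbage. A two-line illustration with an invented register value (assumes a 64-bit unsigned long):

    #include <stdio.h>

    int main(void)
    {
            /* Invented value: a 32-bit frame pointer that picked up
             * sign-extension in the upper half of the 64-bit register. */
            unsigned long ufp = 0xffffffffffff8000UL;

            printf("raw:    %#lx\n", ufp);                 /* bogus as a 64-bit address */
            printf("masked: %#lx\n", ufp & 0xffffffffUL);  /* usable 0xffff8000 */
            return 0;
    }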