Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--  arch/sparc/kernel/perf_event.c  | 248
 1 file changed, 129 insertions(+), 119 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6318e622cfb0..2cb0e1c001e2 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -26,6 +26,7 @@
 #include <asm/nmi.h>
 #include <asm/pcr.h>
 
+#include "kernel.h"
 #include "kstack.h"
 
 /* Sparc64 chips have two performance counters, 32-bits each, with
@@ -658,13 +659,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
 		enc = perf_event_get_enc(cpuc->events[i]);
 		pcr &= ~mask_for_index(idx);
-		pcr |= event_encoding(enc, idx);
+		if (hwc->state & PERF_HES_STOPPED)
+			pcr |= nop_for_index(idx);
+		else
+			pcr |= event_encoding(enc, idx);
 	}
 out:
 	return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 pcr;
@@ -691,7 +695,7 @@ void hw_perf_enable(void)
 	pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
@@ -710,19 +714,65 @@ void hw_perf_disable(void)
 	pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+			      struct perf_event *event)
+{
+	int i;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (cpuc->event[i] == event)
+			break;
+	}
+	BUG_ON(i == cpuc->n_events);
+	return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		sparc_perf_event_set_period(event, &event->hw, idx);
+	}
+
+	event->hw.state = 0;
+
+	sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sparc_pmu_disable_event(cpuc, &event->hw, idx);
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+		sparc_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 	int i;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
-			int idx = cpuc->current_idx[i];
+			/* Absorb the final count and turn off the
+			 * event.
+			 */
+			sparc_pmu_stop(event, PERF_EF_UPDATE);
 
 			/* Shift remaining entries down into
 			 * the existing slot.
@@ -734,13 +784,6 @@ static void sparc_pmu_disable(struct perf_event *event)
 					cpuc->current_idx[i];
 			}
 
-			/* Absorb the final count and turn off the
-			 * event.
-			 */
-			sparc_pmu_disable_event(cpuc, hwc, idx);
-			barrier();
-			sparc_perf_event_update(event, hwc, idx);
-
 			perf_event_update_userpage(event);
 
 			cpuc->n_events--;
@@ -748,23 +791,10 @@ static void sparc_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-			      struct perf_event *event)
-{
-	int i;
-
-	for (i = 0; i < cpuc->n_events; i++) {
-		if (cpuc->event[i] == event)
-			break;
-	}
-	BUG_ON(i == cpuc->n_events);
-	return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +804,6 @@ static void sparc_pmu_read(struct perf_event *event)
 	sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	int idx = active_event_index(cpuc, event);
-	struct hw_perf_event *hwc = &event->hw;
-
-	sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -877,7 +898,7 @@ static int sparc_check_constraints(struct perf_event **evts,
 	if (!n_ev)
 		return 0;
 
-	if (n_ev > perf_max_events)
+	if (n_ev > MAX_HWEVENTS)
 		return -1;
 
 	msk0 = perf_event_get_msk(events[0]);
@@ -984,26 +1005,30 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0, ret = -EAGAIN;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
-	if (n0 >= perf_max_events)
+	if (n0 >= MAX_HWEVENTS)
 		goto out;
 
 	cpuc->event[n0] = event;
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	event->hw.state = PERF_HES_UPTODATE;
+	if (!(ef_flags & PERF_EF_START))
+		event->hw.state |= PERF_HES_STOPPED;
+
 	/*
 	 * If group events scheduling transaction was started,
-	 * skip the schedulability test here, it will be peformed
+	 * skip the schedulability test here, it will be performed
 	 * at commit time(->commit_txn) as a whole
 	 */
 	if (cpuc->group_flag & PERF_EVENT_TXN)
@@ -1020,12 +1045,12 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,22 +1063,33 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
-	pmap = NULL;
-	if (attr->type == PERF_TYPE_HARDWARE) {
+	switch (attr->type) {
+	case PERF_TYPE_HARDWARE:
 		if (attr->config >= sparc_pmu->max_events)
 			return -EINVAL;
 		pmap = sparc_pmu->event_map(attr->config);
-	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		break;
+
+	case PERF_TYPE_HW_CACHE:
 		pmap = sparc_map_cache_event(attr->config);
 		if (IS_ERR(pmap))
 			return PTR_ERR(pmap);
-	} else if (attr->type != PERF_TYPE_RAW)
-		return -EOPNOTSUPP;
+		break;
+
+	case PERF_TYPE_RAW:
+		pmap = NULL;
+		break;
+
+	default:
+		return -ENOENT;
+
+	}
 
 	if (pmap) {
 		hwc->event_base = perf_event_encode(pmap);
 	} else {
-		/* User gives us "(encoding << 16) | pic_mask" for
+		/*
+		 * User gives us "(encoding << 16) | pic_mask" for
 		 * PERF_TYPE_RAW events.
 		 */
 		hwc->event_base = attr->config;
@@ -1071,7 +1107,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
-				   perf_max_events - 1,
+				   MAX_HWEVENTS - 1,
 				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;
@@ -1107,10 +1143,11 @@ static int __hw_perf_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1119,11 +1156,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -1131,7 +1169,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n;
@@ -1147,28 +1185,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 		return -EAGAIN;
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= sparc_pmu_enable,
-	.disable	= sparc_pmu_disable,
+static struct pmu pmu = {
+	.pmu_enable	= sparc_pmu_enable,
+	.pmu_disable	= sparc_pmu_disable,
+	.event_init	= sparc_pmu_event_init,
+	.add		= sparc_pmu_add,
+	.del		= sparc_pmu_del,
+	.start		= sparc_pmu_start,
+	.stop		= sparc_pmu_stop,
 	.read		= sparc_pmu_read,
-	.unthrottle	= sparc_pmu_unthrottle,
 	.start_txn	= sparc_pmu_start_txn,
 	.cancel_txn	= sparc_pmu_cancel_txn,
 	.commit_txn	= sparc_pmu_commit_txn,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = __hw_perf_event_init(event);
-
-	if (err)
-		return ERR_PTR(err);
-	return &pmu;
-}
-
 void perf_event_print_debug(void)
 {
 	unsigned long flags;
@@ -1244,7 +1278,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			sparc_pmu_disable_event(cpuc, hwc, idx);
+			sparc_pmu_stop(event, 0);
 	}
 
 	return NOTIFY_STOP;
@@ -1274,39 +1308,35 @@ static bool __init supported_pmu(void)
 	return false;
 }
 
-void __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
 {
 	pr_info("Performance events: ");
 
 	if (!supported_pmu()) {
 		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
-		return;
+		return 0;
 	}
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	/* All sparc64 PMUs currently have 2 events. */
-	perf_max_events = 2;
-
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	register_die_notifier(&perf_event_nmi_notifier);
-}
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
+	return 0;
 }
+early_initcall(init_hw_perf_events);
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-				  struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int graph = 0;
 #endif
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
-	callchain_store(entry, regs->tpc);
+	stack_trace_flush();
+
+	perf_callchain_store(entry, regs->tpc);
 
 	ksp = regs->u_regs[UREG_I6];
 	fp = ksp + STACK_BIAS;
@@ -1330,13 +1360,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 			pc = sf->callers_pc;
 			fp = (unsigned long)sf->fp + STACK_BIAS;
 		}
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
 			int index = current->curr_ret_stack;
 			if (current->ret_stack && index >= graph) {
 				pc = current->ret_stack[index - graph].ret;
-				callchain_store(entry, pc);
+				perf_callchain_store(entry, pc);
 				graph++;
 			}
 		}
@@ -1344,13 +1374,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, regs->tpc);
+	perf_callchain_store(entry, regs->tpc);
 
 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
 	do {
@@ -1363,17 +1392,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, regs->tpc);
+	perf_callchain_store(entry, regs->tpc);
 
 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
@@ -1386,34 +1414,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp;
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-	if (!user_mode(regs)) {
-		stack_trace_flush();
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs) {
-		flushw_user();
-		if (test_thread_flag(TIF_32BIT))
-			perf_callchain_user_32(regs, entry);
-		else
-			perf_callchain_user_64(regs, entry);
-	}
-	return entry;
+	flushw_user();
+	if (test_thread_flag(TIF_32BIT))
+		perf_callchain_user_32(entry, regs);
+	else
+		perf_callchain_user_64(entry, regs);
 }