Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/Makefile      |   2
-rw-r--r--  arch/sparc/kernel/jump_label.c  |  47
-rw-r--r--  arch/sparc/kernel/module.c      |   6
-rw-r--r--  arch/sparc/kernel/pcr.c         |   8
-rw-r--r--  arch/sparc/kernel/perf_event.c  | 240
5 files changed, 182 insertions, 121 deletions
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 0c2dc1f24a9a..599398fbbc7c 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
 
 pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64)	+= $(pc--y)
+
+obj-$(CONFIG_SPARC64)	+= jump_label.o
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644
index 000000000000..ea2dafc93d78
--- /dev/null
+++ b/arch/sparc/kernel/jump_label.c
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	u32 val;
+	u32 *insn = (u32 *) (unsigned long) entry->code;
+
+	if (type == JUMP_LABEL_ENABLE) {
+		s32 off = (s32)entry->target - (s32)entry->code;
+
+#ifdef CONFIG_SPARC64
+		/* ba,pt %xcc, . + (off << 2) */
+		val = 0x10680000 | ((u32) off >> 2);
+#else
+		/* ba . + (off << 2) */
+		val = 0x10800000 | ((u32) off >> 2);
+#endif
+	} else {
+		val = 0x01000000;
+	}
+
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+	*insn = val;
+	flushi(insn);
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+	u32 *insn_p = (u32 *) (unsigned long) addr;
+
+	*insn_p = 0x01000000;
+	flushi(insn_p);
+}
+
+#endif
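
Note: the magic constants above follow the SPARC format-2 branch encoding. The opcode for "ba,pt %xcc" (0x10680000 on sparc64) or plain "ba" (0x10800000 on 32-bit) is or'd with the word displacement, i.e. the byte offset from branch site to target shifted right by two, and 0x01000000 is a nop (sethi 0, %g0). A minimal userspace sketch of the same arithmetic, not part of the patch; the disp19/disp22 masks are shown for illustration, the kernel code omits them because jump-label displacements are known to fit:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the words arch_jump_label_transform() writes.
 * The displacement field holds (target - branch_site) / 4.
 */
static uint32_t sparc64_ba_pt(int32_t byte_off)
{
	/* ba,pt %xcc: op2=BPcc, cc=%xcc, p=1, 19-bit displacement */
	return 0x10680000 | (((uint32_t)byte_off >> 2) & 0x7ffff);
}

static uint32_t sparc32_ba(int32_t byte_off)
{
	/* ba: op2=Bicc, 22-bit displacement */
	return 0x10800000 | (((uint32_t)byte_off >> 2) & 0x3fffff);
}

int main(void)
{
	/* branch forward 16 bytes, plus the nop used when disabled */
	printf("%08x %08x %08x\n",
	       sparc64_ba_pt(16), sparc32_ba(16), 0x01000000);
	return 0;
}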
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index f848aadf54dc..ee3c7dde8d9f 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -18,6 +18,9 @@
 #include <asm/spitfire.h>
 
 #ifdef CONFIG_SPARC64
+
+#include <linux/jump_label.h>
+
 static void *module_map(unsigned long size)
 {
 	struct vm_struct *area;
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 		   const Elf_Shdr *sechdrs,
 		   struct module *me)
 {
+	/* make jump label nops */
+	jump_label_apply_nops(me);
+
 	/* Cheetah's I-cache is fully coherent.  */
 	if (tlb_type == spitfire) {
 		unsigned long va;
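
Note: jump_label_apply_nops() lives in the generic code (kernel/jump_label.c), not in this patch. Roughly, it walks the module's __jump_table entries and nops every branch site through the early poke hook added above, so a freshly loaded module starts with all static branches disabled. A simplified sketch of that caller, under the assumption that struct module carries jump_entries/num_jump_entries fields as in this series:

/* Simplified sketch of the generic side (kernel/jump_label.c). */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter;

	/* nothing to do for modules without jump label entries */
	if (!mod->num_jump_entries)
		return;

	for (iter = mod->jump_entries;
	     iter < mod->jump_entries + mod->num_jump_entries; iter++)
		arch_jump_label_text_poke_early(iter->code);
}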
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c4a6a50b4849..b87873c0e8ea 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <linux/ftrace.h>
 
 #include <asm/pil.h>
@@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-#ifdef CONFIG_PERF_EVENTS
-	perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+	irq_work_run();
 #endif
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
-void set_perf_event_pending(void)
+void arch_irq_work_raise(void)
 {
 	set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }
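
Note: after this conversion pcr.c no longer knows about perf at all. Any subsystem can queue work from NMI context with irq_work_queue(), which raises the softint via arch_irq_work_raise() above, and the queued callbacks are later drained by irq_work_run() in deferred_pcr_work_irq(). A minimal sketch of a client; the example_* names are made up for illustration:

#include <linux/irq_work.h>

static void example_func(struct irq_work *work)
{
	/* runs from the PIL_DEFERRED_PCR_WORK softint, outside NMI context */
}

static struct irq_work example_work;

static void example_init(void)
{
	init_irq_work(&example_work, example_func);
}

static void example_from_nmi(void)
{
	/* safe from the PMU NMI handler; raises the softint above */
	irq_work_queue(&example_work);
}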
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6318e622cfb0..0d6deb55a2ae 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
 		enc = perf_event_get_enc(cpuc->events[i]);
 		pcr &= ~mask_for_index(idx);
-		pcr |= event_encoding(enc, idx);
+		if (hwc->state & PERF_HES_STOPPED)
+			pcr |= nop_for_index(idx);
+		else
+			pcr |= event_encoding(enc, idx);
 	}
 out:
 	return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 pcr;
@@ -691,7 +694,7 @@ void hw_perf_enable(void)
 	pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
@@ -710,19 +713,65 @@ void hw_perf_disable(void)
 	pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+			      struct perf_event *event)
+{
+	int i;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (cpuc->event[i] == event)
+			break;
+	}
+	BUG_ON(i == cpuc->n_events);
+	return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		sparc_perf_event_set_period(event, &event->hw, idx);
+	}
+
+	event->hw.state = 0;
+
+	sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sparc_pmu_disable_event(cpuc, &event->hw, idx);
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+		sparc_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 	int i;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
-			int idx = cpuc->current_idx[i];
+			/* Absorb the final count and turn off the
+			 * event.
+			 */
+			sparc_pmu_stop(event, PERF_EF_UPDATE);
 
 			/* Shift remaining entries down into
 			 * the existing slot.
@@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event)
 					cpuc->current_idx[i];
 			}
 
-			/* Absorb the final count and turn off the
-			 * event.
-			 */
-			sparc_pmu_disable_event(cpuc, hwc, idx);
-			barrier();
-			sparc_perf_event_update(event, hwc, idx);
-
 			perf_event_update_userpage(event);
 
 			cpuc->n_events--;
@@ -748,23 +790,10 @@ static void sparc_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-			      struct perf_event *event)
-{
-	int i;
-
-	for (i = 0; i < cpuc->n_events; i++) {
-		if (cpuc->event[i] == event)
-			break;
-	}
-	BUG_ON(i == cpuc->n_events);
-	return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event)
 	sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	int idx = active_event_index(cpuc, event);
-	struct hw_perf_event *hwc = &event->hw;
-
-	sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -877,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
 	if (!n_ev)
 		return 0;
 
-	if (n_ev > perf_max_events)
+	if (n_ev > MAX_HWEVENTS)
 		return -1;
 
 	msk0 = perf_event_get_msk(events[0]);
@@ -984,23 +1004,27 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0, ret = -EAGAIN;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
-	if (n0 >= perf_max_events)
+	if (n0 >= MAX_HWEVENTS)
 		goto out;
 
 	cpuc->event[n0] = event;
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	event->hw.state = PERF_HES_UPTODATE;
+	if (!(ef_flags & PERF_EF_START))
+		event->hw.state |= PERF_HES_STOPPED;
+
 	/*
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be peformed
@@ -1020,12 +1044,12 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,22 +1062,33 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
-	pmap = NULL;
-	if (attr->type == PERF_TYPE_HARDWARE) {
+	switch (attr->type) {
+	case PERF_TYPE_HARDWARE:
 		if (attr->config >= sparc_pmu->max_events)
 			return -EINVAL;
 		pmap = sparc_pmu->event_map(attr->config);
-	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		break;
+
+	case PERF_TYPE_HW_CACHE:
 		pmap = sparc_map_cache_event(attr->config);
 		if (IS_ERR(pmap))
 			return PTR_ERR(pmap);
-	} else if (attr->type != PERF_TYPE_RAW)
-		return -EOPNOTSUPP;
+		break;
+
+	case PERF_TYPE_RAW:
+		pmap = NULL;
+		break;
+
+	default:
+		return -ENOENT;
+
+	}
 
 	if (pmap) {
 		hwc->event_base = perf_event_encode(pmap);
 	} else {
-		/* User gives us "(encoding << 16) | pic_mask" for
+		/*
+		 * User gives us "(encoding << 16) | pic_mask" for
 		 * PERF_TYPE_RAW events.
 		 */
 		hwc->event_base = attr->config;
@@ -1071,7 +1106,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
-				   perf_max_events - 1,
+				   MAX_HWEVENTS - 1,
 				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;
@@ -1107,10 +1142,11 @@ static int __hw_perf_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1119,11 +1155,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -1131,7 +1168,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n;
@@ -1147,28 +1184,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 		return -EAGAIN;
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= sparc_pmu_enable,
-	.disable	= sparc_pmu_disable,
+static struct pmu pmu = {
+	.pmu_enable	= sparc_pmu_enable,
+	.pmu_disable	= sparc_pmu_disable,
+	.event_init	= sparc_pmu_event_init,
+	.add		= sparc_pmu_add,
+	.del		= sparc_pmu_del,
+	.start		= sparc_pmu_start,
+	.stop		= sparc_pmu_stop,
 	.read		= sparc_pmu_read,
-	.unthrottle	= sparc_pmu_unthrottle,
 	.start_txn	= sparc_pmu_start_txn,
 	.cancel_txn	= sparc_pmu_cancel_txn,
 	.commit_txn	= sparc_pmu_commit_txn,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = __hw_perf_event_init(event);
-
-	if (err)
-		return ERR_PTR(err);
-	return &pmu;
-}
-
 void perf_event_print_debug(void)
 {
 	unsigned long flags;
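
Note: the perf_pmu_disable() added to start_txn above is deliberately unbalanced within that function; it is paired with the perf_pmu_enable() in whichever of commit_txn or cancel_txn ends the transaction. A rough sketch of the calling sequence the core uses for group scheduling (simplified; unwinding of already-added events is elided):

/* Rough sketch of how the core drives the transaction API. */
static int example_group_sched_in(struct pmu *pmu,
				  struct perf_event **evts, int n)
{
	int i;

	pmu->start_txn(pmu);		/* disables the PMU */
	for (i = 0; i < n; i++) {
		if (pmu->add(evts[i], PERF_EF_START))
			goto fail;
	}
	if (!pmu->commit_txn(pmu))	/* schedulability test, re-enables */
		return 0;
fail:
	pmu->cancel_txn(pmu);		/* undoes the disable */
	return -EAGAIN;
}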
@@ -1244,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			sparc_pmu_disable_event(cpuc, hwc, idx);
+			sparc_pmu_stop(event, 0);
 	}
 
 	return NOTIFY_STOP;
@@ -1285,28 +1318,21 @@ void __init init_hw_perf_events(void)
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	/* All sparc64 PMUs currently have 2 events.  */
-	perf_max_events = 2;
-
+	perf_pmu_register(&pmu);
 	register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
-
-static void perf_callchain_kernel(struct pt_regs *regs,
-				  struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int graph = 0;
 #endif
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
-	callchain_store(entry, regs->tpc);
+	stack_trace_flush();
+
+	perf_callchain_store(entry, regs->tpc);
 
 	ksp = regs->u_regs[UREG_I6];
 	fp = ksp + STACK_BIAS;
@@ -1330,13 +1356,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 			pc = sf->callers_pc;
 			fp = (unsigned long)sf->fp + STACK_BIAS;
 		}
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
 			int index = current->curr_ret_stack;
 			if (current->ret_stack && index >= graph) {
 				pc = current->ret_stack[index - graph].ret;
-				callchain_store(entry, pc);
+				perf_callchain_store(entry, pc);
 				graph++;
 			}
 		}
@@ -1344,13 +1370,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, regs->tpc);
+	perf_callchain_store(entry, regs->tpc);
 
 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
 	do {
@@ -1363,17 +1388,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, regs->tpc);
+	perf_callchain_store(entry, regs->tpc);
 
 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
@@ -1386,34 +1410,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp;
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-	if (!user_mode(regs)) {
-		stack_trace_flush();
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs) {
-		flushw_user();
-		if (test_thread_flag(TIF_32BIT))
-			perf_callchain_user_32(regs, entry);
-		else
-			perf_callchain_user_64(regs, entry);
-	}
-	return entry;
+	flushw_user();
+	if (test_thread_flag(TIF_32BIT))
+		perf_callchain_user_32(entry, regs);
+	else
+		perf_callchain_user_64(entry, regs);
 }
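
Note: the per-arch callchain boilerplate removed here (the per-cpu entry buffer, the entry->nr reset, the PERF_CONTEXT_KERNEL/USER markers, and the user/kernel dispatch) moved into the generic perf code with this series; the arch hooks only record addresses now. perf_callchain_store() is roughly the old callchain_store() promoted to a shared helper, along these lines:

/* Roughly the shared helper the arch code now calls
 * (include/linux/perf_event.h as of this series).
 */
static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}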