Diffstat (limited to 'arch/sparc')
 arch/sparc/Kconfig              |  2
 arch/sparc/kernel/Makefile      |  1
 arch/sparc/kernel/ftrace.c      | 58
 arch/sparc/kernel/irq_64.c      |  3
 arch/sparc/kernel/kgdb_64.c     |  3
 arch/sparc/kernel/pcr.c         |  3
 arch/sparc/kernel/smp_64.c      | 11
 arch/sparc/kernel/time_64.c     |  3
 arch/sparc/kernel/vmlinux.lds.S |  1
 arch/sparc/lib/mcount.S         | 62
 10 files changed, 132 insertions(+), 15 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 035304c30ab4..9908d477ccd9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,8 @@ config SPARC64
         def_bool 64BIT
         select ARCH_SUPPORTS_MSI
         select HAVE_FUNCTION_TRACER
+        select HAVE_FUNCTION_GRAPH_TRACER
+        select HAVE_FUNCTION_GRAPH_FP_TEST
         select HAVE_FUNCTION_TRACE_MCOUNT_TEST
         select HAVE_KRETPROBES
         select HAVE_KPROBES
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 1b35ed6be4d9..0c2dc1f24a9a 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
 
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 obj-$(CONFIG_EARLYFB) += btext.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 2ea6e4ff7fd2..03ab022e51c5 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
         return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+        unsigned long ip = (unsigned long)(&ftrace_graph_call);
+        u32 old, new;
+
+        old = *(u32 *) &ftrace_graph_call;
+        new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+        return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+        unsigned long ip = (unsigned long)(&ftrace_graph_call);
+        u32 old, new;
+
+        old = *(u32 *) &ftrace_graph_call;
+        new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+        return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+                                    unsigned long self_addr,
+                                    unsigned long frame_pointer)
+{
+        unsigned long return_hooker = (unsigned long) &return_to_handler;
+        struct ftrace_graph_ent trace;
+
+        if (unlikely(atomic_read(&current->tracing_graph_pause)))
+                return parent + 8UL;
+
+        if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+                                     frame_pointer) == -EBUSY)
+                return parent + 8UL;
+
+        trace.func = self_addr;
+
+        /* Only trace if the calling function expects to */
+        if (!ftrace_graph_entry(&trace)) {
+                current->curr_ret_stack--;
+                return parent + 8UL;
+        }
+
+        return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
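
The enable/disable helpers above patch the ftrace_graph_call site with either a call to ftrace_graph_caller or back to ftrace_stub. They reuse ftrace_call_replace(), which already exists earlier in arch/sparc/kernel/ftrace.c for dynamic ftrace and is not part of this hunk. As a rough sketch of the idea only (assuming the standard SPARC "call" encoding of opcode bits 01 plus a 30-bit signed word displacement; the helper name below is hypothetical):

/* Sketch: build a SPARC "call <addr>" instruction to patch in at ip.
 * The real helper is ftrace_call_replace() earlier in this file; this
 * just illustrates the encoding it relies on.
 */
static u32 sparc_call_insn(unsigned long ip, unsigned long addr)
{
        s32 off = (s32)addr - (s32)ip;          /* byte displacement from the call site */

        return 0x40000000U | ((u32)off >> 2);   /* "call" opcode + 30-bit word displacement */
}

ftrace_modify_code() then swaps the old instruction word for the new one at that address, so enabling or disabling the graph caller is a single-instruction patch.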
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb94d97b..af5c76c04e99 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
 
 #include <asm/ptrace.h>
@@ -721,7 +722,7 @@ static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
         __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
 }
 
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
         unsigned long pstate, bucket_pa;
         struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd490b59..0a2bd0f99fc1 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -5,6 +5,7 @@
 
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
+#include <linux/ftrace.h>
 
 #include <asm/kdebug.h>
 #include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
 {
         unsigned long flags;
 
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 2d94e7a03af5..c4a6a50b4849 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -8,6 +8,7 @@
 #include <linux/irq.h>
 
 #include <linux/perf_event.h>
+#include <linux/ftrace.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
  * Therefore in such situations we defer the work by signalling
  * a lower level cpu IRQ.
  */
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
         struct pt_regs *old_regs;
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index eb14844a0021..1011fbf451bb 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,6 +22,7 @@
 #include <linux/profile.h>
 #include <linux/bootmem.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/cpu.h>
 
 #include <asm/head.h>
@@ -822,13 +823,13 @@ void arch_send_call_function_single_ipi(int cpu)
                       &cpumask_of_cpu(cpu));
 }
 
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
         clear_softint(1 << irq);
         generic_smp_call_function_interrupt();
 }
 
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
         clear_softint(1 << irq);
         generic_smp_call_function_single_interrupt();
@@ -964,7 +965,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
         put_cpu();
 }
 
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
         struct mm_struct *mm;
         unsigned long flags;
@@ -1148,7 +1149,7 @@ void smp_release(void)
  */
 extern void prom_world(int);
 
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
         clear_softint(1 << irq);
 
@@ -1364,7 +1365,7 @@ void smp_send_reschedule(int cpu)
                       &cpumask_of_cpu(cpu));
 }
 
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
         clear_softint(1 << irq);
 }
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index f25858e442ab..c7bbe6cf7b85 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -35,6 +35,7 @@
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/ftrace.h>
 
 #include <asm/oplib.h>
 #include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
         unsigned long tick_mask = tick_ops->softint_mask;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 4e5992593967..c4f5758c9dc7 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -46,6 +46,7 @@ SECTIONS
                 SCHED_TEXT
                 LOCK_TEXT
                 KPROBES_TEXT
+                IRQENTRY_TEXT
                 *(.gnu.warning)
         } = 0
         _etext = .;
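
The IRQENTRY_TEXT line ties together the __irq_entry annotations added to the interrupt handlers elsewhere in this patch: the annotation places each handler in the .irqentry.text section, and the linker script gathers that section into the kernel's text range so the function graph tracer can recognise returns that happen inside interrupt entry code. Roughly what the annotation expands to (a sketch based on <linux/ftrace.h>; the exact definition may differ between kernel versions):

/* Sketch of the __irq_entry marker used on the handlers in this patch.
 * Functions carrying it are emitted into .irqentry.text, which the
 * IRQENTRY_TEXT macro above places inside the kernel text section.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __irq_entry     __attribute__((__section__(".irqentry.text")))
#else
#define __irq_entry
#endif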
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 153c80e62cf1..3753e3c6e176 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -26,22 +26,42 @@ mcount:
 #else
         sethi   %hi(function_trace_stop), %g1
         lduw    [%g1 + %lo(function_trace_stop)], %g2
-        brnz,pn %g2, 1f
+        brnz,pn %g2, 2f
         sethi   %hi(ftrace_trace_function), %g1
         sethi   %hi(ftrace_stub), %g2
         ldx     [%g1 + %lo(ftrace_trace_function)], %g1
         or      %g2, %lo(ftrace_stub), %g2
         cmp     %g1, %g2
         be,pn   %icc, 1f
-        mov     %i7, %g2
+        mov     %i7, %g3
         save    %sp, -128, %sp
-        mov     %g2, %o1
+        mov     %g3, %o1
         jmpl    %g1, %o7
         mov     %i7, %o0
         ret
         restore
         /* not reached */
 1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        sethi   %hi(ftrace_graph_return), %g1
+        ldx     [%g1 + %lo(ftrace_graph_return)], %g3
+        cmp     %g2, %g3
+        bne,pn  %xcc, 5f
+        sethi   %hi(ftrace_graph_entry_stub), %g2
+        sethi   %hi(ftrace_graph_entry), %g1
+        or      %g2, %lo(ftrace_graph_entry_stub), %g2
+        ldx     [%g1 + %lo(ftrace_graph_entry)], %g1
+        cmp     %g1, %g2
+        be,pt   %xcc, 2f
+        nop
+5:      mov     %i7, %g2
+        mov     %fp, %g3
+        save    %sp, -128, %sp
+        mov     %g2, %l0
+        ba,pt   %xcc, ftrace_graph_caller
+        mov     %g3, %l1
+#endif
+2:
 #endif
 #endif
         retl
@@ -62,18 +82,48 @@ ftrace_stub:
 ftrace_caller:
         sethi   %hi(function_trace_stop), %g1
         mov     %i7, %g2
-        lduw    [%g1 + %lo(function_trace_stop)], %g3
-        brnz,pn %g3, ftrace_stub
-        nop
+        lduw    [%g1 + %lo(function_trace_stop)], %g1
+        brnz,pn %g1, ftrace_stub
+        mov     %fp, %g3
         save    %sp, -128, %sp
         mov     %g2, %o1
+        mov     %g2, %l0
+        mov     %g3, %l1
         .globl  ftrace_call
 ftrace_call:
         call    ftrace_stub
         mov     %i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        .globl  ftrace_graph_call
+ftrace_graph_call:
+        call    ftrace_stub
+        nop
+#endif
         ret
         restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        .size   ftrace_graph_call,.-ftrace_graph_call
+#endif
         .size   ftrace_call,.-ftrace_call
         .size   ftrace_caller,.-ftrace_caller
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+        mov     %l0, %o0
+        mov     %i7, %o1
+        call    prepare_ftrace_return
+        mov     %l1, %o2
+        ret
+        restore %o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+        save    %sp, -128, %sp
+        call    ftrace_return_to_handler
+        mov     %fp, %o0
+        jmpl    %o0 + 8, %g0
+        restore
+END(return_to_handler)
+#endif
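
On the return side, prepare_ftrace_return() either hands back the original return point (parent + 8, since SPARC resumes two instructions past the call) or substitutes return_to_handler; ftrace_graph_caller then writes that value minus 8 into the caller's %i7 via "restore %o0, -8, %i7". When the traced function eventually returns, return_to_handler calls the generic ftrace_return_to_handler(), which pops the address stacked earlier, and the stub jumps to it plus 8. A conceptual sketch of that pop (field names approximate; the real code lives in kernel/trace/trace_functions_graph.c and additionally checks the saved frame pointer for HAVE_FUNCTION_GRAPH_FP_TEST and records timing):

/* Conceptual sketch only: what ftrace_return_to_handler() does for the
 * return_to_handler stub above.  It retrieves the %i7 value saved by
 * prepare_ftrace_return() and unwinds the per-task return stack.
 */
static unsigned long sketch_return_to_handler(void)
{
        int index = current->curr_ret_stack;
        unsigned long ret = current->ret_stack[index].ret;  /* original %i7 */

        current->curr_ret_stack--;
        return ret;     /* mcount.S then jumps to ret + 8 */
}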