author     Mike Frysinger <vapier@gentoo.org>    2010-07-21 09:13:02 -0400
committer  Mike Frysinger <vapier@gentoo.org>    2010-08-06 12:55:54 -0400
commit     f5074429621ceb0ec42f8116bd51d02c031faf82 (patch)
tree       a5d22f85446c63be25961e2ea04321e197c615db /arch/blackfin
parent     67df6cc665dc3441bf5eb2ad7018e969463a2588 (diff)
Blackfin: add support for dynamic ftrace
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch/blackfin')
-rw-r--r--  arch/blackfin/Kconfig                 2
-rw-r--r--  arch/blackfin/include/asm/ftrace.h   16
-rw-r--r--  arch/blackfin/kernel/Makefile         1
-rw-r--r--  arch/blackfin/kernel/ftrace-entry.S  85
-rw-r--r--  arch/blackfin/kernel/ftrace.c        86
5 files changed, 181 insertions, 9 deletions
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 0f63ed4fe143..10bdd8de0253 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -25,6 +25,8 @@ config BLACKFIN
 	def_bool y
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
index 4cfe2d9ba7e8..8a029505d7b7 100644
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -12,6 +12,22 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void _mcount(void);
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+	/* No extra data needed for Blackfin */
+};
+
+#endif
+
 #ifdef CONFIG_FRAME_POINTER
 #include <linux/mm.h>
 
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 30d0d1f01dc7..ca5ccc777772 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -16,6 +16,7 @@ else
 obj-y += time.o
 endif
 
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 CFLAGS_REMOVE_ftrace.o = -pg
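The ftrace-entry.S diff that follows adds a runtime-patchable _ftrace_caller entry point next to the existing __mcount. As a rough orientation only, the C sketch below mirrors its control flow; ftrace_caller_sketch(), tracer_call_slot() and the MCOUNT_INSN_SIZE value used here are stand-ins invented for this sketch, while function_trace_stop, _ftrace_call, _ftrace_stub and ftrace_update_ftrace_func() are names taken from the patch itself.

#include <stdio.h>

/* Hedged C rendering of the _ftrace_caller flow added in the assembly below;
 * illustrative only.  The real entry point is hand-written Blackfin assembly,
 * and tracer_call_slot() stands in for the _ftrace_call call site that
 * ftrace_update_ftrace_func() retargets at runtime (it initially points at
 * _ftrace_stub, which simply returns).
 */

#define MCOUNT_INSN_SIZE 8	/* placeholder for this sketch; the real value
				 * comes from the Blackfin asm/ftrace.h */

static int function_trace_stop;	/* checked by the asm when
				 * CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST is set */

static void tracer_call_slot(unsigned long ip, unsigned long parent_ip)
{
	printf("trace: call site %#lx, called from %#lx\n", ip, parent_ip);
}

static void ftrace_caller_sketch(unsigned long rets, unsigned long stacked_rets)
{
	/* optional micro optimization: return if tracing is stopped */
	if (function_trace_stop)
		return;

	/* ip: the mcount call site inside the traced function (RETS points
	 * just past it, so back up by the size of the call sequence).
	 * parent_ip: the RETS that GCC pushed before calling mcount, i.e.
	 * whoever called the traced function.
	 */
	tracer_call_slot(rets - MCOUNT_INSN_SIZE, stacked_rets);
}

int main(void)
{
	ftrace_caller_sketch(0x1000, 0x2000);	/* made-up addresses */
	return 0;
}

When no tracer is registered, the real _ftrace_call site still targets _ftrace_stub, so the whole path is just the register saves/restores and an rts.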
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
index d66446b572c0..7eed00bbd26d 100644
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -10,6 +10,18 @@
 
 .text
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Simple stub so we can boot the kernel until runtime patching has
+ * disabled all calls to this.  Then it'll be unused.
+ */
+ENTRY(__mcount)
+# if ANOMALY_05000371
+	nop; nop; nop; nop;
+# endif
+	rts;
+ENDPROC(__mcount)
+
 /* GCC will have called us before setting up the function prologue, so we
  * can clobber the normal scratch registers, but we need to make sure to
  * save/restore the registers used for argument passing (R0-R2) in case
@@ -20,15 +32,65 @@
  * function.  And since GCC pushed the previous RETS for us, the previous
  * function will be waiting there.  mmmm pie.
  */
+ENTRY(_ftrace_caller)
+# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/* optional micro optimization: return if stopped */
+	p1.l = _function_trace_stop;
+	p1.h = _function_trace_stop;
+	r3 = [p1];
+	cc = r3 == 0;
+	if ! cc jump _ftrace_stub (bp);
+# endif
+
+	/* save first/second/third function arg and the return register */
+	[--sp] = r2;
+	[--sp] = r0;
+	[--sp] = r1;
+	[--sp] = rets;
+
+	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
+	 *  ip: this point was called by ...
+	 *  parent_ip: ... this function
+	 * the ip itself will need adjusting for the mcount call
+	 */
+	r0 = rets;
+	r1 = [sp + 16];	/* skip the 4 local regs on stack */
+	r0 += -MCOUNT_INSN_SIZE;
+
+.globl _ftrace_call
+_ftrace_call:
+	call _ftrace_stub
+
+# ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl _ftrace_graph_call
+_ftrace_graph_call:
+	nop;	/* jump _ftrace_graph_caller; */
+# endif
+
+	/* restore state and get out of dodge */
+.Lfinish_trace:
+	rets = [sp++];
+	r1 = [sp++];
+	r0 = [sp++];
+	r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+	rts;
+ENDPROC(_ftrace_caller)
+
+#else
+
+/* See documentation for _ftrace_caller */
 ENTRY(__mcount)
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	/* optional micro optimization: return if stopped */
 	p1.l = _function_trace_stop;
 	p1.h = _function_trace_stop;
 	r3 = [p1];
 	cc = r3 == 0;
 	if ! cc jump _ftrace_stub (bp);
-#endif
+# endif
 
 	/* save third function arg early so we can do testing below */
 	[--sp] = r2;
@@ -44,7 +106,7 @@ ENTRY(__mcount)
 	cc = r2 == r3;
 	if ! cc jump .Ldo_trace;
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+# ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* if the ftrace_graph_return function pointer is not set to
 	 * the ftrace_stub entry, call prepare_ftrace_return().
 	 */
@@ -64,7 +126,7 @@ ENTRY(__mcount)
 	r3 = [p0];
 	cc = r2 == r3;
 	if ! cc jump _ftrace_graph_caller;
-#endif
+# endif
 
 	r2 = [sp++];
 	rts;
@@ -103,6 +165,8 @@ _ftrace_stub:
 	rts;
 ENDPROC(__mcount)
 
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /* The prepare_ftrace_return() function is similar to the trace function
  * except it takes a pointer to the location of the frompc.  This is so
@@ -110,6 +174,7 @@ ENDPROC(__mcount)
  * purposes.
  */
 ENTRY(_ftrace_graph_caller)
+# ifndef CONFIG_DYNAMIC_FTRACE
 	/* save first/second function arg and the return register */
 	[--sp] = r0;
 	[--sp] = r1;
@@ -118,9 +183,13 @@ ENTRY(_ftrace_graph_caller)
 	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
 	r0 = sp;	/* unsigned long *parent */
 	r1 = rets;	/* unsigned long self_addr */
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+# else
+	r0 = sp;	/* unsigned long *parent */
+	r1 = [sp];	/* unsigned long self_addr */
+# endif
+# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
 	r2 = fp;	/* unsigned long frame_pointer */
-#endif
+# endif
 	r0 += 16;	/* skip the 4 local regs on stack */
 	r1 += -MCOUNT_INSN_SIZE;
 	call _prepare_ftrace_return;
@@ -139,9 +208,9 @@ ENTRY(_return_to_handler)
 	[--sp] = r1;
 
 	/* get original return address */
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
 	r0 = fp;	/* Blackfin is sane, so omit this */
-#endif
+# endif
 	call _ftrace_return_to_handler;
 	rets = r0;
 
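The ftrace.c diff below adds the actual code-patching helpers. To make the byte sequences concrete, here is a small user-space sketch that reuses the patch's own pcrel24 encoder and prints what ftrace_make_call() and ftrace_make_nop() would write at one call site; the addresses are made up for illustration, and the program only assembles bytes in a buffer, it does not patch anything.

#include <stdio.h>
#include <stdint.h>

/* Same 24-bit PC-relative CALL encoder as bfin_make_pcrel24() in the
 * ftrace.c hunk below: the displacement is counted in 16-bit units and is
 * split across the bytes surrounding the 0xe3 opcode byte.
 */
static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
			      unsigned long dst)
{
	uint32_t pcrel = (dst - src) >> 1;

	insn[0] = pcrel >> 16;
	insn[1] = 0xe3;
	insn[2] = pcrel;
	insn[3] = pcrel >> 8;
}

static void dump(const char *what, const unsigned char *b, int len)
{
	printf("%-12s", what);
	for (int i = 0; i < len; i++)
		printf(" %02x", (unsigned)b[i]);
	printf("\n");
}

int main(void)
{
	/* made-up addresses for illustration only */
	unsigned long ip = 0x1000;	/* mcount call site in some function */
	unsigned long mcount = 0x4000;	/* address of _mcount */

	/* what ftrace_make_call() writes:
	 *   [--SP] = RETS;  CALL pcrel24;  RETS = [SP++];
	 * src is the address of the CALL itself, two bytes into the patched
	 * sequence, mirroring rec->ip + 2 in ftrace_make_call().
	 */
	unsigned char call[8] = { 0x67, 0x01, 0, 0, 0, 0, 0x27, 0x01 };
	bfin_make_pcrel24(&call[2], ip + 2, mcount);

	/* what ftrace_make_nop() writes: two 32-bit MNOPs over the same 8 bytes */
	const unsigned char mnop[8] = {
		0x03, 0xc0, 0x00, 0x18,
		0x03, 0xc0, 0x00, 0x18,
	};

	dump("live call:", call, sizeof(call));
	dump("nopped:", mnop, sizeof(mnop));
	return 0;
}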
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
index a61d948ea925..48808a12b427 100644
--- a/arch/blackfin/kernel/ftrace.c
+++ b/arch/blackfin/kernel/ftrace.c
@@ -1,17 +1,101 @@
 /*
  * ftrace graph code
  *
- * Copyright (C) 2009 Analog Devices Inc.
+ * Copyright (C) 2009-2010 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/uaccess.h>
 #include <asm/atomic.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+static const unsigned char mnop[] = {
+	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
+	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
+};
+
+static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
+			      unsigned long dst)
+{
+	uint32_t pcrel = (dst - src) >> 1;
+	insn[0] = pcrel >> 16;
+	insn[1] = 0xe3;
+	insn[2] = pcrel;
+	insn[3] = pcrel >> 8;
+}
+#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
+
+static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
+			      unsigned long len)
+{
+	int ret = probe_kernel_write((void *)ip, (void *)code, len);
+	flush_icache_range(ip, ip + len);
+	return ret;
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	/* Turn the mcount call site into two MNOPs as those are 32bit insns */
+	return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	/* Restore the mcount call site */
+	unsigned char call[8];
+	call[0] = 0x67; /* [--SP] = RETS; */
+	call[1] = 0x01;
+	bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
+	call[6] = 0x27; /* RETS = [SP++]; */
+	call[7] = 0x01;
+	return ftrace_modify_code(rec->ip, call, sizeof(call));
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned char call[4];
+	unsigned long ip = (unsigned long)&ftrace_call;
+	bfin_make_pcrel24(call, ip, func);
+	return ftrace_modify_code(ip, call, sizeof(call));
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	/* return value is done indirectly via data */
+	*(unsigned long *)data = 0;
+
+	return 0;
+}
+
+#endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+# ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)&ftrace_graph_call;
+	uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
+	jump_pcrel12 |= 0x2000;
+	return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
+}
+
+# endif
+
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.