author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-01-05 06:48:11 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2011-01-05 06:47:31 -0500
commit	4cc9bed034d1ae588e5b773ee0edeb74ef3c0ff4 (patch)
tree	80f2b7c92d9ef40ee93d82888f305c93c21ef5c8 /arch/s390
parent	5e9a26928f550157563cfc06ce12c4ae121a02ec (diff)
[S390] cleanup ftrace backend functions
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/ftrace.h	11
-rw-r--r--	arch/s390/kernel/ftrace.c	238
-rw-r--r--	arch/s390/kernel/mcount.S	30
-rw-r--r--	arch/s390/kernel/mcount64.S	27
4 files changed, 135 insertions, 171 deletions
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 96c14a9102b8..3c29be4836ed 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,20 +4,17 @@
 #ifndef __ASSEMBLY__
 
 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;
 
 struct dyn_arch_ftrace { };
 
 #define MCOUNT_ADDR ((long)_mcount)
 
 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET	  14
+#define MCOUNT_INSN_SIZE  12
+#define MCOUNT_OFFSET	   8
 #else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
-#define MCOUNT_OFFSET	   8
+#define MCOUNT_INSN_SIZE  20
+#define MCOUNT_OFFSET	   4
 #endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
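Note: ftrace_call_adjust(), visible as context at the end of the hunk, is the consumer of MCOUNT_OFFSET. Its body lies outside the hunk, so the following is a sketch of how the constant is typically used in this header, an assumption for orientation rather than part of the patch:

	/*
	 * Sketch, not part of the patch: assumes the usual s390 definition,
	 * which rewinds the recorded address by MCOUNT_OFFSET bytes to get
	 * back to the start of the mcount call site.
	 */
	static inline unsigned long ftrace_call_adjust(unsigned long addr)
	{
		return addr - MCOUNT_OFFSET;
	}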
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 6a83d0581317..78bdf0e5dff7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -4,7 +4,7 @@
  * Copyright IBM Corp. 2009
  *
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/hardirq.h>
@@ -12,176 +12,144 @@
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 
+#ifdef CONFIG_64BIT
+#define MCOUNT_OFFSET_RET 12
+#else
+#define MCOUNT_OFFSET_RET 22
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 void ftrace_disable_code(void);
-void ftrace_disable_return(void);
-void ftrace_call_code(void);
-void ftrace_nop_code(void);
-
-#define FTRACE_INSN_SIZE 4
+void ftrace_enable_insn(void);
 
 #ifdef CONFIG_64BIT
-
+/*
+ * The 64-bit mcount code looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	larl	%r1,<&counter>		# offset 6
+ * >	brasl	%r14,_mcount		# offset 12
+ *	lg	%r14,8(%r15)		# offset 18
+ * Total length is 24 bytes. The middle two instructions of the mcount
+ * block get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 64-bit enabled ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%15)		# offset 18
+ * The return points of the mcount/ftrace function have the same offset 18.
+ * The 64-bit disable ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	jg	.+18			# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%15)		# offset 18
+ * The jg instruction branches to offset 24 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
-	"	j	0f\n"
-	"	.word	0x0024\n"
-	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	lg	%r14,8(15)\n"
+	"	jg	0f\n"
 	"	lgr	%r0,%r0\n"
-	"0:\n");
-
-asm(
+	"	basr	%r14,%r1\n"
+	"0:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");
 
-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	stg	%r14,8(%r15)\n");
+#define FTRACE_INSN_SIZE	6
 
 #else /* CONFIG_64BIT */
-
+/*
+ * The 31-bit mcount code looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	bras	%r1,0f			# offset 4
+ * >	.long	_mcount			# offset 8
+ * >	.long	<&counter>		# offset 12
+ * > 0:	l	%r14,0(%r1)		# offset 16
+ * >	l	%r1,4(%r1)		# offset 20
+ *	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * Total length is 30 bytes. The twenty bytes starting from offset 4
+ * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 31-bit enabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ * 0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r14)		# offset 26
+ * The return points of the mcount/ftrace function have the same offset 26.
+ * The 31-bit disabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	j	.+26			# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ * 0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r14)		# offset 26
+ * The j instruction branches to offset 30 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
+	"	j	1f\n"
 	"	j	0f\n"
-	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	l	%r14,4(%r15)\n"
-	"	j	0f\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"0:\n");
-
-asm(
+	"	.fill	12,1,0x07\n"
+	"0:	basr	%r14,%r14\n"
+	"1:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");
 
-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	st	%r14,4(%r15)\n");
+#define FTRACE_INSN_SIZE	4
 
 #endif /* CONFIG_64BIT */
 
-static int ftrace_modify_code(unsigned long ip,
-			      void *old_code, int old_size,
-			      void *new_code, int new_size)
-{
-	unsigned char replaced[MCOUNT_INSN_SIZE];
-
-	/*
-	 * Note: Due to modules code can disappear and change.
-	 * We need to protect against faulting as well as code
-	 * changing. We do this by using the probe_kernel_*
-	 * functions.
-	 * This however is just a simple sanity check.
-	 */
-	if (probe_kernel_read(replaced, (void *)ip, old_size))
-		return -EFAULT;
-	if (memcmp(replaced, old_code, old_size) != 0)
-		return -EINVAL;
-	if (probe_kernel_write((void *)ip, new_code, new_size))
-		return -EPERM;
-	return 0;
-}
-
-static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
-				   unsigned long addr)
-{
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_disable_code, MCOUNT_INSN_SIZE);
-}
-
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	if (addr == MCOUNT_ADDR)
-		return ftrace_make_initial_nop(mod, rec, addr);
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_nop_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
+			       MCOUNT_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	return ftrace_modify_code(rec->ip,
-				  ftrace_nop_code, FTRACE_INSN_SIZE,
-				  ftrace_call_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
+			       FTRACE_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	ftrace_dyn_func = (unsigned long)func;
 	return 0;
 }
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	*(unsigned long *)data = 0;
+	*(unsigned long *) data = 0;
 	return 0;
 }
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-/*
- * Patch the kernel code at ftrace_graph_caller location:
- * The instruction there is branch relative on condition. The condition mask
- * is either all ones (always branch aka disable ftrace_graph_caller) or all
- * zeroes (nop aka enable ftrace_graph_caller).
- * Instruction format for brc is a7m4xxxx where m is the condition mask.
- */
-int ftrace_enable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa704;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa7f4;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - (ftrace_disable_return - ftrace_disable_code);
-}
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - MCOUNT_OFFSET_RET;
-}
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
 /*
  * Hook the return address and push it in the stack of return addresses
  * in current thread info.
  */
-unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
+					      unsigned long ip)
 {
 	struct ftrace_graph_ent trace;
 
@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
 		goto out;
 	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
 		goto out;
-	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
 	/* Only trace if the calling function expects to. */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
 		goto out;
 	}
-	parent = (unsigned long)return_to_handler;
+	parent = (unsigned long) return_to_handler;
 out:
 	return parent;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location. The instruction
+ * there is branch relative and save to prepare_ftrace_return. To disable
+ * the call to prepare_ftrace_return we patch the bras offset to point
+ * directly after the instructions. To enable the call we calculate
+ * the original offset to prepare_ftrace_return and put it back.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short offset;
+
+	offset = ((void *) prepare_ftrace_return -
+		  (void *) ftrace_graph_caller) / 2;
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	static unsigned short offset = 0x0002;
+
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
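The offset arithmetic in the new ftrace_enable_ftrace_graph_caller() follows from the s390 bras encoding: the instruction is 4 bytes, its signed 16-bit immediate occupies bytes 2-3 (hence the write at ftrace_graph_caller + 2), and the immediate counts halfwords relative to the instruction address. A standalone sketch of the math, using hypothetical addresses for illustration only:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical addresses, for illustration only */
		unsigned long caller = 0x100000; /* the bras in ftrace_graph_caller */
		unsigned long target = 0x100a00; /* prepare_ftrace_return */

		/* enable: halfword distance from the bras to its target */
		unsigned short offset = (target - caller) / 2;
		printf("enabled immediate:  0x%04x\n", offset);

		/* disable: immediate 0x0002 branches to caller + 2*2 bytes,
		 * i.e. directly past the 4-byte bras (the 0: label) */
		printf("disabled branch target: caller + %d bytes\n", 2 * 0x0002);
		return 0;
	}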
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 4a6e1a575f9e..1e6a55795628 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -18,22 +18,12 @@ _mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14
 
-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.long	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif
 	stm	%r2,%r5,16(%r15)
 	bras	%r1,2f
-#ifdef CONFIG_DYNAMIC_FTRACE
-0:	.long	ftrace_dyn_func
-#else
 0:	.long	ftrace_trace_function
-#endif
 1:	.long	function_trace_stop
 2:	l	%r2,1b-0b(%r1)
 	icm	%r2,0xf,0(%r2)
@@ -49,21 +39,15 @@ ftrace_caller:
 	l	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	l	%r2,100(%r15)
+	l	%r3,152(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	1f
-#endif
-	bras	%r1,0f
-	.long	prepare_ftrace_return
-0:	l	%r2,152(%r15)
-	l	%r4,0(%r1)
-	l	%r3,100(%r15)
-	basr	%r14,%r4
-	st	%r2,100(%r15)
-1:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	st	%r2,100(%r15)
 #endif
 	ahi	%r15,96
 	l	%r14,56(%r15)
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index b2bae06ad6c7..e73667286ac0 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -18,12 +18,6 @@ _mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14
 
-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.quad	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif
@@ -37,26 +31,19 @@ ftrace_caller:
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r14
 	lg	%r3,168(%r15)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	larl	%r14,ftrace_dyn_func
-#else
 	larl	%r14,ftrace_trace_function
-#endif
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	lg	%r2,168(%r15)
+	lg	%r3,272(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	0f
-#endif
-	lg	%r2,272(%r15)
-	lg	%r3,168(%r15)
-	brasl	%r14,prepare_ftrace_return
-	stg	%r2,168(%r15)
-0:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	stg	%r2,168(%r15)
 #endif
 	aghi	%r15,160
 	lmg	%r2,%r5,32(%r15)
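For orientation, the new ftrace_graph_caller sequences in both mcount files line up with the reordered prepare_ftrace_return(parent, ip) prototype from ftrace.c: in the s390 ABI the first two arguments travel in %r2/%r3 and the return value comes back in %r2, so the saved return address is loaded, possibly replaced, and stored back to the same stack slot. A rough C rendering of that sequence, where the function name and the slot pointer are illustrative stand-ins, not part of the patch:

	extern unsigned long prepare_ftrace_return(unsigned long parent,
						   unsigned long ip);

	/* sketch: what the patched "bras %r14,prepare_ftrace_return" plus the
	 * surrounding loads/stores amount to; parent_slot stands in for the
	 * 100(%r15) / 168(%r15) stack slot holding the saved return address */
	static void graph_caller_sketch(unsigned long *parent_slot, unsigned long ip)
	{
		/* may redirect the traced function's return to return_to_handler */
		*parent_slot = prepare_ftrace_return(*parent_slot, ip);
	}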