author     Steven Rostedt <srostedt@redhat.com>    2008-05-12 15:20:43 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2008-05-23 14:33:35 -0400
commit     3c1720f00bb619302ba19d55986ab565e74d06db
tree       d58aaa54bc8e7a465597f385de36204c3b0b9cf8
parent     dfa60aba04dae7833d75b2e2be124bb7cfb8239f
ftrace: move memory management out of arch code
This patch moves the memory management of the ftrace
records out of the arch code and into the generic code,
making the arch code simpler.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/ftrace.c | 183
-rw-r--r--  include/linux/ftrace.h   |  18
-rw-r--r--  kernel/trace/ftrace.c    | 154
3 files changed, 192 insertions, 163 deletions
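For orientation before the hunks themselves, the arch-facing interface that is left after this change can be summarized in the following minimal C sketch. The declarations, the new flags field and the FTRACE_FL_FAILED bit are taken directly from the include/linux/ftrace.h hunk below; the comments are editorial, and the sketch assumes <linux/list.h> for struct hlist_node.

/*
 * Sketch only, not part of the patch text: the per-architecture interface
 * that remains once record management moves into kernel/trace/ftrace.c.
 * Mirrors the include/linux/ftrace.h hunk below.
 */
enum {
	FTRACE_FL_FAILED = (1<<0),	/* set when patching this site faulted */
};

struct dyn_ftrace {
	struct hlist_node node;		/* hashed by call-site address */
	unsigned long ip;		/* recorded mcount call-site address */
	unsigned long flags;		/* e.g. FTRACE_FL_FAILED */
};

/* defined in arch code */
extern int ftrace_ip_converted(unsigned long ip);   /* site already a nop? */
extern unsigned char *ftrace_nop_replace(void);     /* arch nop (5 bytes on x86) */
extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void);
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code);

Everything else (the ftrace_page pool, ftrace_alloc_shutdown_node(), ftrace_replace_code() and the replenish logic) now lives in kernel/trace/ftrace.c, and arch/x86/kernel/ftrace.c shrinks to just these code-patching helpers.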
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2e060c58b860..b69795efa226 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -23,25 +23,6 @@
 /* Long is fine, even if it is only 4 bytes ;-) */
 static long *ftrace_nop;
 
-struct ftrace_record {
-	struct dyn_ftrace rec;
-	int failed;
-} __attribute__((packed));
-
-struct ftrace_page {
-	struct ftrace_page *next;
-	int index;
-	struct ftrace_record records[];
-} __attribute__((packed));
-
-#define ENTRIES_PER_PAGE \
-	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
-
-/* estimate from running different kernels */
-#define NR_TO_INIT 10000
-
-#define MCOUNT_ADDR ((long)(&mcount))
-
 union ftrace_code_union {
 	char code[5];
 	struct {
@@ -50,33 +31,41 @@ union ftrace_code_union {
 	} __attribute__((packed));
 };
 
-static struct ftrace_page *ftrace_pages_start;
-static struct ftrace_page *ftrace_pages;
-
-notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+notrace int ftrace_ip_converted(unsigned long ip)
 {
-	struct ftrace_record *rec;
 	unsigned long save;
 
 	ip -= CALL_BACK;
 	save = *(long *)ip;
 
-	/* If this was already converted, skip it */
-	if (save == *ftrace_nop)
-		return NULL;
+	return save == *ftrace_nop;
+}
 
-	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-		if (!ftrace_pages->next)
-			return NULL;
-		ftrace_pages = ftrace_pages->next;
-	}
+static int notrace ftrace_calc_offset(long ip, long addr)
+{
+	return (int)(addr - ip);
+}
 
-	rec = &ftrace_pages->records[ftrace_pages->index++];
+notrace unsigned char *ftrace_nop_replace(void)
+{
+	return (char *)ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+	static union ftrace_code_union calc;
 
-	return &rec->rec;
+	calc.e8 = 0xe8;
+	calc.offset = ftrace_calc_offset(ip, addr);
+
+	/*
+	 * No locking needed, this must be called via kstop_machine
+	 * which in essence is like running on a uniprocessor machine.
+	 */
+	return calc.code;
 }
 
-static int notrace
+notrace int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -86,6 +75,9 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	unsigned char newch = new_code[4];
 	int faulted = 0;
 
+	/* move the IP back to the start of the call */
+	ip -= CALL_BACK;
+
 	/*
 	 * Note: Due to modules and __init, code can
 	 * disappear and change, we need to protect against faulting
@@ -117,129 +109,12 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	return faulted;
 }
 
-static int notrace ftrace_calc_offset(long ip)
-{
-	return (int)(MCOUNT_ADDR - ip);
-}
-
-notrace void ftrace_code_disable(struct dyn_ftrace *rec)
-{
-	unsigned long ip;
-	union ftrace_code_union save;
-	struct ftrace_record *r =
-		container_of(rec, struct ftrace_record, rec);
-
-	ip = rec->ip;
-
-	save.e8 = 0xe8;
-	save.offset = ftrace_calc_offset(ip);
-
-	/* move the IP back to the start of the call */
-	ip -= CALL_BACK;
-
-	r->failed = ftrace_modify_code(ip, save.code, (char *)ftrace_nop);
-}
-
-static void notrace ftrace_replace_code(int saved)
-{
-	unsigned char *new = NULL, *old = NULL;
-	struct ftrace_record *rec;
-	struct ftrace_page *pg;
-	unsigned long ip;
-	int i;
-
-	if (saved)
-		old = (char *)ftrace_nop;
-	else
-		new = (char *)ftrace_nop;
-
-	for (pg = ftrace_pages_start; pg; pg = pg->next) {
-		for (i = 0; i < pg->index; i++) {
-			union ftrace_code_union calc;
-			rec = &pg->records[i];
-
-			/* don't modify code that has already faulted */
-			if (rec->failed)
-				continue;
-
-			ip = rec->rec.ip;
-
-			calc.e8 = 0xe8;
-			calc.offset = ftrace_calc_offset(ip);
-
-			if (saved)
-				new = calc.code;
-			else
-				old = calc.code;
-
-			ip -= CALL_BACK;
-
-			rec->failed = ftrace_modify_code(ip, old, new);
-		}
-	}
-
-}
-
-notrace void ftrace_startup_code(void)
-{
-	ftrace_replace_code(1);
-}
-
-notrace void ftrace_shutdown_code(void)
-{
-	ftrace_replace_code(0);
-}
-
-notrace void ftrace_shutdown_replenish(void)
-{
-	if (ftrace_pages->next)
-		return;
-
-	/* allocate another page */
-	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-}
-
-notrace int __init ftrace_shutdown_arch_init(void)
+int __init ftrace_dyn_arch_init(void)
 {
 	const unsigned char *const *noptable = find_nop_table();
-	struct ftrace_page *pg;
-	int cnt;
-	int i;
 
 	ftrace_nop = (unsigned long *)noptable[CALL_BACK];
 
-	/* allocate a few pages */
-	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
-	if (!ftrace_pages_start)
-		return -1;
-
-	/*
-	 * Allocate a few more pages.
-	 *
-	 * TODO: have some parser search vmlinux before
-	 * final linking to find all calls to ftrace.
-	 * Then we can:
-	 *  a) know how many pages to allocate.
-	 *     and/or
-	 *  b) set up the table then.
-	 *
-	 * The dynamic code is still necessary for
	 * modules.
-	 */
-
-	pg = ftrace_pages = ftrace_pages_start;
-
-	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
-
-	for (i = 0; i < cnt; i++) {
-		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
-
-		/* If we fail, we'll try later anyway */
-		if (!pg->next)
-			break;
-
-		pg = pg->next;
-	}
-
 	return 0;
 }
+
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ccd8537dbdb7..d509ad6c9cb8 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -42,19 +42,23 @@ extern void mcount(void);
 # define FTRACE_HASHBITS 10
 # define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS)
 
+enum {
+	FTRACE_FL_FAILED = (1<<0),
+};
+
 struct dyn_ftrace {
 	struct hlist_node node;
 	unsigned long ip;
+	unsigned long flags;
 };
 
 /* defined in arch */
-extern struct dyn_ftrace *
-ftrace_alloc_shutdown_node(unsigned long ip);
-extern int ftrace_shutdown_arch_init(void);
-extern void ftrace_code_disable(struct dyn_ftrace *rec);
-extern void ftrace_startup_code(void);
-extern void ftrace_shutdown_code(void);
-extern void ftrace_shutdown_replenish(void);
+extern int ftrace_ip_converted(unsigned long ip);
+extern unsigned char *ftrace_nop_replace(void);
+extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
+extern int ftrace_dyn_arch_init(void);
+extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+			      unsigned char *new_code);
 #endif
 
 #ifdef CONFIG_FRAME_POINTER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d3de37299ba4..f6d9af3bf66b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -156,6 +156,21 @@ static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
 static DEFINE_SPINLOCK(ftrace_shutdown_lock);
 static DEFINE_MUTEX(ftraced_lock);
 
+struct ftrace_page {
+	struct ftrace_page *next;
+	int index;
+	struct dyn_ftrace records[];
+} __attribute__((packed));
+
+#define ENTRIES_PER_PAGE \
+	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
+
+/* estimate from running different kernels */
+#define NR_TO_INIT 10000
+
+static struct ftrace_page *ftrace_pages_start;
+static struct ftrace_page *ftrace_pages;
+
 static int ftraced_trigger;
 static int ftraced_suspend;
 
@@ -184,6 +199,21 @@ ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
 	hlist_add_head(&node->node, &ftrace_hash[key]);
 }
 
+static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+{
+	/* If this was already converted, skip it */
+	if (ftrace_ip_converted(ip))
+		return NULL;
+
+	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
+		if (!ftrace_pages->next)
+			return NULL;
+		ftrace_pages = ftrace_pages->next;
+	}
+
+	return &ftrace_pages->records[ftrace_pages->index++];
+}
+
 static void notrace
 ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
 {
@@ -252,6 +282,62 @@ static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
 	.func = ftrace_record_ip,
 };
 
+#define MCOUNT_ADDR ((long)(&mcount))
+
+static void notrace ftrace_replace_code(int saved)
+{
+	unsigned char *new = NULL, *old = NULL;
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	unsigned long ip;
+	int failed;
+	int i;
+
+	if (saved)
+		old = ftrace_nop_replace();
+	else
+		new = ftrace_nop_replace();
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
+
+			/* don't modify code that has already faulted */
+			if (rec->flags & FTRACE_FL_FAILED)
+				continue;
+
+			ip = rec->ip;
+
+			if (saved)
+				new = ftrace_call_replace(ip, MCOUNT_ADDR);
+			else
+				old = ftrace_call_replace(ip, MCOUNT_ADDR);
+
+			failed = ftrace_modify_code(ip, old, new);
+			if (failed)
+				rec->flags |= FTRACE_FL_FAILED;
+		}
+	}
+}
+
+static notrace void ftrace_startup_code(void)
+{
+	ftrace_replace_code(1);
+}
+
+static notrace void ftrace_shutdown_code(void)
+{
+	ftrace_replace_code(0);
+}
+
+static notrace void ftrace_shutdown_replenish(void)
+{
+	if (ftrace_pages->next)
+		return;
+
+	/* allocate another page */
+	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
+}
 
 static int notrace __ftrace_modify_code(void *data)
 {
@@ -261,6 +347,23 @@ static int notrace __ftrace_modify_code(void *data)
 	return 0;
 }
 
+static notrace void
+ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip;
+	unsigned char *nop, *call;
+	int failed;
+
+	ip = rec->ip;
+
+	nop = ftrace_nop_replace();
+	call = ftrace_call_replace(ip, addr);
+
+	failed = ftrace_modify_code(ip, call, nop);
+	if (failed)
+		rec->flags |= FTRACE_FL_FAILED;
+}
+
 static void notrace ftrace_run_startup_code(void)
 {
 	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
@@ -346,7 +449,7 @@ static int notrace __ftrace_update_code(void *ignore)
 
 	/* all CPUS are stopped, we are safe to modify code */
 	hlist_for_each_entry(p, t, &head, node) {
-		ftrace_code_disable(p);
+		ftrace_code_disable(p, MCOUNT_ADDR);
 		ftrace_update_cnt++;
 	}
 
@@ -407,12 +510,59 @@ static int notrace ftraced(void *ignore)
 	return 0;
 }
 
+static int __init ftrace_dyn_table_alloc(void)
+{
+	struct ftrace_page *pg;
+	int cnt;
+	int i;
+	int ret;
+
+	ret = ftrace_dyn_arch_init();
+	if (ret)
+		return ret;
+
+	/* allocate a few pages */
+	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!ftrace_pages_start)
+		return -1;
+
+	/*
+	 * Allocate a few more pages.
+	 *
+	 * TODO: have some parser search vmlinux before
+	 * final linking to find all calls to ftrace.
+	 * Then we can:
+	 *  a) know how many pages to allocate.
+	 *     and/or
+	 *  b) set up the table then.
+	 *
+	 * The dynamic code is still necessary for
+	 * modules.
+	 */
+
+	pg = ftrace_pages = ftrace_pages_start;
+
+	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+
+	for (i = 0; i < cnt; i++) {
+		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+
+		/* If we fail, we'll try later anyway */
+		if (!pg->next)
+			break;
+
+		pg = pg->next;
+	}
+
+	return 0;
+}
+
 static int __init notrace ftrace_shutdown_init(void)
 {
 	struct task_struct *p;
 	int ret;
 
-	ret = ftrace_shutdown_arch_init();
+	ret = ftrace_dyn_table_alloc();
 	if (ret)
 		return ret;
 