author     Steven Rostedt <srostedt@redhat.com>    2008-05-12 15:20:43 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2008-05-23 14:33:35 -0400
commit     3c1720f00bb619302ba19d55986ab565e74d06db (patch)
tree       d58aaa54bc8e7a465597f385de36204c3b0b9cf8 /kernel/trace/ftrace.c
parent     dfa60aba04dae7833d75b2e2be124bb7cfb8239f (diff)
ftrace: move memory management out of arch code
This patch moves the memory management of the ftrace records out of the
arch code and into the generic code, making the arch code simpler.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
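For orientation before the diff: the generic layer introduced here keeps dyn_ftrace
records in page-sized ftrace_page nodes chained through a next pointer, with a
per-page index marking the next free slot. The sketch below is a simplified
userspace model of that scheme, not the kernel code itself; it substitutes calloc()
for get_zeroed_page(), stubs struct dyn_ftrace down to two fields, and drops the
notrace annotations, locking, and the ftrace_ip_converted() check, but the chaining
and slot-handout logic follows the hunks shown further down.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Stub of the kernel's dyn_ftrace record, reduced to what the sketch needs. */
struct dyn_ftrace {
	unsigned long ip;
	unsigned long flags;
};

/* Same layout idea as the patch: a page-sized node holding an array of records. */
struct ftrace_page {
	struct ftrace_page *next;
	int index;
	struct dyn_ftrace records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

/* Mirrors ftrace_shutdown_replenish(): append one zeroed page if the chain ends here. */
static void replenish(void)
{
	if (ftrace_pages->next)
		return;
	ftrace_pages->next = calloc(1, PAGE_SIZE);
}

/* Mirrors ftrace_alloc_shutdown_node(): hand out the next free record slot. */
static struct dyn_ftrace *alloc_node(void)
{
	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;		/* caller must replenish first */
		ftrace_pages = ftrace_pages->next;
	}
	return &ftrace_pages->records[ftrace_pages->index++];
}

int main(void)
{
	unsigned long ip;

	ftrace_pages_start = ftrace_pages = calloc(1, PAGE_SIZE);
	if (!ftrace_pages_start)
		return 1;

	/* Record enough fake call sites to force the chain to grow. */
	for (ip = 0x1000; ip < 0x1000 + 3 * ENTRIES_PER_PAGE; ip++) {
		struct dyn_ftrace *rec = alloc_node();

		if (!rec) {			/* current page is full: grow the chain */
			replenish();
			rec = alloc_node();
			if (!rec)
				return 1;	/* allocation failed */
		}
		rec->ip = ip;
	}

	printf("%zu records fit in each %d-byte page\n",
	       ENTRIES_PER_PAGE, PAGE_SIZE);
	return 0;
}

The flexible array member lets each page-sized node carry as many records as fit
after the header, so the table grows one page at a time instead of allocating
records individually.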
Diffstat (limited to 'kernel/trace/ftrace.c')
 -rw-r--r--  kernel/trace/ftrace.c | 154
 1 file changed, 152 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d3de37299ba4..f6d9af3bf66b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -156,6 +156,21 @@ static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
 static DEFINE_SPINLOCK(ftrace_shutdown_lock);
 static DEFINE_MUTEX(ftraced_lock);
 
+struct ftrace_page {
+	struct ftrace_page	*next;
+	int			index;
+	struct dyn_ftrace	records[];
+} __attribute__((packed));
+
+#define ENTRIES_PER_PAGE \
+	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
+
+/* estimate from running different kernels */
+#define NR_TO_INIT		10000
+
+static struct ftrace_page	*ftrace_pages_start;
+static struct ftrace_page	*ftrace_pages;
+
 static int ftraced_trigger;
 static int ftraced_suspend;
 
@@ -184,6 +199,21 @@ ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
 	hlist_add_head(&node->node, &ftrace_hash[key]);
 }
 
+static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+{
+	/* If this was already converted, skip it */
+	if (ftrace_ip_converted(ip))
+		return NULL;
+
+	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
+		if (!ftrace_pages->next)
+			return NULL;
+		ftrace_pages = ftrace_pages->next;
+	}
+
+	return &ftrace_pages->records[ftrace_pages->index++];
+}
+
 static void notrace
 ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
 {
@@ -252,6 +282,62 @@ static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
 	.func = ftrace_record_ip,
 };
 
+#define MCOUNT_ADDR ((long)(&mcount))
+
+static void notrace ftrace_replace_code(int saved)
+{
+	unsigned char *new = NULL, *old = NULL;
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	unsigned long ip;
+	int failed;
+	int i;
+
+	if (saved)
+		old = ftrace_nop_replace();
+	else
+		new = ftrace_nop_replace();
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
+
+			/* don't modify code that has already faulted */
+			if (rec->flags & FTRACE_FL_FAILED)
+				continue;
+
+			ip = rec->ip;
+
+			if (saved)
+				new = ftrace_call_replace(ip, MCOUNT_ADDR);
+			else
+				old = ftrace_call_replace(ip, MCOUNT_ADDR);
+
+			failed = ftrace_modify_code(ip, old, new);
+			if (failed)
+				rec->flags |= FTRACE_FL_FAILED;
+		}
+	}
+}
+
+static notrace void ftrace_startup_code(void)
+{
+	ftrace_replace_code(1);
+}
+
+static notrace void ftrace_shutdown_code(void)
+{
+	ftrace_replace_code(0);
+}
+
+static notrace void ftrace_shutdown_replenish(void)
+{
+	if (ftrace_pages->next)
+		return;
+
+	/* allocate another page */
+	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
+}
 
 static int notrace __ftrace_modify_code(void *data)
 {
@@ -261,6 +347,23 @@ static int notrace __ftrace_modify_code(void *data)
 	return 0;
 }
 
+static notrace void
+ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip;
+	unsigned char *nop, *call;
+	int failed;
+
+	ip = rec->ip;
+
+	nop = ftrace_nop_replace();
+	call = ftrace_call_replace(ip, addr);
+
+	failed = ftrace_modify_code(ip, call, nop);
+	if (failed)
+		rec->flags |= FTRACE_FL_FAILED;
+}
+
 static void notrace ftrace_run_startup_code(void)
 {
 	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
@@ -346,7 +449,7 @@ static int notrace __ftrace_update_code(void *ignore)
 
 	/* all CPUS are stopped, we are safe to modify code */
 	hlist_for_each_entry(p, t, &head, node) {
-		ftrace_code_disable(p);
+		ftrace_code_disable(p, MCOUNT_ADDR);
 		ftrace_update_cnt++;
 	}
 
@@ -407,12 +510,59 @@ static int notrace ftraced(void *ignore)
 	return 0;
 }
 
+static int __init ftrace_dyn_table_alloc(void)
+{
+	struct ftrace_page *pg;
+	int cnt;
+	int i;
+	int ret;
+
+	ret = ftrace_dyn_arch_init();
+	if (ret)
+		return ret;
+
+	/* allocate a few pages */
+	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!ftrace_pages_start)
+		return -1;
+
+	/*
+	 * Allocate a few more pages.
+	 *
+	 * TODO: have some parser search vmlinux before
+	 *   final linking to find all calls to ftrace.
+	 *   Then we can:
+	 *    a) know how many pages to allocate.
+	 *     and/or
+	 *    b) set up the table then.
+	 *
+	 *  The dynamic code is still necessary for
+	 *  modules.
+	 */
+
+	pg = ftrace_pages = ftrace_pages_start;
+
+	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+
+	for (i = 0; i < cnt; i++) {
+		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+
+		/* If we fail, we'll try later anyway */
+		if (!pg->next)
+			break;
+
+		pg = pg->next;
+	}
+
+	return 0;
+}
+
 static int __init notrace ftrace_shutdown_init(void)
 {
 	struct task_struct *p;
 	int ret;
 
-	ret = ftrace_shutdown_arch_init();
+	ret = ftrace_dyn_table_alloc();
 	if (ret)
 		return ret;
 