author | Steven Rostedt <srostedt@redhat.com> | 2009-03-23 17:12:36 -0400
committer | Steven Rostedt <srostedt@redhat.com> | 2009-03-24 23:41:06 -0400
commit | 493762fc534c71d11d489f872c4b4a2c61173668 (patch)
tree | 90f308853f26ad5334717d53b18680957aab5ff4 /kernel/trace/ftrace.c
parent | bac429f037f1a51a74d62bad6d1518c3be065df3 (diff)
tracing: move function profiler data out of function struct
Impact: reduce memory footprint of the function profiler
The function profiler originally introduced its counters into the
function records themselves. There are around 20 thousand different
functions on a normal system, so this added 20 thousand counters for
profiling even when profiling was not in use.
A normal run of the profiler only hits a couple of thousand functions,
depending on what is being profiled. This means around 18 thousand of
those counters are never used.
This patch rectifies this by moving the profiling data out of the function
records used by dynamic ftrace. Pages to hold the records are preallocated
when profiling begins. During profiling, checks are made to see whether
more records are needed, and more pages are allocated if it is safe
to do so.
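
To make the new layout concrete, here is a minimal userspace C sketch of the
scheme described above: page-sized blocks of profile records chained in a
list, indexed by a small hash table keyed on the instruction pointer, with
another block allocated only once the current one fills up. The names here
(profile_hit, RECORDS_PER_PAGE, and so on) are illustrative rather than the
kernel's own; the real code in the diff below uses get_zeroed_page(), hlist
buckets with hash_long(), a per-CPU recursion guard and a raw spinlock,
preallocates several pages up front, and only allocates more with GFP_ATOMIC
when it is considered safe. The sketch leaves all of that out.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096
#define HASH_SIZE	1024	/* power of 2, like FTRACE_PROFILE_HASH_SIZE */

struct profile {			/* one counter per function actually hit */
	struct profile *hash_next;	/* bucket chaining (the kernel uses an hlist) */
	unsigned long ip;
	unsigned long counter;
};

struct profile_page {			/* one page worth of records */
	struct profile_page *next;
	unsigned long index;		/* first unused slot in records[] */
	struct profile records[];
};

#define RECORDS_PER_PAGE \
	((PAGE_SIZE - sizeof(struct profile_page)) / sizeof(struct profile))

static struct profile_page *page_list, *cur_page;
static struct profile *hash[HASH_SIZE];

static struct profile_page *alloc_profile_page(void)
{
	return calloc(1, PAGE_SIZE);	/* stand-in for get_zeroed_page() */
}

static struct profile *find_profile(unsigned long ip)
{
	struct profile *p;

	for (p = hash[ip & (HASH_SIZE - 1)]; p; p = p->hash_next)
		if (p->ip == ip)
			return p;
	return NULL;
}

/* Count a hit for ip, creating a record (and a new page) only when needed. */
static void profile_hit(unsigned long ip)
{
	struct profile *p = find_profile(ip);

	if (!p) {
		if (cur_page->index == RECORDS_PER_PAGE) {
			if (!cur_page->next)
				cur_page->next = alloc_profile_page();
			if (!cur_page->next)
				return;		/* drop the hit rather than fail */
			cur_page = cur_page->next;
		}
		p = &cur_page->records[cur_page->index++];
		p->ip = ip;
		p->hash_next = hash[ip & (HASH_SIZE - 1)];
		hash[ip & (HASH_SIZE - 1)] = p;
	}
	p->counter++;
}

int main(void)
{
	page_list = cur_page = alloc_profile_page();
	if (!page_list)
		return 1;

	/* only functions that are actually called ever consume a record */
	profile_hit(0xc0100010UL);
	profile_hit(0xc0100010UL);
	profile_hit(0xc0102340UL);

	printf("records per page: %zu\n", RECORDS_PER_PAGE);
	printf("first function hit %lu times\n",
	       find_profile(0xc0100010UL)->counter);
	return 0;
}

Only functions that are actually hit ever consume a record, which is where
the memory saving over per-function counters comes from.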
This also removes the profiler's dependency on dynamic ftrace, and removes
the overhead the profiler imposed on dynamic ftrace simply by being enabled.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- | kernel/trace/ftrace.c | 440
1 file changed, 260 insertions, 180 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 11f364c776d5..24dac448cdc9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -241,87 +241,48 @@ static void ftrace_update_pid_func(void)
241 | #endif | 241 | #endif |
242 | } | 242 | } |
243 | 243 | ||
244 | /* set when tracing only a pid */ | 244 | #ifdef CONFIG_FUNCTION_PROFILER |
245 | struct pid *ftrace_pid_trace; | 245 | struct ftrace_profile { |
246 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 246 | struct hlist_node node; |
247 | 247 | unsigned long ip; | |
248 | #ifdef CONFIG_DYNAMIC_FTRACE | 248 | unsigned long counter; |
249 | |||
250 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | ||
251 | # error Dynamic ftrace depends on MCOUNT_RECORD | ||
252 | #endif | ||
253 | |||
254 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; | ||
255 | |||
256 | struct ftrace_func_probe { | ||
257 | struct hlist_node node; | ||
258 | struct ftrace_probe_ops *ops; | ||
259 | unsigned long flags; | ||
260 | unsigned long ip; | ||
261 | void *data; | ||
262 | struct rcu_head rcu; | ||
263 | }; | 249 | }; |
264 | 250 | ||
265 | enum { | 251 | struct ftrace_profile_page { |
266 | FTRACE_ENABLE_CALLS = (1 << 0), | 252 | struct ftrace_profile_page *next; |
267 | FTRACE_DISABLE_CALLS = (1 << 1), | 253 | unsigned long index; |
268 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | 254 | struct ftrace_profile records[]; |
269 | FTRACE_ENABLE_MCOUNT = (1 << 3), | ||
270 | FTRACE_DISABLE_MCOUNT = (1 << 4), | ||
271 | FTRACE_START_FUNC_RET = (1 << 5), | ||
272 | FTRACE_STOP_FUNC_RET = (1 << 6), | ||
273 | }; | 255 | }; |
274 | 256 | ||
275 | static int ftrace_filtered; | 257 | #define PROFILE_RECORDS_SIZE \ |
258 | (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) | ||
276 | 259 | ||
277 | static struct dyn_ftrace *ftrace_new_addrs; | 260 | #define PROFILES_PER_PAGE \ |
261 | (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) | ||
278 | 262 | ||
279 | static DEFINE_MUTEX(ftrace_regex_lock); | 263 | /* TODO: make these percpu, to prevent cache line bouncing */ |
280 | 264 | static struct ftrace_profile_page *profile_pages_start; | |
281 | struct ftrace_page { | 265 | static struct ftrace_profile_page *profile_pages; |
282 | struct ftrace_page *next; | ||
283 | int index; | ||
284 | struct dyn_ftrace records[]; | ||
285 | }; | ||
286 | 266 | ||
287 | #define ENTRIES_PER_PAGE \ | ||
288 | ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace)) | ||
289 | |||
290 | /* estimate from running different kernels */ | ||
291 | #define NR_TO_INIT 10000 | ||
292 | |||
293 | static struct ftrace_page *ftrace_pages_start; | ||
294 | static struct ftrace_page *ftrace_pages; | ||
295 | |||
296 | static struct dyn_ftrace *ftrace_free_records; | ||
297 | |||
298 | /* | ||
299 | * This is a double for. Do not use 'break' to break out of the loop, | ||
300 | * you must use a goto. | ||
301 | */ | ||
302 | #define do_for_each_ftrace_rec(pg, rec) \ | ||
303 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ | ||
304 | int _____i; \ | ||
305 | for (_____i = 0; _____i < pg->index; _____i++) { \ | ||
306 | rec = &pg->records[_____i]; | ||
307 | |||
308 | #define while_for_each_ftrace_rec() \ | ||
309 | } \ | ||
310 | } | ||
311 | |||
312 | #ifdef CONFIG_FUNCTION_PROFILER | ||
313 | static struct hlist_head *ftrace_profile_hash; | 267 | static struct hlist_head *ftrace_profile_hash; |
314 | static int ftrace_profile_bits; | 268 | static int ftrace_profile_bits; |
315 | static int ftrace_profile_enabled; | 269 | static int ftrace_profile_enabled; |
316 | static DEFINE_MUTEX(ftrace_profile_lock); | 270 | static DEFINE_MUTEX(ftrace_profile_lock); |
317 | 271 | ||
272 | static DEFINE_PER_CPU(atomic_t, ftrace_profile_disable); | ||
273 | |||
274 | #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */ | ||
275 | |||
276 | static raw_spinlock_t ftrace_profile_rec_lock = | ||
277 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
278 | |||
318 | static void * | 279 | static void * |
319 | function_stat_next(void *v, int idx) | 280 | function_stat_next(void *v, int idx) |
320 | { | 281 | { |
321 | struct dyn_ftrace *rec = v; | 282 | struct ftrace_profile *rec = v; |
322 | struct ftrace_page *pg; | 283 | struct ftrace_profile_page *pg; |
323 | 284 | ||
324 | pg = (struct ftrace_page *)((unsigned long)rec & PAGE_MASK); | 285 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); |
325 | 286 | ||
326 | again: | 287 | again: |
327 | rec++; | 288 | rec++; |
@@ -330,27 +291,22 @@ function_stat_next(void *v, int idx)
330 | if (!pg) | 291 | if (!pg) |
331 | return NULL; | 292 | return NULL; |
332 | rec = &pg->records[0]; | 293 | rec = &pg->records[0]; |
294 | if (!rec->counter) | ||
295 | goto again; | ||
333 | } | 296 | } |
334 | 297 | ||
335 | if (rec->flags & FTRACE_FL_FREE || | ||
336 | rec->flags & FTRACE_FL_FAILED || | ||
337 | !(rec->flags & FTRACE_FL_CONVERTED) || | ||
338 | /* ignore non hit functions */ | ||
339 | !rec->counter) | ||
340 | goto again; | ||
341 | |||
342 | return rec; | 298 | return rec; |
343 | } | 299 | } |
344 | 300 | ||
345 | static void *function_stat_start(struct tracer_stat *trace) | 301 | static void *function_stat_start(struct tracer_stat *trace) |
346 | { | 302 | { |
347 | return function_stat_next(&ftrace_pages_start->records[0], 0); | 303 | return function_stat_next(&profile_pages_start->records[0], 0); |
348 | } | 304 | } |
349 | 305 | ||
350 | static int function_stat_cmp(void *p1, void *p2) | 306 | static int function_stat_cmp(void *p1, void *p2) |
351 | { | 307 | { |
352 | struct dyn_ftrace *a = p1; | 308 | struct ftrace_profile *a = p1; |
353 | struct dyn_ftrace *b = p2; | 309 | struct ftrace_profile *b = p2; |
354 | 310 | ||
355 | if (a->counter < b->counter) | 311 | if (a->counter < b->counter) |
356 | return -1; | 312 | return -1; |
@@ -369,7 +325,7 @@ static int function_stat_headers(struct seq_file *m)
369 | 325 | ||
370 | static int function_stat_show(struct seq_file *m, void *v) | 326 | static int function_stat_show(struct seq_file *m, void *v) |
371 | { | 327 | { |
372 | struct dyn_ftrace *rec = v; | 328 | struct ftrace_profile *rec = v; |
373 | char str[KSYM_SYMBOL_LEN]; | 329 | char str[KSYM_SYMBOL_LEN]; |
374 | 330 | ||
375 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 331 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
@@ -387,115 +343,191 @@ static struct tracer_stat function_stats = {
387 | .stat_show = function_stat_show | 343 | .stat_show = function_stat_show |
388 | }; | 344 | }; |
389 | 345 | ||
390 | static void ftrace_profile_init(int nr_funcs) | 346 | static void ftrace_profile_reset(void) |
391 | { | 347 | { |
392 | unsigned long addr; | 348 | struct ftrace_profile_page *pg; |
393 | int order; | ||
394 | int size; | ||
395 | 349 | ||
396 | /* | 350 | pg = profile_pages = profile_pages_start; |
397 | * We are profiling all functions, lets make it 1/4th of the | ||
398 | * number of functions that are in core kernel. So we have to | ||
399 | * iterate 4 times. | ||
400 | */ | ||
401 | order = (sizeof(struct hlist_head) * nr_funcs) / 4; | ||
402 | order = get_order(order); | ||
403 | size = 1 << (PAGE_SHIFT + order); | ||
404 | |||
405 | pr_info("Allocating %d KB for profiler hash\n", size >> 10); | ||
406 | 351 | ||
407 | addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | 352 | while (pg) { |
408 | if (!addr) { | 353 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); |
409 | pr_warning("Could not allocate function profiler hash\n"); | 354 | pg->index = 0; |
410 | return; | 355 | pg = pg->next; |
411 | } | 356 | } |
412 | 357 | ||
413 | ftrace_profile_hash = (void *)addr; | 358 | memset(ftrace_profile_hash, 0, |
359 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); | ||
360 | } | ||
414 | 361 | ||
415 | /* | 362 | int ftrace_profile_pages_init(void) |
416 | * struct hlist_head should be a pointer of 4 or 8 bytes. | 363 | { |
417 | * And a simple bit manipulation can be done, but if for | 364 | struct ftrace_profile_page *pg; |
418 | * some reason struct hlist_head is not a mulitple of 2, | 365 | int i; |
419 | * then we play it safe, and simply count. This function | ||
420 | * is done once at boot up, so it is not that critical in | ||
421 | * performance. | ||
422 | */ | ||
423 | 366 | ||
424 | size--; | 367 | /* If we already allocated, do nothing */ |
425 | size /= sizeof(struct hlist_head); | 368 | if (profile_pages) |
369 | return 0; | ||
426 | 370 | ||
427 | for (; size; size >>= 1) | 371 | profile_pages = (void *)get_zeroed_page(GFP_KERNEL); |
428 | ftrace_profile_bits++; | 372 | if (!profile_pages) |
373 | return -ENOMEM; | ||
429 | 374 | ||
430 | pr_info("Function profiler has %d hash buckets\n", | 375 | pg = profile_pages_start = profile_pages; |
431 | 1 << ftrace_profile_bits); | ||
432 | 376 | ||
433 | return; | 377 | /* allocate 10 more pages to start */ |
378 | for (i = 0; i < 10; i++) { | ||
379 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); | ||
380 | /* | ||
381 | * We only care about allocating profile_pages, if | ||
382 | * we failed to allocate here, hopefully we will allocate | ||
383 | * later. | ||
384 | */ | ||
385 | if (!pg->next) | ||
386 | break; | ||
387 | pg = pg->next; | ||
388 | } | ||
389 | |||
390 | return 0; | ||
434 | } | 391 | } |
435 | 392 | ||
436 | static ssize_t | 393 | static int ftrace_profile_init(void) |
437 | ftrace_profile_read(struct file *filp, char __user *ubuf, | ||
438 | size_t cnt, loff_t *ppos) | ||
439 | { | 394 | { |
440 | char buf[64]; | 395 | int size; |
441 | int r; | ||
442 | 396 | ||
443 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); | 397 | if (ftrace_profile_hash) { |
444 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 398 | /* If the profile is already created, simply reset it */ |
445 | } | 399 | ftrace_profile_reset(); |
400 | return 0; | ||
401 | } | ||
446 | 402 | ||
447 | static void ftrace_profile_reset(void) | 403 | /* |
448 | { | 404 | * We are profiling all functions, but usually only a few thousand |
449 | struct dyn_ftrace *rec; | 405 | * functions are hit. We'll make a hash of 1024 items. |
450 | struct ftrace_page *pg; | 406 | */ |
407 | size = FTRACE_PROFILE_HASH_SIZE; | ||
451 | 408 | ||
452 | do_for_each_ftrace_rec(pg, rec) { | 409 | ftrace_profile_hash = |
453 | rec->counter = 0; | 410 | kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); |
454 | } while_for_each_ftrace_rec(); | 411 | |
412 | if (!ftrace_profile_hash) | ||
413 | return -ENOMEM; | ||
414 | |||
415 | size--; | ||
416 | |||
417 | for (; size; size >>= 1) | ||
418 | ftrace_profile_bits++; | ||
419 | |||
420 | /* Preallocate a few pages */ | ||
421 | if (ftrace_profile_pages_init() < 0) { | ||
422 | kfree(ftrace_profile_hash); | ||
423 | ftrace_profile_hash = NULL; | ||
424 | return -ENOMEM; | ||
425 | } | ||
426 | |||
427 | return 0; | ||
455 | } | 428 | } |
456 | 429 | ||
457 | static struct dyn_ftrace *ftrace_find_profiled_func(unsigned long ip) | 430 | /* interrupts must be disabled */ |
431 | static struct ftrace_profile *ftrace_find_profiled_func(unsigned long ip) | ||
458 | { | 432 | { |
459 | struct dyn_ftrace *rec; | 433 | struct ftrace_profile *rec; |
460 | struct hlist_head *hhd; | 434 | struct hlist_head *hhd; |
461 | struct hlist_node *n; | 435 | struct hlist_node *n; |
462 | unsigned long flags; | ||
463 | unsigned long key; | 436 | unsigned long key; |
464 | 437 | ||
465 | if (!ftrace_profile_hash) | ||
466 | return NULL; | ||
467 | |||
468 | key = hash_long(ip, ftrace_profile_bits); | 438 | key = hash_long(ip, ftrace_profile_bits); |
469 | hhd = &ftrace_profile_hash[key]; | 439 | hhd = &ftrace_profile_hash[key]; |
470 | 440 | ||
471 | if (hlist_empty(hhd)) | 441 | if (hlist_empty(hhd)) |
472 | return NULL; | 442 | return NULL; |
473 | 443 | ||
474 | local_irq_save(flags); | ||
475 | hlist_for_each_entry_rcu(rec, n, hhd, node) { | 444 | hlist_for_each_entry_rcu(rec, n, hhd, node) { |
476 | if (rec->ip == ip) | 445 | if (rec->ip == ip) |
477 | goto out; | 446 | return rec; |
447 | } | ||
448 | |||
449 | return NULL; | ||
450 | } | ||
451 | |||
452 | static void ftrace_add_profile(struct ftrace_profile *rec) | ||
453 | { | ||
454 | unsigned long key; | ||
455 | |||
456 | key = hash_long(rec->ip, ftrace_profile_bits); | ||
457 | hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]); | ||
458 | } | ||
459 | |||
460 | /* Interrupts must be disabled calling this */ | ||
461 | static struct ftrace_profile * | ||
462 | ftrace_profile_alloc(unsigned long ip, bool alloc_safe) | ||
463 | { | ||
464 | struct ftrace_profile *rec = NULL; | ||
465 | |||
466 | /* prevent recursion */ | ||
467 | if (atomic_inc_return(&__get_cpu_var(ftrace_profile_disable)) != 1) | ||
468 | goto out; | ||
469 | |||
470 | __raw_spin_lock(&ftrace_profile_rec_lock); | ||
471 | |||
472 | /* Try to always keep another page available */ | ||
473 | if (!profile_pages->next && alloc_safe) | ||
474 | profile_pages->next = (void *)get_zeroed_page(GFP_ATOMIC); | ||
475 | |||
476 | /* | ||
477 | * Try to find the function again since another | ||
478 | * task on another CPU could have added it | ||
479 | */ | ||
480 | rec = ftrace_find_profiled_func(ip); | ||
481 | if (rec) | ||
482 | goto out_unlock; | ||
483 | |||
484 | if (profile_pages->index == PROFILES_PER_PAGE) { | ||
485 | if (!profile_pages->next) | ||
486 | goto out_unlock; | ||
487 | profile_pages = profile_pages->next; | ||
478 | } | 488 | } |
479 | rec = NULL; | 489 | |
490 | rec = &profile_pages->records[profile_pages->index++]; | ||
491 | rec->ip = ip; | ||
492 | ftrace_add_profile(rec); | ||
493 | |||
494 | out_unlock: | ||
495 | __raw_spin_unlock(&ftrace_profile_rec_lock); | ||
480 | out: | 496 | out: |
481 | local_irq_restore(flags); | 497 | atomic_dec(&__get_cpu_var(ftrace_profile_disable)); |
482 | 498 | ||
483 | return rec; | 499 | return rec; |
484 | } | 500 | } |
485 | 501 | ||
502 | /* | ||
503 | * If we are not in an interrupt, or softirq and | ||
504 | * and interrupts are disabled and preemption is not enabled | ||
505 | * (not in a spinlock) then it should be safe to allocate memory. | ||
506 | */ | ||
507 | static bool ftrace_safe_to_allocate(void) | ||
508 | { | ||
509 | return !in_interrupt() && irqs_disabled() && !preempt_count(); | ||
510 | } | ||
511 | |||
486 | static void | 512 | static void |
487 | function_profile_call(unsigned long ip, unsigned long parent_ip) | 513 | function_profile_call(unsigned long ip, unsigned long parent_ip) |
488 | { | 514 | { |
489 | struct dyn_ftrace *rec; | 515 | struct ftrace_profile *rec; |
490 | unsigned long flags; | 516 | unsigned long flags; |
517 | bool alloc_safe; | ||
491 | 518 | ||
492 | if (!ftrace_profile_enabled) | 519 | if (!ftrace_profile_enabled) |
493 | return; | 520 | return; |
494 | 521 | ||
522 | alloc_safe = ftrace_safe_to_allocate(); | ||
523 | |||
495 | local_irq_save(flags); | 524 | local_irq_save(flags); |
496 | rec = ftrace_find_profiled_func(ip); | 525 | rec = ftrace_find_profiled_func(ip); |
497 | if (!rec) | 526 | if (!rec) { |
498 | goto out; | 527 | rec = ftrace_profile_alloc(ip, alloc_safe); |
528 | if (!rec) | ||
529 | goto out; | ||
530 | } | ||
499 | 531 | ||
500 | rec->counter++; | 532 | rec->counter++; |
501 | out: | 533 | out: |
@@ -515,11 +547,6 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
515 | char buf[64]; | 547 | char buf[64]; |
516 | int ret; | 548 | int ret; |
517 | 549 | ||
518 | if (!ftrace_profile_hash) { | ||
519 | pr_info("Can not enable hash due to earlier problems\n"); | ||
520 | return -ENODEV; | ||
521 | } | ||
522 | |||
523 | if (cnt >= sizeof(buf)) | 550 | if (cnt >= sizeof(buf)) |
524 | return -EINVAL; | 551 | return -EINVAL; |
525 | 552 | ||
@@ -537,7 +564,12 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
537 | mutex_lock(&ftrace_profile_lock); | 564 | mutex_lock(&ftrace_profile_lock); |
538 | if (ftrace_profile_enabled ^ val) { | 565 | if (ftrace_profile_enabled ^ val) { |
539 | if (val) { | 566 | if (val) { |
540 | ftrace_profile_reset(); | 567 | ret = ftrace_profile_init(); |
568 | if (ret < 0) { | ||
569 | cnt = ret; | ||
570 | goto out; | ||
571 | } | ||
572 | |||
541 | register_ftrace_function(&ftrace_profile_ops); | 573 | register_ftrace_function(&ftrace_profile_ops); |
542 | ftrace_profile_enabled = 1; | 574 | ftrace_profile_enabled = 1; |
543 | } else { | 575 | } else { |
@@ -545,6 +577,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
545 | unregister_ftrace_function(&ftrace_profile_ops); | 577 | unregister_ftrace_function(&ftrace_profile_ops); |
546 | } | 578 | } |
547 | } | 579 | } |
580 | out: | ||
548 | mutex_unlock(&ftrace_profile_lock); | 581 | mutex_unlock(&ftrace_profile_lock); |
549 | 582 | ||
550 | filp->f_pos += cnt; | 583 | filp->f_pos += cnt; |
@@ -552,6 +585,17 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
552 | return cnt; | 585 | return cnt; |
553 | } | 586 | } |
554 | 587 | ||
588 | static ssize_t | ||
589 | ftrace_profile_read(struct file *filp, char __user *ubuf, | ||
590 | size_t cnt, loff_t *ppos) | ||
591 | { | ||
592 | char buf[64]; | ||
593 | int r; | ||
594 | |||
595 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); | ||
596 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
597 | } | ||
598 | |||
555 | static const struct file_operations ftrace_profile_fops = { | 599 | static const struct file_operations ftrace_profile_fops = { |
556 | .open = tracing_open_generic, | 600 | .open = tracing_open_generic, |
557 | .read = ftrace_profile_read, | 601 | .read = ftrace_profile_read, |
@@ -577,39 +621,80 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
577 | "'function_profile_enabled' entry\n"); | 621 | "'function_profile_enabled' entry\n"); |
578 | } | 622 | } |
579 | 623 | ||
580 | static void ftrace_add_profile(struct dyn_ftrace *rec) | ||
581 | { | ||
582 | unsigned long key; | ||
583 | |||
584 | if (!ftrace_profile_hash) | ||
585 | return; | ||
586 | |||
587 | key = hash_long(rec->ip, ftrace_profile_bits); | ||
588 | hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]); | ||
589 | } | ||
590 | |||
591 | static void ftrace_profile_release(struct dyn_ftrace *rec) | ||
592 | { | ||
593 | mutex_lock(&ftrace_profile_lock); | ||
594 | hlist_del(&rec->node); | ||
595 | mutex_unlock(&ftrace_profile_lock); | ||
596 | } | ||
597 | |||
598 | #else /* CONFIG_FUNCTION_PROFILER */ | 624 | #else /* CONFIG_FUNCTION_PROFILER */ |
599 | static void ftrace_profile_init(int nr_funcs) | ||
600 | { | ||
601 | } | ||
602 | static void ftrace_add_profile(struct dyn_ftrace *rec) | ||
603 | { | ||
604 | } | ||
605 | static void ftrace_profile_debugfs(struct dentry *d_tracer) | 625 | static void ftrace_profile_debugfs(struct dentry *d_tracer) |
606 | { | 626 | { |
607 | } | 627 | } |
608 | static void ftrace_profile_release(struct dyn_ftrace *rec) | ||
609 | { | ||
610 | } | ||
611 | #endif /* CONFIG_FUNCTION_PROFILER */ | 628 | #endif /* CONFIG_FUNCTION_PROFILER */ |
612 | 629 | ||
630 | /* set when tracing only a pid */ | ||
631 | struct pid *ftrace_pid_trace; | ||
632 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
633 | |||
634 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
635 | |||
636 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | ||
637 | # error Dynamic ftrace depends on MCOUNT_RECORD | ||
638 | #endif | ||
639 | |||
640 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; | ||
641 | |||
642 | struct ftrace_func_probe { | ||
643 | struct hlist_node node; | ||
644 | struct ftrace_probe_ops *ops; | ||
645 | unsigned long flags; | ||
646 | unsigned long ip; | ||
647 | void *data; | ||
648 | struct rcu_head rcu; | ||
649 | }; | ||
650 | |||
651 | enum { | ||
652 | FTRACE_ENABLE_CALLS = (1 << 0), | ||
653 | FTRACE_DISABLE_CALLS = (1 << 1), | ||
654 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | ||
655 | FTRACE_ENABLE_MCOUNT = (1 << 3), | ||
656 | FTRACE_DISABLE_MCOUNT = (1 << 4), | ||
657 | FTRACE_START_FUNC_RET = (1 << 5), | ||
658 | FTRACE_STOP_FUNC_RET = (1 << 6), | ||
659 | }; | ||
660 | |||
661 | static int ftrace_filtered; | ||
662 | |||
663 | static struct dyn_ftrace *ftrace_new_addrs; | ||
664 | |||
665 | static DEFINE_MUTEX(ftrace_regex_lock); | ||
666 | |||
667 | struct ftrace_page { | ||
668 | struct ftrace_page *next; | ||
669 | int index; | ||
670 | struct dyn_ftrace records[]; | ||
671 | }; | ||
672 | |||
673 | #define ENTRIES_PER_PAGE \ | ||
674 | ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace)) | ||
675 | |||
676 | /* estimate from running different kernels */ | ||
677 | #define NR_TO_INIT 10000 | ||
678 | |||
679 | static struct ftrace_page *ftrace_pages_start; | ||
680 | static struct ftrace_page *ftrace_pages; | ||
681 | |||
682 | static struct dyn_ftrace *ftrace_free_records; | ||
683 | |||
684 | /* | ||
685 | * This is a double for. Do not use 'break' to break out of the loop, | ||
686 | * you must use a goto. | ||
687 | */ | ||
688 | #define do_for_each_ftrace_rec(pg, rec) \ | ||
689 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ | ||
690 | int _____i; \ | ||
691 | for (_____i = 0; _____i < pg->index; _____i++) { \ | ||
692 | rec = &pg->records[_____i]; | ||
693 | |||
694 | #define while_for_each_ftrace_rec() \ | ||
695 | } \ | ||
696 | } | ||
697 | |||
613 | #ifdef CONFIG_KPROBES | 698 | #ifdef CONFIG_KPROBES |
614 | 699 | ||
615 | static int frozen_record_count; | 700 | static int frozen_record_count; |
@@ -660,10 +745,8 @@ void ftrace_release(void *start, unsigned long size)
660 | mutex_lock(&ftrace_lock); | 745 | mutex_lock(&ftrace_lock); |
661 | do_for_each_ftrace_rec(pg, rec) { | 746 | do_for_each_ftrace_rec(pg, rec) { |
662 | if ((rec->ip >= s) && (rec->ip < e) && | 747 | if ((rec->ip >= s) && (rec->ip < e) && |
663 | !(rec->flags & FTRACE_FL_FREE)) { | 748 | !(rec->flags & FTRACE_FL_FREE)) |
664 | ftrace_free_rec(rec); | 749 | ftrace_free_rec(rec); |
665 | ftrace_profile_release(rec); | ||
666 | } | ||
667 | } while_for_each_ftrace_rec(); | 750 | } while_for_each_ftrace_rec(); |
668 | mutex_unlock(&ftrace_lock); | 751 | mutex_unlock(&ftrace_lock); |
669 | } | 752 | } |
@@ -717,8 +800,6 @@ ftrace_record_ip(unsigned long ip)
717 | rec->newlist = ftrace_new_addrs; | 800 | rec->newlist = ftrace_new_addrs; |
718 | ftrace_new_addrs = rec; | 801 | ftrace_new_addrs = rec; |
719 | 802 | ||
720 | ftrace_add_profile(rec); | ||
721 | |||
722 | return rec; | 803 | return rec; |
723 | } | 804 | } |
724 | 805 | ||
@@ -2462,8 +2543,6 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2462 | "'set_graph_function' entry\n"); | 2543 | "'set_graph_function' entry\n"); |
2463 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2544 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2464 | 2545 | ||
2465 | ftrace_profile_debugfs(d_tracer); | ||
2466 | |||
2467 | return 0; | 2546 | return 0; |
2468 | } | 2547 | } |
2469 | 2548 | ||
@@ -2532,8 +2611,6 @@ void __init ftrace_init(void)
2532 | if (ret) | 2611 | if (ret) |
2533 | goto failed; | 2612 | goto failed; |
2534 | 2613 | ||
2535 | ftrace_profile_init(count); | ||
2536 | |||
2537 | last_ftrace_enabled = ftrace_enabled = 1; | 2614 | last_ftrace_enabled = ftrace_enabled = 1; |
2538 | 2615 | ||
2539 | ret = ftrace_convert_nops(NULL, | 2616 | ret = ftrace_convert_nops(NULL, |
@@ -2734,6 +2811,9 @@ static __init int ftrace_init_debugfs(void)
2734 | if (!entry) | 2811 | if (!entry) |
2735 | pr_warning("Could not create debugfs " | 2812 | pr_warning("Could not create debugfs " |
2736 | "'set_ftrace_pid' entry\n"); | 2813 | "'set_ftrace_pid' entry\n"); |
2814 | |||
2815 | ftrace_profile_debugfs(d_tracer); | ||
2816 | |||
2737 | return 0; | 2817 | return 0; |
2738 | } | 2818 | } |
2739 | fs_initcall(ftrace_init_debugfs); | 2819 | fs_initcall(ftrace_init_debugfs); |