author		Steven Rostedt <rostedt@goodmis.org>	2008-05-12 15:20:59 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 15:49:54 -0400
commit		a98a3c3fde3ae7614f19758a043691b6f59dac53 (patch)
tree		7a04e3e7c1748ee5b258e5176e10e1fe13468001 /kernel/trace
parent		05bd68c514579e007b46e4fa0461b78416a3f4c2 (diff)
ftrace: trace_entries to dynamically change trace buffer size
This patch adds /debug/tracing/trace_entries, which lets users see
as well as modify the number of trace entries the buffers hold.

The number of entries changes only in multiples of ENTRIES_PER_PAGE,
which is the number of entries, given their size, that fit in one
page. The user does not need to write an exact size; the value is
rounded up to the next increment.

Writing 0 returns -EINVAL.

To avoid race conditions, the buffer size can only be modified while
tracing is completely disabled (current_tracer == none). An info
message is printed if a user tries to modify the buffer size while
the current tracer is set to anything else.
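For illustration, a minimal userspace sketch of the intended usage
(a hypothetical example, not part of this patch; it assumes debugfs
is mounted at /debug, as the path above implies):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t r;
	int fd;

	/* hypothetical usage; path assumes debugfs mounted at /debug */
	fd = open("/debug/tracing/trace_entries", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* request 70000 entries; the kernel rounds this up to a
	   whole multiple of ENTRIES_PER_PAGE */
	if (write(fd, "70000", 5) < 0)
		perror("write"); /* errno is EBUSY while a tracer is active */

	/* read back the actual, rounded size */
	lseek(fd, 0, SEEK_SET);
	r = read(fd, buf, sizeof(buf) - 1);
	if (r > 0) {
		buf[r] = '\0';
		printf("trace_entries: %s", buf);
	}

	close(fd);
	return 0;
}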
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.c	145
1 file changed, 137 insertions(+), 8 deletions(-)
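As an aside, the rounding described in the commit message amounts to
rounding the requested count up to the next whole page of entries. A
standalone sketch of that arithmetic (the ENTRIES_PER_PAGE value here
is illustrative only; the real constant is computed from the entry
size and PAGE_SIZE):

#include <stdio.h>

/* illustrative value only; in the kernel this constant depends on
   the size of a trace entry and on PAGE_SIZE */
#define ENTRIES_PER_PAGE 73

/* round a requested entry count up to a whole number of pages,
   mirroring the grow/shrink loops in tracing_entries_write() */
static unsigned long round_entries(unsigned long val)
{
	return ((val + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE)
		* ENTRIES_PER_PAGE;
}

int main(void)
{
	/* e.g. asking for 100 entries actually yields 146 (two pages) */
	printf("%lu\n", round_entries(100));
	return 0;
}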
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3b4eaf36ed5d..4723e012151d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -35,6 +35,15 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly =
+{
+	.name = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
 static int tracing_disabled = 1;
 
 long
@@ -2364,6 +2373,70 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	return read;
 }
 
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	char buf[64];
+	int r;
+
+	r = sprintf(buf, "%lu\n", tr->entries);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+		      size_t cnt, loff_t *ppos)
+{
+	unsigned long val;
+	char buf[64];
+
+	if (cnt > 63)
+		cnt = 63;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	val = simple_strtoul(buf, NULL, 10);
+
+	/* must have at least 1 entry */
+	if (!val)
+		return -EINVAL;
+
+	mutex_lock(&trace_types_lock);
+
+	if (current_trace != &no_tracer) {
+		cnt = -EBUSY;
+		pr_info("ftrace: set current_tracer to none"
+			" before modifying buffer size\n");
+		goto out;
+	}
+
+	if (val > global_trace.entries) {
+		while (global_trace.entries < val) {
+			if (trace_alloc_page()) {
+				cnt = -ENOMEM;
+				goto out;
+			}
+		}
+	} else {
+		/* include the number of entries in val (inc of page entries) */
+		while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+			trace_free_page();
+	}
+
+	filp->f_pos += cnt;
+
+ out:
+	max_tr.entries = global_trace.entries;
+	mutex_unlock(&trace_types_lock);
+
+	return cnt;
+}
+
 static struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
@@ -2389,6 +2462,12 @@ static struct file_operations tracing_pipe_fops = {
 	.release = tracing_release_pipe,
 };
 
+static struct file_operations tracing_entries_fops = {
+	.open = tracing_open_generic,
+	.read = tracing_entries_read,
+	.write = tracing_entries_write,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static ssize_t
@@ -2500,6 +2579,12 @@ static __init void tracer_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'tracing_threash' entry\n");
 
+	entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+				    &global_trace, &tracing_entries_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'trace_entries' entry\n");
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 				   &ftrace_update_tot_cnt,
@@ -2510,12 +2595,6 @@ static __init void tracer_init_debugfs(void)
 #endif
 }
 
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
-	.name = "none",
-};
-
 static int trace_alloc_page(void)
 {
 	struct trace_array_cpu *data;
@@ -2552,7 +2631,6 @@ static int trace_alloc_page(void)
 	/* Now that we successfully allocate a page per CPU, add them */
 	for_each_possible_cpu(i) {
 		data = global_trace.data[i];
-		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
@@ -2560,7 +2638,6 @@
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
-		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
@@ -2579,6 +2656,55 @@ static int trace_alloc_page(void)
 	return -ENOMEM;
 }
 
+static int trace_free_page(void)
+{
+	struct trace_array_cpu *data;
+	struct page *page;
+	struct list_head *p;
+	int i;
+	int ret = 0;
+
+	/* free one page from each buffer */
+	for_each_possible_cpu(i) {
+		data = global_trace.data[i];
+		p = data->trace_pages.next;
+		if (p == &data->trace_pages) {
+			/* should never happen */
+			WARN_ON(1);
+			tracing_disabled = 1;
+			ret = -1;
+			break;
+		}
+		page = list_entry(p, struct page, lru);
+		ClearPageLRU(page);
+		list_del(&page->lru);
+		__free_page(page);
+
+		tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+		data = max_tr.data[i];
+		p = data->trace_pages.next;
+		if (p == &data->trace_pages) {
+			/* should never happen */
+			WARN_ON(1);
+			tracing_disabled = 1;
+			ret = -1;
+			break;
+		}
+		page = list_entry(p, struct page, lru);
+		ClearPageLRU(page);
+		list_del(&page->lru);
+		__free_page(page);
+
+		tracing_reset(data);
+#endif
+	}
+	global_trace.entries -= ENTRIES_PER_PAGE;
+
+	return ret;
+}
+
 __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
@@ -2609,6 +2735,9 @@ __init static int tracer_alloc_buffers(void)
 		/* use the LRU flag to differentiate the two buffers */
 		ClearPageLRU(page);
 
+		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+		max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
 		/* Only allocate if we are actually using the max trace */
 #ifdef CONFIG_TRACER_MAX_TRACE
 		array = (void *)__get_free_page(GFP_KERNEL);