Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ring_buffer.c            118
-rw-r--r--  kernel/trace/trace.c                  288
-rw-r--r--  kernel/trace/trace.h                    1
-rw-r--r--  kernel/trace/trace_events_stage_3.h     4
4 files changed, 362 insertions(+), 49 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a8c275c01e83..f2a163db52f9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -61,6 +61,8 @@ enum {
 
 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
+
 /**
  * tracing_on - enable all tracing buffers
  *
@@ -132,7 +134,7 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
-#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
+#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	28
 
@@ -234,6 +236,18 @@ static void rb_init_page(struct buffer_data_page *bpage)
 	local_set(&bpage->commit, 0);
 }
 
+/**
+ * ring_buffer_page_len - the size of data on the page.
+ * @page: The page to read
+ *
+ * Returns the amount of data on the page, including buffer page header.
+ */
+size_t ring_buffer_page_len(void *page)
+{
+	return local_read(&((struct buffer_data_page *)page)->commit)
+		+ BUF_PAGE_HDR_SIZE;
+}
+
 /*
  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  * this issue out.
@@ -254,7 +268,7 @@ static inline int test_time_stamp(u64 delta)
 	return 0;
 }
 
-#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
+#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 
 /*
  * head_page == tail_page && head == tail then buffer is empty.
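
The three hunks above all revolve around the reader-page header layout. As an illustration only, here is a small userspace sketch of the same arithmetic; the struct below is a simplified stand-in for the kernel's buffer_data_page, and its field types are assumptions, not the kernel's exact definitions:

/* Userspace sketch (not kernel code) of the page-header arithmetic. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct buffer_data_page {
	uint64_t	time_stamp;	/* page header: write timestamp */
	uint64_t	commit;		/* page header: committed bytes */
	unsigned char	data[];		/* event records start here */
};

#define BUF_PAGE_HDR_SIZE	offsetof(struct buffer_data_page, data)
#define BUF_PAGE_SIZE		(PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* mirrors ring_buffer_page_len(): committed payload plus header */
static size_t page_len(const struct buffer_data_page *bpage)
{
	return bpage->commit + BUF_PAGE_HDR_SIZE;
}

int main(void)
{
	struct buffer_data_page bpage = { .commit = 100 };

	printf("header %zu, payload max %lu, page len %zu\n",
	       BUF_PAGE_HDR_SIZE, BUF_PAGE_SIZE, page_len(&bpage));
	return 0;
}
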
@@ -2378,8 +2392,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
 {
-	unsigned long addr;
 	struct buffer_data_page *bpage;
+	unsigned long addr;
 
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
@@ -2387,6 +2401,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
 
 	bpage = (void *)addr;
 
+	rb_init_page(bpage);
+
 	return bpage;
 }
 
@@ -2406,6 +2422,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * ring_buffer_read_page - extract a page from the ring buffer
  * @buffer: buffer to extract from
  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
+ * @len: amount to extract
  * @cpu: the cpu of the buffer to extract
  * @full: should the extraction only happen when the page is full.
  *
@@ -2418,7 +2435,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  *	rpage = ring_buffer_alloc_read_page(buffer);
  *	if (!rpage)
  *		return error;
- *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
+ *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
  *	if (ret >= 0)
  *		process_page(rpage, ret);
  *
@@ -2435,70 +2452,103 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  *  <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct ring_buffer *buffer,
-			  void **data_page, int cpu, int full)
+			  void **data_page, size_t len, int cpu, int full)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
+	struct buffer_page *reader;
 	unsigned long flags;
+	unsigned int commit;
 	unsigned int read;
 	int ret = -1;
 
+	/*
+	 * If len is not big enough to hold the page header, then
+	 * we can not copy anything.
+	 */
+	if (len <= BUF_PAGE_HDR_SIZE)
+		return -1;
+
+	len -= BUF_PAGE_HDR_SIZE;
+
 	if (!data_page)
-		return 0;
+		return -1;
 
 	bpage = *data_page;
 	if (!bpage)
-		return 0;
+		return -1;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	/*
-	 * rb_buffer_peek will get the next ring buffer if
-	 * the current reader page is empty.
-	 */
-	event = rb_buffer_peek(buffer, cpu, NULL);
-	if (!event)
+	reader = rb_get_reader_page(cpu_buffer);
+	if (!reader)
 		goto out;
 
-	/* check for data */
-	if (!local_read(&cpu_buffer->reader_page->page->commit))
-		goto out;
+	event = rb_reader_event(cpu_buffer);
+
+	read = reader->read;
+	commit = rb_page_commit(reader);
 
-	read = cpu_buffer->reader_page->read;
 	/*
-	 * If the writer is already off of the read page, then simply
-	 * switch the read page with the given page. Otherwise
-	 * we need to copy the data from the reader to the writer.
+	 * If this page has been partially read or
+	 * if len is not big enough to read the rest of the page or
+	 * a writer is still on the page, then
+	 * we must copy the data from the page to the buffer.
+	 * Otherwise, we can simply swap the page with the one passed in.
 	 */
-	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
-		unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
+	if (read || (len < (commit - read)) ||
+	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
+		unsigned int rpos = read;
+		unsigned int pos = 0;
+		unsigned int size;
 
 		if (full)
 			goto out;
-		/* The writer is still on the reader page, we must copy */
-		memcpy(bpage->data + read, rpage->data + read, commit - read);
 
-		/* consume what was read */
-		cpu_buffer->reader_page->read = commit;
+		if (len > (commit - read))
+			len = (commit - read);
+
+		size = rb_event_length(event);
+
+		if (len < size)
+			goto out;
+
+		/* Need to copy one event at a time */
+		do {
+			memcpy(bpage->data + pos, rpage->data + rpos, size);
+
+			len -= size;
+
+			rb_advance_reader(cpu_buffer);
+			rpos = reader->read;
+			pos += size;
+
+			event = rb_reader_event(cpu_buffer);
+			size = rb_event_length(event);
+		} while (len > size);
 
 		/* update bpage */
-		local_set(&bpage->commit, commit);
-		if (!read)
-			bpage->time_stamp = rpage->time_stamp;
+		local_set(&bpage->commit, pos);
+		bpage->time_stamp = rpage->time_stamp;
+
+		/* we copied everything to the beginning */
+		read = 0;
 	} else {
 		/* swap the pages */
 		rb_init_page(bpage);
-		bpage = cpu_buffer->reader_page->page;
-		cpu_buffer->reader_page->page = *data_page;
-		cpu_buffer->reader_page->read = 0;
+		bpage = reader->page;
+		reader->page = *data_page;
+		local_set(&reader->write, 0);
+		reader->read = 0;
 		*data_page = bpage;
+
+		/* update the entry counter */
+		rb_remove_entries(cpu_buffer, bpage, read);
 	}
 	ret = read;
 
-	/* update the entry counter */
-	rb_remove_entries(cpu_buffer, bpage, read);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
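
The net effect on callers: ring_buffer_read_page() now takes the read length (header included) and returns the offset of the first unconsumed byte, or a negative value if nothing was transferred. A hedged sketch of a kernel-side caller, adapted from the kernel-doc example in this patch; process_page() is a hypothetical consumer, not a real function:

	void *rpage = ring_buffer_alloc_read_page(buffer);
	int ret;

	if (!rpage)
		return -ENOMEM;

	/* len counts the whole page, header included; anything not
	 * larger than BUF_PAGE_HDR_SIZE is rejected with -1 */
	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		process_page(rpage, ret);

	ring_buffer_free_read_page(buffer, rpage);
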
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ea055aa21cd9..c8abbb0c8397 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -11,31 +11,30 @@
  * Copyright (C) 2004-2006 Ingo Molnar
  * Copyright (C) 2004 William Lee Irwin III
  */
+#include <linux/ring_buffer.h>
 #include <linux/utsrelease.h>
+#include <linux/stacktrace.h>
+#include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
+#include <linux/irqflags.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
 #include <linux/linkage.h>
 #include <linux/uaccess.h>
+#include <linux/kprobes.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/writeback.h>
-#include <linux/splice.h>
-
-#include <linux/stacktrace.h>
-#include <linux/ring_buffer.h>
-#include <linux/irqflags.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -624,7 +623,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -736,7 +735,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
-	if (!spin_trylock(&trace_cmdline_lock))
+	if (!__raw_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -754,7 +753,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	spin_unlock(&trace_cmdline_lock);
+	__raw_spin_unlock(&trace_cmdline_lock);
 }
 
 char *trace_find_cmdline(int pid)
@@ -3005,6 +3004,246 @@ static struct file_operations tracing_mark_fops = {
 	.write		= tracing_mark_write,
 };
 
+struct ftrace_buffer_info {
+	struct trace_array	*tr;
+	void			*spare;
+	int			cpu;
+	unsigned int		read;
+};
+
+static int tracing_buffers_open(struct inode *inode, struct file *filp)
+{
+	int cpu = (int)(long)inode->i_private;
+	struct ftrace_buffer_info *info;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->tr	= &global_trace;
+	info->cpu	= cpu;
+	info->spare	= ring_buffer_alloc_read_page(info->tr->buffer);
+	/* Force reading ring buffer for first read */
+	info->read	= (unsigned int)-1;
+	if (!info->spare)
+		goto out;
+
+	filp->private_data = info;
+
+	return 0;
+
+ out:
+	kfree(info);
+	return -ENOMEM;
+}
+
+static ssize_t
+tracing_buffers_read(struct file *filp, char __user *ubuf,
+		     size_t count, loff_t *ppos)
+{
+	struct ftrace_buffer_info *info = filp->private_data;
+	unsigned int pos;
+	ssize_t ret;
+	size_t size;
+
+	/* Do we have previous read data to read? */
+	if (info->read < PAGE_SIZE)
+		goto read;
+
+	info->read = 0;
+
+	ret = ring_buffer_read_page(info->tr->buffer,
+				    &info->spare,
+				    count,
+				    info->cpu, 0);
+	if (ret < 0)
+		return 0;
+
+	pos = ring_buffer_page_len(info->spare);
+
+	if (pos < PAGE_SIZE)
+		memset(info->spare + pos, 0, PAGE_SIZE - pos);
+
+read:
+	size = PAGE_SIZE - info->read;
+	if (size > count)
+		size = count;
+
+	ret = copy_to_user(ubuf, info->spare + info->read, size);
+	if (ret)
+		return -EFAULT;
+	*ppos += size;
+	info->read += size;
+
+	return size;
+}
+
+static int tracing_buffers_release(struct inode *inode, struct file *file)
+{
+	struct ftrace_buffer_info *info = file->private_data;
+
+	ring_buffer_free_read_page(info->tr->buffer, info->spare);
+	kfree(info);
+
+	return 0;
+}
+
+struct buffer_ref {
+	struct ring_buffer	*buffer;
+	void			*page;
+	int			ref;
+};
+
+static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
+				    struct pipe_buffer *buf)
+{
+	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+	if (--ref->ref)
+		return;
+
+	ring_buffer_free_read_page(ref->buffer, ref->page);
+	kfree(ref);
+	buf->private = 0;
+}
+
+static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
+				 struct pipe_buffer *buf)
+{
+	return 1;
+}
+
+static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+				struct pipe_buffer *buf)
+{
+	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+	ref->ref++;
+}
+
+/* Pipe buffer operations for a buffer. */
+static struct pipe_buf_operations buffer_pipe_buf_ops = {
+	.can_merge		= 0,
+	.map			= generic_pipe_buf_map,
+	.unmap			= generic_pipe_buf_unmap,
+	.confirm		= generic_pipe_buf_confirm,
+	.release		= buffer_pipe_buf_release,
+	.steal			= buffer_pipe_buf_steal,
+	.get			= buffer_pipe_buf_get,
+};
+
+/*
+ * Callback from splice_to_pipe(), if we need to release some pages
+ * at the end of the spd in case we error'ed out in filling the pipe.
+ */
+static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+	struct buffer_ref *ref =
+		(struct buffer_ref *)spd->partial[i].private;
+
+	if (--ref->ref)
+		return;
+
+	ring_buffer_free_read_page(ref->buffer, ref->page);
+	kfree(ref);
+	spd->partial[i].private = 0;
+}
+
+static ssize_t
+tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+			    struct pipe_inode_info *pipe, size_t len,
+			    unsigned int flags)
+{
+	struct ftrace_buffer_info *info = file->private_data;
+	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages[PIPE_BUFFERS];
+	struct splice_pipe_desc spd = {
+		.pages		= pages,
+		.partial	= partial,
+		.flags		= flags,
+		.ops		= &buffer_pipe_buf_ops,
+		.spd_release	= buffer_spd_release,
+	};
+	struct buffer_ref *ref;
+	int size, i;
+	size_t ret;
+
+	/*
+	 * We can't seek on a buffer input
+	 */
+	if (unlikely(*ppos))
+		return -ESPIPE;
+
+
+	for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
+		struct page *page;
+		int r;
+
+		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+		if (!ref)
+			break;
+
+		ref->buffer = info->tr->buffer;
+		ref->page = ring_buffer_alloc_read_page(ref->buffer);
+		if (!ref->page) {
+			kfree(ref);
+			break;
+		}
+
+		r = ring_buffer_read_page(ref->buffer, &ref->page,
+					  len, info->cpu, 0);
+		if (r < 0) {
+			ring_buffer_free_read_page(ref->buffer,
+						   ref->page);
+			kfree(ref);
+			break;
+		}
+
+		/*
+		 * zero out any left over data, this is going to
+		 * user land.
+		 */
+		size = ring_buffer_page_len(ref->page);
+		if (size < PAGE_SIZE)
+			memset(ref->page + size, 0, PAGE_SIZE - size);
+
+		page = virt_to_page(ref->page);
+
+		spd.pages[i] = page;
+		spd.partial[i].len = PAGE_SIZE;
+		spd.partial[i].offset = 0;
+		spd.partial[i].private = (unsigned long)ref;
+		spd.nr_pages++;
+	}
+
+	spd.nr_pages = i;
+
+	/* did we read anything? */
+	if (!spd.nr_pages) {
+		if (flags & SPLICE_F_NONBLOCK)
+			ret = -EAGAIN;
+		else
+			ret = 0;
+		/* TODO: block */
+		return ret;
+	}
+
+	ret = splice_to_pipe(pipe, &spd);
+
+	return ret;
+}
+
+static const struct file_operations tracing_buffers_fops = {
+	.open		= tracing_buffers_open,
+	.read		= tracing_buffers_read,
+	.release	= tracing_buffers_release,
+	.splice_read	= tracing_buffers_splice_read,
+	.llseek		= no_llseek,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
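
tracing_buffers_read() above hands userspace one zero-padded ring-buffer page per read cycle. A hypothetical userspace consumer using plain read(2); the debugfs mount point is an assumption, and the per-cpu file itself is created further down in this patch:

/* Hypothetical consumer of the new per-cpu buffer file via read(2). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/tracing/binary_buffers/0", O_RDONLY);
	if (fd < 0)
		return 1;

	/* each page starts with the buffer_data_page header (timestamp
	 * and commit count), followed by raw events, zero-padded */
	n = read(fd, page, sizeof(page));
	if (n > 0)
		fwrite(page, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}
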
@@ -3399,6 +3638,7 @@ static __init void create_trace_options_dir(void)
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
+	struct dentry *buffers;
 	struct dentry *entry;
 	int cpu;
 
@@ -3471,6 +3711,26 @@ static __init int tracer_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'trace_marker' entry\n");
 
+	buffers = debugfs_create_dir("binary_buffers", d_tracer);
+
+	if (!buffers)
+		pr_warning("Could not create buffers directory\n");
+	else {
+		int cpu;
+		char buf[64];
+
+		for_each_tracing_cpu(cpu) {
+			sprintf(buf, "%d", cpu);
+
+			entry = debugfs_create_file(buf, 0444, buffers,
+						    (void *)(long)cpu,
+						    &tracing_buffers_fops);
+			if (!entry)
+				pr_warning("Could not create debugfs buffers "
+					   "'%s' entry\n", buf);
+		}
+	}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 				    &ftrace_update_tot_cnt,
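
With the binary_buffers/<cpu> files in place, the zero-copy path can be exercised from userspace. A hypothetical sketch (again assuming the usual debugfs mount point) that moves one page through a pipe with splice(2):

/* Hypothetical zero-copy consumer using splice(2); the path and length
 * are assumptions for illustration. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/tracing/binary_buffers/0", O_RDONLY);
	if (fd < 0 || pipe(pfd) < 0)
		return 1;

	/* moves whole ring-buffer pages into the pipe without copying;
	 * fails with EAGAIN when the buffer is empty and
	 * SPLICE_F_NONBLOCK is set (see the TODO in the patch) */
	n = splice(fd, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK);
	if (n > 0)
		splice(pfd[0], NULL, STDOUT_FILENO, NULL, (size_t)n, 0);

	close(fd);
	return 0;
}
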
@@ -3491,7 +3751,7 @@ static __init int tracer_init_debugfs(void)
 
 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-	static DEFINE_SPINLOCK(trace_buf_lock);
+	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ring_buffer_event *event;
@@ -3513,7 +3773,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 		goto out;
 
 	pause_graph_tracing();
-	spin_lock_irqsave(&trace_buf_lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3532,7 +3793,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(tr->buffer, event);
 
  out_unlock:
-	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+	__raw_spin_unlock(&trace_buf_lock);
+	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
  out:
 	preempt_enable_notrace();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index e606633fb498..561bb5c5d988 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -217,6 +217,7 @@ enum trace_flag_type {
  */
 struct trace_array_cpu {
 	atomic_t		disabled;
+	void			*buffer_page;	/* ring buffer spare */
 
 	/* these fields get copied into max-trace: */
 	unsigned long		trace_idx;
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index 041789ffbac1..2c8d76c7dbed 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -5,7 +5,7 @@
  *
  * static void ftrace_event_<call>(proto)
  * {
- *	event_trace_printk(_RET_IP_, "(<call>) " <fmt>);
+ *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
  * }
  *
  * static int ftrace_reg_event_<call>(void)
@@ -112,7 +112,7 @@
 #define _TRACE_FORMAT(call, proto, args, fmt)				\
 static void ftrace_event_##call(proto)					\
 {									\
-	event_trace_printk(_RET_IP_, "(" #call ") " fmt);		\
+	event_trace_printk(_RET_IP_, #call ": " fmt);			\
 }									\
 									\
 static int ftrace_reg_event_##call(void)				\
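
The format-string change is easiest to see by expanding the macro. A runnable userspace sketch, with printf() standing in for event_trace_printk() and a made-up event name:

#include <stdio.h>

/* Sketch of the stringification change: the stringized event name is
 * now followed by a colon instead of being wrapped in parentheses. */
#define OLD_FMT(call, fmt)	"(" #call ") " fmt
#define NEW_FMT(call, fmt)	#call ": " fmt

int main(void)
{
	printf(OLD_FMT(sched_switch, "next=%d\n"), 42);	/* (sched_switch) next=42 */
	printf(NEW_FMT(sched_switch, "next=%d\n"), 42);	/* sched_switch: next=42 */
	return 0;
}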
