Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--   kernel/trace/ring_buffer.c   56
1 file changed, 31 insertions, 25 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 53ba3a6d16d0..a3901b550c93 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -59,7 +59,7 @@ enum {
 	RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
 };
 
-static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
 /**
  * tracing_on - enable all tracing buffers
@@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(tracing_off);
  * tracing_off_permanent - permanently disable ring buffers
  *
  * This function, once called, will disable all ring buffers
- * permanenty.
+ * permanently.
  */
 void tracing_off_permanent(void)
 {
@@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 struct buffer_data_page {
 	u64 time_stamp;		/* page time stamp */
-	local_t commit;		/* write commited index */
+	local_t commit;		/* write committed index */
 	unsigned char data[];	/* data of buffer page */
 };
 
@@ -260,7 +260,7 @@ struct ring_buffer_per_cpu {
 	struct list_head pages;
 	struct buffer_page *head_page;		/* read from head */
 	struct buffer_page *tail_page;		/* write to tail */
-	struct buffer_page *commit_page;	/* commited pages */
+	struct buffer_page *commit_page;	/* committed pages */
 	struct buffer_page *reader_page;
 	unsigned long overrun;
 	unsigned long entries;
@@ -273,8 +273,8 @@ struct ring_buffer {
 	unsigned pages;
 	unsigned flags;
 	int cpus;
-	cpumask_var_t cpumask;
 	atomic_t record_disabled;
+	cpumask_var_t cpumask;
 
 	struct mutex mutex;
 
@@ -303,7 +303,7 @@ struct ring_buffer_iter {
  * check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
- * As a safty measure we check to make sure the data pages have not
+ * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2332,13 +2332,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
-			      struct buffer_data_page *bpage)
+			      struct buffer_data_page *bpage,
+			      unsigned int offset)
 {
 	struct ring_buffer_event *event;
 	unsigned long head;
 
 	__raw_spin_lock(&cpu_buffer->lock);
-	for (head = 0; head < local_read(&bpage->commit);
+	for (head = offset; head < local_read(&bpage->commit);
 	     head += rb_event_length(event)) {
 
 		event = __rb_data_page_index(bpage, head);
@@ -2406,12 +2407,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * to swap with a page in the ring buffer.
  *
  * for example:
- *	rpage = ring_buffer_alloc_read_page(buffer);
+ *	rpage = ring_buffer_alloc_read_page(buffer);
  *	if (!rpage)
  *		return error;
  *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
- *	if (ret)
- *		process_page(rpage);
+ *	if (ret >= 0)
+ *		process_page(rpage, ret);
  *
  * When @full is set, the function will not return true unless
  * the writer is off the reader page.
@@ -2422,8 +2423,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * responsible for that.
  *
  * Returns:
- *  1 if data has been transferred
- *  0 if no data has been transferred.
+ *  >=0 if data has been transferred, returns the offset of consumed data.
+ *  <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct ring_buffer *buffer,
 			  void **data_page, int cpu, int full)
@@ -2432,7 +2433,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
 	unsigned long flags;
-	int ret = 0;
+	unsigned int read;
+	int ret = -1;
 
 	if (!data_page)
 		return 0;
@@ -2454,25 +2456,29 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	/* check for data */
 	if (!local_read(&cpu_buffer->reader_page->page->commit))
 		goto out;
+
+	read = cpu_buffer->reader_page->read;
 	/*
 	 * If the writer is already off of the read page, then simply
 	 * switch the read page with the given page. Otherwise
 	 * we need to copy the data from the reader to the writer.
 	 */
 	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
-		unsigned int read = cpu_buffer->reader_page->read;
+		unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
+		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
 
 		if (full)
 			goto out;
 		/* The writer is still on the reader page, we must copy */
-		bpage = cpu_buffer->reader_page->page;
-		memcpy(bpage->data,
-		       cpu_buffer->reader_page->page->data + read,
-		       local_read(&bpage->commit) - read);
+		memcpy(bpage->data + read, rpage->data + read, commit - read);
 
 		/* consume what was read */
-		cpu_buffer->reader_page += read;
+		cpu_buffer->reader_page->read = commit;
 
+		/* update bpage */
+		local_set(&bpage->commit, commit);
+		if (!read)
+			bpage->time_stamp = rpage->time_stamp;
 	} else {
 		/* swap the pages */
 		rb_init_page(bpage);
@@ -2481,10 +2487,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		cpu_buffer->reader_page->read = 0;
 		*data_page = bpage;
 	}
-	ret = 1;
+	ret = read;
 
 	/* update the entry counter */
-	rb_remove_entries(cpu_buffer, bpage);
+	rb_remove_entries(cpu_buffer, bpage, read);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
@@ -2495,7 +2501,7 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	long *p = filp->private_data;
+	unsigned long *p = filp->private_data;
 	char buf[64];
 	int r;
 
@@ -2511,9 +2517,9 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	long *p = filp->private_data;
+	unsigned long *p = filp->private_data;
 	char buf[64];
-	long val;
+	unsigned long val;
 	int ret;
 
 	if (cnt >= sizeof(buf))
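
For reference, a minimal caller sketch (not part of this patch) of how the new return convention of ring_buffer_read_page() can be consumed; it follows the kernel-doc example updated above. The helper read_one_page() and the consumer my_process_page() are hypothetical names introduced here only for illustration; the ring_buffer_* calls are the ones shown in this diff.

	/*
	 * Hedged sketch: ring_buffer_read_page() now returns the offset of
	 * the consumed data (>= 0) on success and a negative value when no
	 * data was transferred.  my_process_page() is a hypothetical consumer.
	 */
	static int read_one_page(struct ring_buffer *buffer, int cpu)
	{
		void *rpage;
		int offset;

		rpage = ring_buffer_alloc_read_page(buffer);
		if (!rpage)
			return -ENOMEM;

		/* 0 as the @full argument: accept a partially filled page */
		offset = ring_buffer_read_page(buffer, &rpage, cpu, 0);
		if (offset >= 0)
			my_process_page(rpage, offset);

		ring_buffer_free_read_page(buffer, rpage);
		return offset;
	}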