path: root/kernel/trace/ring_buffer.c
author    Lai Jiangshan <laijs@cn.fujitsu.com>  2009-02-09 01:21:17 -0500
committer Steven Rostedt <srostedt@redhat.com>  2009-02-10 09:17:37 -0500
commit    667d24125839b6f3363d8177d7ed9fab8a40e45f (patch)
tree      be090ba5c44386556c143348b83266230ad4fb47 /kernel/trace/ring_buffer.c
parent    b85fa01ed958ca59523a2db3c2ee647b98745d6a (diff)
ring_buffer: fix ring_buffer_read_page()
Impact: change API and init bpage when copy

ring_buffer_read_page()/rb_remove_entries() may be called for a partially
consumed page. Add a parameter for rb_remove_entries() and make it update
cpu_buffer->entries correctly for partially consumed pages.

ring_buffer_read_page() now returns the offset to the next event.

Init the bpage's time_stamp when return value is 0.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
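As a caller-side illustration of the changed return convention, here is a minimal sketch modeled on the kernel-doc example updated in this patch; read_one_page and process_page are hypothetical names introduced for the example, only the ring_buffer_* calls are existing API:

/* Minimal sketch (hypothetical helper): consume one reader page from @cpu. */
static int read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer);
	if (!rpage)
		return -ENOMEM;

	/* full == 0: accept a partially filled reader page */
	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
	if (ret >= 0)
		/* ret is the offset in the page where the consumed data starts */
		process_page(rpage, ret);	/* hypothetical caller-side handler */

	ring_buffer_free_read_page(buffer, rpage);
	return ret;
}

With full set, the call only succeeds once the writer has left the reader page, so the whole page can be handed over by swapping instead of copying.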
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index eca282720838..10d202ea06f3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2332,13 +2332,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
-			      struct buffer_data_page *bpage)
+			      struct buffer_data_page *bpage,
+			      unsigned int offset)
 {
 	struct ring_buffer_event *event;
 	unsigned long head;
 
 	__raw_spin_lock(&cpu_buffer->lock);
-	for (head = 0; head < local_read(&bpage->commit);
+	for (head = offset; head < local_read(&bpage->commit);
 	     head += rb_event_length(event)) {
 
 		event = __rb_data_page_index(bpage, head);
@@ -2410,8 +2411,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  *	if (!rpage)
  *		return error;
  *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
- *	if (ret)
- *		process_page(rpage);
+ *	if (ret >= 0)
+ *		process_page(rpage, ret);
  *
  * When @full is set, the function will not return true unless
  * the writer is off the reader page.
@@ -2422,8 +2423,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * responsible for that.
  *
  * Returns:
- *  1 if data has been transferred
- *  0 if no data has been transferred.
+ *  >=0 if data has been transferred, returns the offset of consumed data.
+ *  <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct ring_buffer *buffer,
 			  void **data_page, int cpu, int full)
@@ -2432,7 +2433,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
 	unsigned long flags;
-	int ret = 0;
+	unsigned int read;
+	int ret = -1;
 
 	if (!data_page)
 		return 0;
@@ -2454,24 +2456,29 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	/* check for data */
 	if (!local_read(&cpu_buffer->reader_page->page->commit))
 		goto out;
+
+	read = cpu_buffer->reader_page->read;
 	/*
 	 * If the writer is already off of the read page, then simply
 	 * switch the read page with the given page. Otherwise
 	 * we need to copy the data from the reader to the writer.
 	 */
 	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
-		unsigned int read = cpu_buffer->reader_page->read;
 		unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
+		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
 
 		if (full)
 			goto out;
 		/* The writer is still on the reader page, we must copy */
-		memcpy(bpage->data,
-		       cpu_buffer->reader_page->page->data + read,
-		       commit - read);
+		memcpy(bpage->data + read, rpage->data + read, commit - read);
 
 		/* consume what was read */
 		cpu_buffer->reader_page->read = commit;
+
+		/* update bpage */
+		local_set(&bpage->commit, commit);
+		if (!read)
+			bpage->time_stamp = rpage->time_stamp;
 	} else {
 		/* swap the pages */
 		rb_init_page(bpage);
@@ -2480,10 +2487,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		cpu_buffer->reader_page->read = 0;
 		*data_page = bpage;
 	}
-	ret = 1;
+	ret = read;
 
 	/* update the entry counter */
-	rb_remove_entries(cpu_buffer, bpage);
+	rb_remove_entries(cpu_buffer, bpage, read);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 