author	Steven Rostedt <srostedt@redhat.com>	2009-03-03 00:27:49 -0500
committer	Steven Rostedt <srostedt@redhat.com>	2009-03-03 20:51:24 -0500
commit	ef7a4a161472b952941bf78855a9cd95703c024e (patch)
tree	17145213ece6872d34951a2d33af6670fc2ae453	/kernel/trace/ring_buffer.c
parent	41be4da4e85e58520b934040966a6ae919c66c2d (diff)
ring-buffer: fix ring_buffer_read_page

ring_buffer_read_page() was broken when it only copied part of the page. This patch fixes that up and also adds a length parameter, so that only part of the buffer page can be copied.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
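For reference, a minimal caller sketch of the new interface, mirroring the kernel-doc example updated by this patch (process_page() is a hypothetical consumer; error handling is abbreviated):

	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer);
	if (!rpage)
		return -ENOMEM;

	/* last argument 0: extract even if the reader page is not yet full */
	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
	if (ret >= 0)
		process_page(rpage, ret);	/* ret is the offset where the data starts */

	/* ring_buffer_page_len(rpage), added by this patch, reports how many
	 * bytes of the returned page are valid. */
	ring_buffer_free_read_page(buffer, rpage);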
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	92
1 file changed, 59 insertions(+), 33 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9baad7ee4b36..2ad6bae95a3d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -234,6 +234,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
 	local_set(&bpage->commit, 0);
 }
 
+size_t ring_buffer_page_len(void *page)
+{
+	return local_read(&((struct buffer_data_page *)page)->commit);
+}
+
 /*
  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  * this issue out.
@@ -2378,8 +2383,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
 {
-	unsigned long addr;
 	struct buffer_data_page *bpage;
+	unsigned long addr;
 
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
@@ -2387,6 +2392,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
 
 	bpage = (void *)addr;
 
+	rb_init_page(bpage);
+
 	return bpage;
 }
 
@@ -2406,6 +2413,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * ring_buffer_read_page - extract a page from the ring buffer
  * @buffer: buffer to extract from
  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
+ * @len: amount to extract
  * @cpu: the cpu of the buffer to extract
  * @full: should the extraction only happen when the page is full.
  *
@@ -2418,7 +2426,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  *	rpage = ring_buffer_alloc_read_page(buffer);
  *	if (!rpage)
  *		return error;
- *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
+ *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
  *	if (ret >= 0)
  *		process_page(rpage, ret);
  *
@@ -2435,71 +2443,89 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct ring_buffer *buffer,
-			    void **data_page, int cpu, int full)
+			  void **data_page, size_t len, int cpu, int full)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
+	struct buffer_page *reader;
 	unsigned long flags;
+	unsigned int commit;
 	unsigned int read;
 	int ret = -1;
 
 	if (!data_page)
-		return 0;
+		return -1;
 
 	bpage = *data_page;
 	if (!bpage)
-		return 0;
+		return -1;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	/*
-	 * rb_buffer_peek will get the next ring buffer if
-	 * the current reader page is empty.
-	 */
-	event = rb_buffer_peek(buffer, cpu, NULL);
-	if (!event)
+	reader = rb_get_reader_page(cpu_buffer);
+	if (!reader)
 		goto out;
 
-	/* check for data */
-	if (!local_read(&cpu_buffer->reader_page->page->commit))
-		goto out;
+	event = rb_reader_event(cpu_buffer);
+
+	read = reader->read;
+	commit = rb_page_commit(reader);
 
-	read = cpu_buffer->reader_page->read;
 	/*
-	 * If the writer is already off of the read page, then simply
-	 * switch the read page with the given page. Otherwise
-	 * we need to copy the data from the reader to the writer.
+	 * If len > what's left on the page, and the writer is also off of
+	 * the read page, then simply switch the read page with the given
+	 * page. Otherwise we need to copy the data from the reader to the
+	 * writer.
 	 */
-	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
-		unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
+	if ((len < (commit - read)) ||
+	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
+		unsigned int pos = read;
+		unsigned int size;
 
 		if (full)
 			goto out;
-		/* The writer is still on the reader page, we must copy */
-		memcpy(bpage->data + read, rpage->data + read, commit - read);
 
-		/* consume what was read */
-		cpu_buffer->reader_page->read = commit;
+		if (len > (commit - read))
+			len = (commit - read);
+
+		size = rb_event_length(event);
+
+		if (len < size)
+			goto out;
+
+		/* Need to copy one event at a time */
+		do {
+			memcpy(bpage->data + pos, rpage->data + pos, size);
+
+			len -= size;
+
+			rb_advance_reader(cpu_buffer);
+			pos = reader->read;
+
+			event = rb_reader_event(cpu_buffer);
+			size = rb_event_length(event);
+		} while (len > size);
 
 		/* update bpage */
-		local_set(&bpage->commit, commit);
-		if (!read)
-			bpage->time_stamp = rpage->time_stamp;
+		local_set(&bpage->commit, pos);
+		bpage->time_stamp = rpage->time_stamp;
+
 	} else {
 		/* swap the pages */
 		rb_init_page(bpage);
-		bpage = cpu_buffer->reader_page->page;
-		cpu_buffer->reader_page->page = *data_page;
-		local_set(&cpu_buffer->reader_page->write, 0);
-		cpu_buffer->reader_page->read = 0;
+		bpage = reader->page;
+		reader->page = *data_page;
+		local_set(&reader->write, 0);
+		reader->read = 0;
 		*data_page = bpage;
+
+		/* update the entry counter */
+		rb_remove_entries(cpu_buffer, bpage, read);
 	}
 	ret = read;
 
-	/* update the entry counter */
-	rb_remove_entries(cpu_buffer, bpage, read);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 