path: root/kernel/trace
author	Steven Rostedt <srostedt@redhat.com>	2009-03-03 19:51:40 -0500
committer	Steven Rostedt <srostedt@redhat.com>	2009-03-03 20:52:27 -0500
commit	474d32b68d6d842f3e710e9ae9fe2568c53339f8 (patch)
tree	674376b2f7a45cf6558879a4985398397dc96e79 /kernel/trace
parent	e3d6bf0a0781a269f34250fd41e0d3dbfe540cf1 (diff)
ring-buffer: make ring_buffer_read_page read from start on partial page
Impact: don't leave holes in the read buffer page

ring_buffer_read_page swaps a given page with the reader page of the
ring buffer if certain conditions are met:

 1) the requested length is big enough to hold the entire page data
 2) a writer is not currently on the page
 3) the page is not partially consumed

When these conditions are not met, instead of swapping with the
supplied page, it copies the data into the supplied page. But
currently the data is copied at the same offset as in the source
page. This leaves a hole at the start of the reader page and
complicates the use of this function. Instead, it should copy the
data to the beginning of the page and update the index fields
accordingly.

Other small clean ups are also done in this patch.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
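To illustrate the layout problem outside the kernel, here is a minimal
userspace sketch of what the fix changes (this is not the kernel code;
the page size, offsets, and variable names are invented for the demo):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char src_page[64];		/* stand-in for the reader page payload */
		char dest_page[64];		/* stand-in for the caller-supplied page */
		unsigned int read = 16;		/* page already partially consumed */
		unsigned int commit = 40;	/* end of valid data on the page */
		unsigned int len = commit - read;

		memset(src_page, 'E', sizeof(src_page));	/* pretend event bytes */
		memset(dest_page, 0, sizeof(dest_page));

		/* old behaviour: memcpy(dest_page + read, ...) left a hole [0, read) */
		/* new behaviour: copy to the beginning and report read = 0 */
		memcpy(dest_page, src_page + read, len);
		read = 0;

		printf("data now starts at offset %u, length %u\n", read, len);
		return 0;
	}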
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	43
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 27cf834d8b4e..f2a163db52f9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -61,6 +61,8 @@ enum {
 
 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
+
 /**
  * tracing_on - enable all tracing buffers
  *
@@ -234,9 +236,16 @@ static void rb_init_page(struct buffer_data_page *bpage)
 	local_set(&bpage->commit, 0);
 }
 
+/**
+ * ring_buffer_page_len - the size of data on the page.
+ * @page: The page to read
+ *
+ * Returns the amount of data on the page, including buffer page header.
+ */
 size_t ring_buffer_page_len(void *page)
 {
-	return local_read(&((struct buffer_data_page *)page)->commit);
+	return local_read(&((struct buffer_data_page *)page)->commit)
+		+ BUF_PAGE_HDR_SIZE;
 }
 
 /*
@@ -259,7 +268,7 @@ static inline int test_time_stamp(u64 delta)
 	return 0;
 }
 
-#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
+#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 
 /*
  * head_page == tail_page && head == tail then buffer is empty.
@@ -2454,6 +2463,15 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	unsigned int read;
 	int ret = -1;
 
+	/*
+	 * If len is not big enough to hold the page header, then
+	 * we can not copy anything.
+	 */
+	if (len <= BUF_PAGE_HDR_SIZE)
+		return -1;
+
+	len -= BUF_PAGE_HDR_SIZE;
+
 	if (!data_page)
 		return -1;
 
@@ -2473,15 +2491,17 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	commit = rb_page_commit(reader);
 
 	/*
-	 * If len > what's left on the page, and the writer is also off of
-	 * the read page, then simply switch the read page with the given
-	 * page. Otherwise we need to copy the data from the reader to the
-	 * writer.
+	 * If this page has been partially read or
+	 * if len is not big enough to read the rest of the page or
+	 * a writer is still on the page, then
+	 * we must copy the data from the page to the buffer.
+	 * Otherwise, we can simply swap the page with the one passed in.
 	 */
-	if ((len < (commit - read)) ||
+	if (read || (len < (commit - read)) ||
 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
-		unsigned int pos = read;
+		unsigned int rpos = read;
+		unsigned int pos = 0;
 		unsigned int size;
 
 		if (full)
@@ -2497,12 +2517,13 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 
 		/* Need to copy one event at a time */
 		do {
-			memcpy(bpage->data + pos, rpage->data + pos, size);
+			memcpy(bpage->data + pos, rpage->data + rpos, size);
 
 			len -= size;
 
 			rb_advance_reader(cpu_buffer);
-			pos = reader->read;
+			rpos = reader->read;
+			pos += size;
 
 			event = rb_reader_event(cpu_buffer);
 			size = rb_event_length(event);
@@ -2512,6 +2533,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		local_set(&bpage->commit, pos);
 		bpage->time_stamp = rpage->time_stamp;
 
+		/* we copied everything to the beginning */
+		read = 0;
 	} else {
 		/* swap the pages */
 		rb_init_page(bpage);
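
For reference, a self-contained userspace model of the per-event copy
loop changed above (this is not the kernel function; the event sizes and
offsets are invented for the demonstration). It shows why separate rpos
and pos offsets pack the copied events at the start of the destination:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char src[128], dest[128];
		unsigned int event_sizes[] = { 8, 24, 16 };	/* invented event lengths */
		unsigned int rpos = 40;	/* source offset: events start past consumed data */
		unsigned int pos = 0;	/* destination offset: packed from the beginning */

		memset(src, 'E', sizeof(src));
		memset(dest, 0, sizeof(dest));

		for (unsigned int i = 0; i < 3; i++) {
			unsigned int size = event_sizes[i];

			/* copy one event at a time, as the kernel loop does */
			memcpy(dest + pos, src + rpos, size);
			rpos += size;	/* in the kernel: rpos = reader->read */
			pos += size;
		}

		printf("copied %u bytes of events to the start of the page\n", pos);
		return 0;
	}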