author     Vaibhav Nagarnaik <vnagarnaik@google.com>  2012-05-03 21:59:51 -0400
committer  Steven Rostedt <rostedt@goodmis.org>       2012-05-16 16:25:51 -0400
commit     5040b4b7bcc26a311c799d46f67174bcb20d05dd (patch)
tree       9d0a5656f891ef2a59793df5726fca00fac3a188 /kernel
parent     83f40318dab00e3298a1f6d0b12ac025e84e478d (diff)
ring-buffer: Make addition of pages in ring buffer atomic
This patch adds the capability to add new pages to a ring buffer
atomically while write operations are going on. This makes it possible
to expand the ring buffer size without reinitializing the ring buffer.

The new pages are attached between the head page and its previous page.

Link: http://lkml.kernel.org/r/1336096792-25373-2-git-send-email-vnagarnaik@google.com

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Laurent Chavey <chavey@google.com>
Cc: Justin Teravest <teravest@google.com>
Cc: David Sharp <dhsharp@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
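The core idea of the patch is a lock-free list splice: the new pages are wired to their future neighbours first, and a single cmpxchg on prev_page->next then publishes the whole sublist between the head page and its previous page. Below is a minimal userspace sketch of that splice, assuming C11 atomics; the node and try_splice_before_head names and the dummy-head list layout are illustrative only, and the sketch drops the kernel's RB_PAGE_HEAD flag bit for simplicity.

/*
 * Toy model of the splice performed by rb_insert_pages(): writers only
 * follow ->next, so publishing the sublist with one CAS on prev->next
 * is enough; ->prev is fixed up afterwards.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct node {
	_Atomic(struct node *) next;   /* the only link writers traverse */
	struct node *prev;             /* fixed up after the CAS */
};

/* Splice the circular list on 'pages' (a dummy head) in front of 'head'. */
static bool try_splice_before_head(struct node *head, struct node *pages)
{
	struct node *prev     = head->prev;
	struct node *first    = atomic_load(&pages->next);
	struct node *last     = pages->prev;
	struct node *expected = head;

	/* Wire the new sublist to its future neighbours first... */
	atomic_store(&last->next, head);
	first->prev = prev;

	/*
	 * ...then publish it with one compare-and-swap on prev->next.
	 * If a concurrent writer moved the head past 'prev', the CAS
	 * fails and the caller retries (rb_insert_pages() tries 10x).
	 */
	if (!atomic_compare_exchange_strong(&prev->next, &expected, first))
		return false;

	/* Only now is it safe to fix the back pointer. */
	head->prev = last;
	return true;
}

int main(void)
{
	struct node a = {0}, b = {0}, c = {0};        /* existing ring a-b-c */
	struct node pages = {0}, n1 = {0}, n2 = {0};  /* two new pages */

	/* build both circular lists by hand */
	atomic_store(&a.next, &b); b.prev = &a;
	atomic_store(&b.next, &c); c.prev = &b;
	atomic_store(&c.next, &a); a.prev = &c;

	atomic_store(&pages.next, &n1); n1.prev = &pages;
	atomic_store(&n1.next, &n2);    n2.prev = &n1;
	atomic_store(&n2.next, &pages); pages.prev = &n2;

	assert(try_splice_before_head(&a, &pages));   /* ring: a-b-c-n1-n2 */
	assert(atomic_load(&c.next) == &n1 && a.prev == &n2);
	return 0;
}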
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ring_buffer.c  102
1 file changed, 77 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 27ac37efb2b0..d673ef03d16d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1252,7 +1252,7 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
 	return local_read(&bpage->write) & RB_WRITE_MASK;
 }
 
-static void
+static int
 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
 {
 	struct list_head *tail_page, *to_remove, *next_page;
@@ -1359,46 +1359,97 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
 	} while (to_remove_page != last_page);
 
 	RB_WARN_ON(cpu_buffer, nr_removed);
+
+	return nr_removed == 0;
 }
 
-static void
-rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
-		struct list_head *pages, unsigned nr_pages)
+static int
+rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	struct buffer_page *bpage;
-	struct list_head *p;
-	unsigned i;
+	struct list_head *pages = &cpu_buffer->new_pages;
+	int retries, success;
 
 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
-	/* stop the writers while inserting pages */
-	atomic_inc(&cpu_buffer->record_disabled);
-	rb_head_page_deactivate(cpu_buffer);
+	/*
+	 * We are holding the reader lock, so the reader page won't be swapped
+	 * in the ring buffer. Now we are racing with the writer trying to
+	 * move head page and the tail page.
+	 * We are going to adapt the reader page update process where:
+	 * 1. We first splice the start and end of list of new pages between
+	 *    the head page and its previous page.
+	 * 2. We cmpxchg the prev_page->next to point from head page to the
+	 *    start of new pages list.
+	 * 3. Finally, we update the head->prev to the end of new list.
+	 *
+	 * We will try this process 10 times, to make sure that we don't keep
+	 * spinning.
+	 */
+	retries = 10;
+	success = 0;
+	while (retries--) {
+		struct list_head *head_page, *prev_page, *r;
+		struct list_head *last_page, *first_page;
+		struct list_head *head_page_with_bit;
 
-	for (i = 0; i < nr_pages; i++) {
-		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			goto out;
-		p = pages->next;
-		bpage = list_entry(p, struct buffer_page, list);
-		list_del_init(&bpage->list);
-		list_add_tail(&bpage->list, cpu_buffer->pages);
+		head_page = &rb_set_head_page(cpu_buffer)->list;
+		prev_page = head_page->prev;
+
+		first_page = pages->next;
+		last_page  = pages->prev;
+
+		head_page_with_bit = (struct list_head *)
+				     ((unsigned long)head_page | RB_PAGE_HEAD);
+
+		last_page->next = head_page_with_bit;
+		first_page->prev = prev_page;
+
+		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
+
+		if (r == head_page_with_bit) {
+			/*
+			 * yay, we replaced the page pointer to our new list,
+			 * now, we just have to update to head page's prev
+			 * pointer to point to end of list
+			 */
+			head_page->prev = last_page;
+			success = 1;
+			break;
+		}
 	}
-	rb_reset_cpu(cpu_buffer);
-	rb_check_pages(cpu_buffer);
 
-out:
-	atomic_dec(&cpu_buffer->record_disabled);
+	if (success)
+		INIT_LIST_HEAD(pages);
+	/*
+	 * If we weren't successful in adding in new pages, warn and stop
+	 * tracing
+	 */
+	RB_WARN_ON(cpu_buffer, !success);
 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+
+	/* free pages if they weren't inserted */
+	if (!success) {
+		struct buffer_page *bpage, *tmp;
+		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
+					 list) {
+			list_del_init(&bpage->list);
+			free_buffer_page(bpage);
+		}
+	}
+	return success;
 }
 
 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
+	int success;
+
 	if (cpu_buffer->nr_pages_to_update > 0)
-		rb_insert_pages(cpu_buffer, &cpu_buffer->new_pages,
-				cpu_buffer->nr_pages_to_update);
+		success = rb_insert_pages(cpu_buffer);
 	else
-		rb_remove_pages(cpu_buffer, -cpu_buffer->nr_pages_to_update);
+		success = rb_remove_pages(cpu_buffer,
+					-cpu_buffer->nr_pages_to_update);
 
-	cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
+	if (success)
+		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
 }
 
 static void update_pages_handler(struct work_struct *work)
@@ -3772,6 +3823,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->commit_page = cpu_buffer->head_page;
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+	INIT_LIST_HEAD(&cpu_buffer->new_pages);
 	local_set(&cpu_buffer->reader_page->write, 0);
 	local_set(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
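One detail worth calling out from the insertion hunk: head_page_with_bit ORs RB_PAGE_HEAD into the low bits of the head page's list pointer, so the cmpxchg verifies the successor pointer and the head flag in a single atomic compare. If a writer has since moved the head (relocating the flag), the compare fails and the loop retries. Below is a standalone sketch of that low-bit tagging, with a hypothetical PAGE_FLAG_HEAD constant and helper names that are not the kernel's; it relies only on buffer pages being at least word-aligned, which leaves the bottom pointer bits free to carry state.

/* Sketch of tagging a flag into the low bit of an aligned pointer. */
#include <assert.h>
#include <stdint.h>

#define PAGE_FLAG_HEAD 0x1UL   /* hypothetical stand-in for RB_PAGE_HEAD */

static inline void *tag_head(void *p) { return (void *)((uintptr_t)p | PAGE_FLAG_HEAD); }
static inline void *untag(void *p)    { return (void *)((uintptr_t)p & ~PAGE_FLAG_HEAD); }
static inline int is_head(void *p)    { return ((uintptr_t)p & PAGE_FLAG_HEAD) != 0; }

int main(void)
{
	long page;                     /* stand-in for an aligned buffer page */
	void *link = tag_head(&page);  /* "the node after this link is the head" */

	assert(is_head(link));
	assert(untag(link) == (void *)&page);
	assert(!is_head(untag(link)));
	return 0;
}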