author     Steven Rostedt <srostedt@redhat.com>   2008-11-10 23:07:30 -0500
committer  Ingo Molnar <mingo@elte.hu>            2008-11-11 03:40:34 -0500
commit     f536aafc5a2e6f0c8f1577a155e6f93db5e469f0 (patch)
tree       3ed56e3969ef2f1cccc921f3c49a959012ba1bda /kernel
parent     5aa1ba6a6c710e747838a22d798ac97a8b248745 (diff)
ring-buffer: replace most bug ons with warn on and disable buffer
This patch replaces most of the BUG_ONs in the ring_buffer code with
RB_WARN_ON variants. It adds some more variants as needed for the
replacement. This lets the buffer die nicely and still warn the user.

One BUG_ON remains in the code, and that is because it detects a bad
pointer passed in by the calling function, and not a bug by the ring
buffer code itself.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
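For readers unfamiliar with the pattern, here is a minimal user-space
sketch of what the new macros do. The struct, rb_check() and main()
below are illustrative stand-ins, not code from the patch: a failed
sanity check warns, bumps record_disabled so the buffer stops accepting
writes, and returns an error to the caller, instead of halting the
machine the way BUG_ON does.

    /* Illustrative sketch only; simplified stand-ins, not kernel code. */
    #include <stdio.h>

    struct ring_buffer_per_cpu {
            int record_disabled;    /* stand-in for atomic_t record_disabled */
    };

    /* Same shape as the patch's RB_WARN_ON_RET_INT: warn, disable the
     * buffer, and return -1 rather than crashing like BUG_ON would. */
    #define RB_WARN_ON_RET_INT(buffer, cond)                \
            do {                                            \
                    if (cond) {                             \
                            (buffer)->record_disabled++;    \
                            fprintf(stderr, "WARNING: %s\n", #cond); \
                            return -1;                      \
                    }                                       \
            } while (0)

    static int rb_check(struct ring_buffer_per_cpu *cpu_buffer, int broken)
    {
            /* With BUG_ON(broken) this would take the system down;
             * here the caller simply gets -1 back. */
            RB_WARN_ON_RET_INT(cpu_buffer, broken);
            return 0;
    }

    int main(void)
    {
            struct ring_buffer_per_cpu buf = { 0 };

            if (rb_check(&buf, 1) < 0)
                    printf("buffer disabled, record_disabled=%d\n",
                           buf.record_disabled);
            return 0;
    }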
Diffstat (limited to 'kernel')
 kernel/trace/ring_buffer.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 49 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ee9b93d318b9..a6b8f9d7ac96 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -188,6 +188,7 @@ struct ring_buffer_iter {
         u64 read_stamp;
 };
 
+/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 #define RB_WARN_ON(buffer, cond)                               \
         do {                                                   \
                 if (unlikely(cond)) {                          \
@@ -201,10 +202,28 @@ struct ring_buffer_iter {
                 if (unlikely(cond)) {                          \
                         atomic_inc(&buffer->record_disabled); \
                         WARN_ON(1);                            \
+                        return;                                \
+                }                                              \
+        } while (0)
+
+#define RB_WARN_ON_RET_INT(buffer, cond)                       \
+        do {                                                   \
+                if (unlikely(cond)) {                          \
+                        atomic_inc(&buffer->record_disabled);  \
+                        WARN_ON(1);                            \
                         return -1;                             \
                 }                                              \
         } while (0)
 
+#define RB_WARN_ON_RET_NULL(buffer, cond)                      \
+        do {                                                   \
+                if (unlikely(cond)) {                          \
+                        atomic_inc(&buffer->record_disabled);  \
+                        WARN_ON(1);                            \
+                        return NULL;                           \
+                }                                              \
+        } while (0)
+
 #define RB_WARN_ON_ONCE(buffer, cond)                          \
         do {                                                   \
                 static int once;                               \
@@ -215,6 +234,17 @@ struct ring_buffer_iter {
                 }                                              \
         } while (0)
 
+/* buffer must be ring_buffer not per_cpu */
+#define RB_WARN_ON_UNLOCK(buffer, cond)                        \
+        do {                                                   \
+                if (unlikely(cond)) {                          \
+                        mutex_unlock(&buffer->mutex);          \
+                        atomic_inc(&buffer->record_disabled);  \
+                        WARN_ON(1);                            \
+                        return -1;                             \
+                }                                              \
+        } while (0)
+
 /**
  * check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
@@ -227,13 +257,13 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
         struct list_head *head = &cpu_buffer->pages;
         struct buffer_page *page, *tmp;
 
-        RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
-        RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
+        RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
+        RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
 
         list_for_each_entry_safe(page, tmp, head, list) {
-                RB_WARN_ON_RET(cpu_buffer,
+                RB_WARN_ON_RET_INT(cpu_buffer,
                         page->list.next->prev != &page->list);
-                RB_WARN_ON_RET(cpu_buffer,
+                RB_WARN_ON_RET_INT(cpu_buffer,
                         page->list.prev->next != &page->list);
         }
 
@@ -440,13 +470,13 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
         synchronize_sched();
 
         for (i = 0; i < nr_pages; i++) {
-                BUG_ON(list_empty(&cpu_buffer->pages));
+                RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
                 p = cpu_buffer->pages.next;
                 page = list_entry(p, struct buffer_page, list);
                 list_del_init(&page->list);
                 free_buffer_page(page);
         }
-        BUG_ON(list_empty(&cpu_buffer->pages));
+        RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
 
         rb_reset_cpu(cpu_buffer);
 
@@ -468,7 +498,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
         synchronize_sched();
 
         for (i = 0; i < nr_pages; i++) {
-                BUG_ON(list_empty(pages));
+                RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
                 p = pages->next;
                 page = list_entry(p, struct buffer_page, list);
                 list_del_init(&page->list);
@@ -523,7 +553,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
         if (size < buffer_size) {
 
                 /* easy case, just free pages */
-                BUG_ON(nr_pages >= buffer->pages);
+                RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
 
                 rm_pages = buffer->pages - nr_pages;
 
@@ -542,7 +572,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
          * add these pages to the cpu_buffers. Otherwise we just free
          * them all and return -ENOMEM;
          */
-        BUG_ON(nr_pages <= buffer->pages);
+        RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
+
         new_pages = nr_pages - buffer->pages;
 
         for_each_buffer_cpu(buffer, cpu) {
@@ -565,7 +596,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
                 rb_insert_pages(cpu_buffer, &pages, new_pages);
         }
 
-        BUG_ON(!list_empty(&pages));
+        RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
 
  out:
         buffer->pages = nr_pages;
@@ -653,7 +684,7 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
              head += rb_event_length(event)) {
 
                 event = __rb_page_index(cpu_buffer->head_page, head);
-                BUG_ON(rb_null_event(event));
+                RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
                 /* Only count data entries */
                 if (event->type != RINGBUF_TYPE_DATA)
                         continue;
@@ -940,7 +971,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
         /* We reserved something on the buffer */
 
-        BUG_ON(write > BUF_PAGE_SIZE);
+        RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
 
         event = __rb_page_index(tail_page, tail);
         rb_update_event(event, type, length);
@@ -1621,7 +1652,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
         reader = rb_get_reader_page(cpu_buffer);
 
         /* This function should not be called when buffer is empty */
-        BUG_ON(!reader);
+        RB_WARN_ON_RET(cpu_buffer, !reader);
 
         event = rb_reader_event(cpu_buffer);
 
@@ -1648,7 +1679,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
          * Check if we are at the end of the buffer.
          */
         if (iter->head >= rb_page_size(iter->head_page)) {
-                BUG_ON(iter->head_page == cpu_buffer->commit_page);
+                RB_WARN_ON_RET(buffer,
+                               iter->head_page == cpu_buffer->commit_page);
                 rb_inc_iter(iter);
                 return;
         }
@@ -1661,8 +1693,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
          * This should not be called to advance the header if we are
          * at the tail of the buffer.
          */
-        BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
-               (iter->head + length > rb_commit_index(cpu_buffer)));
+        RB_WARN_ON_RET(cpu_buffer,
+                       (iter->head_page == cpu_buffer->commit_page) &&
+                       (iter->head + length > rb_commit_index(cpu_buffer)));
 
         rb_update_iter_read_stamp(iter, event);
 