author     Steven Rostedt <srostedt@redhat.com>    2008-11-11 15:28:41 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-11-11 16:02:35 -0500
commit     3e89c7bb92fc92bb964734341487798c8d497bae (patch)
tree       9b20d0a934152220981e3a25c27ee0d799bc9de3 /kernel/trace/ring_buffer.c
parent     f83c9d0fe42a7544b4d4ffcebb2e6716fcfd95c0 (diff)
ring-buffer: clean up warn ons
Impact: Restructure WARN_ONs in ring_buffer.c

The current WARN_ON macros in ring_buffer.c are quite ugly. This patch
cleans them up and uses a single RB_WARN_ON that returns the value of
the condition. This allows the caller to abort the function if the
condition is true.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
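To illustrate the idea behind the change, here is a minimal user-space sketch of the pattern the patch adopts: the macro becomes a GCC statement expression that evaluates to the condition, so each call site chooses its own bail-out (return, return -1, return NULL, goto, ...). DEMO_WARN_ON, struct demo_buffer and demo_check() are hypothetical stand-ins for this sketch only; the real kernel macro additionally uses unlikely(), atomic_inc() and WARN_ON(1), which are not reproduced here.

/* Sketch of a warn-macro that reports its condition back to the caller. */
#include <stdio.h>

struct demo_buffer {
	int record_disabled;	/* stands in for the ring buffer's counter */
};

#define DEMO_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = !!(cond);			\
		if (_____ret) {					\
			(buffer)->record_disabled++;		\
			fprintf(stderr, "warn: %s\n", #cond);	\
		}						\
		_____ret;					\
	})

/* The caller decides how to abort, which the old RB_WARN_ON_RET /
 * _RET_INT / _RET_NULL variants had to hard-code. */
static int demo_check(struct demo_buffer *buf, int head_ok)
{
	if (DEMO_WARN_ON(buf, !head_ok))
		return -1;
	return 0;
}

int main(void)
{
	struct demo_buffer buf = { 0 };

	printf("ok path:   %d\n", demo_check(&buf, 1));
	printf("warn path: %d\n", demo_check(&buf, 0));
	printf("disabled:  %d\n", buf.record_disabled);
	return 0;
}

Because the macro itself no longer returns, the variant that also dropped a lock (the old RB_WARN_ON_UNLOCK) becomes an ordinary if-block that unlocks before returning, as the ring_buffer_resize hunks below show.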
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  143
1 files changed, 57 insertions, 86 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 17c2ccebb567..8c5cacb25a18 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -191,60 +191,14 @@ struct ring_buffer_iter {
 
 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
 #define RB_WARN_ON(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
+	({							\
+		int _____ret = unlikely(cond);			\
+		if (_____ret) {					\
 			atomic_inc(&buffer->record_disabled);	\
 			WARN_ON(1);				\
 		}						\
-	} while (0)
-
-#define RB_WARN_ON_RET(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return;					\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_RET_INT(buffer, cond)			\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return -1;				\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_RET_NULL(buffer, cond)			\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return NULL;				\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_ONCE(buffer, cond)				\
-	do {							\
-		static int once;				\
-		if (unlikely(cond) && !once) {			\
-			once++;					\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-		}						\
-	} while (0)
-
-/* buffer must be ring_buffer not per_cpu */
-#define RB_WARN_ON_UNLOCK(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
-			mutex_unlock(&buffer->mutex);		\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return -1;				\
-		}						\
-	} while (0)
+		_____ret;					\
+	})
 
 /**
  * check_pages - integrity check of buffer pages
@@ -258,14 +212,18 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 	struct list_head *head = &cpu_buffer->pages;
 	struct buffer_page *page, *tmp;
 
-	RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
-	RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
+	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+		return -1;
+	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+		return -1;
 
 	list_for_each_entry_safe(page, tmp, head, list) {
-		RB_WARN_ON_RET_INT(cpu_buffer,
-				   page->list.next->prev != &page->list);
-		RB_WARN_ON_RET_INT(cpu_buffer,
-				   page->list.prev->next != &page->list);
+		if (RB_WARN_ON(cpu_buffer,
+			       page->list.next->prev != &page->list))
+			return -1;
+		if (RB_WARN_ON(cpu_buffer,
+			       page->list.prev->next != &page->list))
+			return -1;
 	}
 
 	return 0;
@@ -472,13 +430,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	synchronize_sched();
 
 	for (i = 0; i < nr_pages; i++) {
-		RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
+		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+			return;
 		p = cpu_buffer->pages.next;
 		page = list_entry(p, struct buffer_page, list);
 		list_del_init(&page->list);
 		free_buffer_page(page);
 	}
-	RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
+	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+		return;
 
 	rb_reset_cpu(cpu_buffer);
 
@@ -500,7 +460,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	synchronize_sched();
 
 	for (i = 0; i < nr_pages; i++) {
-		RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
+		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
+			return;
 		p = pages->next;
 		page = list_entry(p, struct buffer_page, list);
 		list_del_init(&page->list);
@@ -555,7 +516,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size < buffer_size) {
 
 		/* easy case, just free pages */
-		RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
+		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
+			mutex_unlock(&buffer->mutex);
+			return -1;
+		}
 
 		rm_pages = buffer->pages - nr_pages;
 
@@ -574,7 +538,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	 * add these pages to the cpu_buffers. Otherwise we just free
 	 * them all and return -ENOMEM;
 	 */
-	RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
+	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
+		mutex_unlock(&buffer->mutex);
+		return -1;
+	}
 
 	new_pages = nr_pages - buffer->pages;
 
@@ -598,7 +565,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		rb_insert_pages(cpu_buffer, &pages, new_pages);
 	}
 
-	RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
+	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
+		mutex_unlock(&buffer->mutex);
+		return -1;
+	}
 
  out:
 	buffer->pages = nr_pages;
@@ -686,7 +656,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
 	     head += rb_event_length(event)) {
 
 		event = __rb_page_index(cpu_buffer->head_page, head);
-		RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
+		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+			return;
 		/* Only count data entries */
 		if (event->type != RINGBUF_TYPE_DATA)
 			continue;
@@ -739,8 +710,9 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
 	addr &= PAGE_MASK;
 
 	while (cpu_buffer->commit_page->page != (void *)addr) {
-		RB_WARN_ON(cpu_buffer,
-			   cpu_buffer->commit_page == cpu_buffer->tail_page);
+		if (RB_WARN_ON(cpu_buffer,
+			   cpu_buffer->commit_page == cpu_buffer->tail_page))
+			return;
 		cpu_buffer->commit_page->commit =
 			cpu_buffer->commit_page->write;
 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
@@ -896,7 +868,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		reader_page = cpu_buffer->reader_page;
 
 		/* we grabbed the lock before incrementing */
-		RB_WARN_ON(cpu_buffer, next_page == reader_page);
+		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
+			goto out_unlock;
 
 		/*
 		 * If for some reason, we had an interrupt storm that made
@@ -973,7 +946,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* We reserved something on the buffer */
 
-	RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
+	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+		return NULL;
 
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
@@ -1072,10 +1046,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	 * storm or we have something buggy.
 	 * Bail!
 	 */
-	if (unlikely(++nr_loops > 1000)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		return NULL;
-	}
 
 	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
@@ -1591,8 +1563,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * a case where we will loop three times. There should be no
 	 * reason to loop four times (that I know of).
 	 */
-	if (unlikely(++nr_loops > 3)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
 		reader = NULL;
 		goto out;
 	}
@@ -1604,8 +1575,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto out;
 
 	/* Never should we have an index greater than the size */
-	RB_WARN_ON(cpu_buffer,
-		   cpu_buffer->reader_page->read > rb_page_size(reader));
+	if (RB_WARN_ON(cpu_buffer,
+		   cpu_buffer->reader_page->read > rb_page_size(reader)))
+		goto out;
 
 	/* check if we caught up to the tail */
 	reader = NULL;
@@ -1659,7 +1631,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 	reader = rb_get_reader_page(cpu_buffer);
 
 	/* This function should not be called when buffer is empty */
-	RB_WARN_ON_RET(cpu_buffer, !reader);
+	if (RB_WARN_ON(cpu_buffer, !reader))
+		return;
 
 	event = rb_reader_event(cpu_buffer);
 
@@ -1686,8 +1659,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 	 * Check if we are at the end of the buffer.
 	 */
 	if (iter->head >= rb_page_size(iter->head_page)) {
-		RB_WARN_ON_RET(buffer,
-			       iter->head_page == cpu_buffer->commit_page);
+		if (RB_WARN_ON(buffer,
+			       iter->head_page == cpu_buffer->commit_page))
+			return;
 		rb_inc_iter(iter);
 		return;
 	}
@@ -1700,9 +1674,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 	 * This should not be called to advance the header if we are
 	 * at the tail of the buffer.
 	 */
-	RB_WARN_ON_RET(cpu_buffer,
+	if (RB_WARN_ON(cpu_buffer,
 		       (iter->head_page == cpu_buffer->commit_page) &&
-		       (iter->head + length > rb_commit_index(cpu_buffer)));
+		       (iter->head + length > rb_commit_index(cpu_buffer))))
+		return;
 
 	rb_update_iter_read_stamp(iter, event);
 
@@ -1736,10 +1711,8 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	 * can have. Nesting 10 deep of interrupts is clearly
 	 * an anomaly.
 	 */
-	if (unlikely(++nr_loops > 10)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
 		return NULL;
-	}
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -1800,10 +1773,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	 * can have. Nesting 10 deep of interrupts is clearly
 	 * an anomaly.
 	 */
-	if (unlikely(++nr_loops > 10)) {
-		RB_WARN_ON(cpu_buffer, 1);
+	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
 		return NULL;
-	}
 
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;