aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2016-05-12 11:01:24 -0400
committerSteven Rostedt <rostedt@goodmis.org>2016-05-13 11:12:20 -0400
commit9b94a8fba501f38368aef6ac1b30e7335252a220 (patch)
tree4188e1302caadfc839fab841d8427a018c46d2e6
parent854145e0a8e9a05f7366d240e2f99d9c1ca6d6dd (diff)
ring-buffer: Use long for nr_pages to avoid overflow failures
The size variable to change the ring buffer in ftrace is a long. The nr_pages used to update the ring buffer based on the size is int. On 64 bit machines this can cause an overflow problem. For example, the following will cause the ring buffer to crash: # cd /sys/kernel/debug/tracing # echo 10 > buffer_size_kb # echo 8556384240 > buffer_size_kb Then you get the warning of: WARNING: CPU: 1 PID: 318 at kernel/trace/ring_buffer.c:1527 rb_update_pages+0x22f/0x260 Which is: RB_WARN_ON(cpu_buffer, nr_removed); Note each ring buffer page holds 4080 bytes. This is because: 1) 10 causes the ring buffer to have 3 pages. (10kb requires 3 pages of 4080 bytes each to hold) 2) (2^31 / 2^10 + 1) * 4080 = 8556384240 The value written into buffer_size_kb is shifted by 10 and then passed to ring_buffer_resize(). 8556384240 * 2^10 = 8761737461760 3) The size passed to ring_buffer_resize() is then divided by BUF_PAGE_SIZE which is 4080. 8761737461760 / 4080 = 2147484672 4) The current nr_pages (3) is subtracted from the new nr_pages and we get: 2147484669. This value is saved in a signed integer nr_pages_to_update 5) 2147484669 is greater than 2^31 but smaller than 2^32, so when stored in a signed int it wraps to the value of -2147482627 6) As the value is a negative number, in update_pages_handler() it is negated and passed to rb_remove_pages() and 2147482627 pages will be removed, which is much larger than 3 and it causes the warning because not all the pages asked to be removed were removed. Link: https://bugzilla.kernel.org/show_bug.cgi?id=118001 Cc: stable@vger.kernel.org # 2.6.28+ Fixes: 7a8e76a3829f1 ("tracing: unified trace buffer") Reported-by: Hao Qin <QEver.cn@gmail.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--kernel/trace/ring_buffer.c26
1 files changed, 14 insertions, 12 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 95181e36891a..99d64cd58c52 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
437 raw_spinlock_t reader_lock; /* serialize readers */ 437 raw_spinlock_t reader_lock; /* serialize readers */
438 arch_spinlock_t lock; 438 arch_spinlock_t lock;
439 struct lock_class_key lock_key; 439 struct lock_class_key lock_key;
440 unsigned int nr_pages; 440 unsigned long nr_pages;
441 unsigned int current_context; 441 unsigned int current_context;
442 struct list_head *pages; 442 struct list_head *pages;
443 struct buffer_page *head_page; /* read from head */ 443 struct buffer_page *head_page; /* read from head */
@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
458 u64 write_stamp; 458 u64 write_stamp;
459 u64 read_stamp; 459 u64 read_stamp;
460 /* ring buffer pages to update, > 0 to add, < 0 to remove */ 460 /* ring buffer pages to update, > 0 to add, < 0 to remove */
461 int nr_pages_to_update; 461 long nr_pages_to_update;
462 struct list_head new_pages; /* new pages to add */ 462 struct list_head new_pages; /* new pages to add */
463 struct work_struct update_pages_work; 463 struct work_struct update_pages_work;
464 struct completion update_done; 464 struct completion update_done;
@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1128 return 0; 1128 return 0;
1129} 1129}
1130 1130
1131static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) 1131static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1132{ 1132{
1133 int i;
1134 struct buffer_page *bpage, *tmp; 1133 struct buffer_page *bpage, *tmp;
1134 long i;
1135 1135
1136 for (i = 0; i < nr_pages; i++) { 1136 for (i = 0; i < nr_pages; i++) {
1137 struct page *page; 1137 struct page *page;
@@ -1168,7 +1168,7 @@ free_pages:
1168} 1168}
1169 1169
1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1171 unsigned nr_pages) 1171 unsigned long nr_pages)
1172{ 1172{
1173 LIST_HEAD(pages); 1173 LIST_HEAD(pages);
1174 1174
@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1193} 1193}
1194 1194
1195static struct ring_buffer_per_cpu * 1195static struct ring_buffer_per_cpu *
1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) 1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
1197{ 1197{
1198 struct ring_buffer_per_cpu *cpu_buffer; 1198 struct ring_buffer_per_cpu *cpu_buffer;
1199 struct buffer_page *bpage; 1199 struct buffer_page *bpage;
@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1293 struct lock_class_key *key) 1293 struct lock_class_key *key)
1294{ 1294{
1295 struct ring_buffer *buffer; 1295 struct ring_buffer *buffer;
1296 long nr_pages;
1296 int bsize; 1297 int bsize;
1297 int cpu, nr_pages; 1298 int cpu;
1298 1299
1299 /* keep it in its own cache line */ 1300 /* keep it in its own cache line */
1300 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 1301 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
1420} 1421}
1421 1422
1422static int 1423static int
1423rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) 1424rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1424{ 1425{
1425 struct list_head *tail_page, *to_remove, *next_page; 1426 struct list_head *tail_page, *to_remove, *next_page;
1426 struct buffer_page *to_remove_page, *tmp_iter_page; 1427 struct buffer_page *to_remove_page, *tmp_iter_page;
1427 struct buffer_page *last_page, *first_page; 1428 struct buffer_page *last_page, *first_page;
1428 unsigned int nr_removed; 1429 unsigned long nr_removed;
1429 unsigned long head_bit; 1430 unsigned long head_bit;
1430 int page_entries; 1431 int page_entries;
1431 1432
@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1642 int cpu_id) 1643 int cpu_id)
1643{ 1644{
1644 struct ring_buffer_per_cpu *cpu_buffer; 1645 struct ring_buffer_per_cpu *cpu_buffer;
1645 unsigned nr_pages; 1646 unsigned long nr_pages;
1646 int cpu, err = 0; 1647 int cpu, err = 0;
1647 1648
1648 /* 1649 /*
@@ -4640,8 +4641,9 @@ static int rb_cpu_notify(struct notifier_block *self,
4640 struct ring_buffer *buffer = 4641 struct ring_buffer *buffer =
4641 container_of(self, struct ring_buffer, cpu_notify); 4642 container_of(self, struct ring_buffer, cpu_notify);
4642 long cpu = (long)hcpu; 4643 long cpu = (long)hcpu;
4643 int cpu_i, nr_pages_same; 4644 long nr_pages_same;
4644 unsigned int nr_pages; 4645 int cpu_i;
4646 unsigned long nr_pages;
4645 4647
4646 switch (action) { 4648 switch (action) {
4647 case CPU_UP_PREPARE: 4649 case CPU_UP_PREPARE: