aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-17 12:42:58 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-17 12:42:58 -0400
commit2fe2edf85ffe51fddb800bb0b332c7ae454a6db3 (patch)
tree2aa964fa004957fb3b191df6333f7a5d4f6f718b
parent9a07a7968407e20fe87ed6b5eb6a6000e4819492 (diff)
parent59643d1535eb220668692a5359de22545af579f6 (diff)
Merge tag 'trace-fixes-v4.6-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing ring-buffer fixes from Steven Rostedt: "Hao Qin reported an integer overflow possibility with signed and unsigned numbers in the ring-buffer code. https://bugzilla.kernel.org/show_bug.cgi?id=118001 At first I did not think this was too much of an issue, because the overflow would be caught later when either too much data was allocated or it would trigger RB_WARN_ON() which shuts down the ring buffer. But looking closer into it, I found that the right settings could bypass the checks and crash the kernel. Luckily, this is only accessible by root. The first fix is to convert all the variables into long, such that we don't get into issues between 32 bit variables being assigned 64 bit ones. This fixes the RB_WARN_ON() triggering. The next fix is to get rid of a duplicate DIV_ROUND_UP() that when called twice with the right value, can cause a kernel crash. The first DIV_ROUND_UP() is to normalize the input and it is checked against the minimum allowable value. But then DIV_ROUND_UP() is called again, which can overflow due to the (a + b - 1)/b logic. The first call upped the value; the second can overflow (with the +b part). The second call to DIV_ROUND_UP() came in via a second change a while ago and the code is cleaned up to remove it" * tag 'trace-fixes-v4.6-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: ring-buffer: Prevent overflow of size in ring_buffer_resize() ring-buffer: Use long for nr_pages to avoid overflow failures
-rw-r--r--kernel/trace/ring_buffer.c35
1 file changed, 18 insertions, 17 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 95181e36891a..9c143739b8d7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
437 raw_spinlock_t reader_lock; /* serialize readers */ 437 raw_spinlock_t reader_lock; /* serialize readers */
438 arch_spinlock_t lock; 438 arch_spinlock_t lock;
439 struct lock_class_key lock_key; 439 struct lock_class_key lock_key;
440 unsigned int nr_pages; 440 unsigned long nr_pages;
441 unsigned int current_context; 441 unsigned int current_context;
442 struct list_head *pages; 442 struct list_head *pages;
443 struct buffer_page *head_page; /* read from head */ 443 struct buffer_page *head_page; /* read from head */
@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
458 u64 write_stamp; 458 u64 write_stamp;
459 u64 read_stamp; 459 u64 read_stamp;
460 /* ring buffer pages to update, > 0 to add, < 0 to remove */ 460 /* ring buffer pages to update, > 0 to add, < 0 to remove */
461 int nr_pages_to_update; 461 long nr_pages_to_update;
462 struct list_head new_pages; /* new pages to add */ 462 struct list_head new_pages; /* new pages to add */
463 struct work_struct update_pages_work; 463 struct work_struct update_pages_work;
464 struct completion update_done; 464 struct completion update_done;
@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1128 return 0; 1128 return 0;
1129} 1129}
1130 1130
1131static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) 1131static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1132{ 1132{
1133 int i;
1134 struct buffer_page *bpage, *tmp; 1133 struct buffer_page *bpage, *tmp;
1134 long i;
1135 1135
1136 for (i = 0; i < nr_pages; i++) { 1136 for (i = 0; i < nr_pages; i++) {
1137 struct page *page; 1137 struct page *page;
@@ -1168,7 +1168,7 @@ free_pages:
1168} 1168}
1169 1169
1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1171 unsigned nr_pages) 1171 unsigned long nr_pages)
1172{ 1172{
1173 LIST_HEAD(pages); 1173 LIST_HEAD(pages);
1174 1174
@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1193} 1193}
1194 1194
1195static struct ring_buffer_per_cpu * 1195static struct ring_buffer_per_cpu *
1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) 1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
1197{ 1197{
1198 struct ring_buffer_per_cpu *cpu_buffer; 1198 struct ring_buffer_per_cpu *cpu_buffer;
1199 struct buffer_page *bpage; 1199 struct buffer_page *bpage;
@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1293 struct lock_class_key *key) 1293 struct lock_class_key *key)
1294{ 1294{
1295 struct ring_buffer *buffer; 1295 struct ring_buffer *buffer;
1296 long nr_pages;
1296 int bsize; 1297 int bsize;
1297 int cpu, nr_pages; 1298 int cpu;
1298 1299
1299 /* keep it in its own cache line */ 1300 /* keep it in its own cache line */
1300 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 1301 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
1420} 1421}
1421 1422
1422static int 1423static int
1423rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) 1424rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1424{ 1425{
1425 struct list_head *tail_page, *to_remove, *next_page; 1426 struct list_head *tail_page, *to_remove, *next_page;
1426 struct buffer_page *to_remove_page, *tmp_iter_page; 1427 struct buffer_page *to_remove_page, *tmp_iter_page;
1427 struct buffer_page *last_page, *first_page; 1428 struct buffer_page *last_page, *first_page;
1428 unsigned int nr_removed; 1429 unsigned long nr_removed;
1429 unsigned long head_bit; 1430 unsigned long head_bit;
1430 int page_entries; 1431 int page_entries;
1431 1432
@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1642 int cpu_id) 1643 int cpu_id)
1643{ 1644{
1644 struct ring_buffer_per_cpu *cpu_buffer; 1645 struct ring_buffer_per_cpu *cpu_buffer;
1645 unsigned nr_pages; 1646 unsigned long nr_pages;
1646 int cpu, err = 0; 1647 int cpu, err = 0;
1647 1648
1648 /* 1649 /*
@@ -1656,14 +1657,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1656 !cpumask_test_cpu(cpu_id, buffer->cpumask)) 1657 !cpumask_test_cpu(cpu_id, buffer->cpumask))
1657 return size; 1658 return size;
1658 1659
1659 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1660 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1660 size *= BUF_PAGE_SIZE;
1661 1661
1662 /* we need a minimum of two pages */ 1662 /* we need a minimum of two pages */
1663 if (size < BUF_PAGE_SIZE * 2) 1663 if (nr_pages < 2)
1664 size = BUF_PAGE_SIZE * 2; 1664 nr_pages = 2;
1665 1665
1666 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1666 size = nr_pages * BUF_PAGE_SIZE;
1667 1667
1668 /* 1668 /*
1669 * Don't succeed if resizing is disabled, as a reader might be 1669 * Don't succeed if resizing is disabled, as a reader might be
@@ -4640,8 +4640,9 @@ static int rb_cpu_notify(struct notifier_block *self,
4640 struct ring_buffer *buffer = 4640 struct ring_buffer *buffer =
4641 container_of(self, struct ring_buffer, cpu_notify); 4641 container_of(self, struct ring_buffer, cpu_notify);
4642 long cpu = (long)hcpu; 4642 long cpu = (long)hcpu;
4643 int cpu_i, nr_pages_same; 4643 long nr_pages_same;
4644 unsigned int nr_pages; 4644 int cpu_i;
4645 unsigned long nr_pages;
4645 4646
4646 switch (action) { 4647 switch (action) {
4647 case CPU_UP_PREPARE: 4648 case CPU_UP_PREPARE: