author		Jiri Olsa <jolsa@redhat.com>	2013-03-19 10:35:09 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-05-01 06:34:46 -0400
commit		5919b30933d7c4fac1f214c59f26c5e990044f09
tree		1d719e5fd6534ed0a6dd00927e3b8343bb725e50 /kernel/events/ring_buffer.c
parent		1b0dac2ac6debdbf1541e15f2cede03613cf4465
perf: Fix vmalloc ring buffer pages handling
If we allocate a perf ring buffer with the size of a single (user)
page, we get memory corruption when releasing it in the
rb_free_work function (for the CONFIG_PERF_USE_VMALLOC option).

For a single page sized ring buffer the page_order is -1 (because
nr_pages is 0). This needs to be recognized in the rb_free_work
function so that the proper number of pages is released.

Adding a data_page_nr function that returns the number of allocated
data pages, and converting the rest of the code to use it.
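To illustrate the arithmetic, here is a minimal userspace sketch; the struct is a hypothetical stand-in for the two ring_buffer fields involved, not the real structure from kernel/events/internal.h. The old expression 1 << page_order(rb) shifts by -1 in the single-page case, which is undefined behavior in C (on x86 the shift count is masked, so it typically evaluates to 1 << 31), and rb_free_work then walks far past the single allocated page. The new data_page_nr() yields 0, so the '<=' loop touches exactly one page, the user page:

```c
#include <stdio.h>

/* Hypothetical stand-in for the two ring_buffer fields involved;
 * the real struct lives in kernel/events/internal.h. */
struct rb_demo {
	int nr_pages;   /* !!nr_pages after this patch: 0 or 1 */
	int page_order; /* ilog2(requested data pages); -1 for 0 pages */
};

/* Mirrors the helper added by the patch (guarded here so this
 * userspace demo avoids the 0 << -1 shift, which is formally UB). */
static int data_page_nr(const struct rb_demo *rb)
{
	return rb->nr_pages ? rb->nr_pages << rb->page_order : 0;
}

int main(void)
{
	struct rb_demo single = { .nr_pages = 0, .page_order = -1 };
	struct rb_demo four   = { .nr_pages = 1, .page_order = 2  };

	/* rb_free_work() releases data_page_nr() data pages plus the
	 * user page, hence the '<=' loop bound in the patch. */
	printf("single-page rb: %d data pages, %d page(s) freed\n",
	       data_page_nr(&single), data_page_nr(&single) + 1);
	printf("4-data-page rb: %d data pages, %d page(s) freed\n",
	       data_page_nr(&four), data_page_nr(&four) + 1);
	return 0;
}
```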
Reported-by: Jan Stancek <jstancek@redhat.com>
Original-patch-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/20130319143509.GA1128@krava.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events/ring_buffer.c')
-rw-r--r--	kernel/events/ring_buffer.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
```diff
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 97fddb09762b..cd55144270b5 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -326,11 +326,16 @@ void rb_free(struct ring_buffer *rb)
 }
 
 #else
+static int data_page_nr(struct ring_buffer *rb)
+{
+	return rb->nr_pages << page_order(rb);
+}
 
 struct page *
 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
 {
-	if (pgoff > (1UL << page_order(rb)))
+	/* The '>' counts in the user page. */
+	if (pgoff > data_page_nr(rb))
 		return NULL;
 
 	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
@@ -350,10 +355,11 @@ static void rb_free_work(struct work_struct *work)
 	int i, nr;
 
 	rb = container_of(work, struct ring_buffer, work);
-	nr = 1 << page_order(rb);
+	nr = data_page_nr(rb);
 
 	base = rb->user_page;
-	for (i = 0; i < nr + 1; i++)
+	/* The '<=' counts in the user page. */
+	for (i = 0; i <= nr; i++)
 		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
 	vfree(base);
@@ -387,7 +393,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	rb->user_page = all_buf;
 	rb->data_pages[0] = all_buf + PAGE_SIZE;
 	rb->page_order = ilog2(nr_pages);
-	rb->nr_pages = 1;
+	rb->nr_pages = !!nr_pages;
 
 	ring_buffer_init(rb, watermark, flags);
 
```
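For context, the single-page case arises when userspace maps only the perf control page (one page, zero data pages). Below is a minimal, hypothetical sketch of that pattern, not the actual reproducer from the report; on a CONFIG_PERF_USE_VMALLOC kernel without this fix, releasing the buffer would run rb_free_work() with the bad page count:

```c
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long psz = sysconf(_SC_PAGESIZE);
	void *p;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Map only the user (control) page: zero data pages, so on a
	 * CONFIG_PERF_USE_VMALLOC kernel rb_alloc() sees nr_pages == 0
	 * and page_order becomes -1. */
	p = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	munmap(p, psz);
	close(fd); /* releasing the buffer schedules rb_free_work() */
	return 0;
}
```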