author      Vaibhav Nagarnaik <vnagarnaik@google.com>    2011-06-07 20:01:42 -0400
committer   Steven Rostedt <rostedt@goodmis.org>         2011-06-14 22:48:51 -0400
commit      d7ec4bfed6c97405c6417970ba06c439e08ab8e3 (patch)
tree        35272bc4262f70243382d085787765c5e83d6736 /kernel/trace
parent      22fe9b54d859e53bfbbbdc1a0a77a82bc453927c (diff)
ring-buffer: Set __GFP_NORETRY flag for ring buffer allocating process
The tracing ring buffer is allocated from kernel memory. While a large
chunk of memory is being allocated, the OOM killer might be invoked,
destabilizing the system: random processes can be killed during the
allocation.
This patch adds the __GFP_NORETRY flag to the ring buffer allocation
calls so that they fail gracefully when the system cannot satisfy the
allocation request, rather than triggering the OOM killer.
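
For illustration, the resulting call pattern is sketched below.
alloc_buffer_page() is a hypothetical helper written for this note, not
a function from the patch; it only shows the GFP_KERNEL | __GFP_NORETRY
idiom and the graceful-failure path:

    /* Hypothetical helper, for illustration only (not part of the patch). */
    static void *alloc_buffer_page(int cpu)
    {
            struct page *page;

            /*
             * With __GFP_NORETRY, the page allocator gives up and returns
             * NULL when the request cannot be satisfied cheaply, instead
             * of retrying hard and eventually invoking the OOM killer.
             */
            page = alloc_pages_node(cpu_to_node(cpu),
                                    GFP_KERNEL | __GFP_NORETRY, 0);
            if (!page)
                    return NULL;    /* caller unwinds its partial allocation */

            return page_address(page);
    }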
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: David Sharp <dhsharp@google.com>
Link: http://lkml.kernel.org/r/1307491302-9236-1-git-send-email-vnagarnaik@google.com
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/ring_buffer.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f00ede314eb6..731201bf4acc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1004,9 +1004,14 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
         for (i = 0; i < nr_pages; i++) {
                 struct page *page;
-
+                /*
+                 * __GFP_NORETRY flag makes sure that the allocation fails
+                 * gracefully without invoking oom-killer and the system is
+                 * not destabilized.
+                 */
                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-                                    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+                                    GFP_KERNEL | __GFP_NORETRY,
+                                    cpu_to_node(cpu_buffer->cpu));
                 if (!bpage)
                         goto free_pages;
 
@@ -1015,7 +1020,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                 list_add(&bpage->list, &pages);
 
                 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
-                                        GFP_KERNEL, 0);
+                                        GFP_KERNEL | __GFP_NORETRY, 0);
                 if (!page)
                         goto free_pages;
                 bpage->page = page_address(page);
@@ -1377,13 +1382,20 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
         for_each_buffer_cpu(buffer, cpu) {
                 for (i = 0; i < new_pages; i++) {
                         struct page *page;
+                        /*
+                         * __GFP_NORETRY flag makes sure that the allocation
+                         * fails gracefully without invoking oom-killer and
+                         * the system is not destabilized.
+                         */
                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
                                                   cache_line_size()),
-                                            GFP_KERNEL, cpu_to_node(cpu));
+                                            GFP_KERNEL | __GFP_NORETRY,
+                                            cpu_to_node(cpu));
                         if (!bpage)
                                 goto free_pages;
                         list_add(&bpage->list, &pages);
-                        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+                        page = alloc_pages_node(cpu_to_node(cpu),
+                                                GFP_KERNEL | __GFP_NORETRY, 0);
                         if (!page)
                                 goto free_pages;
                         bpage->page = page_address(page);
@@ -3737,7 +3749,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
         struct buffer_data_page *bpage;
         struct page *page;
 
-        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+        page = alloc_pages_node(cpu_to_node(cpu),
+                                GFP_KERNEL | __GFP_NORETRY, 0);
         if (!page)
                 return NULL;
 
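
With __GFP_NORETRY in place, an oversized request surfaces as an
ordinary error return instead of as OOM kills elsewhere in the system.
A minimal caller-side sketch, assuming (as the free_pages unwind path
suggests) that ring_buffer_resize() reports failure with a negative
errno; buffer and new_size stand in for whatever the caller has:

    int ret;

    ret = ring_buffer_resize(buffer, new_size);
    if (ret < 0)
            pr_warn("ring buffer resize failed: %d\n", ret);  /* e.g. -ENOMEM */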