author:    Linus Torvalds <torvalds@linux-foundation.org>  2017-07-21 16:59:51 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2017-07-21 16:59:51 -0400
commit:    f79ec886f994918de057dd224fa2dfdc162bb3c3
tree:      8d66a5ebf3d2f9475a908b7c5b37675965ee1c8d /kernel/trace/ring_buffer.c
parent:    b0a752818bdba3d0fe9de593618ccab9f1101508
parent:    f86f418059b94aa01f9342611a272ca60c583e89
Merge tag 'trace-v4.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
"Three minor updates
- Use the new GFP_RETRY_MAYFAIL to be more aggressive in allocating
memory for the ring buffer without causing OOMs
- Fix a memory leak in adding and removing instances
- Add __rcu annotation to be able to debug RCU usage of function
tracing a bit better"
* tag 'trace-v4.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
trace: fix the errors caused by incompatible type of RCU variables
tracing: Fix kmemleak in instance_rmdir
tracing/ring_buffer: Try harder to allocate
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 4ae268e687fe..529cc50d7243 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1136,12 +1136,12 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
 		/*
-		 * __GFP_NORETRY flag makes sure that the allocation fails
-		 * gracefully without invoking oom-killer and the system is
-		 * not destabilized.
+		 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
+		 * gracefully without invoking oom-killer and the system is not
+		 * destabilized.
 		 */
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL | __GFP_NORETRY,
+				    GFP_KERNEL | __GFP_RETRY_MAYFAIL,
 				    cpu_to_node(cpu));
 		if (!bpage)
 			goto free_pages;
@@ -1149,7 +1149,7 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 		list_add(&bpage->list, pages);
 
 		page = alloc_pages_node(cpu_to_node(cpu),
-					GFP_KERNEL | __GFP_NORETRY, 0);
+					GFP_KERNEL | __GFP_RETRY_MAYFAIL, 0);
 		if (!page)
 			goto free_pages;
 		bpage->page = page_address(page);
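For context, __GFP_RETRY_MAYFAIL tells the page allocator to retry harder than __GFP_NORETRY before giving up, while still returning NULL on failure rather than invoking the OOM killer. A minimal sketch of the resulting allocation pattern is below; the helper name is hypothetical and only mirrors the pattern in __rb_allocate_pages(), it is not taken from the kernel source.

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

/*
 * Illustrative sketch only: allocate one node-local buffer page the
 * way the ring buffer does after this change.  GFP_KERNEL |
 * __GFP_RETRY_MAYFAIL retries the allocation harder than
 * __GFP_NORETRY would, but still returns NULL on failure instead of
 * triggering the OOM killer, so the caller can back out gracefully.
 */
static void *rb_example_alloc_page(int cpu)	/* hypothetical helper */
{
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_RETRY_MAYFAIL, 0);
	if (!page)
		return NULL;	/* caller unwinds anything already built */

	return page_address(page);
}
```

As the goto free_pages paths in the hunks above show, the caller already unwinds partially built page lists on failure, so a flag that tries hard but still fails gracefully suits this code better than one that could destabilize the system.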