author     Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-04-04 11:29:57 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-04-06 08:56:52 -0400
commit     927e56db6253225166d521cee3772624347b5cd5
tree       fb922defc804cd746ff42d3a784e4aee0014dcb8   /kernel/trace/ring_buffer.c
parent     2a872fa4e9c8adc79c830e4009e1cc0c013a9d8a
ring-buffer: Add set/clear_current_oom_origin() during allocations
As si_mem_available() can say there is enough memory even though the
memory available is not usable by the ring buffer, it is best to not
kill innocent applications because the ring buffer is taking up all the
memory while it is trying to allocate a great deal of memory.

If the allocator is user space (because kernel threads can also
increase the size of the kernel ring buffer on boot up), then after
si_mem_available() says there is enough memory, set the OOM killer to
kill the current task if an OOM triggers during the allocation.

Link: http://lkml.kernel.org/r/20180404062340.GD6312@dhcp22.suse.cz

Suggested-by: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
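For readers unfamiliar with the OOM-origin protocol the patch relies on, here is a
minimal, self-contained sketch of the same pattern (the helper name and page array
are hypothetical and not part of this patch): a thread backed by user space marks
itself as the preferred OOM victim before a large allocation loop, bails out if a
fatal signal arrives (meaning the OOM killer chose it), and clears the mark on every
exit path.

/* Hypothetical illustration of the pattern this patch applies. */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched/signal.h>

static int example_alloc_pages(struct page **pages, long nr_pages, int nid)
{
	/* Only a task with user space backing should become an OOM target. */
	bool user_thread = current->mm != NULL;
	gfp_t mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
	long i;

	/* Rough check only; si_mem_available() is just an estimate. */
	if (si_mem_available() < nr_pages)
		return -ENOMEM;

	if (user_thread)
		set_current_oom_origin();

	for (i = 0; i < nr_pages; i++) {
		/* If the OOM killer already picked us, stop allocating. */
		if (user_thread && fatal_signal_pending(current))
			goto free_pages;

		pages[i] = alloc_pages_node(nid, mflags, 0);
		if (!pages[i])
			goto free_pages;
	}

	if (user_thread)
		clear_current_oom_origin();
	return 0;

free_pages:
	while (i-- > 0)
		__free_pages(pages[i], 0);
	if (user_thread)
		clear_current_oom_origin();
	return -ENOMEM;
}

The key design point is that set_current_oom_origin() does not trigger anything by
itself; it only tells a future OOM invocation, possibly caused by some other task's
allocation, which task to kill first, which is why the loop also has to check
fatal_signal_pending() to notice that it has been chosen.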
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--   kernel/trace/ring_buffer.c   48
1 file changed, 38 insertions, 10 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 966128f02121..c9cb9767d49b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -22,6 +22,7 @@
 #include <linux/hash.h>
 #include <linux/list.h>
 #include <linux/cpu.h>
+#include <linux/oom.h>
 
 #include <asm/local.h>
 
@@ -1162,35 +1163,60 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 {
 	struct buffer_page *bpage, *tmp;
+	bool user_thread = current->mm != NULL;
+	gfp_t mflags;
 	long i;
 
-	/* Check if the available memory is there first */
+	/*
+	 * Check if the available memory is there first.
+	 * Note, si_mem_available() only gives us a rough estimate of available
+	 * memory. It may not be accurate. But we don't care, we just want
+	 * to prevent doing any allocation when it is obvious that it is
+	 * not going to succeed.
+	 */
 	i = si_mem_available();
 	if (i < nr_pages)
 		return -ENOMEM;
 
+	/*
+	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
+	 * gracefully without invoking oom-killer and the system is not
+	 * destabilized.
+	 */
+	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
+
+	/*
+	 * If a user thread allocates too much, and si_mem_available()
+	 * reports there's enough memory, even though there is not.
+	 * Make sure the OOM killer kills this thread. This can happen
+	 * even with RETRY_MAYFAIL because another task may be doing
+	 * an allocation after this task has taken all memory.
+	 * This is the task the OOM killer needs to take out during this
+	 * loop, even if it was triggered by an allocation somewhere else.
+	 */
+	if (user_thread)
+		set_current_oom_origin();
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
-		/*
-		 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
-		 * gracefully without invoking oom-killer and the system is not
-		 * destabilized.
-		 */
+
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL | __GFP_RETRY_MAYFAIL,
-				    cpu_to_node(cpu));
+				    mflags, cpu_to_node(cpu));
 		if (!bpage)
 			goto free_pages;
 
 		list_add(&bpage->list, pages);
 
-		page = alloc_pages_node(cpu_to_node(cpu),
-					GFP_KERNEL | __GFP_RETRY_MAYFAIL, 0);
+		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
 		if (!page)
 			goto free_pages;
 		bpage->page = page_address(page);
 		rb_init_page(bpage->page);
+
+		if (user_thread && fatal_signal_pending(current))
+			goto free_pages;
 	}
+	if (user_thread)
+		clear_current_oom_origin();
 
 	return 0;
 
@@ -1199,6 +1225,8 @@ free_pages:
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
+	if (user_thread)
+		clear_current_oom_origin();
 
 	return -ENOMEM;
 }