aboutsummaryrefslogtreecommitdiffstats
path: root/mm/oom_kill.c
diff options
context:
space:
mode:
author: David Rientjes <rientjes@google.com>, 2012-07-31 19:43:40 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>, 2012-07-31 21:42:44 -0400
commit462607ecc519b197f7b5cc6b024a1c26fa6fc0ac (patch)
tree3ecf52a4c1052a4af7d1ea7f10ef193938a9d046 /mm/oom_kill.c
parent62ce1c706f817cb9defef3ac2dfdd815149f2968 (diff)
mm, oom: introduce helper function to process threads during scan
This patch introduces a helper function to process each thread during the iteration over the tasklist. A new return type, enum oom_scan_t, is defined to determine the future behavior of the iteration: - OOM_SCAN_OK: continue scanning the thread and find its badness, - OOM_SCAN_CONTINUE: do not consider this thread for oom kill, it's ineligible, - OOM_SCAN_ABORT: abort the iteration and return, or - OOM_SCAN_SELECT: always select this thread with the highest badness possible. There is no functional change with this patch. This new helper function will be used in the next patch in the memory controller. Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Reviewed-by: Michal Hocko <mhocko@suse.cz> Signed-off-by: David Rientjes <rientjes@google.com> Cc: Oleg Nesterov <oleg@redhat.com> Reviewed-by: Sha Zhengju <handai.szj@taobao.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--mm/oom_kill.c111
1 file changed, 65 insertions(+), 46 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index e6c10640e56b..f8eba9651c0c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -288,6 +288,59 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
288} 288}
289#endif 289#endif
290 290
/*
 * Verdict returned for each thread examined during the oom-kill
 * tasklist scan; tells the caller how the iteration should proceed.
 */
enum oom_scan_t {
	OOM_SCAN_OK,		/* eligible: go on and compute its badness */
	OOM_SCAN_CONTINUE,	/* ineligible: skip this thread entirely */
	OOM_SCAN_ABORT,		/* stop scanning and bail out of the loop */
	OOM_SCAN_SELECT,	/* pick this thread with maximal badness */
};
297
298static enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
299 struct mem_cgroup *memcg, unsigned long totalpages,
300 const nodemask_t *nodemask, bool force_kill)
301{
302 if (task->exit_state)
303 return OOM_SCAN_CONTINUE;
304 if (oom_unkillable_task(task, memcg, nodemask))
305 return OOM_SCAN_CONTINUE;
306
307 /*
308 * This task already has access to memory reserves and is being killed.
309 * Don't allow any other task to have access to the reserves.
310 */
311 if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
312 if (unlikely(frozen(task)))
313 __thaw_task(task);
314 if (!force_kill)
315 return OOM_SCAN_ABORT;
316 }
317 if (!task->mm)
318 return OOM_SCAN_CONTINUE;
319
320 if (task->flags & PF_EXITING) {
321 /*
322 * If task is current and is in the process of releasing memory,
323 * allow the "kill" to set TIF_MEMDIE, which will allow it to
324 * access memory reserves. Otherwise, it may stall forever.
325 *
326 * The iteration isn't broken here, however, in case other
327 * threads are found to have already been oom killed.
328 */
329 if (task == current)
330 return OOM_SCAN_SELECT;
331 else if (!force_kill) {
332 /*
333 * If this task is not being ptraced on exit, then wait
334 * for it to finish before killing some other task
335 * unnecessarily.
336 */
337 if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
338 return OOM_SCAN_ABORT;
339 }
340 }
341 return OOM_SCAN_OK;
342}
343
291/* 344/*
292 * Simple selection loop. We chose the process with the highest 345 * Simple selection loop. We chose the process with the highest
293 * number of 'points'. We expect the caller will lock the tasklist. 346 * number of 'points'. We expect the caller will lock the tasklist.
@@ -305,53 +358,19 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
305 do_each_thread(g, p) { 358 do_each_thread(g, p) {
306 unsigned int points; 359 unsigned int points;
307 360
308 if (p->exit_state) 361 switch (oom_scan_process_thread(p, memcg, totalpages, nodemask,
309 continue; 362 force_kill)) {
310 if (oom_unkillable_task(p, memcg, nodemask)) 363 case OOM_SCAN_SELECT:
311 continue; 364 chosen = p;
312 365 chosen_points = ULONG_MAX;
313 /* 366 /* fall through */
314 * This task already has access to memory reserves and is 367 case OOM_SCAN_CONTINUE:
315 * being killed. Don't allow any other task access to the
316 * memory reserve.
317 *
318 * Note: this may have a chance of deadlock if it gets
319 * blocked waiting for another task which itself is waiting
320 * for memory. Is there a better alternative?
321 */
322 if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
323 if (unlikely(frozen(p)))
324 __thaw_task(p);
325 if (!force_kill)
326 return ERR_PTR(-1UL);
327 }
328 if (!p->mm)
329 continue; 368 continue;
330 369 case OOM_SCAN_ABORT:
331 if (p->flags & PF_EXITING) { 370 return ERR_PTR(-1UL);
332 /* 371 case OOM_SCAN_OK:
333 * If p is the current task and is in the process of 372 break;
334 * releasing memory, we allow the "kill" to set 373 };
335 * TIF_MEMDIE, which will allow it to gain access to
336 * memory reserves. Otherwise, it may stall forever.
337 *
338 * The loop isn't broken here, however, in case other
339 * threads are found to have already been oom killed.
340 */
341 if (p == current) {
342 chosen = p;
343 chosen_points = ULONG_MAX;
344 } else if (!force_kill) {
345 /*
346 * If this task is not being ptraced on exit,
347 * then wait for it to finish before killing
348 * some other task unnecessarily.
349 */
350 if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
351 return ERR_PTR(-1UL);
352 }
353 }
354
355 points = oom_badness(p, memcg, nodemask, totalpages); 374 points = oom_badness(p, memcg, nodemask, totalpages);
356 if (points > chosen_points) { 375 if (points > chosen_points) {
357 chosen = p; 376 chosen = p;