Diffstat (limited to 'mm/oom_kill.c')
 mm/oom_kill.c | 60 +++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 35 insertions(+), 25 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dee0f75c3013..c86fbd1b590e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -44,6 +44,7 @@
 
 #include <asm/tlb.h>
 #include "internal.h"
+#include "slab.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
@@ -161,6 +162,25 @@ static bool oom_unkillable_task(struct task_struct *p,
         return false;
 }
 
+/*
+ * Print out unreclaimable slabs info when unreclaimable slabs amount is greater
+ * than all user memory (LRU pages)
+ */
+static bool is_dump_unreclaim_slabs(void)
+{
+        unsigned long nr_lru;
+
+        nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
+                 global_node_page_state(NR_INACTIVE_ANON) +
+                 global_node_page_state(NR_ACTIVE_FILE) +
+                 global_node_page_state(NR_INACTIVE_FILE) +
+                 global_node_page_state(NR_ISOLATED_ANON) +
+                 global_node_page_state(NR_ISOLATED_FILE) +
+                 global_node_page_state(NR_UNEVICTABLE);
+
+        return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
+}
+
 /**
  * oom_badness - heuristic function to determine which candidate task to kill
  * @p: task struct of which task we should calculate
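The new helper triggers a slab dump only when unreclaimable slab memory outweighs all LRU (user) pages combined. The same per-node counters are exported through /proc/vmstat in units of pages, so the check can be approximated from userspace; a minimal sketch, assuming the standard vmstat counter names:

    /* Userspace approximation of is_dump_unreclaim_slabs(): sum the LRU
     * counters from /proc/vmstat and compare against the unreclaimable
     * slab counter (all values are in pages). */
    #include <stdio.h>
    #include <string.h>

    static unsigned long vmstat_read(const char *key)
    {
            char name[64];
            unsigned long val, out = 0;
            FILE *f = fopen("/proc/vmstat", "r");

            if (!f)
                    return 0;
            while (fscanf(f, "%63s %lu", name, &val) == 2)
                    if (!strcmp(name, key))
                            out = val;
            fclose(f);
            return out;
    }

    int main(void)
    {
            unsigned long nr_lru =
                    vmstat_read("nr_active_anon") + vmstat_read("nr_inactive_anon") +
                    vmstat_read("nr_active_file") + vmstat_read("nr_inactive_file") +
                    vmstat_read("nr_isolated_anon") + vmstat_read("nr_isolated_file") +
                    vmstat_read("nr_unevictable");

            /* Same comparison as the kernel helper. */
            printf("would dump slab info: %s\n",
                   vmstat_read("nr_slab_unreclaimable") > nr_lru ? "yes" : "no");
            return 0;
    }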
@@ -201,7 +221,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
          * task's rss, pagetable and swap space use.
          */
         points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
-                atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
+                mm_pgtables_bytes(p->mm) / PAGE_SIZE;
         task_unlock(p);
 
         /*
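The old code summed the PTE- and PMD-level page-table pages separately; mm_pgtables_bytes() folds the accounting for every page-table level into a single byte count, so the score divides by PAGE_SIZE to stay in pages, the same unit as get_mm_rss() and the MM_SWAPENTS counter. A worked example with invented numbers:

    /* Invented numbers: a task holding 2 MiB of page tables on a
     * 4 KiB-page system contributes 2097152 / 4096 = 512 pages to
     * its badness score, the same unit as rss and swap entries. */
    unsigned long pgtables_bytes = 2UL << 20;
    unsigned long pgtable_points = pgtables_bytes / 4096;  /* 512 */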
@@ -369,15 +389,15 @@ static void select_bad_process(struct oom_control *oc)
  * Dumps the current memory state of all eligible tasks. Tasks not in the same
  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
  * are not shown.
- * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
- * swapents, oom_score_adj value, and name.
+ * State information includes task's pid, uid, tgid, vm size, rss,
+ * pgtables_bytes, swapents, oom_score_adj value, and name.
  */
 static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
         struct task_struct *p;
         struct task_struct *task;
 
-        pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
+        pr_info("[ pid ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
         rcu_read_lock();
         for_each_process(p) {
                 if (oom_unkillable_task(p, memcg, nodemask))
@@ -393,11 +413,10 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
                         continue;
                 }
 
-                pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
+                pr_info("[%5d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
                         task->pid, from_kuid(&init_user_ns, task_uid(task)),
                         task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-                        atomic_long_read(&task->mm->nr_ptes),
-                        mm_nr_pmds(task->mm),
+                        mm_pgtables_bytes(task->mm),
                         get_mm_counter(task->mm, MM_SWAPENTS),
                         task->signal->oom_score_adj, task->comm);
                 task_unlock(task);
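Note the unit change in the task dump: total_vm and rss remain page counts, while the new pgtables_bytes column prints the raw byte count from mm_pgtables_bytes() (hence the wider %8ld field). An illustrative line with invented values, here 29 page-table pages at 4 KiB each:

    [ pid ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name
    [ 1234]  1000  1234   185432     8523         118784      112             0 firefox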
@@ -407,23 +426,22 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 
 static void dump_header(struct oom_control *oc, struct task_struct *p)
 {
-        pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
-                current->comm, oc->gfp_mask, &oc->gfp_mask);
-        if (oc->nodemask)
-                pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
-        else
-                pr_cont("(null)");
-        pr_cont(", order=%d, oom_score_adj=%hd\n",
-                oc->order, current->signal->oom_score_adj);
+        pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
+                current->comm, oc->gfp_mask, &oc->gfp_mask,
+                nodemask_pr_args(oc->nodemask), oc->order,
+                current->signal->oom_score_adj);
         if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
                 pr_warn("COMPACTION is disabled!!!\n");
 
         cpuset_print_current_mems_allowed();
         dump_stack();
-        if (oc->memcg)
+        if (is_memcg_oom(oc))
                 mem_cgroup_print_oom_info(oc->memcg, p);
-        else
+        else {
                 show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
+                if (is_dump_unreclaim_slabs())
+                        dump_unreclaimable_slab();
+        }
         if (sysctl_oom_dump_tasks)
                 dump_tasks(oc->memcg, oc->nodemask);
 }
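The consolidated banner passes oc->nodemask straight to nodemask_pr_args(), which tolerates a NULL mask ("%*pbl" then prints "(null)"); this drops the pr_cont() sequence, whose fragments could interleave with concurrent printk output, and emits the whole line as one record. With no constrained nodemask it would log something like this (task name and values invented):

    Xorg invoked oom-killer: gfp_mask=0x14200ca(GFP_HIGHUSER_MOVABLE), nodemask=(null), order=0, oom_score_adj=0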
@@ -618,9 +636,6 @@ static int oom_reaper(void *unused)
 
 static void wake_oom_reaper(struct task_struct *tsk)
 {
-        if (!oom_reaper_th)
-                return;
-
         /* tsk is already queued? */
         if (tsk == oom_reaper_list || tsk->oom_reaper_list)
                 return;
@@ -638,11 +653,6 @@ static void wake_oom_reaper(struct task_struct *tsk)
 static int __init oom_init(void)
 {
         oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
-        if (IS_ERR(oom_reaper_th)) {
-                pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
-                        PTR_ERR(oom_reaper_th));
-                oom_reaper_th = NULL;
-        }
         return 0;
 }
 subsys_initcall(oom_init)