Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r-- | mm/oom_kill.c | 119
1 file changed, 82 insertions, 37 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 558f9afe6e4e..40ba05061a4f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -31,7 +31,7 @@
 int sysctl_panic_on_oom;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks;
-static DEFINE_SPINLOCK(zone_scan_mutex);
+static DEFINE_SPINLOCK(zone_scan_lock);
 /* #define DEBUG */
 
 /**
@@ -392,6 +392,9 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 		printk(KERN_WARNING "%s invoked oom-killer: "
 			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
 			current->comm, gfp_mask, order, current->oomkilladj);
+		task_lock(current);
+		cpuset_print_task_mems_allowed(current);
+		task_unlock(current);
 		dump_stack();
 		show_mem();
 		if (sysctl_oom_dump_tasks)
@@ -426,7 +429,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	unsigned long points = 0;
 	struct task_struct *p;
 
-	cgroup_lock();
 	read_lock(&tasklist_lock);
 retry:
 	p = select_bad_process(&points, mem);
@@ -441,7 +443,6 @@ retry:
 		goto retry;
 out:
 	read_unlock(&tasklist_lock);
-	cgroup_unlock();
 }
 #endif
 
@@ -470,7 +471,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 	struct zone *zone;
 	int ret = 1;
 
-	spin_lock(&zone_scan_mutex);
+	spin_lock(&zone_scan_lock);
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 		if (zone_is_oom_locked(zone)) {
 			ret = 0;
@@ -480,7 +481,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 		/*
-		 * Lock each zone in the zonelist under zone_scan_mutex so a
+		 * Lock each zone in the zonelist under zone_scan_lock so a
 		 * parallel invocation of try_set_zone_oom() doesn't succeed
 		 * when it shouldn't.
 		 */
@@ -488,7 +489,7 @@ int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 	}
 
 out:
-	spin_unlock(&zone_scan_mutex);
+	spin_unlock(&zone_scan_lock);
 	return ret;
 }
 
@@ -502,11 +503,82 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 	struct zoneref *z;
 	struct zone *zone;
 
-	spin_lock(&zone_scan_mutex);
+	spin_lock(&zone_scan_lock);
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 		zone_clear_flag(zone, ZONE_OOM_LOCKED);
 	}
-	spin_unlock(&zone_scan_mutex);
+	spin_unlock(&zone_scan_lock);
+}
+
+/*
+ * Must be called with tasklist_lock held for read.
+ */
+static void __out_of_memory(gfp_t gfp_mask, int order)
+{
+	if (sysctl_oom_kill_allocating_task) {
+		oom_kill_process(current, gfp_mask, order, 0, NULL,
+				"Out of memory (oom_kill_allocating_task)");
+
+	} else {
+		unsigned long points;
+		struct task_struct *p;
+
+retry:
+		/*
+		 * Rambo mode: Shoot down a process and hope it solves whatever
+		 * issues we may have.
+		 */
+		p = select_bad_process(&points, NULL);
+
+		if (PTR_ERR(p) == -1UL)
+			return;
+
+		/* Found nothing?!?! Either we hang forever, or we panic. */
+		if (!p) {
+			read_unlock(&tasklist_lock);
+			panic("Out of memory and no killable processes...\n");
+		}
+
+		if (oom_kill_process(p, gfp_mask, order, points, NULL,
+				"Out of memory"))
+			goto retry;
+	}
+}
+
+/*
+ * pagefault handler calls into here because it is out of memory but
+ * doesn't know exactly how or why.
+ */
+void pagefault_out_of_memory(void)
+{
+	unsigned long freed = 0;
+
+	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
+	if (freed > 0)
+		/* Got some memory back in the last second. */
+		return;
+
+	/*
+	 * If this is from memcg, oom-killer is already invoked.
+	 * and not worth to go system-wide-oom.
+	 */
+	if (mem_cgroup_oom_called(current))
+		goto rest_and_return;
+
+	if (sysctl_panic_on_oom)
+		panic("out of memory from page fault. panic_on_oom is selected.\n");
+
+	read_lock(&tasklist_lock);
+	__out_of_memory(0, 0); /* unknown gfp_mask and order */
+	read_unlock(&tasklist_lock);
+
+	/*
+	 * Give "p" a good chance of killing itself before we
+	 * retry to allocate memory.
+	 */
+rest_and_return:
+	if (!test_thread_flag(TIF_MEMDIE))
+		schedule_timeout_uninterruptible(1);
 }
 
 /**
@@ -522,8 +594,6 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
-	struct task_struct *p;
-	unsigned long points = 0;
 	unsigned long freed = 0;
 	enum oom_constraint constraint;
 
@@ -544,7 +614,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 
 	switch (constraint) {
 	case CONSTRAINT_MEMORY_POLICY:
-		oom_kill_process(current, gfp_mask, order, points, NULL,
+		oom_kill_process(current, gfp_mask, order, 0, NULL,
 			"No available memory (MPOL_BIND)");
 		break;
 
@@ -553,35 +623,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 			panic("out of memory. panic_on_oom is selected\n");
 		/* Fall-through */
 	case CONSTRAINT_CPUSET:
-		if (sysctl_oom_kill_allocating_task) {
-			oom_kill_process(current, gfp_mask, order, points, NULL,
-				"Out of memory (oom_kill_allocating_task)");
-			break;
-		}
-retry:
-		/*
-		 * Rambo mode: Shoot down a process and hope it solves whatever
-		 * issues we may have.
-		 */
-		p = select_bad_process(&points, NULL);
-
-		if (PTR_ERR(p) == -1UL)
-			goto out;
-
-		/* Found nothing?!?! Either we hang forever, or we panic. */
-		if (!p) {
-			read_unlock(&tasklist_lock);
-			panic("Out of memory and no killable processes...\n");
-		}
-
-		if (oom_kill_process(p, gfp_mask, order, points, NULL,
-				"Out of memory"))
-			goto retry;
-
+		__out_of_memory(gfp_mask, order);
 		break;
 	}
 
-out:
 	read_unlock(&tasklist_lock);
 
 	/*