author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 20:15:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 20:15:20 -0400
commit     31453a9764f7e2a72a6e2c502ace586e2663a68c
tree       5d4db63de5b4b85d1ffdab4e95a75175a784a10a  /mm/oom_kill.c
parent     f9ba5375a8aae4aeea6be15df77e24707a429812
parent     93ed0e2d07b25aff4db1d61bfbcd1e82074c0ad5
Merge branch 'akpm-incoming-1'
* akpm-incoming-1: (176 commits)
scripts/checkpatch.pl: add check for declaration of pci_device_id
scripts/checkpatch.pl: add warnings for static char that could be static const char
checkpatch: version 0.31
checkpatch: statement/block context analyser should look at sanitised lines
checkpatch: handle EXPORT_SYMBOL for DEVICE_ATTR and similar
checkpatch: clean up structure definition macro handline
checkpatch: update copyright dates
checkpatch: Add additional attribute #defines
checkpatch: check for incorrect permissions
checkpatch: ensure kconfig help checks only apply when we are adding help
checkpatch: simplify and consolidate "missing space after" checks
checkpatch: add check for space after struct, union, and enum
checkpatch: returning errno typically should be negative
checkpatch: handle casts better fixing false categorisation of : as binary
checkpatch: ensure we do not collapse bracketed sections into constants
checkpatch: suggest cleanpatch and cleanfile when appropriate
checkpatch: types may sit on a line on their own
checkpatch: fix regressions in "fix handling of leading spaces"
div64_u64(): improve precision on 32bit platforms
lib/parser: cleanup match_number()
...
Diffstat (limited to 'mm/oom_kill.c')
 mm/oom_kill.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4029583a1024..7dcca55ede7c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -162,10 +162,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
 		return 0;
 
 	/*
-	 * Shortcut check for OOM_SCORE_ADJ_MIN so the entire heuristic doesn't
-	 * need to be executed for something that cannot be killed.
+	 * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
+	 * so the entire heuristic doesn't need to be executed for something
+	 * that cannot be killed.
 	 */
-	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+	if (atomic_read(&p->mm->oom_disable_count)) {
 		task_unlock(p);
 		return 0;
 	}
@@ -403,16 +404,40 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 #define K(x) ((x) << (PAGE_SHIFT-10))
 static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
+	struct task_struct *q;
+	struct mm_struct *mm;
+
 	p = find_lock_task_mm(p);
 	if (!p)
 		return 1;
 
+	/* mm cannot be safely dereferenced after task_unlock(p) */
+	mm = p->mm;
+
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
 		task_pid_nr(p), p->comm, K(p->mm->total_vm),
 		K(get_mm_counter(p->mm, MM_ANONPAGES)),
 		K(get_mm_counter(p->mm, MM_FILEPAGES)));
 	task_unlock(p);
 
+	/*
+	 * Kill all processes sharing p->mm in other thread groups, if any.
+	 * They don't get access to memory reserves or a higher scheduler
+	 * priority, though, to avoid depletion of all memory or task
+	 * starvation. This prevents mm->mmap_sem livelock when an oom killed
+	 * task cannot exit because it requires the semaphore and its contended
+	 * by another thread trying to allocate memory itself. That thread will
+	 * now get access to memory reserves since it has a pending fatal
+	 * signal.
+	 */
+	for_each_process(q)
+		if (q->mm == mm && !same_thread_group(q, p)) {
+			task_lock(q);	/* Protect ->comm from prctl() */
+			pr_err("Kill process %d (%s) sharing same memory\n",
+				task_pid_nr(q), q->comm);
+			task_unlock(q);
+			force_sig(SIGKILL, q);
+		}
 
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 	force_sig(SIGKILL, p);
@@ -680,7 +705,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	read_lock(&tasklist_lock);
 	if (sysctl_oom_kill_allocating_task &&
 	    !oom_unkillable_task(current, NULL, nodemask) &&
-	    (current->signal->oom_adj != OOM_DISABLE)) {
+	    current->mm && !atomic_read(&current->mm->oom_disable_count)) {
 		/*
 		 * oom_kill_process() needs tasklist_lock held. If it returns
 		 * non-zero, current could not be killed so we must fallback to
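
The key change above is that oom_badness() and out_of_memory() no longer compare a single task's oom_score_adj/oom_adj; they read a per-mm atomic counter, so a task is skipped whenever any thread sharing its mm has OOM disabled. The bookkeeping that increments and decrements oom_disable_count is not part of this file's diff (the full series maintains it when oom_score_adj is updated and across fork/exec/exit). The sketch below is a minimal userspace model of that intended semantic, not kernel code; the helper names set_oom_score_adj() and badness_shortcut() are illustrative and do not appear in the patch.

/* Minimal userspace model of the per-mm OOM-disable counter (illustrative only). */
#include <stdatomic.h>
#include <stdio.h>

#define OOM_SCORE_ADJ_MIN (-1000)

struct mm_struct {
	atomic_int oom_disable_count;	/* threads sharing this mm with OOM disabled */
};

struct task_struct {
	struct mm_struct *mm;
	int oom_score_adj;
};

/* Hypothetical bookkeeping: keep the shared counter in sync when a thread
 * changes its oom_score_adj. */
static void set_oom_score_adj(struct task_struct *t, int adj)
{
	if (adj == OOM_SCORE_ADJ_MIN && t->oom_score_adj != OOM_SCORE_ADJ_MIN)
		atomic_fetch_add(&t->mm->oom_disable_count, 1);
	else if (adj != OOM_SCORE_ADJ_MIN && t->oom_score_adj == OOM_SCORE_ADJ_MIN)
		atomic_fetch_sub(&t->mm->oom_disable_count, 1);
	t->oom_score_adj = adj;
}

/* Mirrors the shortcut added to oom_badness(): a nonzero counter means some
 * thread sharing the mm is unkillable, so the whole task scores 0. */
static unsigned int badness_shortcut(struct task_struct *p)
{
	if (atomic_load(&p->mm->oom_disable_count))
		return 0;
	return 1;	/* stand-in for the real heuristic */
}

int main(void)
{
	struct mm_struct mm = { 0 };
	struct task_struct t1 = { &mm, 0 }, t2 = { &mm, 0 };

	set_oom_score_adj(&t1, OOM_SCORE_ADJ_MIN);
	printf("badness(t2) = %u\n", badness_shortcut(&t2));	/* 0: spared via shared mm */

	set_oom_score_adj(&t1, 0);
	printf("badness(t2) = %u\n", badness_shortcut(&t2));	/* 1: heuristic would run */
	return 0;
}

The design point of the counter is that the OOM killer can decide with one atomic read whether killing a task would be futile, instead of walking every task that might share the mm (via CLONE_VM) while holding tasklist_lock; the new for_each_process() loop in oom_kill_task() then handles the complementary case, signalling those sharers so a kill cannot deadlock on a contended mmap_sem.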
