summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorMichal Hocko <mhocko@suse.com>2016-01-14 18:20:36 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-14 19:00:49 -0500
commit5020e285856cb406224e6f977fd893a006077806 (patch)
treef0c0e5b5f18b95cab526c8a7bf96f2f415f9a3c6 /mm/page_alloc.c
parent86760a2c6e827858f8eaf020b12b72b3210faf79 (diff)
mm, oom: give __GFP_NOFAIL allocations access to memory reserves
__GFP_NOFAIL is a big hammer used to ensure that the allocation request can never fail. This is a strong requirement and as such it also deserves a special treatment when the system is OOM. The primary problem here is that the allocation request might have come with some locks held and the oom victim might be blocked on the same locks. This is basically an OOM deadlock situation. This patch tries to reduce the risk of such deadlocks by giving __GFP_NOFAIL allocations a special treatment and letting them dive into memory reserves after oom killer invocation. This should help them to make progress and release resources they are holding. The OOM victim should compensate for the reserves consumption. Signed-off-by: Michal Hocko <mhocko@suse.com> Suggested-by: Andrea Arcangeli <aarcange@redhat.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Johannes Weiner <hannes@cmpxchg.org> Acked-by: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c15
1 files changed, 14 insertions, 1 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d7f5bc895157..ce63d603820f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2732,8 +2732,21 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 		goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
+	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
 		*did_some_progress = 1;
+
+		if (gfp_mask & __GFP_NOFAIL) {
+			page = get_page_from_freelist(gfp_mask, order,
+					ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
+			/*
+			 * fallback to ignore cpuset restriction if our nodes
+			 * are depleted
+			 */
+			if (!page)
+				page = get_page_from_freelist(gfp_mask, order,
+					ALLOC_NO_WATERMARKS, ac);
+		}
+	}
 out:
 	mutex_unlock(&oom_lock);
 	return page;