author     Michal Hocko <mhocko@suse.com>              2017-02-22 18:46:25 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 19:41:30 -0500
commit     6c18ba7a18997dadbf7ee912e15677ad2c9993e5 (patch)
tree       96d56cead2db0b666c2c1398cc3468cdbdbd7f7e /mm/page_alloc.c
parent     06ad276ac18742c6b281698d41b27a290cd42407 (diff)
mm: help __GFP_NOFAIL allocations which do not trigger OOM killer
Now that __GFP_NOFAIL no longer overrides decisions to skip the OOM killer, we are left with requests which have to loop inside the allocator without invoking the OOM killer (e.g. GFP_NOFS|__GFP_NOFAIL used by fs code), and so they might, in very unlikely situations, loop forever - e.g. other parallel requests could starve them.

This patch tries to limit the likelihood of such a lockup by giving these __GFP_NOFAIL requests a chance to move on by consuming a small part of the memory reserves. We use ALLOC_HARDER, which should be enough to prevent starvation by regular allocation requests, yet should not consume enough of the reserves to disrupt high-priority requests (ALLOC_HIGH).

While we are at it, introduce a helper __alloc_pages_cpuset_fallback which enforces cpusets but allows falling back to ignoring them if the first attempt fails. __GFP_NOFAIL requests can be considered important enough to allow the cpuset runaway in order for the system to move on. It is highly unlikely that any of these will be GFP_USER anyway.

Link: http://lkml.kernel.org/r/20161220134904.21023-4-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
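For context on the GFP_NOFS|__GFP_NOFAIL pattern the commit message refers to: such requests typically come from filesystem code that cannot back out once a transaction is in progress. The following is a minimal, hypothetical sketch of such a caller (not part of this patch; the "examplefs" names are purely illustrative), showing the kind of allocation that, after this change, may dip into the reserves via ALLOC_HARDER instead of looping indefinitely:

#include <linux/slab.h>

/* Hypothetical filesystem structure, for illustration only. */
struct examplefs_node {
        struct examplefs_node *next;
        unsigned long data;
};

/*
 * GFP_NOFS avoids recursing into filesystem reclaim; __GFP_NOFAIL tells
 * the allocator the caller cannot handle NULL, so the request loops in
 * the page allocator until it succeeds.  With this patch such a request
 * may also consume a small part of the memory reserves (ALLOC_HARDER)
 * so that parallel regular allocations cannot starve it forever.
 */
static struct examplefs_node *examplefs_alloc_node(void)
{
        return kmalloc(sizeof(struct examplefs_node), GFP_NOFS | __GFP_NOFAIL);
}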
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  46
1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1e37740837ac..a179607de26f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3056,6 +3056,26 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
 }
 
 static inline struct page *
+__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
+                              unsigned int alloc_flags,
+                              const struct alloc_context *ac)
+{
+        struct page *page;
+
+        page = get_page_from_freelist(gfp_mask, order,
+                        alloc_flags|ALLOC_CPUSET, ac);
+        /*
+         * fallback to ignore cpuset restriction if our nodes
+         * are depleted
+         */
+        if (!page)
+                page = get_page_from_freelist(gfp_mask, order,
+                                alloc_flags, ac);
+
+        return page;
+}
+
+static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         const struct alloc_context *ac, unsigned long *did_some_progress)
 {
@@ -3119,17 +3139,13 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
                 *did_some_progress = 1;
 
-                if (gfp_mask & __GFP_NOFAIL) {
-                        page = get_page_from_freelist(gfp_mask, order,
-                                        ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
-                        /*
-                         * fallback to ignore cpuset restriction if our nodes
-                         * are depleted
-                         */
-                        if (!page)
-                                page = get_page_from_freelist(gfp_mask, order,
-                                        ALLOC_NO_WATERMARKS, ac);
-                }
+                /*
+                 * Help non-failing allocations by giving them access to memory
+                 * reserves
+                 */
+                if (gfp_mask & __GFP_NOFAIL)
+                        page = __alloc_pages_cpuset_fallback(gfp_mask, order,
+                                        ALLOC_NO_WATERMARKS, ac);
         }
 out:
         mutex_unlock(&oom_lock);
@@ -3785,6 +3801,16 @@ nopage:
          */
         WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
 
+        /*
+         * Help non-failing allocations by giving them access to memory
+         * reserves but do not use ALLOC_NO_WATERMARKS because this
+         * could deplete whole memory reserves which would just make
+         * the situation worse
+         */
+        page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
+        if (page)
+                goto got_pg;
+
         cond_resched();
         goto retry;
 }