diff options

 mm/page_alloc.c |  4 ++--
 mm/slub.c       | 10 ++++++++--
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aecc9cdfdfce..5d714f8fb303 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
 			 * properly detect and handle allocation failures.
 			 *
 			 * We most definitely don't want callers attempting to
-			 * allocate greater than single-page units with
+			 * allocate greater than order-1 page units with
 			 * __GFP_NOFAIL.
 			 */
-			WARN_ON_ONCE(order > 0);
+			WARN_ON_ONCE(order > 1);
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*