diff options
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c |  4 ++--
-rw-r--r--  mm/shmem.c      |  5 +----
-rw-r--r--  mm/slub.c       | 10 ++++++++--
3 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aecc9cdfdfce..5d714f8fb303 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
 			 * properly detect and handle allocation failures.
 			 *
 			 * We most definitely don't want callers attempting to
-			 * allocate greater than single-page units with
+			 * allocate greater than order-1 page units with
 			 * __GFP_NOFAIL.
 			 */
-			WARN_ON_ONCE(order > 0);
+			WARN_ON_ONCE(order > 1);
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
diff --git a/mm/shmem.c b/mm/shmem.c
index 5f2019fc7895..d713239ce2ce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
 		spin_lock_init(&info->lock);
 		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
+		cache_no_acl(inode);

 		switch (mode & S_IFMT) {
 		default:
@@ -2379,10 +2380,6 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
 	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
 	if (!p)
 		return NULL;
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	p->vfs_inode.i_acl = NULL;
-	p->vfs_inode.i_default_acl = NULL;
-#endif
 	return &p->vfs_inode;
 }

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;

 	flags |= s->allocflags;

-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*