 include/linux/gfp.h |  9 ++++++++-
 init/main.c         |  4 ++++
 mm/page_alloc.c     |  3 +++
 mm/slab.c           | 15 ++-------------
 mm/slub.c           | 12 +-----------
 5 files changed, 18 insertions(+), 25 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index cfdb35d71bca..7c777a0da17a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -99,7 +99,7 @@ struct vm_area_struct;
 			__GFP_NORETRY|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -348,4 +348,11 @@ static inline void oom_killer_enable(void)
 	oom_killer_disabled = false;
 }
 
+extern gfp_t gfp_allowed_mask;
+
+static inline void set_gfp_allowed_mask(gfp_t mask)
+{
+	gfp_allowed_mask = mask;
+}
+
 #endif /* __LINUX_GFP_H */
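For reference, the masking this header sets up is plain bit arithmetic: GFP_BOOT_MASK keeps every GFP bit except the three that can lead to sleeping or I/O (__GFP_WAIT, __GFP_IO, __GFP_FS). Below is a minimal userspace sketch of the effect on a GFP_KERNEL request; the flag values mirror the 2.6.x-era headers but are copied here purely for illustration, and the bits-mask shift of 22 is an assumption, not taken from this patch.

#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT	((gfp_t)0x10u)	/* allocation may sleep */
#define __GFP_IO	((gfp_t)0x40u)	/* may start physical I/O */
#define __GFP_FS	((gfp_t)0x80u)	/* may call into the FS layer */
#define __GFP_BITS_MASK	((gfp_t)((1u << 22) - 1))	/* shift assumed */

#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_BOOT_MASK	(__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

int main(void)
{
	gfp_t gfp_allowed_mask = GFP_BOOT_MASK;	/* boot-time default */
	gfp_t request = GFP_KERNEL;

	/* What the allocator actually sees during early boot: */
	printf("boot: requested 0x%x, effective 0x%x\n",
	       request, request & gfp_allowed_mask);	/* 0xd0 -> 0x0 */

	gfp_allowed_mask = __GFP_BITS_MASK;	/* after IRQs are enabled */
	printf("late: requested 0x%x, effective 0x%x\n",
	       request, request & gfp_allowed_mask);	/* 0xd0 -> 0xd0 */
	return 0;
}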
diff --git a/init/main.c b/init/main.c
index 1a65fdd06318..09131ec090c1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -642,6 +642,10 @@ asmlinkage void __init start_kernel(void)
642 "enabled early\n"); 642 "enabled early\n");
643 early_boot_irqs_on(); 643 early_boot_irqs_on();
644 local_irq_enable(); 644 local_irq_enable();
645
646 /* Interrupts are enabled now so all GFP allocations are safe. */
647 set_gfp_allowed_mask(__GFP_BITS_MASK);
648
645 kmem_cache_init_late(); 649 kmem_cache_init_late();
646 650
647 /* 651 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a5f3c278c573..6f0753fe694c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
@@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 
+	gfp_mask &= gfp_allowed_mask;
+
 	lockdep_trace_alloc(gfp_mask);
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
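One detail worth noting in this hunk: the mask is applied before lockdep_trace_alloc() and might_sleep_if(), so a GFP_KERNEL allocation made while gfp_allowed_mask is still GFP_BOOT_MASK loses its __GFP_WAIT bit before any may-sleep machinery sees it. The toy model below illustrates the ordering; it is plain C, not kernel code, and every *_sim name is invented for this sketch.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;
#define __GFP_WAIT ((gfp_t)0x10u)

static bool irqs_disabled_sim = true;	/* early boot: IRQs still off */
static gfp_t gfp_allowed_mask_sim = ~__GFP_WAIT; /* boot: no sleeping */

/* Stand-in for might_sleep_if(): trip if we could sleep with IRQs off. */
static void might_sleep_if_sim(bool cond)
{
	assert(!(cond && irqs_disabled_sim));
}

/* Stand-in for __alloc_pages_nodemask() with the hunk above applied. */
static void alloc_pages_sim(gfp_t gfp_mask)
{
	gfp_mask &= gfp_allowed_mask_sim;	/* strip disallowed bits first */
	might_sleep_if_sim(gfp_mask & __GFP_WAIT);
	printf("allocating with effective mask 0x%x\n", gfp_mask);
}

int main(void)
{
	alloc_pages_sim(__GFP_WAIT);	/* boot: __GFP_WAIT silently dropped */

	irqs_disabled_sim = false;	/* start_kernel() enabled IRQs ... */
	gfp_allowed_mask_sim = ~(gfp_t)0; /* ... and restored the full mask */
	alloc_pages_sim(__GFP_WAIT);	/* now the caller really may sleep */
	return 0;
}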
diff --git a/mm/slab.c b/mm/slab.c
index d08692303f6e..e74a16e4ced6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -305,12 +305,6 @@ struct kmem_list3 {
 };
 
 /*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
-/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
 
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
 
diff --git a/mm/slub.c b/mm/slub.c
index 4c6449310a0e..ce62b770e2fc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -179,12 +179,6 @@ static enum {
 	SYSFS		/* Sysfs up */
 } slab_state = DOWN;
 
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
-	gfpflags &= slab_gfp_mask;
+	gfpflags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void)
 
 void __init kmem_cache_init_late(void)
 {
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
 }
 
 /*
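Taken together, the slab.c and slub.c hunks make both allocators filter through the single gfp_allowed_mask owned by the page allocator instead of each maintaining its own copy of the same state: one switch to flip at boot, and no way for the two masks to drift apart. A compressed sketch of that consolidation pattern follows; apart from gfp_allowed_mask and set_gfp_allowed_mask, every name here is invented for the demo.

#include <stdio.h>

typedef unsigned int gfp_t;
#define __GFP_WAIT ((gfp_t)0x10u)

/* One definition, page-allocator side (cf. mm/page_alloc.c above). */
gfp_t gfp_allowed_mask = ~__GFP_WAIT;	/* boot default: no sleeping */

/* Header-side inline setter (cf. include/linux/gfp.h above). */
static inline void set_gfp_allowed_mask(gfp_t mask)
{
	gfp_allowed_mask = mask;
}

/* Every allocator entry point applies the same global mask. */
static gfp_t slab_alloc_demo(gfp_t flags)
{
	return flags & gfp_allowed_mask;
}

static gfp_t slub_alloc_demo(gfp_t flags)
{
	return flags & gfp_allowed_mask;
}

int main(void)
{
	printf("boot: slab 0x%x, slub 0x%x\n",
	       slab_alloc_demo(__GFP_WAIT), slub_alloc_demo(__GFP_WAIT));

	set_gfp_allowed_mask(~(gfp_t)0);	/* what start_kernel() does */
	printf("late: slab 0x%x, slub 0x%x\n",
	       slab_alloc_demo(__GFP_WAIT), slub_alloc_demo(__GFP_WAIT));
	return 0;
}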