about | summary | refs | log | tree | commit | diff | stats
path: root/mm/slab.c
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-06-17 23:24:12 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-18 16:12:57 -0400
commitdcce284a259373f9e5570f2e33f79eca84fcf565 (patch)
treeafc4b23208974f17c080ea3d2ecfbaca4254c010 /mm/slab.c
parent9729a6eb5878a3daa18395f2b5fb38bf9359a761 (diff)
mm: Extend gfp masking to the page allocator
The page allocator also needs the masking of gfp flags during boot, so this moves it out of slab/slub and uses it with the page allocator as well.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  |  15
1 files changed, 2 insertions, 13 deletions
diff --git a/mm/slab.c b/mm/slab.c
index d08692303f6e..e74a16e4ced6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -305,12 +305,6 @@ struct kmem_list3 {
 };
 
 /*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
-/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
 
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
 