aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slab.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slab.c')
-rw-r--r--mm/slab.c18
1 files changed, 18 insertions, 0 deletions
diff --git a/mm/slab.c b/mm/slab.c
index cd76964b53bc..453efcb1c980 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,6 +304,12 @@ struct kmem_list3 {
 };
 
 /*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -1654,6 +1660,14 @@ void __init kmem_cache_init(void)
 	 */
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 static int __init cpucache_init(void)
 {
 	int cpu;
@@ -3354,6 +3368,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))
@@ -3434,6 +3450,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	flags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(flags);
 
 	if (slab_should_failslab(cachep, flags))