about summary refs log tree commit diff stats
path: root/mm/slub.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 3964d3ce4c15..30354bfeb43d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -178,6 +178,12 @@ static enum {
 	SYSFS		/* Sysfs up */
 } slab_state = DOWN;
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1595,6 +1601,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	gfpflags &= slab_gfp_mask;
+
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
@@ -3104,6 +3112,14 @@ void __init kmem_cache_init(void)
 		nr_cpu_ids, nr_node_ids);
 }
 
+void __init kmem_cache_init_late(void)
+{
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+}
+
 /*
  * Find a mergeable slab cache
  */