diff options
-rw-r--r-- | mm/slub.c | 16 |
1 file changed, 16 insertions, 0 deletions
@@ -2103,8 +2103,24 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) | |||
2103 | 2103 | ||
2104 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) | 2104 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) |
2105 | { | 2105 | { |
2106 | #ifdef CONFIG_SMP | ||
2107 | /* | ||
2108 | * Will use reserve that does not require slab operation during | ||
2109 | * early boot. | ||
2110 | */ | ||
2106 | BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < | 2111 | BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < |
2107 | SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); | 2112 | SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); |
2113 | #else | ||
2114 | /* | ||
2115 | * Special hack for UP mode. allocpercpu() falls back to kmalloc | ||
2116 | * operations. So we cannot use that before the slab allocator is up | ||
2117 | * Simply get the smallest possible compound page. The page will be | ||
2118 | * released via kfree() when the cpu caches are resized later. | ||
2119 | */ | ||
2120 | if (slab_state < UP) | ||
2121 | s->cpu_slab = (__percpu void *)kmalloc_large(PAGE_SIZE << 1, GFP_NOWAIT); | ||
2122 | else | ||
2123 | #endif | ||
2108 | 2124 | ||
2109 | s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); | 2125 | s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); |
2110 | 2126 | ||