-rw-r--r--	mm/slub.c	16
1 file changed, 0 insertions(+), 16 deletions(-)
@@ -2104,24 +2104,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_SMP
-	/*
-	 * Will use reserve that does not require slab operation during
-	 * early boot.
-	 */
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
-#else
-	/*
-	 * Special hack for UP mode. allocpercpu() falls back to kmalloc
-	 * operations. So we cannot use that before the slab allocator is up
-	 * Simply get the smallest possible compound page. The page will be
-	 * released via kfree() when the cpu caches are resized later.
-	 */
-	if (slab_state < UP)
-		s->cpu_slab = (__percpu void *)kmalloc_large(PAGE_SIZE << 1, GFP_NOWAIT);
-	else
-#endif
 
 	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
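What the hunk shows: alloc_kmem_cache_cpus() used to special-case UP builds, where alloc_percpu() historically fell back to kmalloc() and so could not run before the slab allocator itself was initialized; during early boot it instead grabbed an order-1 compound page via kmalloc_large() and freed it once the cpu caches were resized. The patch deletes that fallback and calls alloc_percpu() unconditionally, which only makes sense if the per-cpu allocator can now serve early-boot allocations on UP as well. The BUILD_BUG_ON() that survives asserts at compile time that the early per-cpu reserve (PERCPU_DYNAMIC_EARLY_SIZE) is big enough to hold one struct kmem_cache_cpu for each of the SLUB_PAGE_SHIFT boot-time kmalloc caches.

Below is a minimal userspace sketch of that compile-time check. The BUILD_BUG_ON stand-in, both constants, and the reduced struct are hypothetical placeholders for illustration, not the kernel's definitions:

#include <stdio.h>

/*
 * Userspace stand-in for the kernel's BUILD_BUG_ON(): a negative
 * array size is rejected by the compiler, so the condition is
 * checked entirely at compile time and costs nothing at runtime.
 */
#define BUILD_BUG_ON(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

/* Hypothetical values; the real ones come from the kernel headers. */
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
#define SLUB_PAGE_SHIFT			13

/* Reduced stand-in for the kernel's struct kmem_cache_cpu. */
struct kmem_cache_cpu {
	void **freelist;
	void *page;
	int node;
};

int main(void)
{
	/* Same shape as the check the patch keeps: the early per-cpu
	 * reserve must fit one cpu-slab structure per boot-time cache. */
	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));

	printf("reserve %d bytes, worst-case need %zu bytes\n",
	       PERCPU_DYNAMIC_EARLY_SIZE,
	       SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
	return 0;
}

Shrinking the stand-in reserve below SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu) makes the program fail to compile rather than misbehave at runtime, which is exactly the property the kept assertion provides for the early boot path.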
