aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c | 17
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 38c73a3364c6..e8c117595367 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2062,23 +2062,14 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
-
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
-	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
-		/*
-		 * Boot time creation of the kmalloc array. Use static per cpu data
-		 * since the per cpu allocator is not available yet.
-		 */
-		s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
-	else
-		s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
+	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
+			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-	if (!s->cpu_slab)
-		return 0;
+	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
 
-	return 1;
+	return s->cpu_slab != NULL;
 }
 
 #ifdef CONFIG_NUMA