aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
author	Pekka Enberg <penberg@kernel.org>	2010-09-18 13:45:06 -0400
committer	Pekka Enberg <penberg@kernel.org>	2010-10-02 03:28:55 -0400
commit	ed59ecbf8904a40cf0a1ee5d6f100d76d2f44e5f (patch)
tree	6f7eb1efc1cec284ce3762702ef2545bbeaafa10 /mm/slub.c
parent	ed6c1115c835d822ec5d6356ae3043de54088f43 (diff)
Revert "Slub: UP bandaid"
This reverts commit 5249d039500f05a5ab379286b1d23ab9b04d3f2c. It's not needed after commit bbddff0545878a8649c091a9dd7c43ce91516734 ("percpu: use percpu allocator on UP too").
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	16
1 file changed, 0 insertions, 16 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 42ce17304275..7e1fe663795a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2104,24 +2104,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_SMP
-	/*
-	 * Will use reserve that does not require slab operation during
-	 * early boot.
-	 */
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
-#else
-	/*
-	 * Special hack for UP mode. allocpercpu() falls back to kmalloc
-	 * operations. So we cannot use that before the slab allocator is up
-	 * Simply get the smallest possible compound page. The page will be
-	 * released via kfree() when the cpu caches are resized later.
-	 */
-	if (slab_state < UP)
-		s->cpu_slab = (__percpu void *)kmalloc_large(PAGE_SIZE << 1, GFP_NOWAIT);
-	else
-#endif
 
 	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
 