about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2010-08-26 10:41:19 -0400
committerPekka Enberg <penberg@kernel.org>2010-10-02 03:24:29 -0400
commitdb210e70e5f191710a3b1d09f653b44885d397ea (patch)
tree3d1472be3dfd80107090a73bf70710cdb5df21f1 /mm
parenta016471a16b5c4d4ec8f5221575e603a3d11e5e9 (diff)
Slub: UP bandaid
Since the percpu allocator does not provide early allocation in UP mode (only in SMP configurations), use __get_free_page() to improvise a compound page allocation that can be later freed via kfree(). Compound pages will be released when the cpu caches are resized. Acked-by: David Rientjes <rientjes@google.com> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slub.c16
1 file changed, 16 insertions, 0 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 4c5a76f505e..05674aac929 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2103,8 +2103,24 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2103 2103
2104static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 2104static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2105{ 2105{
2106#ifdef CONFIG_SMP
2107 /*
2108 * Will use reserve that does not require slab operation during
2109 * early boot.
2110 */
2106 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2111 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2107 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2112 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2113#else
2114 /*
2115 * Special hack for UP mode. allocpercpu() falls back to kmalloc
2116	 * operations. So we cannot use that before the slab allocator is up.
2117 * Simply get the smallest possible compound page. The page will be
2118 * released via kfree() when the cpu caches are resized later.
2119 */
2120 if (slab_state < UP)
2121 s->cpu_slab = (__percpu void *)kmalloc_large(PAGE_SIZE << 1, GFP_NOWAIT);
2122 else
2123#endif
2108 2124
2109 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); 2125 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2110 2126