author     Christoph Lameter <clameter@sgi.com>              2008-02-14 17:28:01 -0500
committer  Christoph Lameter <christoph@stapp.engr.sgi.com>  2008-02-14 18:30:01 -0500
commit     71c7a06ff0a2ba0434ace4d7aa679537c4211d9d (patch)
tree       99f5a2a5e27eee88f9917d207e2849aac3ba7e62 /mm/slub.c
parent     b7a49f0d4c34166ae84089d9f145cfaae1b0eec5 (diff)
slub: Fallback to kmalloc_large for failing higher order allocs
Slub already has two ways of allocating an object. One is via its own logic and the other is via the call to kmalloc_large to hand off object allocation to the page allocator. kmalloc_large is typically used for objects >= PAGE_SIZE.

We can use that handoff to avoid failing if a higher order kmalloc slab allocation cannot be satisfied by the page allocator. If we reach the out of memory path then simply try a kmalloc_large(). kfree() can already handle the case of an object that was allocated via the page allocator and so this will work just fine (apart from object accounting...).

For any kmalloc slab that already requires higher order allocs (which makes it impossible to use the page allocator fastpath!) we just use PAGE_ALLOC_COSTLY_ORDER to get the largest number of objects in one go from the page allocator slowpath.

On a 4k platform this patch will lead to the following use of higher order pages for the following kmalloc slabs:

8 ... 1024      order 0
2048 .. 4096    order 3 (4k slab only after the next patch)

We may waste some space if fallback occurs on a 2k slab but we are always able to fall back to an order 0 alloc.

Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
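As an illustration (not part of the patch), the short user-space sketch below models the decision this change adds to the allocation slowpath: if the slab path cannot get its page, and the cache was created with the fallback flag, and the caller did not ask to avoid retries, the object is handed straight to the page allocator. The toy_* names and flag values are invented for the example; the real code in the diff below uses __PAGE_ALLOC_FALLBACK, __GFP_NORETRY, kmalloc_large() and the slowpath of __slab_alloc().

/*
 * Illustration only, not kernel code: a user-space model of the emergency
 * fallback added by this patch.  The toy_* identifiers are invented; malloc()
 * stands in for the page allocator.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_ALLOC_FALLBACK 0x1  /* cache may fall back to the page allocator */
#define TOY_GFP_NORETRY         0x2  /* caller forbids the emergency fallback */

struct toy_cache {
        size_t objsize;
        unsigned flags;
};

/* Stand-in for the slab slowpath; always returns NULL here to model a
 * failed higher order page allocation. */
static void *toy_slab_alloc(struct toy_cache *s)
{
        (void)s;
        return NULL;
}

/* Stand-in for kmalloc_large(): hand the object to the page allocator. */
static void *toy_kmalloc_large(size_t size)
{
        return malloc(size);
}

static void *toy_alloc(struct toy_cache *s, unsigned gfpflags)
{
        void *object = toy_slab_alloc(s);

        if (object)
                return object;

        /*
         * Out of memory on the slab path.  Fall back to a page allocator
         * allocation only if the cache was created with the fallback flag
         * and the caller did not ask us to avoid retries.
         */
        if (!(gfpflags & TOY_GFP_NORETRY) && (s->flags & TOY_PAGE_ALLOC_FALLBACK))
                return toy_kmalloc_large(s->objsize);

        return NULL;
}

int main(void)
{
        struct toy_cache kmalloc_2k = { 2048, TOY_PAGE_ALLOC_FALLBACK };
        void *p = toy_alloc(&kmalloc_2k, 0);

        printf("fallback alloc: %p\n", p);
        printf("with NORETRY:   %p\n", toy_alloc(&kmalloc_2k, TOY_GFP_NORETRY));
        free(p);
        return 0;
}

A second sketch after the diff redoes the arithmetic behind the order table quoted above.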
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  43
1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ccfd41141b6b..644fd0aaeaf1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON         0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED    0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE         0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK   0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -1539,7 +1541,6 @@ load_freelist:
 unlock_out:
        slab_unlock(c->page);
        stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
        local_irq_restore(flags);
 #endif
@@ -1574,8 +1575,24 @@ new_slab:
                c->page = new;
                goto load_freelist;
        }
-       object = NULL;
-       goto out;
+#ifdef SLUB_FASTPATH
+       local_irq_restore(flags);
+#endif
+       /*
+        * No memory available.
+        *
+        * If the slab uses higher order allocs but the object is
+        * smaller than a page size then we can fallback in emergencies
+        * to the page allocator via kmalloc_large. The page allocator may
+        * have failed to obtain a higher order page and we can try to
+        * allocate a single page if the object fits into a single page.
+        * That is only possible if certain conditions are met that are being
+        * checked when a slab is created.
+        */
+       if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+               return kmalloc_large(s->objsize, gfpflags);
+
+       return NULL;
 debug:
        object = c->page->freelist;
        if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2322,7 +2339,20 @@ static int calculate_sizes(struct kmem_cache *s)
        size = ALIGN(size, align);
        s->size = size;
 
-       s->order = calculate_order(size);
+       if ((flags & __KMALLOC_CACHE) &&
+                       PAGE_SIZE / size < slub_min_objects) {
+               /*
+                * Kmalloc cache that would not have enough objects in
+                * an order 0 page. Kmalloc slabs can fallback to
+                * page allocator order 0 allocs so take a reasonably large
+                * order that will allows us a good number of objects.
+                */
+               s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+               s->flags |= __PAGE_ALLOC_FALLBACK;
+               s->allocflags |= __GFP_NOWARN;
+       } else
+               s->order = calculate_order(size);
+
        if (s->order < 0)
                return 0;
 
@@ -2539,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
        down_write(&slub_lock);
        if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-                       flags, NULL))
+                       flags | __KMALLOC_CACHE, NULL))
                goto panic;
 
        list_add(&s->list, &slab_caches);
@@ -3058,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
        if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
                return 1;
 
+       if ((s->flags & __PAGE_ALLOC_FALLBACK))
+               return 1;
+
        if (s->ctor)
                return 1;
 
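The order table quoted in the commit message ("8 ... 1024 order 0, 2048 .. 4096 order 3" on a 4k platform) follows from the calculate_sizes() branch added above. The sketch below, again an illustration rather than kernel code, redoes that arithmetic in user space; the defaults it assumes (slub_min_objects = 4, slub_max_order = 1, PAGE_ALLOC_COSTLY_ORDER = 3, 4k pages) are this editor's reading of the configuration of that era, and calculate_order() is collapsed to the order 0 case the table describes.

/*
 * Illustration only: reproduce the kmalloc slab order table from the commit
 * message.  PAGE_SIZE, SLUB_MIN_OBJECTS, SLUB_MAX_ORDER and
 * PAGE_ALLOC_COSTLY_ORDER are hard-coded assumptions (4k pages, the old
 * defaults of 4 objects / max order 1, costly order 3); the real
 * calculate_order() in mm/slub.c is more involved than the order 0 shortcut
 * used here.
 */
#include <stdio.h>

#define PAGE_SIZE               4096u
#define SLUB_MIN_OBJECTS        4u
#define SLUB_MAX_ORDER          1u
#define PAGE_ALLOC_COSTLY_ORDER 3u

static unsigned max_u(unsigned a, unsigned b)
{
        return a > b ? a : b;
}

int main(void)
{
        for (unsigned size = 8; size <= 4096; size <<= 1) {
                unsigned order;
                int fallback = 0;

                if (PAGE_SIZE / size < SLUB_MIN_OBJECTS) {
                        /* Too few objects in an order 0 page: pick a large
                         * order and allow the page allocator fallback. */
                        order = max_u(SLUB_MAX_ORDER, PAGE_ALLOC_COSTLY_ORDER);
                        fallback = 1;
                } else {
                        /* An order 0 page already holds enough objects. */
                        order = 0;
                }

                printf("kmalloc-%-4u  order %u  objects/slab %4u%s\n",
                       size, order, (PAGE_SIZE << order) / size,
                       fallback ? "  (__PAGE_ALLOC_FALLBACK)" : "");
        }
        return 0;
}

It prints order 0 for kmalloc-8 through kmalloc-1024 and order 3 with __PAGE_ALLOC_FALLBACK for kmalloc-2048 and kmalloc-4096, matching the table; the 4k slab itself only appears after the next patch in the series.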