author		Christoph Lameter <clameter@sgi.com>	2008-04-14 12:11:41 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-04-27 11:28:18 -0400
commit		319d1e240683d37924ea8977c91730c3393fd453 (patch)
tree		8c59f466123d9628ee441b5d3b65564ff8b997e3 /mm
parent		65c3376aaca96c66aa76014aaf430398964b68cb (diff)
slub: Drop fallback to page allocator method
There is now a generic method of falling back to a slab page of minimal
order. There is no longer any need for the fallback to kmalloc_large().
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
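For context, the "generic method" the message refers to is the minimal-order fallback introduced by the parent commit (65c3376aa): when a cache's preferred, possibly higher-order slab page cannot be allocated, the allocator retries with the smallest order that still holds at least one object, instead of diverting the request to kmalloc_large(). The snippet below is an illustrative sketch of that idea only, not the kernel's allocate_slab(); the helper name alloc_slab_page_with_fallback() and the struct slab_orders layout are assumptions made for the example.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative sketch, not kernel code: preferred order with a minimal-order fallback. */
struct slab_orders {
	unsigned int preferred;	/* enough room for the desired number of objects */
	unsigned int minimal;	/* smallest order that fits a single object */
};

static struct page *alloc_slab_page_with_fallback(gfp_t flags, int node,
						  struct slab_orders *oo)
{
	struct page *page;

	/* Try the preferred order opportunistically: no warning, no retry. */
	page = alloc_pages_node(node, flags | __GFP_NOWARN | __GFP_NORETRY,
				oo->preferred);
	if (page)
		return page;

	/*
	 * Higher-order pages are scarce; fall back to the minimal order.
	 * Since every object fits into an order oo->minimal page, a separate
	 * emergency path through kmalloc_large() becomes redundant.
	 */
	return alloc_pages_node(node, flags, oo->minimal);
}

Because this fallback sits in the slab page allocation path itself, the diff below can remove both the __PAGE_ALLOC_FALLBACK emergency branch in the allocator slow path and the __KMALLOC_CACHE special case at cache creation time.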
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	43
1 file changed, 2 insertions(+), 41 deletions(-)
@@ -204,8 +204,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
-#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
-#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -1623,27 +1621,6 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-
-	/*
-	 * No memory available.
-	 *
-	 * If the slab uses higher order allocs but the object is
-	 * smaller than a page size then we can fallback in emergencies
-	 * to the page allocator via kmalloc_large. The page allocator may
-	 * have failed to obtain a higher order page and we can try to
-	 * allocate a single page if the object fits into a single page.
-	 * That is only possible if certain conditions are met that are being
-	 * checked when a slab is created.
-	 */
-	if (!(gfpflags & __GFP_NORETRY) &&
-			(s->flags & __PAGE_ALLOC_FALLBACK)) {
-		if (gfpflags & __GFP_WAIT)
-			local_irq_enable();
-		object = kmalloc_large(s->objsize, gfpflags);
-		if (gfpflags & __GFP_WAIT)
-			local_irq_disable();
-		return object;
-	}
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2330,20 +2307,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	size = ALIGN(size, align);
 	s->size = size;
-
-	if ((flags & __KMALLOC_CACHE) &&
-			PAGE_SIZE / size < slub_min_objects) {
-		/*
-		 * Kmalloc cache that would not have enough objects in
-		 * an order 0 page. Kmalloc slabs can fallback to
-		 * page allocator order 0 allocs so take a reasonably large
-		 * order that will allows us a good number of objects.
-		 */
-		order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
-		s->flags |= __PAGE_ALLOC_FALLBACK;
-		s->allocflags |= __GFP_NOWARN;
-	} else
-		order = calculate_order(size);
+	order = calculate_order(size);
 
 	if (order < 0)
 		return 0;
@@ -2589,7 +2553,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags | __KMALLOC_CACHE, NULL))
+			flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3105,9 +3069,6 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if ((s->flags & __PAGE_ALLOC_FALLBACK))
-		return 1;
-
 	if (s->ctor)
 		return 1;
 
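With the special case in calculate_sizes() removed, every cache, including the kmalloc caches, goes through the ordinary calculate_order() path. This is safe because a usable order always exists: in the worst case, the smallest order whose page holds a single object of the given size. The sketch below illustrates only that worst-case floor; the function name minimal_slab_order_for() is an assumption made for the example and is not the kernel's calculate_order().

#include <linux/mm.h>

/*
 * Illustrative only: the smallest page order that fits at least one object.
 * This floor is what a generic order calculation can always fall back to,
 * which is why no separate page-allocator fallback flag is needed any more.
 */
static unsigned int minimal_slab_order_for(unsigned int size)
{
	if (size <= PAGE_SIZE)
		return 0;		/* one page already fits the object */
	return get_order(size);		/* round up to the first fitting order */
}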