author      Christoph Lameter <clameter@sgi.com>    2007-10-16 04:24:38 -0400
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 12:42:53 -0400
commit      aadb4bc4a1f9108c1d0fbd121827c936c2ed4217 (patch)
tree        879b7c9ba11a65958e4477c563602e08d9e6635f /mm/slub.c
parent      57f6b96c09c30e444e0d3fc3080feba037657a7b (diff)
SLUB: direct pass through of page size or higher kmalloc requests
This gets rid of all kmalloc caches larger than page size. A kmalloc request larger than PAGE_SIZE / 2 is passed straight through to the page allocator. This works both inline, where we call __get_free_pages instead of kmem_cache_alloc, and in __kmalloc. kfree is modified to check whether the object is in a slab page; if not, the page is freed via the page allocator instead. This is roughly similar to what SLOB does.

Advantages:

- Reduces memory overhead for the kmalloc array.
- Large kmalloc operations are faster, since they no longer have to pass through the slab allocator to reach the page allocator.
- Performance increase of 10%-20% on alloc and 50% on free for PAGE_SIZE-sized allocations. SLUB must call the page allocator for each such alloc anyway, since the higher-order pages that used to avoid those page allocator calls are no longer reliably available, so we are basically removing useless slab allocator overhead.
- Large kmallocs yield page-aligned objects, which is what SLAB did. Bad habits such as using page-sized kmalloc allocations to stand in for page allocator allocations are transparently handled and are not distinguishable from page allocator uses.
- Checking for too-large objects can be removed, since it is done by the page allocator.

Drawbacks:

- No accounting for large kmalloc slab allocations anymore.
- No debugging of large kmalloc slab allocations.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
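For intuition, the new dispatch rule can be modeled with a few lines of ordinary userspace C. This is only an illustration of the threshold arithmetic, not kernel code: PAGE_SIZE is assumed to be 4096 and get_order() is a local stand-in that mirrors the kernel helper.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Local stand-in for the kernel's get_order(): smallest order such that
 * (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
        int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long sizes[] = { 1024, 2048, 2049, 4096, 8192, 10000 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned long size = sizes[i];

                if (size > PAGE_SIZE / 2)
                        /* Patched SLUB: hand the request to the page
                         * allocator at the smallest sufficient order. */
                        printf("%5lu bytes -> __get_free_pages(order %d)\n",
                               size, get_order(size));
                else
                        /* Small requests still come from a kmalloc cache. */
                        printf("%5lu bytes -> kmalloc cache\n", size);
        }
        return 0;
}

With a 4K page this prints, for example, that 2048 bytes still come from a kmalloc cache, while 2049 bytes already go to the page allocator at order 0 and 10000 bytes at order 2.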
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   63
1 file changed, 38 insertions, 25 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 0eab12bd0ac9..edeb942dc8ae 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2227,11 +2227,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *             Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2397,12 +2397,8 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
                 return ZERO_SIZE_PTR;
 
                 index = size_index[(size - 1) / 8];
-        } else {
-                if (size > KMALLOC_MAX_SIZE)
-                        return NULL;
-
+        } else
                 index = fls(size - 1);
-        }
 
 #ifdef CONFIG_ZONE_DMA
         if (unlikely((flags & SLUB_DMA)))
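As a quick check on the smaller kmalloc_caches[] bound, here is a standalone C snippet; fls() is a local reimplementation with the kernel's 1-based semantics and PAGE_SHIFT is assumed to be 12, so treat it purely as an illustration. It shows that every size the patched callers still hand to get_slab() maps to an index below PAGE_SHIFT, which is why the KMALLOC_MAX_SIZE check and the larger cache array are no longer needed.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Local stand-in for the kernel's fls(): position of the most significant
 * set bit, counted from 1; fls(0) == 0. */
static int fls(unsigned long x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned long size;

        /* Sizes above the size_index[] range but not above PAGE_SIZE / 2
         * are still served by kmalloc caches. */
        for (size = 512; size <= PAGE_SIZE / 2; size *= 2)
                printf("size %4lu -> index %2d (kmalloc-%lu)\n",
                       size, fls(size - 1), 1UL << fls(size - 1));
        return 0;
}

With 4K pages this yields indices 9, 10 and 11, all below PAGE_SHIFT, so kmalloc_caches[PAGE_SHIFT] is large enough for everything that still goes through the slab path.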
@@ -2414,9 +2410,15 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
-        struct kmem_cache *s = get_slab(size, flags);
+        struct kmem_cache *s;
 
-        if (ZERO_OR_NULL_PTR(s))
+        if (unlikely(size > PAGE_SIZE / 2))
+                return (void *)__get_free_pages(flags | __GFP_COMP,
+                                                get_order(size));
+
+        s = get_slab(size, flags);
+
+        if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
         return slab_alloc(s, flags, -1, __builtin_return_address(0));
@@ -2426,9 +2428,15 @@ EXPORT_SYMBOL(__kmalloc);
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-        struct kmem_cache *s = get_slab(size, flags);
+        struct kmem_cache *s;
 
-        if (ZERO_OR_NULL_PTR(s))
+        if (unlikely(size > PAGE_SIZE / 2))
+                return (void *)__get_free_pages(flags | __GFP_COMP,
+                                                get_order(size));
+
+        s = get_slab(size, flags);
+
+        if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
         return slab_alloc(s, flags, node, __builtin_return_address(0));
@@ -2473,22 +2481,17 @@ EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
-        struct kmem_cache *s;
         struct page *page;
 
-        /*
-         * This has to be an unsigned comparison. According to Linus
-         * some gcc version treat a pointer as a signed entity. Then
-         * this comparison would be true for all "negative" pointers
-         * (which would cover the whole upper half of the address space).
-         */
         if (ZERO_OR_NULL_PTR(x))
                 return;
 
         page = virt_to_head_page(x);
-        s = page->slab;
-
-        slab_free(s, page, (void *)x, __builtin_return_address(0));
+        if (unlikely(!PageSlab(page))) {
+                put_page(page);
+                return;
+        }
+        slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
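To make the new free path easier to follow, here is the kfree() that results from the hunk above, rewritten as plain code with editorial comments. The remark about __GFP_COMP and put_page() reflects standard compound-page behaviour and is our reading, not wording taken from the commit.

void kfree(const void *x)
{
        struct page *page;

        if (ZERO_OR_NULL_PTR(x))
                return;

        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                /*
                 * Not a slab page, so this must be a large kmalloc that
                 * went straight to the page allocator.  Because the
                 * allocation used __GFP_COMP, the pages form a compound
                 * page, and dropping the head page's reference releases
                 * the whole higher-order block.
                 */
                put_page(page);
                return;
        }
        slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
}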
@@ -2602,7 +2605,7 @@ void __init kmem_cache_init(void)
                 caches++;
         }
 
-        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+        for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
                 create_kmalloc_cache(&kmalloc_caches[i],
                         "kmalloc", 1 << i, GFP_KERNEL);
                 caches++;
@@ -2629,7 +2632,7 @@ void __init kmem_cache_init(void)
         slab_state = UP;
 
         /* Provide the correct kmalloc names now that the caches are up */
-        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+        for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
                 kmalloc_caches[i]. name =
                         kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -2790,7 +2793,12 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
-        struct kmem_cache *s = get_slab(size, gfpflags);
+        struct kmem_cache *s;
+
+        if (unlikely(size > PAGE_SIZE / 2))
+                return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+                                                get_order(size));
+        s = get_slab(size, gfpflags);
 
         if (ZERO_OR_NULL_PTR(s))
                 return s;
@@ -2801,7 +2809,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                                         int node, void *caller)
 {
-        struct kmem_cache *s = get_slab(size, gfpflags);
+        struct kmem_cache *s;
+
+        if (unlikely(size > PAGE_SIZE / 2))
+                return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+                                                get_order(size));
+        s = get_slab(size, gfpflags);
 
         if (ZERO_OR_NULL_PTR(s))
                 return s;