Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	63
1 file changed, 38 insertions(+), 25 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2227,11 +2227,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
 #endif
 
 static int __init setup_slub_min_order(char *str)
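With the pass-through added further down, get_slab() is only ever asked for sizes up to PAGE_SIZE / 2, which is why PAGE_SHIFT array slots now suffice: the highest index handed out is fls(PAGE_SIZE / 2 - 1) == PAGE_SHIFT - 1. A minimal user-space sketch of that bound (the 4 KB page size, the DEMO_* names, and fls_demo() as a stand-in for the kernel's fls() are assumptions of the demo, not kernel code):

/*
 * Illustration only: after this patch get_slab() handles sizes up to
 * PAGE_SIZE / 2, so the highest cache index is PAGE_SHIFT - 1 and
 * PAGE_SHIFT array slots are enough. Assumes 4 KB pages.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

static int fls_demo(unsigned long x)	/* user-space stand-in for fls() */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	printf("largest slab-backed kmalloc: %lu bytes\n",
	       DEMO_PAGE_SIZE / 2);
	printf("highest index: %d -> kmalloc_caches[%d] slots suffice\n",
	       fls_demo(DEMO_PAGE_SIZE / 2 - 1), (int)DEMO_PAGE_SHIFT);
	return 0;
}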
@@ -2397,12 +2397,8 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 			return ZERO_SIZE_PTR;
 
 		index = size_index[(size - 1) / 8];
-	} else {
-		if (size > KMALLOC_MAX_SIZE)
-			return NULL;
-
+	} else
 		index = fls(size - 1);
-	}
 
 #ifdef CONFIG_ZONE_DMA
 	if (unlikely((flags & SLUB_DMA)))
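The removed KMALLOC_MAX_SIZE check is no longer needed here because every caller now diverts requests above PAGE_SIZE / 2 to the page allocator before calling get_slab(). What remains is the plain power-of-two rounding via fls(size - 1); a self-contained sketch of that mapping (fls_demo() again stands in for the kernel's fls(); the sample sizes are arbitrary):

/*
 * Illustration only: index = fls(size - 1) rounds a request up to the
 * next power-of-two kmalloc cache.
 */
#include <stdio.h>

static int fls_demo(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long sizes[] = { 193, 256, 257, 1024, 2048 };
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4lu -> index %2d (cache kmalloc-%lu)\n",
		       sizes[i], fls_demo(sizes[i] - 1),
		       1UL << fls_demo(sizes[i] - 1));
	return 0;
}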
@@ -2414,9 +2410,15 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	struct kmem_cache *s = get_slab(size, flags);
+	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(flags | __GFP_COMP,
+							get_order(size));
+
+	s = get_slab(size, flags);
+
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, -1, __builtin_return_address(0));
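__kmalloc() now splits into two paths: anything above half a page goes straight to __get_free_pages() with __GFP_COMP (so the result is a compound page whose order kfree() can later recover), everything else still goes through the slab caches. A user-space model of that dispatch, assuming 4 KB pages and using get_order_demo() as a stand-in for the kernel's get_order():

/*
 * Illustration only: which path a kmalloc() request takes after this
 * patch, and at what page order when it bypasses the slab layer.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL	/* assumed 4 KB pages */

static int get_order_demo(unsigned long size)
{
	int order = 0;

	while ((DEMO_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 2048, 2049, 4096, 8192, 100000 };
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long sz = sizes[i];

		if (sz > DEMO_PAGE_SIZE / 2)
			printf("kmalloc(%6lu): __get_free_pages, order %d\n",
			       sz, get_order_demo(sz));
		else
			printf("kmalloc(%6lu): slab cache via get_slab()\n",
			       sz);
	}
	return 0;
}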
@@ -2426,9 +2428,15 @@ EXPORT_SYMBOL(__kmalloc);
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	struct kmem_cache *s = get_slab(size, flags);
+	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(flags | __GFP_COMP,
+							get_order(size));
+
+	s = get_slab(size, flags);
+
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, node, __builtin_return_address(0));
@@ -2473,22 +2481,17 @@ EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
-	struct kmem_cache *s;
 	struct page *page;
 
-	/*
-	 * This has to be an unsigned comparison. According to Linus
-	 * some gcc version treat a pointer as a signed entity. Then
-	 * this comparison would be true for all "negative" pointers
-	 * (which would cover the whole upper half of the address space).
-	 */
 	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
-	s = page->slab;
-
-	slab_free(s, page, (void *)x, __builtin_return_address(0));
+	if (unlikely(!PageSlab(page))) {
+		put_page(page);
+		return;
+	}
+	slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
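kfree() correspondingly no longer assumes every pointer belongs to a slab: if the head page is not marked PageSlab, the allocation must have come from the pass-through, and put_page() releases the whole compound page; otherwise page->slab still identifies the owning cache. A user-space model of that dispatch (the tag field and all demo_* names are invented for the illustration; the kernel reads the flag off struct page instead):

/*
 * Illustration only: the two free paths kfree() picks between after
 * this patch. An explicit tag plays the role of PageSlab().
 */
#include <stdio.h>
#include <stdlib.h>

enum origin { SLAB_OBJECT, COMPOUND_PAGE };

struct demo_hdr {
	enum origin origin;	/* stand-in for the PageSlab() page flag */
};

static void demo_kfree(struct demo_hdr *p)
{
	if (!p)
		return;		/* mirrors the ZERO_OR_NULL_PTR() bail-out */

	if (p->origin == COMPOUND_PAGE)
		puts("put_page(): drop the compound page as a whole");
	else
		puts("slab_free(): return the object to page->slab");
	free(p);
}

int main(void)
{
	struct demo_hdr *a = malloc(sizeof(*a));
	struct demo_hdr *b = malloc(sizeof(*b));

	a->origin = SLAB_OBJECT;
	b->origin = COMPOUND_PAGE;
	demo_kfree(a);
	demo_kfree(b);
	demo_kfree(NULL);
	return 0;
}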
@@ -2602,7 +2605,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
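With the loop bounded by PAGE_SHIFT instead of KMALLOC_SHIFT_HIGH, only caches up to half a page are created at boot. A tiny sketch of what that enumerates, assuming 4 KB pages and KMALLOC_SHIFT_LOW == 3 (both assumptions of the demo, not taken from this patch):

/*
 * Illustration only: the general-purpose caches the loop above creates
 * under the demo assumptions run from kmalloc-8 to kmalloc-2048;
 * anything larger is page-allocator territory.
 */
#include <stdio.h>

#define DEMO_KMALLOC_SHIFT_LOW	3
#define DEMO_PAGE_SHIFT		12

int main(void)
{
	int i;

	for (i = DEMO_KMALLOC_SHIFT_LOW; i < DEMO_PAGE_SHIFT; i++)
		printf("create kmalloc-%d\n", 1 << i);
	return 0;
}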
@@ -2629,7 +2632,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -2790,7 +2793,12 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
-	struct kmem_cache *s = get_slab(size, gfpflags);
+	struct kmem_cache *s;
+
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+							get_order(size));
+	s = get_slab(size, gfpflags);
 
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
@@ -2801,7 +2809,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, void *caller)
 {
-	struct kmem_cache *s = get_slab(size, gfpflags);
+	struct kmem_cache *s;
+
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+							get_order(size));
+	s = get_slab(size, gfpflags);
 
 	if (ZERO_OR_NULL_PTR(s))
 		return s;