-rw-r--r--	include/linux/slub_def.h | 57
-rw-r--r--	mm/slub.c                | 63
2 files changed, 62 insertions(+), 58 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 74962077f632..3b361b2906bd 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -72,7 +72,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -83,9 +83,6 @@ static __always_inline int kmalloc_index(size_t size)
 	if (!size)
 		return 0;
 
-	if (size > KMALLOC_MAX_SIZE)
-		return -1;
-
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
@@ -102,6 +99,10 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= 512) return 9;
 	if (size <= 1024) return 10;
 	if (size <= 2 * 1024) return 11;
+/*
+ * The following is only needed to support architectures with a larger page
+ * size than 4k.
+ */
 	if (size <= 4 * 1024) return 12;
 	if (size <= 8 * 1024) return 13;
 	if (size <= 16 * 1024) return 14;
@@ -109,13 +110,9 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
 	if (size <= 2 * 1024 * 1024) return 21;
-	if (size <= 4 * 1024 * 1024) return 22;
-	if (size <= 8 * 1024 * 1024) return 23;
-	if (size <= 16 * 1024 * 1024) return 24;
-	if (size <= 32 * 1024 * 1024) return 25;
 	return -1;
 
 /*
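For reference, kmalloc_index() maps a compile-time-constant size to the index of the smallest power-of-two general cache that fits; after this patch, sizes beyond the ladder simply return -1 because the page allocator serves them instead. A minimal user-space sketch of that mapping (not kernel code; it assumes 4k pages with PAGE_SHIFT == 12 and KMALLOC_SHIFT_LOW == 3, and omits the kernel's special 96- and 192-byte caches):

	#include <stdio.h>

	static int kmalloc_index_model(size_t size)
	{
		int i;

		if (!size)
			return 0;		/* ZERO_SIZE_PTR case */
		if (size <= 8)
			return 3;		/* smallest cache: kmalloc-8 */
		for (i = 4; i <= 12; i++)	/* ladder up to one page */
			if (size <= (1UL << i))
				return i;
		return -1;			/* beyond the ladder */
	}

	int main(void)
	{
		/* 24 -> 5 (kmalloc-32), 4000 -> 12, 8192 -> -1 */
		printf("%d %d %d\n", kmalloc_index_model(24),
		       kmalloc_index_model(4000), kmalloc_index_model(8192));
		return 0;
	}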
@@ -140,19 +137,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;
 
-	/*
-	 * This function only gets expanded if __builtin_constant_p(size), so
-	 * testing it here shouldn't be needed. But some versions of gcc need
-	 * help.
-	 */
-	if (__builtin_constant_p(size) && index < 0) {
-		/*
-		 * Generate a link failure. Would be great if we could
-		 * do something to stop the compile here.
-		 */
-		extern void __kmalloc_size_too_large(void);
-		__kmalloc_size_too_large();
-	}
 	return &kmalloc_caches[index];
 }
 
@@ -168,15 +152,21 @@ void *__kmalloc(size_t size, gfp_t flags);
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size)) {
+		if (size > PAGE_SIZE / 2)
+			return (void *)__get_free_pages(flags | __GFP_COMP,
+							get_order(size));
 
-		if (!s)
-			return ZERO_SIZE_PTR;
+		if (!(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
-		return kmem_cache_alloc(s, flags);
-	} else
-		return __kmalloc(size, flags);
+			if (!s)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc(s, flags);
+		}
+	}
+	return __kmalloc(size, flags);
 }
 
 #ifdef CONFIG_NUMA
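The rewritten inline kmalloc() now makes a three-way decision for compile-time-constant sizes. A hedged user-space sketch of that dispatch (PAGE_SIZE_MODEL and the returned strings are illustrative only; the real code returns memory, not a label):

	#include <stdio.h>

	#define PAGE_SIZE_MODEL 4096	/* assumption: 4k pages */

	/* Which backend a constant-size request reaches after this patch. */
	static const char *kmalloc_backend(size_t size, int dma)
	{
		if (size > PAGE_SIZE_MODEL / 2)
			return "__get_free_pages(flags | __GFP_COMP, get_order(size))";
		if (!dma)
			return "kmem_cache_alloc() on the cache from kmalloc_slab()";
		return "__kmalloc() slow path (DMA caches are set up at runtime)";
	}

	int main(void)
	{
		printf("128B   : %s\n", kmalloc_backend(128, 0));
		printf("3000B  : %s\n", kmalloc_backend(3000, 0));
		printf("64B/DMA: %s\n", kmalloc_backend(64, 1));
		return 0;
	}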
@@ -185,15 +175,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size) &&
+		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
 			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
-	} else
-		return __kmalloc_node(size, flags, node);
+	}
+	return __kmalloc_node(size, flags, node);
 }
 #endif
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2227,11 +2227,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2397,12 +2397,8 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 			return ZERO_SIZE_PTR;
 
 		index = size_index[(size - 1) / 8];
-	} else {
-		if (size > KMALLOC_MAX_SIZE)
-			return NULL;
-
+	} else
 		index = fls(size - 1);
-	}
 
 #ifdef CONFIG_ZONE_DMA
 	if (unlikely((flags & SLUB_DMA)))
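In get_slab(), fls(size - 1) is what turns a runtime size into a cache index: it rounds the size up to the next power of two and yields its base-2 log. A small sketch of the equivalence (using GCC's __builtin_clz; the kernel's fls() is an arch-optimized primitive with the same 1-based result):

	#include <assert.h>

	/* fls(x): position of the highest set bit, 1-based; fls(0) == 0. */
	static int fls_model(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		/* 1025..2048 all round up to 2048 = 1 << 11 -> kmalloc-2048 */
		assert(fls_model(1025 - 1) == 11);
		assert(fls_model(2048 - 1) == 11);
		/* sizes above PAGE_SIZE/2 no longer reach get_slab() at all */
		assert(fls_model(2049 - 1) == 12);
		return 0;
	}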
@@ -2414,9 +2410,15 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	struct kmem_cache *s = get_slab(size, flags);
+	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(flags | __GFP_COMP,
+						get_order(size));
+
+	s = get_slab(size, flags);
+
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, -1, __builtin_return_address(0));
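The page-allocator fallback leans on two things: get_order() picks the smallest page order whose 2^order pages cover the request, and __GFP_COMP makes the result a compound page so the allocation can later be identified and sized from any address inside it. A simplified model of get_order() under the assumption of 4k pages (the kernel's version is a constant-foldable bit trick, not a loop):

	#include <stdio.h>

	#define PAGE_SIZE_MODEL 4096UL	/* assumption: 4k pages */

	/* Smallest order with (1 << order) pages >= size. */
	static int get_order_model(unsigned long size)
	{
		unsigned long pages = (size + PAGE_SIZE_MODEL - 1) / PAGE_SIZE_MODEL;
		int order = 0;

		while ((1UL << order) < pages)
			order++;
		return order;
	}

	int main(void)
	{
		/* 3000 -> order 0 (1 page), 5000 -> 1 (2 pages), 20000 -> 3 (8 pages) */
		printf("%d %d %d\n", get_order_model(3000),
		       get_order_model(5000), get_order_model(20000));
		return 0;
	}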
@@ -2426,9 +2428,15 @@ EXPORT_SYMBOL(__kmalloc);
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	struct kmem_cache *s = get_slab(size, flags);
+	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(flags | __GFP_COMP,
+						get_order(size));
+
+	s = get_slab(size, flags);
+
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, node, __builtin_return_address(0));
@@ -2473,22 +2481,17 @@ EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
-	struct kmem_cache *s;
 	struct page *page;
 
-	/*
-	 * This has to be an unsigned comparison. According to Linus
-	 * some gcc version treat a pointer as a signed entity. Then
-	 * this comparison would be true for all "negative" pointers
-	 * (which would cover the whole upper half of the address space).
-	 */
 	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
-	s = page->slab;
-
-	slab_free(s, page, (void *)x, __builtin_return_address(0));
+	if (unlikely(!PageSlab(page))) {
+		put_page(page);
+		return;
+	}
+	slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
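kfree() can now receive either kind of pointer, so it uses the page flags to tell them apart: a slab object's page has PageSlab set, while a direct __get_free_pages() allocation does not and is released by dropping its page reference. Note that virt_to_head_page() only lands on the right head page for multi-page allocations because the allocation side passed __GFP_COMP. A toy model of the dispatch (struct and field names are invented for illustration):

	#include <stdio.h>

	struct page_model {
		int slab;	/* stands in for PageSlab(page) */
	};

	static void kfree_model(const struct page_model *page)
	{
		if (!page->slab) {
			/* came from __get_free_pages(): put_page() drops
			 * the last reference, freeing the compound page */
			puts("put_page(page)");
			return;
		}
		/* ordinary slab object: hand it back to its kmem_cache */
		puts("slab_free(page->slab, ...)");
	}

	int main(void)
	{
		struct page_model slab_page = { 1 }, direct_page = { 0 };

		kfree_model(&slab_page);
		kfree_model(&direct_page);
		return 0;
	}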
@@ -2602,7 +2605,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -2629,7 +2632,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
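With the loop bound changed from KMALLOC_SHIFT_HIGH to PAGE_SHIFT, the set of general power-of-two caches shrinks accordingly. A quick check of the names this loop registers under the assumption of 4k pages (PAGE_SHIFT == 12, KMALLOC_SHIFT_LOW == 3; the odd-sized 96- and 192-byte caches are created separately and not shown):

	#include <stdio.h>

	int main(void)
	{
		int i;

		/* kmalloc-8 through kmalloc-2048; larger sizes get no cache */
		for (i = 3; i < 12; i++)
			printf("kmalloc-%d\n", 1 << i);
		return 0;
	}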
@@ -2790,7 +2793,12 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
-	struct kmem_cache *s = get_slab(size, gfpflags);
+	struct kmem_cache *s;
+
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+						get_order(size));
+	s = get_slab(size, gfpflags);
 
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
@@ -2801,7 +2809,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, void *caller)
 {
-	struct kmem_cache *s = get_slab(size, gfpflags);
+	struct kmem_cache *s;
+
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+						get_order(size));
+	s = get_slab(size, gfpflags);
 
 	if (ZERO_OR_NULL_PTR(s))
 		return s;