path: root/mm/slub.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-07 11:42:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-07 11:42:20 -0400
commit     0f47c9423c0fe468d0b5b153f9b9d6e8e20707eb (patch)
tree       9eaec7fb4dc5fbfae07d168d0493a0a0a67c7d47 /mm/slub.c
parent     b9e306e07ed58fc354bbd58124b281dd7dc697b7 (diff)
parent     69df2ac1288b456a95aceadafbf88cd891a577c8 (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab changes from Pekka Enberg:
 "The bulk of the changes are more slab unification from Christoph.
  There's also few fixes from Aaron, Glauber, and Joonsoo thrown into
  the mix."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (24 commits)
  mm, slab_common: Fix bootstrap creation of kmalloc caches
  slab: Return NULL for oversized allocations
  mm: slab: Verify the nodeid passed to ____cache_alloc_node
  slub: tid must be retrieved from the percpu area of the current processor
  slub: Do not dereference NULL pointer in node_match
  slub: add 'likely' macro to inc_slabs_node()
  slub: correct to calculate num of acquired objects in get_partial_node()
  slub: correctly bootstrap boot caches
  mm/sl[au]b: correct allocation type check in kmalloc_slab()
  slab: Fixup CONFIG_PAGE_ALLOC/DEBUG_SLAB_LEAK sections
  slab: Handle ARCH_DMA_MINALIGN correctly
  slab: Common definition for kmem_cache_node
  slab: Rename list3/l3 to node
  slab: Common Kmalloc cache determination
  stat: Use size_t for sizes instead of unsigned
  slab: Common function to create the kmalloc array
  slab: Common definition for the array of kmalloc caches
  slab: Common constants for kmalloc boundaries
  slab: Rename nodelists to node
  slab: Common name for the per node structures
  ...
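Several of the commits above ("slab: Common Kmalloc cache determination", "slab: Common constants for kmalloc boundaries") replace SLUB's private get_slab()/size_index lookup, removed further down in this diff, with a kmalloc_slab() helper shared with SLAB. The size-to-index math that lookup rests on can be sketched in a few lines of userspace C; the table values mirror the removed size_index[] array, but the function and variable names here are illustrative stand-ins, not kernel API:

    #include <stdio.h>

    /* Illustrative only: maps a request size to a kmalloc-style cache index.
     * Sizes <= 192 use a small lookup table (the non-power-of-two 96- and
     * 192-byte caches live there); larger sizes round up via fls(). */
    static const signed char size_index[24] = {
            3, 4, 5, 5, 6, 6, 6, 6,   /*   8 ..  64 */
            1, 1, 1, 1, 7, 7, 7, 7,   /*  72 .. 128 */
            2, 2, 2, 2, 2, 2, 2, 2    /* 136 .. 192 */
    };

    static int fls_(unsigned int x)   /* find last set bit, 1-based */
    {
            int i = 0;
            while (x) { i++; x >>= 1; }
            return i;
    }

    static int kmalloc_index_sketch(size_t size)
    {
            if (size <= 192)
                    return size ? size_index[(size - 1) / 8] : -1; /* -1 ~ ZERO_SIZE_PTR */
            return fls_((unsigned int)size - 1);
    }

    int main(void)
    {
            size_t sizes[] = { 8, 96, 192, 200, 4096 };
            for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("size %4zu -> index %d\n", sizes[i], kmalloc_index_sketch(sizes[i]));
            return 0;
    }

Indices 1 and 2 correspond to the odd-sized 96- and 192-byte caches; everything above 192 bytes maps to the next power-of-two cache.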
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   221
1 file changed, 40 insertions(+), 181 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a0206df88aba..57707f01bcfb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1006,7 +1006,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
          * dilemma by deferring the increment of the count during
          * bootstrap (see early_kmem_cache_node_alloc).
          */
-        if (n) {
+        if (likely(n)) {
                 atomic_long_inc(&n->nr_slabs);
                 atomic_long_add(objects, &n->total_objects);
         }
@@ -1494,7 +1494,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
  */
 static inline void *acquire_slab(struct kmem_cache *s,
                 struct kmem_cache_node *n, struct page *page,
-                int mode)
+                int mode, int *objects)
 {
         void *freelist;
         unsigned long counters;
@@ -1508,6 +1508,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
         freelist = page->freelist;
         counters = page->counters;
         new.counters = counters;
+        *objects = new.objects - new.inuse;
         if (mode) {
                 new.inuse = page->objects;
                 new.freelist = NULL;
@@ -1529,7 +1530,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
         return freelist;
 }

-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);

 /*
1535/* 1536/*
@@ -1540,6 +1541,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
         struct page *page, *page2;
         void *object = NULL;
+        int available = 0;
+        int objects;

         /*
          * Racy check. If we mistakenly see no partial slabs then we
@@ -1553,22 +1556,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
         spin_lock(&n->list_lock);
         list_for_each_entry_safe(page, page2, &n->partial, lru) {
                 void *t;
-                int available;

                 if (!pfmemalloc_match(page, flags))
                         continue;

-                t = acquire_slab(s, n, page, object == NULL);
+                t = acquire_slab(s, n, page, object == NULL, &objects);
                 if (!t)
                         break;

+                available += objects;
                 if (!object) {
                         c->page = page;
                         stat(s, ALLOC_FROM_PARTIAL);
                         object = t;
-                        available = page->objects - page->inuse;
                 } else {
-                        available = put_cpu_partial(s, page, 0);
+                        put_cpu_partial(s, page, 0);
                         stat(s, CPU_PARTIAL_NODE);
                 }
                 if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
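The get_partial_node() change above turns 'available' into a running total of the free objects in every slab taken from the node's partial list, where previously it was recomputed from whichever page was handled last. A toy sketch of the new accounting, using made-up numbers and stand-in types rather than the kernel's struct page:

    #include <stdio.h>

    struct toy_slab { int objects; int inuse; };   /* capacity vs. allocated */

    int main(void)
    {
            /* Pretend three partial slabs were acquired in a row. */
            struct toy_slab acquired[] = { {16, 10}, {16, 4}, {16, 15} };
            int n = sizeof(acquired) / sizeof(acquired[0]);
            int available = 0;

            for (int i = 0; i < n; i++) {
                    int objects = acquired[i].objects - acquired[i].inuse;
                    available += objects;          /* new behaviour: running total */
                    printf("slab %d: +%d free, running total %d\n",
                           i, objects, available);
            }
            /* The old code recomputed 'available' from only the most recently
             * handled slab, so the total across all acquired slabs was lost. */
            return 0;
    }

With the running total, the `available > s->cpu_partial / 2` cut-off reflects everything gathered so far rather than a single slab.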
@@ -1947,7 +1949,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
         struct page *oldpage;
         int pages;
@@ -1985,7 +1987,6 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                 page->next = oldpage;

         } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-        return pobjects;
 }

 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2042,7 +2043,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-        if (node != NUMA_NO_NODE && page_to_nid(page) != node)
+        if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
                 return 0;
 #endif
         return 1;
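node_match() can now be reached with c->page still NULL, so the new '!page ||' test must short-circuit before page_to_nid() ever touches the pointer. A standalone sketch of that guard, with toy types in place of struct page:

    #include <stdio.h>
    #include <stddef.h>

    #define NUMA_NO_NODE (-1)

    struct toy_page { int nid; };                 /* stand-in for struct page */

    static int toy_page_to_nid(const struct toy_page *page)
    {
            return page->nid;                     /* would crash on NULL */
    }

    static int node_match_sketch(const struct toy_page *page, int node)
    {
            /* "!page ||" is evaluated first, so toy_page_to_nid() is never
             * called with a NULL pointer. */
            if (!page || (node != NUMA_NO_NODE && toy_page_to_nid(page) != node))
                    return 0;
            return 1;
    }

    int main(void)
    {
            struct toy_page p = { .nid = 0 };
            printf("%d %d %d\n",
                   node_match_sketch(&p, 0),          /* 1: same node   */
                   node_match_sketch(&p, 1),          /* 0: other node  */
                   node_match_sketch(NULL, 1));       /* 0: no page yet */
            return 0;
    }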
@@ -2332,13 +2333,18 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 
         s = memcg_kmem_get_cache(s, gfpflags);
 redo:
-
         /*
          * Must read kmem_cache cpu data via this cpu ptr. Preemption is
          * enabled. We may switch back and forth between cpus while
          * reading from one cpu area. That does not matter as long
          * as we end up on the original cpu again when doing the cmpxchg.
+         *
+         * Preemption is disabled for the retrieval of the tid because that
+         * must occur from the current processor. We cannot allow rescheduling
+         * on a different processor between the determination of the pointer
+         * and the retrieval of the tid.
          */
+        preempt_disable();
         c = __this_cpu_ptr(s->cpu_slab);

         /*
@@ -2348,7 +2354,7 @@ redo:
          * linked list in between.
          */
         tid = c->tid;
-        barrier();
+        preempt_enable();

         object = c->freelist;
         page = c->page;
@@ -2595,10 +2601,11 @@ redo:
          * data is retrieved via this pointer. If we are on the same cpu
          * during the cmpxchg then the free will succedd.
          */
+        preempt_disable();
         c = __this_cpu_ptr(s->cpu_slab);

         tid = c->tid;
-        barrier();
+        preempt_enable();

         if (likely(page == c->page)) {
                 set_freepointer(s, object, c->freelist);
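Both this hunk and the allocation-path hunk above swap a bare barrier() for a preempt_disable()/preempt_enable() pair, so that the kmem_cache_cpu pointer and the tid read from it come from the same processor; the later this_cpu_cmpxchg then catches any migration or interruption that happened in between. A much-simplified, single-threaded sketch of that pairing, with a toy per-"cpu" array and hypothetical names (the real preemption primitives are not reproduced here):

    #include <stdio.h>

    #define NR_CPUS 4

    struct toy_cpu_slab { unsigned long tid; void *freelist; };

    static struct toy_cpu_slab cpu_slab[NR_CPUS];
    static int current_cpu;                  /* stand-in for smp_processor_id() */

    int main(void)
    {
            for (int i = 0; i < NR_CPUS; i++)
                    cpu_slab[i].tid = 100 * i;   /* per-cpu transaction ids */

            current_cpu = 2;

            /* The pointer and the tid must come from the same cpu.  In the
             * kernel this window is bracketed by preempt_disable()/enable();
             * if the task migrated between the two reads, one cpu's tid would
             * later be checked against another cpu's freelist and the cmpxchg
             * would operate on stale state. */
            struct toy_cpu_slab *c = &cpu_slab[current_cpu];
            unsigned long tid = c->tid;

            /* ... the fast path would now attempt its cmpxchg against tid ... */
            printf("cpu %d: read tid %lu from the matching per-cpu area\n",
                   current_cpu, tid);
            return 0;
    }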
@@ -2776,7 +2783,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
         BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-                        SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+                        KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));

         /*
          * Must align to double word boundary for the double cmpxchg
@@ -2983,7 +2990,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                 s->allocflags |= __GFP_COMP;

         if (s->flags & SLAB_CACHE_DMA)
-                s->allocflags |= SLUB_DMA;
+                s->allocflags |= GFP_DMA;

         if (s->flags & SLAB_RECLAIM_ACCOUNT)
                 s->allocflags |= __GFP_RECLAIMABLE;
@@ -3175,13 +3182,6 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
  *                Kmalloc subsystem
  *******************************************************************/

-struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
-EXPORT_SYMBOL(kmalloc_caches);
-
-#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
-#endif
-
 static int __init setup_slub_min_order(char *str)
 {
         get_option(&str, &slub_min_order);
@@ -3218,73 +3218,15 @@ static int __init setup_slub_nomerge(char *str)
 
 __setup("slub_nomerge", setup_slub_nomerge);

-/*
- * Conversion table for small slabs sizes / 8 to the index in the
- * kmalloc array. This is necessary for slabs < 192 since we have non power
- * of two cache sizes there. The size of larger slabs can be determined using
- * fls.
- */
-static s8 size_index[24] = {
-        3,        /* 8 */
-        4,        /* 16 */
-        5,        /* 24 */
-        5,        /* 32 */
-        6,        /* 40 */
-        6,        /* 48 */
-        6,        /* 56 */
-        6,        /* 64 */
-        1,        /* 72 */
-        1,        /* 80 */
-        1,        /* 88 */
-        1,        /* 96 */
-        7,        /* 104 */
-        7,        /* 112 */
-        7,        /* 120 */
-        7,        /* 128 */
-        2,        /* 136 */
-        2,        /* 144 */
-        2,        /* 152 */
-        2,        /* 160 */
-        2,        /* 168 */
-        2,        /* 176 */
-        2,        /* 184 */
-        2         /* 192 */
-};
-
-static inline int size_index_elem(size_t bytes)
-{
-        return (bytes - 1) / 8;
-}
-
-static struct kmem_cache *get_slab(size_t size, gfp_t flags)
-{
-        int index;
-
-        if (size <= 192) {
-                if (!size)
-                        return ZERO_SIZE_PTR;
-
-                index = size_index[size_index_elem(size)];
-        } else
-                index = fls(size - 1);
-
-#ifdef CONFIG_ZONE_DMA
-        if (unlikely((flags & SLUB_DMA)))
-                return kmalloc_dma_caches[index];
-
-#endif
-        return kmalloc_caches[index];
-}
-
 void *__kmalloc(size_t size, gfp_t flags)
 {
         struct kmem_cache *s;
         void *ret;

-        if (unlikely(size > SLUB_MAX_SIZE))
+        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                 return kmalloc_large(size, flags);

-        s = get_slab(size, flags);
+        s = kmalloc_slab(size, flags);

         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
@@ -3317,7 +3259,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         struct kmem_cache *s;
         void *ret;

-        if (unlikely(size > SLUB_MAX_SIZE)) {
+        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
                 ret = kmalloc_large_node(size, flags, node);

                 trace_kmalloc_node(_RET_IP_, ret,
@@ -3327,7 +3269,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
                 return ret;
         }

-        s = get_slab(size, flags);
+        s = kmalloc_slab(size, flags);

         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
@@ -3620,6 +3562,12 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 
         memcpy(s, static_cache, kmem_cache->object_size);

+        /*
+         * This runs very early, and only the boot processor is supposed to be
+         * up. Even if it weren't true, IRQs are not up so we couldn't fire
+         * IPIs around.
+         */
+        __flush_cpu_slab(s, smp_processor_id());
         for_each_node_state(node, N_NORMAL_MEMORY) {
                 struct kmem_cache_node *n = get_node(s, node);
                 struct page *p;
@@ -3642,8 +3590,6 @@ void __init kmem_cache_init(void)
 {
         static __initdata struct kmem_cache boot_kmem_cache,
                 boot_kmem_cache_node;
-        int i;
-        int caches = 2;

         if (debug_guardpage_minorder())
                 slub_max_order = 0;
@@ -3674,103 +3620,16 @@ void __init kmem_cache_init(void)
         kmem_cache_node = bootstrap(&boot_kmem_cache_node);

         /* Now we can use the kmem_cache to allocate kmalloc slabs */
-
-        /*
-         * Patch up the size_index table if we have strange large alignment
-         * requirements for the kmalloc array. This is only the case for
-         * MIPS it seems. The standard arches will not generate any code here.
-         *
-         * Largest permitted alignment is 256 bytes due to the way we
-         * handle the index determination for the smaller caches.
-         *
-         * Make sure that nothing crazy happens if someone starts tinkering
-         * around with ARCH_KMALLOC_MINALIGN
-         */
-        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-                (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
-
-        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
-                int elem = size_index_elem(i);
-                if (elem >= ARRAY_SIZE(size_index))
-                        break;
-                size_index[elem] = KMALLOC_SHIFT_LOW;
-        }
-
-        if (KMALLOC_MIN_SIZE == 64) {
-                /*
-                 * The 96 byte size cache is not used if the alignment
-                 * is 64 byte.
-                 */
-                for (i = 64 + 8; i <= 96; i += 8)
-                        size_index[size_index_elem(i)] = 7;
-        } else if (KMALLOC_MIN_SIZE == 128) {
-                /*
-                 * The 192 byte sized cache is not used if the alignment
-                 * is 128 byte. Redirect kmalloc to use the 256 byte cache
-                 * instead.
-                 */
-                for (i = 128 + 8; i <= 192; i += 8)
-                        size_index[size_index_elem(i)] = 8;
-        }
-
-        /* Caches that are not of the two-to-the-power-of size */
-        if (KMALLOC_MIN_SIZE <= 32) {
-                kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
-                caches++;
-        }
-
-        if (KMALLOC_MIN_SIZE <= 64) {
-                kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
-                caches++;
-        }
-
-        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
-                kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
-                caches++;
-        }
-
-        slab_state = UP;
-
-        /* Provide the correct kmalloc names now that the caches are up */
-        if (KMALLOC_MIN_SIZE <= 32) {
-                kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
-                BUG_ON(!kmalloc_caches[1]->name);
-        }
-
-        if (KMALLOC_MIN_SIZE <= 64) {
-                kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
-                BUG_ON(!kmalloc_caches[2]->name);
-        }
-
-        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
-                char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
-
-                BUG_ON(!s);
-                kmalloc_caches[i]->name = s;
-        }
+        create_kmalloc_caches(0);

 #ifdef CONFIG_SMP
         register_cpu_notifier(&slab_notifier);
 #endif

-#ifdef CONFIG_ZONE_DMA
-        for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
-                struct kmem_cache *s = kmalloc_caches[i];
-
-                if (s && s->size) {
-                        char *name = kasprintf(GFP_NOWAIT,
-                                 "dma-kmalloc-%d", s->object_size);
-
-                        BUG_ON(!name);
-                        kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-                                s->object_size, SLAB_CACHE_DMA);
-                }
-        }
-#endif
         printk(KERN_INFO
-                "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+                "SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
                 " CPUs=%d, Nodes=%d\n",
-                caches, cache_line_size(),
+                cache_line_size(),
                 slub_min_order, slub_max_order, slub_min_objects,
                 nr_cpu_ids, nr_node_ids);
 }
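With the common kmalloc array in place, kmem_cache_init() hands the whole kmalloc setup, including the open-coded loops removed above, to create_kmalloc_caches(). A rough userspace sketch of what the power-of-two part of that setup boils down to; the shift values here are illustrative, and the 96/192-byte and DMA caches are left out:

    #include <stdio.h>

    #define KMALLOC_SHIFT_LOW   3        /* smallest cache: 8 bytes (illustrative) */
    #define KMALLOC_SHIFT_HIGH 13        /* largest cache: 8192 bytes (illustrative) */

    static const char *kmalloc_cache_names[KMALLOC_SHIFT_HIGH + 1];
    static char name_buf[KMALLOC_SHIFT_HIGH + 1][32];

    int main(void)
    {
            /* One cache per power of two between the low and high shifts,
             * named "kmalloc-<size>", mirroring the loop that used to live
             * in kmem_cache_init(). */
            for (int i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                    snprintf(name_buf[i], sizeof(name_buf[i]), "kmalloc-%d", 1 << i);
                    kmalloc_cache_names[i] = name_buf[i];
                    printf("index %2d -> %s\n", i, kmalloc_cache_names[i]);
            }
            return 0;
    }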
@@ -3933,10 +3792,10 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         struct kmem_cache *s;
         void *ret;

-        if (unlikely(size > SLUB_MAX_SIZE))
+        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                 return kmalloc_large(size, gfpflags);

-        s = get_slab(size, gfpflags);
+        s = kmalloc_slab(size, gfpflags);

         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
@@ -3956,7 +3815,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         struct kmem_cache *s;
         void *ret;

-        if (unlikely(size > SLUB_MAX_SIZE)) {
+        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
                 ret = kmalloc_large_node(size, gfpflags, node);

                 trace_kmalloc_node(caller, ret,
@@ -3966,7 +3825,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                 return ret;
         }

-        s = get_slab(size, gfpflags);
+        s = kmalloc_slab(size, gfpflags);

         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
@@ -4315,7 +4174,7 @@ static void resiliency_test(void)
 {
         u8 *p;

-        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);

         printk(KERN_ERR "SLUB resiliency testing\n");
         printk(KERN_ERR "-----------------------\n");