author		Pekka Enberg <penberg@kernel.org>	2013-05-07 02:19:47 -0400
committer	Pekka Enberg <penberg@kernel.org>	2013-05-07 02:19:47 -0400
commit		69df2ac1288b456a95aceadafbf88cd891a577c8 (patch)
tree		0f2e83a8c4bc826f12d3f3871ecc1d7be0c9e4e3 /mm/slub.c
parent		c1be5a5b1b355d40e6cf79cc979eb66dafa24ad1 (diff)
parent		8a965b3baa89ffedc73c0fbc750006c631012ced (diff)
Merge branch 'slab/next' into slab/for-linus
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	221
1 file changed, 40 insertions, 181 deletions
@@ -1005,7 +1005,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 	 * dilemma by deferring the increment of the count during
 	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
-	if (n) {
+	if (likely(n)) {
 		atomic_long_inc(&n->nr_slabs);
 		atomic_long_add(objects, &n->total_objects);
 	}
@@ -1493,7 +1493,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
-		int mode)
+		int mode, int *objects)
 {
 	void *freelist;
 	unsigned long counters;
@@ -1507,6 +1507,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	freelist = page->freelist;
 	counters = page->counters;
 	new.counters = counters;
+	*objects = new.objects - new.inuse;
 	if (mode) {
 		new.inuse = page->objects;
 		new.freelist = NULL;
@@ -1528,7 +1529,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	return freelist;
 }
 
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
@@ -1539,6 +1540,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
 	struct page *page, *page2;
 	void *object = NULL;
+	int available = 0;
+	int objects;
 
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1552,22 +1555,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
-		int available;
 
 		if (!pfmemalloc_match(page, flags))
 			continue;
 
-		t = acquire_slab(s, n, page, object == NULL);
+		t = acquire_slab(s, n, page, object == NULL, &objects);
 		if (!t)
 			break;
 
+		available += objects;
 		if (!object) {
 			c->page = page;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
-			available = page->objects - page->inuse;
 		} else {
-			available = put_cpu_partial(s, page, 0);
+			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
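Taken together, the acquire_slab() and get_partial_node() hunks above move the tracking of available objects out of put_cpu_partial() and into the partial-list scan itself: acquire_slab() now reports how many free objects the acquired slab holds, and the caller accumulates that count. A simplified sketch of the resulting scan, with the pfmemalloc/debug checks, locking and statistics elided, looks roughly like this:

	int available = 0;
	int objects;

	list_for_each_entry_safe(page, page2, &n->partial, lru) {
		/* acquire_slab() stores the slab's free-object count in 'objects' */
		void *t = acquire_slab(s, n, page, object == NULL, &objects);
		if (!t)
			break;

		available += objects;
		if (!object)
			object = t;			/* first slab refills the cpu slab */
		else
			put_cpu_partial(s, page, 0);	/* the rest feed the per-cpu partial list */

		if (available > s->cpu_partial / 2)
			break;				/* enough objects cached; stop scanning */
	}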
@@ -1946,7 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1984,7 +1986,6 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	return pobjects;
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2041,7 +2042,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
+	if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
 		return 0;
 #endif
 	return 1;
@@ -2331,13 +2332,18 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 
 	s = memcg_kmem_get_cache(s, gfpflags);
 redo:
-
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
 	 * enabled. We may switch back and forth between cpus while
 	 * reading from one cpu area. That does not matter as long
 	 * as we end up on the original cpu again when doing the cmpxchg.
+	 *
+	 * Preemption is disabled for the retrieval of the tid because that
+	 * must occur from the current processor. We cannot allow rescheduling
+	 * on a different processor between the determination of the pointer
+	 * and the retrieval of the tid.
 	 */
+	preempt_disable();
 	c = __this_cpu_ptr(s->cpu_slab);
 
 	/*
@@ -2347,7 +2353,7 @@ redo:
 	 * linked list in between.
 	 */
 	tid = c->tid;
-	barrier();
+	preempt_enable();
 
 	object = c->freelist;
 	page = c->page;
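The two slab_alloc_node() hunks above replace the compiler barrier() with a preempt_disable()/preempt_enable() pair, so the per-cpu pointer and the transaction id (tid) are guaranteed to be read on the same processor. A minimal sketch of the fastpath pattern after this change, with the slowpath and error handling omitted:

	preempt_disable();
	c = __this_cpu_ptr(s->cpu_slab);	/* per-cpu slab state */
	tid = c->tid;				/* sampled on the same cpu as c */
	preempt_enable();

	object = c->freelist;
	page = c->page;

	/*
	 * The later this_cpu_cmpxchg_double() on (freelist, tid) still
	 * detects any migration or intervening operation and retries.
	 */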
@@ -2594,10 +2600,11 @@ redo:
 	 * data is retrieved via this pointer. If we are on the same cpu
 	 * during the cmpxchg then the free will succedd.
 	 */
+	preempt_disable();
 	c = __this_cpu_ptr(s->cpu_slab);
 
 	tid = c->tid;
-	barrier();
+	preempt_enable();
 
 	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);
@@ -2775,7 +2782,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
 
 	/*
 	 * Must align to double word boundary for the double cmpxchg
@@ -2982,7 +2989,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		s->allocflags |= __GFP_COMP;
 
 	if (s->flags & SLAB_CACHE_DMA)
-		s->allocflags |= SLUB_DMA;
+		s->allocflags |= GFP_DMA;
 
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
 		s->allocflags |= __GFP_RECLAIMABLE;
@@ -3174,13 +3181,6 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
-EXPORT_SYMBOL(kmalloc_caches);
-
-#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
-#endif
-
 static int __init setup_slub_min_order(char *str)
 {
 	get_option(&str, &slub_min_order);
@@ -3217,73 +3217,15 @@ static int __init setup_slub_nomerge(char *str)
 
 __setup("slub_nomerge", setup_slub_nomerge);
 
-/*
- * Conversion table for small slabs sizes / 8 to the index in the
- * kmalloc array. This is necessary for slabs < 192 since we have non power
- * of two cache sizes there. The size of larger slabs can be determined using
- * fls.
- */
-static s8 size_index[24] = {
-	3,	/* 8 */
-	4,	/* 16 */
-	5,	/* 24 */
-	5,	/* 32 */
-	6,	/* 40 */
-	6,	/* 48 */
-	6,	/* 56 */
-	6,	/* 64 */
-	1,	/* 72 */
-	1,	/* 80 */
-	1,	/* 88 */
-	1,	/* 96 */
-	7,	/* 104 */
-	7,	/* 112 */
-	7,	/* 120 */
-	7,	/* 128 */
-	2,	/* 136 */
-	2,	/* 144 */
-	2,	/* 152 */
-	2,	/* 160 */
-	2,	/* 168 */
-	2,	/* 176 */
-	2,	/* 184 */
-	2	/* 192 */
-};
-
-static inline int size_index_elem(size_t bytes)
-{
-	return (bytes - 1) / 8;
-}
-
-static struct kmem_cache *get_slab(size_t size, gfp_t flags)
-{
-	int index;
-
-	if (size <= 192) {
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		index = size_index[size_index_elem(size)];
-	} else
-		index = fls(size - 1);
-
-#ifdef CONFIG_ZONE_DMA
-	if (unlikely((flags & SLUB_DMA)))
-		return kmalloc_dma_caches[index];
-
-#endif
-	return kmalloc_caches[index];
-}
-
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
-	s = get_slab(size, flags);
+	s = kmalloc_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -3316,7 +3258,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		trace_kmalloc_node(_RET_IP_, ret,
@@ -3326,7 +3268,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 		return ret;
 	}
 
-	s = get_slab(size, flags);
+	s = kmalloc_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -3617,6 +3559,12 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 
 	memcpy(s, static_cache, kmem_cache->object_size);
 
+	/*
+	 * This runs very early, and only the boot processor is supposed to be
+	 * up. Even if it weren't true, IRQs are not up so we couldn't fire
+	 * IPIs around.
+	 */
+	__flush_cpu_slab(s, smp_processor_id());
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 		struct page *p;
@@ -3639,8 +3587,6 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
-	int i;
-	int caches = 2;
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -3671,103 +3617,16 @@ void __init kmem_cache_init(void)
 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
 
 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
-
-	/*
-	 * Patch up the size_index table if we have strange large alignment
-	 * requirements for the kmalloc array. This is only the case for
-	 * MIPS it seems. The standard arches will not generate any code here.
-	 *
-	 * Largest permitted alignment is 256 bytes due to the way we
-	 * handle the index determination for the smaller caches.
-	 *
-	 * Make sure that nothing crazy happens if someone starts tinkering
-	 * around with ARCH_KMALLOC_MINALIGN
-	 */
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
-
-	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
-		int elem = size_index_elem(i);
-		if (elem >= ARRAY_SIZE(size_index))
-			break;
-		size_index[elem] = KMALLOC_SHIFT_LOW;
-	}
-
-	if (KMALLOC_MIN_SIZE == 64) {
-		/*
-		 * The 96 byte size cache is not used if the alignment
-		 * is 64 byte.
-		 */
-		for (i = 64 + 8; i <= 96; i += 8)
-			size_index[size_index_elem(i)] = 7;
-	} else if (KMALLOC_MIN_SIZE == 128) {
-		/*
-		 * The 192 byte sized cache is not used if the alignment
-		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
-		 * instead.
-		 */
-		for (i = 128 + 8; i <= 192; i += 8)
-			size_index[size_index_elem(i)] = 8;
-	}
-
-	/* Caches that are not of the two-to-the-power-of size */
-	if (KMALLOC_MIN_SIZE <= 32) {
-		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
-		caches++;
-	}
-
-	if (KMALLOC_MIN_SIZE <= 64) {
-		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
-		caches++;
-	}
-
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
-		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
-		caches++;
-	}
-
-	slab_state = UP;
-
-	/* Provide the correct kmalloc names now that the caches are up */
-	if (KMALLOC_MIN_SIZE <= 32) {
-		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
-		BUG_ON(!kmalloc_caches[1]->name);
-	}
-
-	if (KMALLOC_MIN_SIZE <= 64) {
-		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
-		BUG_ON(!kmalloc_caches[2]->name);
-	}
-
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
-		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
-
-		BUG_ON(!s);
-		kmalloc_caches[i]->name = s;
-	}
+	create_kmalloc_caches(0);
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
 #endif
 
-#ifdef CONFIG_ZONE_DMA
-	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
-		struct kmem_cache *s = kmalloc_caches[i];
-
-		if (s && s->size) {
-			char *name = kasprintf(GFP_NOWAIT,
-				"dma-kmalloc-%d", s->object_size);
-
-			BUG_ON(!name);
-			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-				s->object_size, SLAB_CACHE_DMA);
-		}
-	}
-#endif
 	printk(KERN_INFO
-		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+		"SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" CPUs=%d, Nodes=%d\n",
-		caches, cache_line_size(),
+		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }
@@ -3930,10 +3789,10 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
-	s = get_slab(size, gfpflags);
+	s = kmalloc_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -3953,7 +3812,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
 		trace_kmalloc_node(caller, ret,
@@ -3963,7 +3822,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 		return ret;
 	}
 
-	s = get_slab(size, gfpflags);
+	s = kmalloc_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
@@ -4312,7 +4171,7 @@ static void resiliency_test(void)
 {
 	u8 *p;
 
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
 	printk(KERN_ERR "-----------------------\n");