Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c        | 253
-rw-r--r--  mm/slab.h        |  19
-rw-r--r--  mm/slab_common.c |  80
-rw-r--r--  mm/slob.c        |  60
-rw-r--r--  mm/slub.c        | 145
5 files changed, 280 insertions(+), 277 deletions(-)
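In summary, this patch moves allocation of the struct kmem_cache descriptor out of the individual allocators (SLAB, SLOB, SLUB) and into common code in mm/slab_common.c. The allocator-specific entry point no longer allocates anything itself; it receives a preallocated, partially filled descriptor and only performs allocator-specific setup. The sketch below is a reader's digest of the new interface as declared in the mm/slab.h hunk further down, not additional code from the patch:

/* Reader's summary of the allocator-facing interface after this patch
 * (see the mm/slab.h hunk below); not code added by the patch itself. */

/* Boot cache from which all struct kmem_cache descriptors are allocated. */
extern struct kmem_cache *kmem_cache;

/* Allocator-specific part of cache creation: the descriptor is already
 * allocated and filled in by common code; returns 0 or a negative errno. */
int __kmem_cache_create(struct kmem_cache *s, unsigned long flags);

/* Allocator-specific part of cache teardown: returns 0 if the cache was
 * empty and its per-allocator resources were released. */
int __kmem_cache_shutdown(struct kmem_cache *s);

/* SLUB only: try to satisfy the request by aliasing a mergeable existing
 * cache; the other allocators provide a stub that returns NULL. */
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
                size_t align, unsigned long flags, void (*ctor)(void *));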
diff --git a/mm/slab.c b/mm/slab.c
index d264d90b3682..a99f71a39baf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -570,9 +570,9 @@ static struct arraycache_init initarray_generic =
         { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-        .nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+        .nodelists = kmem_cache_nodelists,
         .batchcount = 1,
         .limit = BOOT_CPUCACHE_ENTRIES,
         .shared = 1,
@@ -795,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
         *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }

+#if DEBUG
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

 static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -805,6 +806,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
         dump_stack();
         add_taint(TAINT_BAD_PAGE);
 }
+#endif

 /*
  * By default on NUMA we use alien caches to stage the freeing of
@@ -1587,15 +1589,17 @@ void __init kmem_cache_init(void)
         int order;
         int node;

+        kmem_cache = &kmem_cache_boot;
+
         if (num_possible_nodes() == 1)
                 use_alien_caches = 0;

         for (i = 0; i < NUM_INIT_LISTS; i++) {
                 kmem_list3_init(&initkmem_list3[i]);
                 if (i < MAX_NUMNODES)
-                        cache_cache.nodelists[i] = NULL;
+                        kmem_cache->nodelists[i] = NULL;
         }
-        set_up_list3s(&cache_cache, CACHE_CACHE);
+        set_up_list3s(kmem_cache, CACHE_CACHE);

         /*
          * Fragmentation resistance on low memory - only use bigger
@@ -1607,9 +1611,9 @@ void __init kmem_cache_init(void)

         /* Bootstrap is tricky, because several objects are allocated
          * from caches that do not exist yet:
-         * 1) initialize the cache_cache cache: it contains the struct
-         *    kmem_cache structures of all caches, except cache_cache itself:
-         *    cache_cache is statically allocated.
+         * 1) initialize the kmem_cache cache: it contains the struct
+         *    kmem_cache structures of all caches, except kmem_cache itself:
+         *    kmem_cache is statically allocated.
          *    Initially an __init data area is used for the head array and the
          *    kmem_list3 structures, it's replaced with a kmalloc allocated
          *    array at the end of the bootstrap.
@@ -1618,43 +1622,43 @@ void __init kmem_cache_init(void)
          *    An __init data area is used for the head array.
          * 3) Create the remaining kmalloc caches, with minimally sized
          *    head arrays.
-         * 4) Replace the __init data head arrays for cache_cache and the first
+         * 4) Replace the __init data head arrays for kmem_cache and the first
          *    kmalloc cache with kmalloc allocated arrays.
-         * 5) Replace the __init data for kmem_list3 for cache_cache and
+         * 5) Replace the __init data for kmem_list3 for kmem_cache and
          *    the other cache's with kmalloc allocated memory.
          * 6) Resize the head arrays of the kmalloc caches to their final sizes.
          */

         node = numa_mem_id();

-        /* 1) create the cache_cache */
+        /* 1) create the kmem_cache */
         INIT_LIST_HEAD(&slab_caches);
-        list_add(&cache_cache.list, &slab_caches);
-        cache_cache.colour_off = cache_line_size();
-        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-        cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+        list_add(&kmem_cache->list, &slab_caches);
+        kmem_cache->colour_off = cache_line_size();
+        kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+        kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];

         /*
          * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
          */
-        cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+        kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
                                   nr_node_ids * sizeof(struct kmem_list3 *);
-        cache_cache.object_size = cache_cache.size;
-        cache_cache.size = ALIGN(cache_cache.size,
+        kmem_cache->object_size = kmem_cache->size;
+        kmem_cache->size = ALIGN(kmem_cache->object_size,
                                         cache_line_size());
-        cache_cache.reciprocal_buffer_size =
-                reciprocal_value(cache_cache.size);
+        kmem_cache->reciprocal_buffer_size =
+                reciprocal_value(kmem_cache->size);

         for (order = 0; order < MAX_ORDER; order++) {
-                cache_estimate(order, cache_cache.size,
-                        cache_line_size(), 0, &left_over, &cache_cache.num);
-                if (cache_cache.num)
+                cache_estimate(order, kmem_cache->size,
+                        cache_line_size(), 0, &left_over, &kmem_cache->num);
+                if (kmem_cache->num)
                         break;
         }
-        BUG_ON(!cache_cache.num);
-        cache_cache.gfporder = order;
-        cache_cache.colour = left_over / cache_cache.colour_off;
-        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+        BUG_ON(!kmem_cache->num);
+        kmem_cache->gfporder = order;
+        kmem_cache->colour = left_over / kmem_cache->colour_off;
+        kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
                                       sizeof(struct slab), cache_line_size());

         /* 2+3) create the kmalloc caches */
@@ -1667,19 +1671,22 @@ void __init kmem_cache_init(void)
          * bug.
          */

-        sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
-                                        sizes[INDEX_AC].cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                        NULL);
+        sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+        sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+        sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+        sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+        sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+        __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+        list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);

         if (INDEX_AC != INDEX_L3) {
-                sizes[INDEX_L3].cs_cachep =
-                        __kmem_cache_create(names[INDEX_L3].name,
-                                sizes[INDEX_L3].cs_size,
-                                ARCH_KMALLOC_MINALIGN,
-                                ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                NULL);
+                sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+                sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+                sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+                sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+                sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+                __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+                list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
         }

         slab_early_init = 0;
@@ -1693,20 +1700,23 @@ void __init kmem_cache_init(void)
                  * allow tighter packing of the smaller caches.
                  */
                 if (!sizes->cs_cachep) {
-                        sizes->cs_cachep = __kmem_cache_create(names->name,
-                                        sizes->cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-                                        NULL);
+                        sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+                        sizes->cs_cachep->name = names->name;
+                        sizes->cs_cachep->size = sizes->cs_size;
+                        sizes->cs_cachep->object_size = sizes->cs_size;
+                        sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+                        __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+                        list_add(&sizes->cs_cachep->list, &slab_caches);
                 }
 #ifdef CONFIG_ZONE_DMA
-                sizes->cs_dmacachep = __kmem_cache_create(
-                                        names->name_dma,
-                                        sizes->cs_size,
-                                        ARCH_KMALLOC_MINALIGN,
-                                        ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-                                                SLAB_PANIC,
-                                        NULL);
+                sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+                sizes->cs_dmacachep->name = names->name_dma;
+                sizes->cs_dmacachep->size = sizes->cs_size;
+                sizes->cs_dmacachep->object_size = sizes->cs_size;
+                sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
+                __kmem_cache_create(sizes->cs_dmacachep,
+                               ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
+                list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
                 sizes++;
                 names++;
@@ -1717,15 +1727,15 @@ void __init kmem_cache_init(void)

         ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

-        BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-        memcpy(ptr, cpu_cache_get(&cache_cache),
+        BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+        memcpy(ptr, cpu_cache_get(kmem_cache),
                sizeof(struct arraycache_init));
         /*
          * Do not assume that spinlocks can be initialized via memcpy:
          */
         spin_lock_init(&ptr->lock);

-        cache_cache.array[smp_processor_id()] = ptr;
+        kmem_cache->array[smp_processor_id()] = ptr;

         ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

@@ -1746,7 +1756,7 @@ void __init kmem_cache_init(void)
                 int nid;

                 for_each_online_node(nid) {
-                        init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+                        init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);

                         init_list(malloc_sizes[INDEX_AC].cs_cachep,
                                   &initkmem_list3[SIZE_AC + nid], nid);
@@ -2195,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
         }
 }

-static void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-        int i;
-        struct kmem_list3 *l3;
-
-        for_each_online_cpu(i)
-            kfree(cachep->array[i]);
-
-        /* NUMA: free the list3 structures */
-        for_each_online_node(i) {
-                l3 = cachep->nodelists[i];
-                if (l3) {
-                        kfree(l3->shared);
-                        free_alien_cache(l3->alien);
-                        kfree(l3);
-                }
-        }
-        kmem_cache_free(&cache_cache, cachep);
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2352,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2367,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
-        unsigned long flags, void (*ctor)(void *))
+int
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
         size_t left_over, slab_size, ralign;
-        struct kmem_cache *cachep = NULL;
         gfp_t gfp;
+        int err;
+        size_t size = cachep->size;

 #if DEBUG
 #if FORCED_DEBUG
@@ -2445,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
                 ralign = ARCH_SLAB_MINALIGN;
         }
         /* 3) caller mandated alignment */
-        if (ralign < align) {
-                ralign = align;
+        if (ralign < cachep->align) {
+                ralign = cachep->align;
         }
         /* disable debug if necessary */
         if (ralign > __alignof__(unsigned long long))
@@ -2454,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
         /*
          * 4) Store it.
          */
-        align = ralign;
+        cachep->align = ralign;

         if (slab_is_available())
                 gfp = GFP_KERNEL;
         else
                 gfp = GFP_NOWAIT;

-        /* Get cache's description obj. */
-        cachep = kmem_cache_zalloc(&cache_cache, gfp);
-        if (!cachep)
-                return NULL;
-
         cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-        cachep->object_size = size;
-        cachep->align = align;
 #if DEBUG

         /*
@@ -2514,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
          */
                 flags |= CFLGS_OFF_SLAB;

-        size = ALIGN(size, align);
+        size = ALIGN(size, cachep->align);

-        left_over = calculate_slab_order(cachep, size, align, flags);
+        left_over = calculate_slab_order(cachep, size, cachep->align, flags);
+
+        if (!cachep->num)
+                return -E2BIG;

-        if (!cachep->num) {
-                printk(KERN_ERR
-                       "kmem_cache_create: couldn't create cache %s.\n", name);
-                kmem_cache_free(&cache_cache, cachep);
-                return NULL;
-        }
         slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-                          + sizeof(struct slab), align);
+                          + sizeof(struct slab), cachep->align);

         /*
          * If the slab has been placed off-slab, and we have enough space then
@@ -2553,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,

         cachep->colour_off = cache_line_size();
         /* Offset must be a multiple of the alignment. */
-        if (cachep->colour_off < align)
-                cachep->colour_off = align;
+        if (cachep->colour_off < cachep->align)
+                cachep->colour_off = cachep->align;
         cachep->colour = left_over / cachep->colour_off;
         cachep->slab_size = slab_size;
         cachep->flags = flags;
@@ -2575,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
          */
                 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
         }
-        cachep->ctor = ctor;
-        cachep->name = name;

-        if (setup_cpu_cache(cachep, gfp)) {
-                __kmem_cache_destroy(cachep);
-                return NULL;
+        err = setup_cpu_cache(cachep, gfp);
+        if (err) {
+                __kmem_cache_shutdown(cachep);
+                return err;
         }

         if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2593,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
                 slab_set_debugobj_lock_classes(cachep);
         }

-        /* cache setup completed, link it into the list */
-        list_add(&cachep->list, &slab_caches);
-        return cachep;
+        return 0;
 }

 #if DEBUG
@@ -2754,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);

-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-        BUG_ON(!cachep || in_interrupt());
+        int i;
+        struct kmem_list3 *l3;
+        int rc = __cache_shrink(cachep);

-        /* Find the cache in the chain of caches. */
-        get_online_cpus();
-        mutex_lock(&slab_mutex);
-        /*
-         * the chain is never empty, cache_cache is never destroyed
-         */
-        list_del(&cachep->list);
-        if (__cache_shrink(cachep)) {
-                slab_error(cachep, "Can't free all objects");
-                list_add(&cachep->list, &slab_caches);
-                mutex_unlock(&slab_mutex);
-                put_online_cpus();
-                return;
-        }
+        if (rc)
+                return rc;

-        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-                rcu_barrier();
+        for_each_online_cpu(i)
+            kfree(cachep->array[i]);

-        __kmem_cache_destroy(cachep);
-        mutex_unlock(&slab_mutex);
-        put_online_cpus();
+        /* NUMA: free the list3 structures */
+        for_each_online_node(i) {
+                l3 = cachep->nodelists[i];
+                if (l3) {
+                        kfree(l3->shared);
+                        free_alien_cache(l3->alien);
+                        kfree(l3);
+                }
+        }
+        return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);

 /*
  * Get the memory for a slab management obj.
@@ -3330,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,

 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-        if (cachep == &cache_cache)
+        if (cachep == kmem_cache)
                 return false;

         return should_failslab(cachep->object_size, flags, cachep->flags);
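Since __kmem_cache_create() no longer allocates the descriptor, SLAB's boot-time kmalloc caches above are now built by the same open-coded five-step pattern: zalloc a descriptor from kmem_cache, fill in name/size/object_size/align, call __kmem_cache_create(), and link the cache onto slab_caches. A hypothetical helper like the one below would capture that repeated pattern; the patch itself does not introduce it:

/* Hypothetical helper (not part of this patch) summarizing the pattern
 * repeated for each bootstrap kmalloc cache in kmem_cache_init() above. */
static struct kmem_cache *__init create_boot_cache(const char *name, size_t size,
                                                   unsigned long flags)
{
        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

        s->name = name;
        s->size = size;
        s->object_size = size;
        s->align = ARCH_KMALLOC_MINALIGN;
        __kmem_cache_create(s, flags | SLAB_PANIC);
        list_add(&s->list, &slab_caches);
        return s;
}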
diff --git a/mm/slab.h b/mm/slab.h
index db7848caaa25..7deeb449a301 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,9 +25,26 @@ extern enum slab_state slab_state;

 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
+
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;

-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
         size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+        size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+
+int __kmem_cache_shutdown(struct kmem_cache *);

 #endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8cf8b4962d6c..9c217255ac49 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -22,6 +22,7 @@
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;

 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, size_t size)
@@ -98,21 +99,92 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
                         unsigned long flags, void (*ctor)(void *))
 {
         struct kmem_cache *s = NULL;
+        int err = 0;

         get_online_cpus();
         mutex_lock(&slab_mutex);
-        if (kmem_cache_sanity_check(name, size) == 0)
-                s = __kmem_cache_create(name, size, align, flags, ctor);
+
+        if (!kmem_cache_sanity_check(name, size) == 0)
+                goto out_locked;
+
+
+        s = __kmem_cache_alias(name, size, align, flags, ctor);
+        if (s)
+                goto out_locked;
+
+        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+        if (s) {
+                s->object_size = s->size = size;
+                s->align = align;
+                s->ctor = ctor;
+                s->name = kstrdup(name, GFP_KERNEL);
+                if (!s->name) {
+                        kmem_cache_free(kmem_cache, s);
+                        err = -ENOMEM;
+                        goto out_locked;
+                }
+
+                err = __kmem_cache_create(s, flags);
+                if (!err) {
+
+                        s->refcount = 1;
+                        list_add(&s->list, &slab_caches);
+
+                } else {
+                        kfree(s->name);
+                        kmem_cache_free(kmem_cache, s);
+                }
+        } else
+                err = -ENOMEM;
+
+out_locked:
         mutex_unlock(&slab_mutex);
         put_online_cpus();

-        if (!s && (flags & SLAB_PANIC))
-                panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+        if (err) {
+
+                if (flags & SLAB_PANIC)
+                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
+                                name, err);
+                else {
+                        printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
+                                name, err);
+                        dump_stack();
+                }
+
+                return NULL;
+        }

         return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);

+void kmem_cache_destroy(struct kmem_cache *s)
+{
+        get_online_cpus();
+        mutex_lock(&slab_mutex);
+        s->refcount--;
+        if (!s->refcount) {
+                list_del(&s->list);
+
+                if (!__kmem_cache_shutdown(s)) {
+                        if (s->flags & SLAB_DESTROY_BY_RCU)
+                                rcu_barrier();
+
+                        kfree(s->name);
+                        kmem_cache_free(kmem_cache, s);
+                } else {
+                        list_add(&s->list, &slab_caches);
+                        printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+                                s->name);
+                        dump_stack();
+                }
+        }
+        mutex_unlock(&slab_mutex);
+        put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
 int slab_is_available(void)
 {
         return slab_state >= UP;
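Condensed, the new common creation path above splits the work between common and allocator-specific code as sketched below (a simplified restatement of the kmem_cache_create() hunk above; cpu-hotplug locking, the sanity check and the SLAB_PANIC/printk error reporting are left out for brevity):

/* Simplified restatement of kmem_cache_create() from the hunk above;
 * error reporting and cpu hotplug locking are omitted. */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                size_t align, unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s;

        mutex_lock(&slab_mutex);

        /* SLUB may alias an existing, mergeable cache. */
        s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
                goto out;

        /* Common code allocates and fills the descriptor... */
        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (!s)
                goto out;
        s->object_size = s->size = size;
        s->align = align;
        s->ctor = ctor;
        s->name = kstrdup(name, GFP_KERNEL);

        /* ...and the allocator only finishes the allocator-specific setup. */
        if (!s->name || __kmem_cache_create(s, flags)) {
                kfree(s->name);
                kmem_cache_free(kmem_cache, s);
                s = NULL;
                goto out;
        }

        s->refcount = 1;
        list_add(&s->list, &slab_caches);
out:
        mutex_unlock(&slab_mutex);
        return s;
}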
diff --git a/mm/slob.c b/mm/slob.c
index dd47d16d57b6..f3a5ced392d7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -529,44 +529,24 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);

-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-        size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-        struct kmem_cache *c;
+        size_t align = c->size;

-        c = slob_alloc(sizeof(struct kmem_cache),
-                GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);
-
-        if (c) {
-                c->name = name;
-                c->size = size;
-                if (flags & SLAB_DESTROY_BY_RCU) {
-                        /* leave room for rcu footer at the end of object */
-                        c->size += sizeof(struct slob_rcu);
-                }
-                c->flags = flags;
-                c->ctor = ctor;
-                /* ignore alignment unless it's forced */
-                c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-                if (c->align < ARCH_SLAB_MINALIGN)
-                        c->align = ARCH_SLAB_MINALIGN;
-                if (c->align < align)
-                        c->align = align;
-
-                kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-                c->refcount = 1;
+        if (flags & SLAB_DESTROY_BY_RCU) {
+                /* leave room for rcu footer at the end of object */
+                c->size += sizeof(struct slob_rcu);
         }
-        return c;
-}
+        c->flags = flags;
+        /* ignore alignment unless it's forced */
+        c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+        if (c->align < ARCH_SLAB_MINALIGN)
+                c->align = ARCH_SLAB_MINALIGN;
+        if (c->align < align)
+                c->align = align;

-void kmem_cache_destroy(struct kmem_cache *c)
-{
-        kmemleak_free(c);
-        if (c->flags & SLAB_DESTROY_BY_RCU)
-                rcu_barrier();
-        slob_free(c, sizeof(struct kmem_cache));
+        return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);

 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -634,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);

+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+        /* No way to check for remaining objects */
+        return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
         return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);

+struct kmem_cache kmem_cache_boot = {
+        .name = "kmem_cache",
+        .size = sizeof(struct kmem_cache),
+        .flags = SLAB_PANIC,
+        .align = ARCH_KMALLOC_MINALIGN,
+};
+
 void __init kmem_cache_init(void)
 {
+        kmem_cache = &kmem_cache_boot;
         slab_state = UP;
 }

diff --git a/mm/slub.c b/mm/slub.c
index 97a49d9a37cd..a0d698467f70 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
                                                         { return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s)
-{
-        kfree(s->name);
-        kfree(s);
-}
+static inline void sysfs_slab_remove(struct kmem_cache *s) { }

 #endif

@@ -626,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
         print_trailer(s, page, object);
 }

-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
         va_list args;
         char buf[100];
@@ -2627,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x)

         page = virt_to_head_page(x);

+        if (kmem_cache_debug(s) && page->slab != s) {
+                pr_err("kmem_cache_free: Wrong slab cache. %s but object"
+                        " is from %s\n", page->slab->name, s->name);
+                WARN_ON_ONCE(1);
+                return;
+        }
+
         slab_free(s, page, x, _RET_IP_);

         trace_kmem_cache_free(_RET_IP_, x);
@@ -3041,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)

 }

-static int kmem_cache_open(struct kmem_cache *s,
-                const char *name, size_t size,
-                size_t align, unsigned long flags,
-                void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-        memset(s, 0, kmem_size);
-        s->name = name;
-        s->ctor = ctor;
-        s->object_size = size;
-        s->align = align;
-        s->flags = kmem_cache_flags(size, flags, name, ctor);
+        s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
         s->reserved = 0;

         if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3113,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s,
         else
                 s->cpu_partial = 30;

-        s->refcount = 1;
 #ifdef CONFIG_NUMA
         s->remote_node_defrag_ratio = 1000;
 #endif
@@ -3121,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s,
                 goto error;

         if (alloc_kmem_cache_cpus(s))
-                return 1;
+                return 0;

         free_kmem_cache_nodes(s);
 error:
         if (flags & SLAB_PANIC)
                 panic("Cannot create slab %s size=%lu realsize=%u "
                         "order=%u offset=%u flags=%lx\n",
-                        s->name, (unsigned long)size, s->size, oo_order(s->oo),
+                        s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
                         s->offset, flags);
-        return 0;
+        return -EINVAL;
 }

 /*
@@ -3152,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                         sizeof(long), GFP_ATOMIC);
         if (!map)
                 return;
-        slab_err(s, page, "%s", text);
+        slab_err(s, page, text, s->name);
         slab_lock(page);

         get_map(s, page, map);
@@ -3184,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                         discard_slab(s, page);
                 } else {
                         list_slab_objects(s, page,
-                        "Objects remaining on kmem_cache_close()");
+                        "Objects remaining in %s on kmem_cache_close()");
                 }
         }
 }
@@ -3197,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
         int node;

         flush_all(s);
-        free_percpu(s->cpu_slab);
         /* Attempt to free all objects */
         for_each_node_state(node, N_NORMAL_MEMORY) {
                 struct kmem_cache_node *n = get_node(s, node);
@@ -3206,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
                 if (n->nr_partial || slabs_node(s, node))
                         return 1;
         }
+        free_percpu(s->cpu_slab);
         free_kmem_cache_nodes(s);
         return 0;
 }

-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-        mutex_lock(&slab_mutex);
-        s->refcount--;
-        if (!s->refcount) {
-                list_del(&s->list);
-                mutex_unlock(&slab_mutex);
-                if (kmem_cache_close(s)) {
-                        printk(KERN_ERR "SLUB %s: %s called for cache that "
-                                "still has objects.\n", s->name, __func__);
-                        dump_stack();
-                }
-                if (s->flags & SLAB_DESTROY_BY_RCU)
-                        rcu_barrier();
+        int rc = kmem_cache_close(s);
+
+        if (!rc)
                 sysfs_slab_remove(s);
-        } else
-                mutex_unlock(&slab_mutex);
+
+        return rc;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);

 /********************************************************************
  *              Kmalloc subsystem
@@ -3241,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 EXPORT_SYMBOL(kmalloc_caches);

-static struct kmem_cache *kmem_cache;
-
 #ifdef CONFIG_ZONE_DMA
 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
 #endif
@@ -3288,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 {
         struct kmem_cache *s;

-        s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+        s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+
+        s->name = name;
+        s->size = s->object_size = size;
+        s->align = ARCH_KMALLOC_MINALIGN;

         /*
          * This function is called with IRQs disabled during early-boot on
          * single CPU so there's no need to take slab_mutex here.
          */
-        if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-                                                                flags, NULL))
+        if (kmem_cache_open(s, flags))
                 goto panic;

         list_add(&s->list, &slab_caches);
@@ -3734,12 +3715,12 @@ void __init kmem_cache_init(void)
         slub_max_order = 0;

         kmem_size = offsetof(struct kmem_cache, node) +
                                 nr_node_ids * sizeof(struct kmem_cache_node *);

         /* Allocate two kmem_caches from the page allocator */
         kmalloc_size = ALIGN(kmem_size, cache_line_size());
         order = get_order(2 * kmalloc_size);
-        kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
+        kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);

         /*
          * Must first have the slab cache available for the allocations of the
@@ -3748,9 +3729,10 @@ void __init kmem_cache_init(void)
          */
         kmem_cache_node = (void *)kmem_cache + kmalloc_size;

-        kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-                sizeof(struct kmem_cache_node),
-                0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+        kmem_cache_node->name = "kmem_cache_node";
+        kmem_cache_node->size = kmem_cache_node->object_size =
+                sizeof(struct kmem_cache_node);
+        kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);

         hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);

@@ -3758,8 +3740,10 @@ void __init kmem_cache_init(void)
         slab_state = PARTIAL;

         temp_kmem_cache = kmem_cache;
-        kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-                0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+        kmem_cache->name = "kmem_cache";
+        kmem_cache->size = kmem_cache->object_size = kmem_size;
+        kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
         kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
         memcpy(kmem_cache, temp_kmem_cache, kmem_size);

@@ -3948,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size,
                 return NULL;
 }

-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
                 size_t align, unsigned long flags, void (*ctor)(void *))
 {
         struct kmem_cache *s;
-        char *n;

         s = find_mergeable(size, align, flags, name, ctor);
         if (s) {
@@ -3966,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,

                 if (sysfs_slab_alias(s, name)) {
                         s->refcount--;
-                        return NULL;
+                        s = NULL;
                 }
-                return s;
         }

-        n = kstrdup(name, GFP_KERNEL);
-        if (!n)
-                return NULL;
+        return s;
+}

-        s = kmalloc(kmem_size, GFP_KERNEL);
-        if (s) {
-                if (kmem_cache_open(s, n,
-                                size, align, flags, ctor)) {
-                        int r;
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+{
+        int err;
+
+        err = kmem_cache_open(s, flags);
+        if (err)
+                return err;

-                        list_add(&s->list, &slab_caches);
-                        mutex_unlock(&slab_mutex);
-                        r = sysfs_slab_add(s);
-                        mutex_lock(&slab_mutex);
+        mutex_unlock(&slab_mutex);
+        err = sysfs_slab_add(s);
+        mutex_lock(&slab_mutex);

-                        if (!r)
-                                return s;
+        if (err)
+                kmem_cache_close(s);

-                        list_del(&s->list);
-                        kmem_cache_close(s);
-                }
-                kfree(s);
-        }
-        kfree(n);
-        return NULL;
+        return err;
 }

 #ifdef CONFIG_SMP
@@ -5225,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
         return err;
 }

-static void kmem_cache_release(struct kobject *kobj)
-{
-        struct kmem_cache *s = to_slab(kobj);
-
-        kfree(s->name);
-        kfree(s);
-}
-
 static const struct sysfs_ops slab_sysfs_ops = {
         .show = slab_attr_show,
         .store = slab_attr_store,
@@ -5240,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = {

 static struct kobj_type slab_ktype = {
         .sysfs_ops = &slab_sysfs_ops,
-        .release = kmem_cache_release
 };

 static int uevent_filter(struct kset *kset, struct kobject *kobj)
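The externally visible API is unchanged by this patch: modules still call kmem_cache_create()/kmem_cache_destroy() exactly as before; only the descriptor allocation and the create/destroy bookkeeping now happen once in mm/slab_common.c. A minimal, hypothetical caller is shown below for reference; the names are illustrative and not taken from the patch:

/* Minimal, hypothetical user of the unchanged external API (illustrative only). */
#include <linux/module.h>
#include <linux/slab.h>

struct foo {
        int a;
        int b;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                       SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cachep)
                return -ENOMEM;
        return 0;
}

static void __exit foo_exit(void)
{
        /* Drops the refcount; the allocator's __kmem_cache_shutdown() runs
         * only when the refcount reaches zero and the cache is empty. */
        kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");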