Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	265
1 file changed, 110 insertions(+), 155 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -570,9 +570,9 @@ static struct arraycache_init initarray_generic =
 	{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-	.nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
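This rename pairs the static boot instance (`kmem_cache_boot`) with the `kmem_cache` pointer shared across the slab allocators; `kmem_cache_init()` below publishes it with `kmem_cache = &kmem_cache_boot;`. A minimal userspace sketch of the pattern follows — toy code with invented `toy_*` names, not the kernel's implementation — showing why the "cache of caches" must be static: descriptors for every other cache are allocated from it before any dynamic allocator exists.

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy model of the bootstrap: just enough fields to show the
	 * ordering, not the real struct kmem_cache layout. */
	struct toy_cache {
		const char *name;
		size_t size;		/* object size this cache hands out */
	};

	/* The "cache of caches" must exist before any allocator does,
	 * so the boot instance is static ... */
	static struct toy_cache toy_cache_boot = {
		.name = "kmem_cache",
		.size = sizeof(struct toy_cache),
	};
	/* ... and init publishes it through the shared pointer. */
	static struct toy_cache *toy_kmem_cache;

	/* Every later cache descriptor is allocated *from* the boot cache. */
	static struct toy_cache *toy_cache_create(const char *name, size_t size)
	{
		struct toy_cache *c = calloc(1, toy_kmem_cache->size);

		if (!c)
			return NULL;
		c->name = name;
		c->size = size;
		return c;
	}

	int main(void)
	{
		toy_kmem_cache = &toy_cache_boot; /* cf. kmem_cache = &kmem_cache_boot; */

		struct toy_cache *files = toy_cache_create("files", 128);
		if (files)
			printf("%s: objects of %zu bytes\n", files->name, files->size);
		free(files);
		return 0;
	}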
@@ -795,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
+#if DEBUG
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -805,6 +806,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE);
 }
+#endif
 
 /*
  * By default on NUMA we use alien caches to stage the freeing of
@@ -969,7 +971,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 	}
 
 	/* The caller cannot use PFMEMALLOC objects, find another one */
-	for (i = 1; i < ac->avail; i++) {
+	for (i = 0; i < ac->avail; i++) {
 		/* If a !PFMEMALLOC object is found, swap them */
 		if (!is_obj_pfmemalloc(ac->entry[i])) {
 			objp = ac->entry[i];
@@ -986,7 +988,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 		l3 = cachep->nodelists[numa_mem_id()];
 		if (!list_empty(&l3->slabs_free) && force_refill) {
 			struct slab *slabp = virt_to_slab(objp);
-			ClearPageSlabPfmemalloc(virt_to_page(slabp->s_mem));
+			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
 			clear_obj_pfmemalloc(&objp);
 			recheck_pfmemalloc_active(cachep, ac);
 			return objp;
@@ -1018,7 +1020,7 @@ static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 {
 	if (unlikely(pfmemalloc_active)) {
 		/* Some pfmemalloc slabs exist, check if this is one */
-		struct page *page = virt_to_page(objp);
+		struct page *page = virt_to_head_page(objp);
 		if (PageSlabPfmemalloc(page))
 			set_obj_pfmemalloc(&objp);
 	}
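Two fixes land in these pfmemalloc hunks: the fallback scan now starts at index 0, so the first slot of the per-cpu array is no longer skipped, and the page-flag lookups go through `virt_to_head_page()` so the `SlabPfmemalloc` mark is tested on the slab's head page even when the object sits past the first page of an order > 0 slab. The `is/set/clear_obj_pfmemalloc()` helpers these hunks call are defined earlier in slab.c; the sketch below reconstructs their likely shape as a self-contained userspace program (the macro name is invented here, not quoted from the tree): slab objects are at least word aligned, so bit 0 of the pointer is free to carry the mark.

	#include <stdbool.h>
	#include <stdio.h>

	#define OBJ_PFMEMALLOC	1UL	/* invented name for the bit-0 tag */

	static bool is_obj_pfmemalloc(void *objp)
	{
		return (unsigned long)objp & OBJ_PFMEMALLOC;
	}

	static void set_obj_pfmemalloc(void **objp)
	{
		*objp = (void *)((unsigned long)*objp | OBJ_PFMEMALLOC);
	}

	static void clear_obj_pfmemalloc(void **objp)
	{
		*objp = (void *)((unsigned long)*objp & ~OBJ_PFMEMALLOC);
	}

	int main(void)
	{
		long backing;
		void *objp = &backing;

		set_obj_pfmemalloc(&objp);
		printf("tagged: %d\n", is_obj_pfmemalloc(objp));	/* 1 */
		clear_obj_pfmemalloc(&objp);
		printf("clean: %d, restored: %d\n",
		       is_obj_pfmemalloc(objp),
		       objp == (void *)&backing);			/* 0 1 */
		return 0;
	}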
@@ -1587,15 +1589,17 @@ void __init kmem_cache_init(void)
 	int order;
 	int node;
 
+	kmem_cache = &kmem_cache_boot;
+
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
-			cache_cache.nodelists[i] = NULL;
+			kmem_cache->nodelists[i] = NULL;
 	}
-	set_up_list3s(&cache_cache, CACHE_CACHE);
+	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1607,9 +1611,9 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the struct
-	 *    kmem_cache structures of all caches, except cache_cache itself:
-	 *    cache_cache is statically allocated.
+	 * 1) initialize the kmem_cache cache: it contains the struct
+	 *    kmem_cache structures of all caches, except kmem_cache itself:
+	 *    kmem_cache is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
@@ -1618,43 +1622,43 @@ void __init kmem_cache_init(void)
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
-	 * 4) Replace the __init data head arrays for cache_cache and the first
+	 * 4) Replace the __init data head arrays for kmem_cache and the first
 	 *    kmalloc cache with kmalloc allocated arrays.
-	 * 5) Replace the __init data for kmem_list3 for cache_cache and
+	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
 	 *    the other cache's with kmalloc allocated memory.
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
 	node = numa_mem_id();
 
-	/* 1) create the cache_cache */
+	/* 1) create the kmem_cache */
 	INIT_LIST_HEAD(&slab_caches);
-	list_add(&cache_cache.list, &slab_caches);
-	cache_cache.colour_off = cache_line_size();
-	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+	list_add(&kmem_cache->list, &slab_caches);
+	kmem_cache->colour_off = cache_line_size();
+	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
-	cache_cache.object_size = cache_cache.size;
-	cache_cache.size = ALIGN(cache_cache.size,
+	kmem_cache->object_size = kmem_cache->size;
+	kmem_cache->size = ALIGN(kmem_cache->object_size,
 					cache_line_size());
-	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.size);
+	kmem_cache->reciprocal_buffer_size =
+		reciprocal_value(kmem_cache->size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.size,
-			cache_line_size(), 0, &left_over, &cache_cache.num);
-		if (cache_cache.num)
+		cache_estimate(order, kmem_cache->size,
+			cache_line_size(), 0, &left_over, &kmem_cache->num);
+		if (kmem_cache->num)
 			break;
 	}
-	BUG_ON(!cache_cache.num);
-	cache_cache.gfporder = order;
-	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+	BUG_ON(!kmem_cache->num);
+	kmem_cache->gfporder = order;
+	kmem_cache->colour = left_over / kmem_cache->colour_off;
+	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
 	/* 2+3) create the kmalloc caches */
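The `reciprocal_buffer_size` field set in this hunk caches a precomputed constant so the slab can map an object's byte offset to its object index without a hardware divide (used by `obj_to_index()`). A self-contained sketch of the trick, mirroring the old `linux/reciprocal_div.h` in simplified userspace form — the 64-bit intermediate is the important part:

	#include <stdint.h>
	#include <stdio.h>

	/* Precompute R = ceil(2^32 / d) once per cache ... */
	static uint32_t reciprocal_value(uint32_t d)
	{
		return (uint32_t)(((1ULL << 32) + (d - 1)) / d);
	}

	/* ... then each runtime "a / d" becomes a multiply and a shift. */
	static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
	{
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}

	int main(void)
	{
		uint32_t size = 192;		/* an aligned object size */
		uint32_t r = reciprocal_value(size);
		uint32_t offset = 5 * size;	/* start of object 5 in the slab */

		printf("index = %u (expect 5)\n", reciprocal_divide(offset, r));
		return 0;
	}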
@@ -1667,19 +1671,22 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 
 	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep =
-			__kmem_cache_create(names[INDEX_L3].name,
-				sizes[INDEX_L3].cs_size,
-				ARCH_KMALLOC_MINALIGN,
-				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-				NULL);
+		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}
 
 	slab_early_init = 0;
@@ -1693,20 +1700,23 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = __kmem_cache_create(names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = __kmem_cache_create(
-					names->name_dma,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-						SLAB_PANIC,
-					NULL);
+		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes->cs_dmacachep,
+			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
+		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
 		names++;
@@ -1717,15 +1727,15 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-		memcpy(ptr, cpu_cache_get(&cache_cache),
+		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
 		 * Do not assume that spinlocks can be initialized via memcpy:
 		 */
 		spin_lock_init(&ptr->lock);
 
-		cache_cache.array[smp_processor_id()] = ptr;
+		kmem_cache->array[smp_processor_id()] = ptr;
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
@@ -1746,7 +1756,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -2195,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-	    kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-	kmem_cache_free(&cache_cache, cachep);
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2352,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2367,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+int
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
+	int err;
+	size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2445,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))
@@ -2454,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
 	else
 		gfp = GFP_NOWAIT;
 
-	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, gfp);
-	if (!cachep)
-		return NULL;
-
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;
 #if DEBUG
 
 	/*
@@ -2492,8 +2471,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+	    && cachep->object_size > cache_line_size()
+	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
 #endif
@@ -2513,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 		flags |= CFLGS_OFF_SLAB;
 
-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);
 
-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
+
+	if (!cachep->num)
+		return -E2BIG;
 
-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(&cache_cache, cachep);
-		return NULL;
-	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2552,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
@@ -2574,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 
-	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
-		return NULL;
+	err = setup_cpu_cache(cachep, gfp);
+	if (err) {
+		__kmem_cache_shutdown(cachep);
+		return err;
 	}
 
 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2592,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
-	/* cache setup completed, link it into the list */
-	list_add(&cachep->list, &slab_caches);
-	return cachep;
+	return 0;
 }
 
 #if DEBUG
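With this change `__kmem_cache_create()` no longer allocates the descriptor, copies `name`/`ctor`, or links the cache into `slab_caches`; those duties move to the allocator-independent `kmem_cache_create()` in mm/slab_common.c, which is not part of this diff. A sketch of the call sequence that side presumably follows (simplified, with error handling and sanity checks omitted — not the verbatim common code):

	/* Hypothetical shape of the common-code caller this API implies. */
	struct kmem_cache *kmem_cache_create(const char *name, size_t size,
			size_t align, unsigned long flags, void (*ctor)(void *))
	{
		struct kmem_cache *s;
		int err;

		get_online_cpus();
		mutex_lock(&slab_mutex);

		/* Descriptor comes from the boot cache set up above. */
		s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
		if (s) {
			s->name = name;
			s->object_size = s->size = size;
			s->align = align;
			s->ctor = ctor;

			err = __kmem_cache_create(s, flags);	/* allocator-specific setup */
			if (!err) {
				list_add(&s->list, &slab_caches);
			} else {
				kmem_cache_free(kmem_cache, s);
				s = NULL;
			}
		}

		mutex_unlock(&slab_mutex);
		put_online_cpus();
		return s;
	}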
@@ -2753,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	BUG_ON(!cachep || in_interrupt());
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
 
-	/* Find the cache in the chain of caches. */
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	/*
-	 * the chain is never empty, cache_cache is never destroyed
-	 */
-	list_del(&cachep->list);
-	if (__cache_shrink(cachep)) {
-		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &slab_caches);
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-		return;
-	}
+	if (rc)
+		return rc;
 
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		rcu_barrier();
+	for_each_online_cpu(i)
+	    kfree(cachep->array[i]);
 
-	__kmem_cache_destroy(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
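Likewise, the locking, list manipulation, and RCU grace period that used to live in slab.c's `kmem_cache_destroy()` (removed above) now belong to the common code, which invokes `__kmem_cache_shutdown()` as one step. A sketch of that expected sequence, again simplified and hypothetical rather than the verbatim common code:

	/* Hypothetical shape of the common kmem_cache_destroy() this hook implies. */
	void kmem_cache_destroy(struct kmem_cache *s)
	{
		get_online_cpus();
		mutex_lock(&slab_mutex);

		list_del(&s->list);
		if (!__kmem_cache_shutdown(s)) {
			/* Wait out pending RCU frees before the descriptor goes away. */
			if (unlikely(s->flags & SLAB_DESTROY_BY_RCU))
				rcu_barrier();
			kmem_cache_free(kmem_cache, s);
		} else {
			/* Shrink failed: objects are still live, put the cache back. */
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
			       s->name);
		}

		mutex_unlock(&slab_mutex);
		put_online_cpus();
	}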
@@ -3246,6 +3200,7 @@ force_grow:
 
 	/* cache_grow can reenable interrupts, then ac could change. */
 	ac = cpu_cache_get(cachep);
+	node = numa_mem_id();
 
 	/* no objects in sight? abort */
 	if (!x && (ac->avail == 0 || force_refill))
@@ -3328,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (cachep == &cache_cache)
+	if (cachep == kmem_cache)
 		return false;
 
 	return should_failslab(cachep->object_size, flags, cachep->flags);