author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-06 18:53:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-06 18:53:13 -0400
commit	125b79d74a63552be757bb49a425b965782e4952 (patch)
tree	978a30e588c070914b679c50ad7ae34d0aff67bc	/mm/slab.c
parent	f1c6872e4980bc4078cfaead05f892b3d78dea64 (diff)
parent	e2087be35a8ed101c1e748ef688c889419c69829 (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "New and noteworthy:

  * More SLAB allocator unification patches from Christoph Lameter and
    others.  This paves the way for slab memcg patches that hopefully
    will land in v3.8.

  * SLAB tracing improvements from Ezequiel Garcia.

  * Kernel tainting upon SLAB corruption from Dave Jones.

  * Miscellaneous SLAB allocator bug fixes and improvements from
    various people."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (43 commits)
  slab: Fix build failure in __kmem_cache_create()
  slub: init_kmem_cache_cpus() and put_cpu_partial() can be static
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  Revert "mm/slab: Fix kmem_cache_alloc_node_trace() declaration"
  mm, slob: fix build breakage in __kmalloc_node_track_caller
  mm/slab: Fix kmem_cache_alloc_node_trace() declaration
  mm/slab: Fix typo _RET_IP -> _RET_IP_
  mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB
  mm, slab: Rename __cache_alloc() -> slab_alloc()
  mm, slab: Match SLAB and SLUB kmem_cache_alloc_xxx_trace() prototype
  mm, slab: Replace 'caller' type, void* -> unsigned long
  mm, slob: Add support for kmalloc_track_caller()
  mm, slab: Remove silly function slab_buffer_size()
  mm, slob: Use NUMA_NO_NODE instead of -1
  mm, sl[au]b: Taint kernel when we detect a corrupted slab
  slab: Only define slab_error for DEBUG
  slab: fix the DEADLOCK issue on l3 alien lock
  slub: Zero initial memory segment for kmem_cache and kmem_cache_node
  Revert "mm/sl[aou]b: Move sysfs_slab_add to common"
  mm/sl[aou]b: Move kmem_cache refcounting to common code
  ...
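The core interface change visible in the mm/slab.c hunks below is the new __kmem_cache_create() contract: rather than allocating a cache descriptor and returning a pointer, it now finishes the setup of a struct kmem_cache that its caller has already allocated and pre-populated, and returns 0 or a negative errno. The following is only a rough sketch of that before/after shape; example_create() is a hypothetical helper modelled on the bootstrap code in kmem_cache_init() below (it leans on the file-internal kmem_cache and slab_caches symbols) and is not an API introduced by this patch set:

    /* Before this series: the SLAB-specific entry point allocated the descriptor. */
    struct kmem_cache *
    __kmem_cache_create(const char *name, size_t size, size_t align,
                        unsigned long flags, void (*ctor)(void *));

    /* After this series: the descriptor arrives pre-populated. */
    int __kmem_cache_create(struct kmem_cache *cachep, unsigned long flags);

    /* Illustrative (hypothetical) caller, mirroring kmem_cache_init(): */
    static struct kmem_cache *example_create(const char *name, size_t size)
    {
            struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

            if (!s)
                    return NULL;
            s->name = name;
            s->size = s->object_size = size;
            s->align = ARCH_KMALLOC_MINALIGN;
            if (__kmem_cache_create(s, 0)) {        /* 0 or negative errno */
                    kmem_cache_free(kmem_cache, s);
                    return NULL;
            }
            list_add(&s->list, &slab_caches);
            return s;
    }

Error handling inside __kmem_cache_create() changes accordingly: it now returns -E2BIG when no objects fit, or the error from setup_cpu_cache(), instead of printing a message and returning NULL.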
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	348
1 file changed, 143 insertions(+), 205 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 11339110271e..33d3363658df 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_TRACING
-size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return cachep->size;
-}
-EXPORT_SYMBOL(slab_buffer_size);
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -515,13 +507,6 @@ EXPORT_SYMBOL(slab_buffer_size);
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
-	page = compound_head(page);
-	BUG_ON(!PageSlab(page));
-	return page->slab_cache;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
@@ -585,9 +570,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-	.nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -810,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
+#if DEBUG
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -818,7 +804,9 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 	       function, cachep->name, msg);
 	dump_stack();
+	add_taint(TAINT_BAD_PAGE);
 }
+#endif
 
 /*
  * By default on NUMA we use alien caches to stage the freeing of
@@ -1601,15 +1589,17 @@ void __init kmem_cache_init(void)
 	int order;
 	int node;
 
+	kmem_cache = &kmem_cache_boot;
+
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
-			cache_cache.nodelists[i] = NULL;
+			kmem_cache->nodelists[i] = NULL;
 	}
-	set_up_list3s(&cache_cache, CACHE_CACHE);
+	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1621,9 +1611,9 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the struct
-	 *    kmem_cache structures of all caches, except cache_cache itself:
-	 *    cache_cache is statically allocated.
+	 * 1) initialize the kmem_cache cache: it contains the struct
+	 *    kmem_cache structures of all caches, except kmem_cache itself:
+	 *    kmem_cache is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
@@ -1632,43 +1622,43 @@ void __init kmem_cache_init(void)
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
-	 * 4) Replace the __init data head arrays for cache_cache and the first
+	 * 4) Replace the __init data head arrays for kmem_cache and the first
 	 *    kmalloc cache with kmalloc allocated arrays.
-	 * 5) Replace the __init data for kmem_list3 for cache_cache and
+	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
 	 *    the other cache's with kmalloc allocated memory.
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
 	node = numa_mem_id();
 
-	/* 1) create the cache_cache */
+	/* 1) create the kmem_cache */
 	INIT_LIST_HEAD(&slab_caches);
-	list_add(&cache_cache.list, &slab_caches);
-	cache_cache.colour_off = cache_line_size();
-	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+	list_add(&kmem_cache->list, &slab_caches);
+	kmem_cache->colour_off = cache_line_size();
+	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
-	cache_cache.object_size = cache_cache.size;
-	cache_cache.size = ALIGN(cache_cache.size,
+	kmem_cache->object_size = kmem_cache->size;
+	kmem_cache->size = ALIGN(kmem_cache->object_size,
 					cache_line_size());
-	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.size);
+	kmem_cache->reciprocal_buffer_size =
+		reciprocal_value(kmem_cache->size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.size,
-			cache_line_size(), 0, &left_over, &cache_cache.num);
-		if (cache_cache.num)
+		cache_estimate(order, kmem_cache->size,
+			cache_line_size(), 0, &left_over, &kmem_cache->num);
+		if (kmem_cache->num)
 			break;
 	}
-	BUG_ON(!cache_cache.num);
-	cache_cache.gfporder = order;
-	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+	BUG_ON(!kmem_cache->num);
+	kmem_cache->gfporder = order;
+	kmem_cache->colour = left_over / kmem_cache->colour_off;
+	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
 	/* 2+3) create the kmalloc caches */
@@ -1681,19 +1671,22 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 
 	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep =
-			__kmem_cache_create(names[INDEX_L3].name,
-				sizes[INDEX_L3].cs_size,
-				ARCH_KMALLOC_MINALIGN,
-				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-				NULL);
+		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}
 
 	slab_early_init = 0;
@@ -1707,20 +1700,23 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = __kmem_cache_create(names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = __kmem_cache_create(
-					names->name_dma,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-						SLAB_PANIC,
-					NULL);
+		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes->cs_dmacachep,
+			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
+		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
 		names++;
@@ -1731,15 +1727,15 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-		memcpy(ptr, cpu_cache_get(&cache_cache),
+		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
 		 * Do not assume that spinlocks can be initialized via memcpy:
 		 */
 		spin_lock_init(&ptr->lock);
 
-		cache_cache.array[smp_processor_id()] = ptr;
+		kmem_cache->array[smp_processor_id()] = ptr;
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
@@ -1760,7 +1756,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -1781,9 +1777,6 @@ void __init kmem_cache_init_late(void)
 
 	slab_state = UP;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(cachep, &slab_caches, list)
@@ -1791,6 +1784,9 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&slab_mutex);
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* Done! */
 	slab_state = FULL;
 
@@ -2209,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-	    kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-	kmem_cache_free(&cache_cache, cachep);
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2366,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2381,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+int
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
+	int err;
+	size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2459,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))
@@ -2468,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
 	else
 		gfp = GFP_NOWAIT;
 
-	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, gfp);
-	if (!cachep)
-		return NULL;
-
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;
 #if DEBUG
 
 	/*
@@ -2506,8 +2471,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+	    && cachep->object_size > cache_line_size()
+	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
 #endif
@@ -2527,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 		flags |= CFLGS_OFF_SLAB;
 
-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);
 
-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
+
+	if (!cachep->num)
+		return -E2BIG;
 
-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(&cache_cache, cachep);
-		return NULL;
-	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2566,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
@@ -2588,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 
-	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
-		return NULL;
+	err = setup_cpu_cache(cachep, gfp);
+	if (err) {
+		__kmem_cache_shutdown(cachep);
+		return err;
 	}
 
 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2606,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
-	/* cache setup completed, link it into the list */
-	list_add(&cachep->list, &slab_caches);
-	return cachep;
+	return 0;
 }
 
 #if DEBUG
@@ -2767,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	BUG_ON(!cachep || in_interrupt());
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
 
-	/* Find the cache in the chain of caches. */
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	/*
-	 * the chain is never empty, cache_cache is never destroyed
-	 */
-	list_del(&cachep->list);
-	if (__cache_shrink(cachep)) {
-		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &slab_caches);
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-		return;
-	}
+	if (rc)
+		return rc;
 
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		rcu_barrier();
+	for_each_online_cpu(i)
+	    kfree(cachep->array[i]);
 
-	__kmem_cache_destroy(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
@@ -3098,7 +3038,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 }
 
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-				   void *caller)
+				   unsigned long caller)
 {
 	struct page *page;
 	unsigned int objnr;
@@ -3118,7 +3058,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	objnr = obj_to_index(cachep, slabp, objp);
 
@@ -3131,7 +3071,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, (unsigned long)caller);
+			store_stackinfo(cachep, objp, caller);
 			kernel_map_pages(virt_to_page(objp),
 					 cachep->size / PAGE_SIZE, 0);
 		} else {
@@ -3285,7 +3225,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-				gfp_t flags, void *objp, void *caller)
+				gfp_t flags, void *objp, unsigned long caller)
 {
 	if (!objp)
 		return objp;
@@ -3302,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3343,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (cachep == &cache_cache)
+	if (cachep == kmem_cache)
 		return false;
 
 	return should_failslab(cachep->object_size, flags, cachep->flags);
@@ -3576,8 +3516,8 @@ done:
  * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
 static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		   void *caller)
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+		   unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -3663,7 +3603,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -3799,7 +3739,7 @@ free_done:
  * be in this state _before_ it is released. Called with disabled ints.
  */
 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-				void *caller)
+				unsigned long caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
@@ -3839,7 +3779,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
@@ -3850,14 +3790,14 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
 void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	void *ret;
 
-	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	ret = slab_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+		      size, cachep->size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -3866,8 +3806,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *ret = __cache_alloc_node(cachep, flags, nodeid,
-				       __builtin_return_address(0));
+	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
@@ -3878,17 +3817,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(size_t size,
-				  struct kmem_cache *cachep,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 				  gfp_t flags,
-				  int nodeid)
+				  int nodeid,
+				  size_t size)
 {
 	void *ret;
 
-	ret = __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+
 	trace_kmalloc_node(_RET_IP_, ret,
-			   size, slab_buffer_size(cachep),
+			   size, cachep->size,
 			   flags, nodeid);
 	return ret;
 }
@@ -3896,34 +3835,33 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node,
-			__builtin_return_address(0));
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node, 0);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3936,7 +3874,7 @@ EXPORT_SYMBOL(__kmalloc_node);
  * @caller: function caller for debug tracking of the caller
  */
 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-					  void *caller)
+					  unsigned long caller)
 {
 	struct kmem_cache *cachep;
 	void *ret;
@@ -3949,9 +3887,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = __cache_alloc(cachep, flags, caller);
+	ret = slab_alloc(cachep, flags, caller);
 
-	trace_kmalloc((unsigned long) caller, ret,
+	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
 	return ret;
@@ -3961,20 +3899,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, __builtin_return_address(0));
+	return __do_kmalloc(size, flags, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags, 0);
 }
 EXPORT_SYMBOL(__kmalloc);
 #endif
@@ -3995,7 +3933,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, __builtin_return_address(0));
+	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -4026,7 +3964,7 @@ void kfree(const void *objp)
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
-	__cache_free(c, (void *)objp, __builtin_return_address(0));
+	__cache_free(c, (void *)objp, _RET_IP_);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);