author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-06 18:53:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-06 18:53:13 -0400
commit     125b79d74a63552be757bb49a425b965782e4952
tree       978a30e588c070914b679c50ad7ae34d0aff67bc
parent     f1c6872e4980bc4078cfaead05f892b3d78dea64
parent     e2087be35a8ed101c1e748ef688c889419c69829
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
"New and noteworthy:
* More SLAB allocator unification patches from Christoph Lameter and
others. This paves the way for slab memcg patches that hopefully
will land in v3.8.
* SLAB tracing improvements from Ezequiel Garcia.
* Kernel tainting upon SLAB corruption from Dave Jones.
  * Miscellaneous SLAB allocator bug fixes and improvements from various
people."
* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (43 commits)
slab: Fix build failure in __kmem_cache_create()
slub: init_kmem_cache_cpus() and put_cpu_partial() can be static
mm/slab: Fix kmem_cache_alloc_node_trace() declaration
Revert "mm/slab: Fix kmem_cache_alloc_node_trace() declaration"
mm, slob: fix build breakage in __kmalloc_node_track_caller
mm/slab: Fix kmem_cache_alloc_node_trace() declaration
mm/slab: Fix typo _RET_IP -> _RET_IP_
mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB
mm, slab: Rename __cache_alloc() -> slab_alloc()
mm, slab: Match SLAB and SLUB kmem_cache_alloc_xxx_trace() prototype
mm, slab: Replace 'caller' type, void* -> unsigned long
mm, slob: Add support for kmalloc_track_caller()
mm, slab: Remove silly function slab_buffer_size()
mm, slob: Use NUMA_NO_NODE instead of -1
mm, sl[au]b: Taint kernel when we detect a corrupted slab
slab: Only define slab_error for DEBUG
slab: fix the DEADLOCK issue on l3 alien lock
slub: Zero initial memory segment for kmem_cache and kmem_cache_node
Revert "mm/sl[aou]b: Move sysfs_slab_add to common"
mm/sl[aou]b: Move kmem_cache refcounting to common code
...
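The unification commits above move the boilerplate of kmem_cache_create() into common code (mm/slab_common.c): the common layer now performs the sanity checks, allocates and fills the struct kmem_cache, duplicates the name, takes the initial refcount and links the cache into slab_caches, while each allocator's __kmem_cache_create() only finishes allocator-specific setup and returns 0 or a negative errno. The following is a minimal userspace sketch of that two-phase pattern, not kernel code; common_cache_create() and allocator_init() are hypothetical stand-ins for kmem_cache_create() and __kmem_cache_create().

    /* Illustrative sketch of the two-phase create split introduced by this
     * series: common code allocates and owns the descriptor, the
     * allocator-specific step only initializes it and reports an error. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cache_desc {
    	const char *name;
    	size_t object_size;
    	size_t align;
    	int refcount;
    };

    /* Stands in for an allocator's __kmem_cache_create(): no allocation here. */
    static int allocator_init(struct cache_desc *c)
    {
    	if (c->object_size == 0)
    		return -EINVAL;		/* analogous to returning -E2BIG etc. */
    	if (c->align == 0)
    		c->align = sizeof(void *);
    	return 0;
    }

    /* Stands in for the common kmem_cache_create() in mm/slab_common.c. */
    static struct cache_desc *common_cache_create(const char *name, size_t size)
    {
    	struct cache_desc *c = calloc(1, sizeof(*c));
    	int err;

    	if (!c)
    		return NULL;
    	c->name = strdup(name);		/* common code now duplicates the name */
    	c->object_size = size;
    	err = allocator_init(c);
    	if (err) {
    		free((void *)c->name);
    		free(c);
    		return NULL;
    	}
    	c->refcount = 1;		/* common code also owns the refcount */
    	return c;
    }

    int main(void)
    {
    	struct cache_desc *c = common_cache_create("demo_cache", 64);

    	if (!c)
    		return 1;
    	printf("%s: size=%zu align=%zu\n", c->name, c->object_size, c->align);
    	free((void *)c->name);
    	free(c);
    	return 0;
    }

In the kernel, the same split is what lets the SLAB version of __kmem_cache_create() shrink to allocator-specific work only, as the mm/slab.c, mm/slab.h and mm/slab_common.c hunks below show.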
-rw-r--r--  include/linux/slab.h      |   6
-rw-r--r--  include/linux/slab_def.h  |  27
-rw-r--r--  include/linux/slob_def.h  |   6
-rw-r--r--  mm/slab.c                 | 348
-rw-r--r--  mm/slab.h                 |  19
-rw-r--r--  mm/slab_common.c          | 159
-rw-r--r--  mm/slob.c                 |  91
-rw-r--r--  mm/slub.c                 | 208
-rw-r--r--  mm/util.c                 |  35
9 files changed, 463 insertions(+), 436 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0dd2dfa7beca..83d1a1454b7e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
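For context on the hunk above: kmalloc_track_caller() and kmalloc_node_track_caller() expand to __kmalloc_track_caller(size, flags, _RET_IP_) and its node variant, and this series makes those entry points available for SLOB when CONFIG_TRACING is set ("mm, slob: Add support for kmalloc_track_caller()"). _RET_IP_ is the kernel's shorthand for (unsigned long)__builtin_return_address(0), so an allocation made inside a wrapper gets attributed to the wrapper's caller. Below is a minimal userspace sketch of the same idea for GCC or Clang; alloc_with_caller(), alloc_track_caller() and dup_string() are illustrative names, not kernel APIs.

    /* Sketch of caller tracking: the macro captures the return address at
     * its expansion site, so allocations made by a wrapper are charged to
     * whoever called the wrapper (assuming the wrapper is not inlined). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *alloc_with_caller(size_t size, unsigned long caller)
    {
    	/* The kernel would record 'caller' in the slab debug area or pass
    	 * it to the kmalloc tracepoint; here we just print it. */
    	printf("allocating %zu bytes for caller %#lx\n", size, caller);
    	return malloc(size);
    }

    /* Analogous to kmalloc_track_caller(size, flags). */
    #define alloc_track_caller(size) \
    	alloc_with_caller((size), (unsigned long)__builtin_return_address(0))

    /* A wrapper in the style of kstrdup(): its allocations are attributed
     * to the function that called dup_string(), not to dup_string() itself. */
    static char *dup_string(const char *s)
    {
    	char *p = alloc_track_caller(strlen(s) + 1);

    	if (p)
    		strcpy(p, s);
    	return p;
    }

    int main(void)
    {
    	char *copy = dup_string("slab");

    	puts(copy);
    	free(copy);
    	return 0;
    }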
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 0c634fa376c9..cc290f0bdb34 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -45,7 +45,6 @@ struct kmem_cache {
 	unsigned int colour_off;	/* colour offset */
 	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
 	void (*ctor)(void *obj);
@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size,
-				    struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
 #else
 static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return 0;
-}
 #endif
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
@@ -154,7 +147,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_trace(size, cachep, flags);
+	ret = kmem_cache_alloc_trace(cachep, flags, size);
 
 	return ret;
 }
@@ -166,16 +159,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(size_t size,
-					 struct kmem_cache *cachep,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 					 gfp_t flags,
-					 int nodeid);
+					 int nodeid,
+					 size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_trace(size_t size,
-			    struct kmem_cache *cachep,
+kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 			    gfp_t flags,
-			    int nodeid)
+			    int nodeid,
+			    size_t size)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -207,7 +200,7 @@ found:
 #endif
 		cachep = malloc_sizes[i].cs_cachep;
 
-		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d006..f28e14a12e3f 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,12 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#include <linux/numa.h>
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
 					      gfp_t flags)
 {
-	return kmem_cache_alloc_node(cachep, flags, -1);
+	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	return __kmalloc_node(size, flags, -1);
+	return __kmalloc_node(size, flags, NUMA_NO_NODE);
 }
 
 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_TRACING
-size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return cachep->size;
-}
-EXPORT_SYMBOL(slab_buffer_size);
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -515,13 +507,6 @@ EXPORT_SYMBOL(slab_buffer_size);
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
-	page = compound_head(page);
-	BUG_ON(!PageSlab(page));
-	return page->slab_cache;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
@@ -585,9 +570,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-	.nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -810,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
+#if DEBUG
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -818,7 +804,9 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 	       function, cachep->name, msg);
 	dump_stack();
+	add_taint(TAINT_BAD_PAGE);
 }
+#endif
 
 /*
  * By default on NUMA we use alien caches to stage the freeing of
@@ -1601,15 +1589,17 @@ void __init kmem_cache_init(void)
 	int order;
 	int node;
 
+	kmem_cache = &kmem_cache_boot;
+
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
-			cache_cache.nodelists[i] = NULL;
+			kmem_cache->nodelists[i] = NULL;
 	}
-	set_up_list3s(&cache_cache, CACHE_CACHE);
+	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1621,9 +1611,9 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the struct
-	 *    kmem_cache structures of all caches, except cache_cache itself:
-	 *    cache_cache is statically allocated.
+	 * 1) initialize the kmem_cache cache: it contains the struct
+	 *    kmem_cache structures of all caches, except kmem_cache itself:
+	 *    kmem_cache is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
@@ -1632,43 +1622,43 @@ void __init kmem_cache_init(void)
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
-	 * 4) Replace the __init data head arrays for cache_cache and the first
+	 * 4) Replace the __init data head arrays for kmem_cache and the first
 	 *    kmalloc cache with kmalloc allocated arrays.
-	 * 5) Replace the __init data for kmem_list3 for cache_cache and
+	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
 	 *    the other cache's with kmalloc allocated memory.
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
 	node = numa_mem_id();
 
-	/* 1) create the cache_cache */
+	/* 1) create the kmem_cache */
 	INIT_LIST_HEAD(&slab_caches);
-	list_add(&cache_cache.list, &slab_caches);
-	cache_cache.colour_off = cache_line_size();
-	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+	list_add(&kmem_cache->list, &slab_caches);
+	kmem_cache->colour_off = cache_line_size();
+	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
-	cache_cache.object_size = cache_cache.size;
-	cache_cache.size = ALIGN(cache_cache.size,
+	kmem_cache->object_size = kmem_cache->size;
+	kmem_cache->size = ALIGN(kmem_cache->object_size,
 					cache_line_size());
-	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.size);
+	kmem_cache->reciprocal_buffer_size =
+		reciprocal_value(kmem_cache->size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.size,
-			cache_line_size(), 0, &left_over, &cache_cache.num);
-		if (cache_cache.num)
+		cache_estimate(order, kmem_cache->size,
+			cache_line_size(), 0, &left_over, &kmem_cache->num);
+		if (kmem_cache->num)
 			break;
 	}
-	BUG_ON(!cache_cache.num);
-	cache_cache.gfporder = order;
-	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+	BUG_ON(!kmem_cache->num);
+	kmem_cache->gfporder = order;
+	kmem_cache->colour = left_over / kmem_cache->colour_off;
+	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
 	/* 2+3) create the kmalloc caches */
@@ -1681,19 +1671,22 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 
 	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep =
-			__kmem_cache_create(names[INDEX_L3].name,
-				sizes[INDEX_L3].cs_size,
-				ARCH_KMALLOC_MINALIGN,
-				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-				NULL);
+		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}
 
 	slab_early_init = 0;
@@ -1707,20 +1700,23 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = __kmem_cache_create(names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = __kmem_cache_create(
-			names->name_dma,
-			sizes->cs_size,
-			ARCH_KMALLOC_MINALIGN,
-			ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-				SLAB_PANIC,
-			NULL);
+		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes->cs_dmacachep,
+			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
+		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
 		names++;
@@ -1731,15 +1727,15 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-		memcpy(ptr, cpu_cache_get(&cache_cache),
+		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
 		 * Do not assume that spinlocks can be initialized via memcpy:
 		 */
 		spin_lock_init(&ptr->lock);
 
-		cache_cache.array[smp_processor_id()] = ptr;
+		kmem_cache->array[smp_processor_id()] = ptr;
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
@@ -1760,7 +1756,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -1781,9 +1777,6 @@ void __init kmem_cache_init_late(void)
 
 	slab_state = UP;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(cachep, &slab_caches, list)
@@ -1791,6 +1784,9 @@ void __init kmem_cache_init_late(void)
 		BUG();
 	mutex_unlock(&slab_mutex);
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* Done! */
 	slab_state = FULL;
 
@@ -2209,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-	    kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-	kmem_cache_free(&cache_cache, cachep);
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2366,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2381,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+int
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
+	int err;
+	size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2459,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))
@@ -2468,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
 	else
 		gfp = GFP_NOWAIT;
 
-	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, gfp);
-	if (!cachep)
-		return NULL;
-
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;
 #if DEBUG
 
 	/*
@@ -2506,8 +2471,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+	    && cachep->object_size > cache_line_size()
+	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
 #endif
@@ -2527,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 		flags |= CFLGS_OFF_SLAB;
 
-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);
 
-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
+
+	if (!cachep->num)
+		return -E2BIG;
 
-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(&cache_cache, cachep);
-		return NULL;
-	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2566,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
@@ -2588,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 
-	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
-		return NULL;
+	err = setup_cpu_cache(cachep, gfp);
+	if (err) {
+		__kmem_cache_shutdown(cachep);
+		return err;
 	}
 
 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2606,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
-	/* cache setup completed, link it into the list */
-	list_add(&cachep->list, &slab_caches);
-	return cachep;
+	return 0;
 }
 
 #if DEBUG
@@ -2767,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	BUG_ON(!cachep || in_interrupt());
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
 
-	/* Find the cache in the chain of caches. */
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	/*
-	 * the chain is never empty, cache_cache is never destroyed
-	 */
-	list_del(&cachep->list);
-	if (__cache_shrink(cachep)) {
-		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &slab_caches);
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-		return;
-	}
+	if (rc)
+		return rc;
 
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		rcu_barrier();
+	for_each_online_cpu(i)
+	    kfree(cachep->array[i]);
 
-	__kmem_cache_destroy(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
@@ -3098,7 +3038,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 }
 
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-				   void *caller)
+				   unsigned long caller)
 {
 	struct page *page;
 	unsigned int objnr;
@@ -3118,7 +3058,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	objnr = obj_to_index(cachep, slabp, objp);
 
@@ -3131,7 +3071,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, (unsigned long)caller);
+			store_stackinfo(cachep, objp, caller);
 			kernel_map_pages(virt_to_page(objp),
 					 cachep->size / PAGE_SIZE, 0);
 		} else {
@@ -3285,7 +3225,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-				gfp_t flags, void *objp, void *caller)
+				gfp_t flags, void *objp, unsigned long caller)
 {
 	if (!objp)
 		return objp;
@@ -3302,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3343,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (cachep == &cache_cache)
+	if (cachep == kmem_cache)
 		return false;
 
 	return should_failslab(cachep->object_size, flags, cachep->flags);
@@ -3576,8 +3516,8 @@ done:
  * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
 static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		   void *caller)
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+		   unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -3663,7 +3603,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -3799,7 +3739,7 @@ free_done:
  * be in this state _before_ it is released.  Called with disabled ints.
  */
 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-				void *caller)
+				unsigned long caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
@@ -3839,7 +3779,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
@@ -3850,14 +3790,14 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
 void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	void *ret;
 
-	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	ret = slab_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+		      size, cachep->size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -3866,8 +3806,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *ret = __cache_alloc_node(cachep, flags, nodeid,
-				       __builtin_return_address(0));
+	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
@@ -3878,17 +3817,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(size_t size,
-				  struct kmem_cache *cachep,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 				  gfp_t flags,
-				  int nodeid)
+				  int nodeid,
+				  size_t size)
 {
 	void *ret;
 
-	ret = __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+
 	trace_kmalloc_node(_RET_IP_, ret,
-			   size, slab_buffer_size(cachep),
+			   size, cachep->size,
 			   flags, nodeid);
 	return ret;
 }
@@ -3896,34 +3835,33 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node,
-			__builtin_return_address(0));
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node, 0);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3936,7 +3874,7 @@ EXPORT_SYMBOL(__kmalloc_node);
  * @caller: function caller for debug tracking of the caller
  */
 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-					  void *caller)
+					  unsigned long caller)
 {
 	struct kmem_cache *cachep;
 	void *ret;
@@ -3949,9 +3887,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = __cache_alloc(cachep, flags, caller);
+	ret = slab_alloc(cachep, flags, caller);
 
-	trace_kmalloc((unsigned long) caller, ret,
+	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
 	return ret;
@@ -3961,20 +3899,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, __builtin_return_address(0));
+	return __do_kmalloc(size, flags, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags, 0);
 }
 EXPORT_SYMBOL(__kmalloc);
 #endif
@@ -3995,7 +3933,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, __builtin_return_address(0));
+	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -4026,7 +3964,7 @@ void kfree(const void *objp)
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
-	__cache_free(c, (void *)objp, __builtin_return_address(0));
+	__cache_free(c, (void *)objp, _RET_IP_);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,9 +25,26 @@ extern enum slab_state slab_state;
 
 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
+
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
+
+/* Functions provided by the slab allocators */
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+
+int __kmem_cache_shutdown(struct kmem_cache *);
 
 #endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index aa3ca5bb01b5..9c217255ac49 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -22,6 +22,53 @@
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;
+
+#ifdef CONFIG_DEBUG_VM
+static int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	struct kmem_cache *s = NULL;
+
+	if (!name || in_interrupt() || size < sizeof(void *) ||
+		size > KMALLOC_MAX_SIZE) {
+		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(s, &slab_caches, list) {
+		char tmp;
+		int res;
+
+		/*
+		 * This happens when the module gets unloaded and doesn't
+		 * destroy its slab cache and no-one else reuses the vmalloc
+		 * area of the module.  Print a warning.
+		 */
+		res = probe_kernel_address(s->name, tmp);
+		if (res) {
+			pr_err("Slab cache with size %d has lost its name\n",
+			       s->object_size);
+			continue;
+		}
+
+		if (!strcmp(s->name, name)) {
+			pr_err("%s (%s): Cache name already exists.\n",
+			       __func__, name);
+			dump_stack();
+			s = NULL;
+			return -EINVAL;
+		}
+	}
+
+	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
+	return 0;
+}
+#else
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	return 0;
+}
+#endif
 
 /*
  * kmem_cache_create - Create a cache.
@@ -52,68 +99,92 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s = NULL;
-
-#ifdef CONFIG_DEBUG_VM
-	if (!name || in_interrupt() || size < sizeof(void *) ||
-		size > KMALLOC_MAX_SIZE) {
-		printk(KERN_ERR "kmem_cache_create(%s) integrity check"
-			" failed\n", name);
-		goto out;
-	}
-#endif
+	int err = 0;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-#ifdef CONFIG_DEBUG_VM
-	list_for_each_entry(s, &slab_caches, list) {
-		char tmp;
-		int res;
+	if (!kmem_cache_sanity_check(name, size) == 0)
+		goto out_locked;
 
-		/*
-		 * This happens when the module gets unloaded and doesn't
-		 * destroy its slab cache and no-one else reuses the vmalloc
-		 * area of the module.  Print a warning.
-		 */
-		res = probe_kernel_address(s->name, tmp);
-		if (res) {
-			printk(KERN_ERR
-			       "Slab cache with size %d has lost its name\n",
-			       s->object_size);
-			continue;
-		}
 
-		if (!strcmp(s->name, name)) {
-			printk(KERN_ERR "kmem_cache_create(%s): Cache name"
-				" already exists.\n",
-				name);
-			dump_stack();
-			s = NULL;
-			goto oops;
+	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	if (s)
+		goto out_locked;
+
+	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+	if (s) {
+		s->object_size = s->size = size;
+		s->align = align;
+		s->ctor = ctor;
+		s->name = kstrdup(name, GFP_KERNEL);
+		if (!s->name) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
 		}
-	}
 
-	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
-#endif
+		err = __kmem_cache_create(s, flags);
+		if (!err) {
 
-	s = __kmem_cache_create(name, size, align, flags, ctor);
+			s->refcount = 1;
+			list_add(&s->list, &slab_caches);
 
-#ifdef CONFIG_DEBUG_VM
-oops:
-#endif
+		} else {
+			kfree(s->name);
+			kmem_cache_free(kmem_cache, s);
+		}
+	} else
+		err = -ENOMEM;
+
+out_locked:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-#ifdef CONFIG_DEBUG_VM
-out:
-#endif
-	if (!s && (flags & SLAB_PANIC))
-		panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+	if (err) {
+
+		if (flags & SLAB_PANIC)
+			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
+				name, err);
+		else {
+			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
+				name, err);
+			dump_stack();
+		}
+
+		return NULL;
+	}
 
 	return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
| 162 | void kmem_cache_destroy(struct kmem_cache *s) | ||
| 163 | { | ||
| 164 | get_online_cpus(); | ||
| 165 | mutex_lock(&slab_mutex); | ||
| 166 | s->refcount--; | ||
| 167 | if (!s->refcount) { | ||
| 168 | list_del(&s->list); | ||
| 169 | |||
| 170 | if (!__kmem_cache_shutdown(s)) { | ||
| 171 | if (s->flags & SLAB_DESTROY_BY_RCU) | ||
| 172 | rcu_barrier(); | ||
| 173 | |||
| 174 | kfree(s->name); | ||
| 175 | kmem_cache_free(kmem_cache, s); | ||
| 176 | } else { | ||
| 177 | list_add(&s->list, &slab_caches); | ||
| 178 | printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n", | ||
| 179 | s->name); | ||
| 180 | dump_stack(); | ||
| 181 | } | ||
| 182 | } | ||
| 183 | mutex_unlock(&slab_mutex); | ||
| 184 | put_online_cpus(); | ||
| 185 | } | ||
| 186 | EXPORT_SYMBOL(kmem_cache_destroy); | ||
| 187 | |||
| 117 | int slab_is_available(void) | 188 | int slab_is_available(void) |
| 118 | { | 189 | { |
| 119 | return slab_state >= UP; | 190 | return slab_state >= UP; |
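
Creation and destruction are now shared by all three allocators: kmem_cache_create() returns a ready cache (possibly an alias) or NULL, and kmem_cache_destroy() only releases the structure once the refcount hits zero and __kmem_cache_shutdown() reports no remaining objects. A minimal usage sketch from a module's point of view, assuming made-up names (widget, widget_cache):

#include <linux/module.h>
#include <linux/slab.h>

struct widget {
        int id;
        char name[32];
};

static struct kmem_cache *widget_cachep;

static struct widget *widget_alloc(void)
{
        return kmem_cache_alloc(widget_cachep, GFP_KERNEL);
}

static void widget_free(struct widget *w)
{
        kmem_cache_free(widget_cachep, w);
}

static int __init widget_init(void)
{
        widget_cachep = kmem_cache_create("widget_cache",
                                          sizeof(struct widget), 0, 0, NULL);
        return widget_cachep ? 0 : -ENOMEM;
}

static void __exit widget_exit(void)
{
        /*
         * Every object must have gone back through widget_free() first;
         * otherwise __kmem_cache_shutdown() fails and the common code
         * keeps the cache on slab_caches and logs
         * "kmem_cache_destroy ...: Slab cache still has objects".
         */
        kmem_cache_destroy(widget_cachep);
}

module_init(widget_init);
module_exit(widget_exit);
MODULE_LICENSE("GPL");
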
diff --git a/mm/slob.c b/mm/slob.c --- a/mm/slob.c +++ b/mm/slob.c | |||
| @@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) | |||
| 194 | void *page; | 194 | void *page; |
| 195 | 195 | ||
| 196 | #ifdef CONFIG_NUMA | 196 | #ifdef CONFIG_NUMA |
| 197 | if (node != -1) | 197 | if (node != NUMA_NO_NODE) |
| 198 | page = alloc_pages_exact_node(node, gfp, order); | 198 | page = alloc_pages_exact_node(node, gfp, order); |
| 199 | else | 199 | else |
| 200 | #endif | 200 | #endif |
| @@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) | |||
| 290 | * If there's a node specification, search for a partial | 290 | * If there's a node specification, search for a partial |
| 291 | * page with a matching node id in the freelist. | 291 | * page with a matching node id in the freelist. |
| 292 | */ | 292 | */ |
| 293 | if (node != -1 && page_to_nid(sp) != node) | 293 | if (node != NUMA_NO_NODE && page_to_nid(sp) != node) |
| 294 | continue; | 294 | continue; |
| 295 | #endif | 295 | #endif |
| 296 | /* Enough room on this page? */ | 296 | /* Enough room on this page? */ |
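
The two SLOB hunks above replace the bare -1 node value with NUMA_NO_NODE, the same sentinel callers of the node-aware APIs already use. A small caller-side sketch, assuming CONFIG_SLOB and an illustrative helper name:

#include <linux/slab.h>
#include <linux/numa.h>

/*
 * Sketch: NUMA_NO_NODE (not a bare -1) is the "no preference" value.
 * Under CONFIG_SLOB both calls land in slob_alloc(); the first skips the
 * page_to_nid() filter entirely, the second prefers pages from node 0.
 */
static void numa_hint_demo(void)
{
        void *any  = kmalloc_node(512, GFP_KERNEL, NUMA_NO_NODE);
        void *near = kmalloc_node(512, GFP_KERNEL, 0);

        kfree(near);
        kfree(any);
}
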
| @@ -425,7 +425,8 @@ out: | |||
| 425 | * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. | 425 | * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. |
| 426 | */ | 426 | */ |
| 427 | 427 | ||
| 428 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | 428 | static __always_inline void * |
| 429 | __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) | ||
| 429 | { | 430 | { |
| 430 | unsigned int *m; | 431 | unsigned int *m; |
| 431 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | 432 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); |
| @@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
| 446 | *m = size; | 447 | *m = size; |
| 447 | ret = (void *)m + align; | 448 | ret = (void *)m + align; |
| 448 | 449 | ||
| 449 | trace_kmalloc_node(_RET_IP_, ret, | 450 | trace_kmalloc_node(caller, ret, |
| 450 | size, size + align, gfp, node); | 451 | size, size + align, gfp, node); |
| 451 | } else { | 452 | } else { |
| 452 | unsigned int order = get_order(size); | 453 | unsigned int order = get_order(size); |
| @@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
| 460 | page->private = size; | 461 | page->private = size; |
| 461 | } | 462 | } |
| 462 | 463 | ||
| 463 | trace_kmalloc_node(_RET_IP_, ret, | 464 | trace_kmalloc_node(caller, ret, |
| 464 | size, PAGE_SIZE << order, gfp, node); | 465 | size, PAGE_SIZE << order, gfp, node); |
| 465 | } | 466 | } |
| 466 | 467 | ||
| 467 | kmemleak_alloc(ret, size, 1, gfp); | 468 | kmemleak_alloc(ret, size, 1, gfp); |
| 468 | return ret; | 469 | return ret; |
| 469 | } | 470 | } |
| 471 | |||
| 472 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | ||
| 473 | { | ||
| 474 | return __do_kmalloc_node(size, gfp, node, _RET_IP_); | ||
| 475 | } | ||
| 470 | EXPORT_SYMBOL(__kmalloc_node); | 476 | EXPORT_SYMBOL(__kmalloc_node); |
| 471 | 477 | ||
| 478 | #ifdef CONFIG_TRACING | ||
| 479 | void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) | ||
| 480 | { | ||
| 481 | return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); | ||
| 482 | } | ||
| 483 | |||
| 484 | #ifdef CONFIG_NUMA | ||
| 485 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, | ||
| 486 | int node, unsigned long caller) | ||
| 487 | { | ||
| 488 | return __do_kmalloc_node(size, gfp, node, caller); | ||
| 489 | } | ||
| 490 | #endif | ||
| 491 | #endif | ||
| 492 | |||
| 472 | void kfree(const void *block) | 493 | void kfree(const void *block) |
| 473 | { | 494 | { |
| 474 | struct page *sp; | 495 | struct page *sp; |
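
This is SLOB's side of kmalloc_track_caller() support: __kmalloc_node() becomes a wrapper around __do_kmalloc_node(), and the _track_caller variants pass an explicit return address instead of _RET_IP_, so tracing points at the real call site rather than at an intermediate wrapper. A hedged sketch of the kind of wrapper that benefits (my_strdup() is a made-up helper; kstrdup() in mm/util.c uses the same pattern):

#include <linux/slab.h>
#include <linux/string.h>

/*
 * Without the _track_caller variant, every allocation made through this
 * wrapper would be attributed to my_strdup() in the kmalloc tracepoints;
 * with it, the recorded call site is whoever called my_strdup().
 */
static char *my_strdup(const char *s, gfp_t gfp)
{
        size_t len = strlen(s) + 1;
        char *buf = kmalloc_track_caller(len, gfp);

        if (buf)
                memcpy(buf, s, len);
        return buf;
}
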
| @@ -508,44 +529,24 @@ size_t ksize(const void *block) | |||
| 508 | } | 529 | } |
| 509 | EXPORT_SYMBOL(ksize); | 530 | EXPORT_SYMBOL(ksize); |
| 510 | 531 | ||
| 511 | struct kmem_cache *__kmem_cache_create(const char *name, size_t size, | 532 | int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) |
| 512 | size_t align, unsigned long flags, void (*ctor)(void *)) | ||
| 513 | { | 533 | { |
| 514 | struct kmem_cache *c; | 534 | size_t align = c->size; |
| 515 | |||
| 516 | c = slob_alloc(sizeof(struct kmem_cache), | ||
| 517 | GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); | ||
| 518 | 535 | ||
| 519 | if (c) { | 536 | if (flags & SLAB_DESTROY_BY_RCU) { |
| 520 | c->name = name; | 537 | /* leave room for rcu footer at the end of object */ |
| 521 | c->size = size; | 538 | c->size += sizeof(struct slob_rcu); |
| 522 | if (flags & SLAB_DESTROY_BY_RCU) { | ||
| 523 | /* leave room for rcu footer at the end of object */ | ||
| 524 | c->size += sizeof(struct slob_rcu); | ||
| 525 | } | ||
| 526 | c->flags = flags; | ||
| 527 | c->ctor = ctor; | ||
| 528 | /* ignore alignment unless it's forced */ | ||
| 529 | c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; | ||
| 530 | if (c->align < ARCH_SLAB_MINALIGN) | ||
| 531 | c->align = ARCH_SLAB_MINALIGN; | ||
| 532 | if (c->align < align) | ||
| 533 | c->align = align; | ||
| 534 | |||
| 535 | kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL); | ||
| 536 | c->refcount = 1; | ||
| 537 | } | 539 | } |
| 538 | return c; | 540 | c->flags = flags; |
| 539 | } | 541 | /* ignore alignment unless it's forced */ |
| 542 | c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; | ||
| 543 | if (c->align < ARCH_SLAB_MINALIGN) | ||
| 544 | c->align = ARCH_SLAB_MINALIGN; | ||
| 545 | if (c->align < align) | ||
| 546 | c->align = align; | ||
| 540 | 547 | ||
| 541 | void kmem_cache_destroy(struct kmem_cache *c) | 548 | return 0; |
| 542 | { | ||
| 543 | kmemleak_free(c); | ||
| 544 | if (c->flags & SLAB_DESTROY_BY_RCU) | ||
| 545 | rcu_barrier(); | ||
| 546 | slob_free(c, sizeof(struct kmem_cache)); | ||
| 547 | } | 549 | } |
| 548 | EXPORT_SYMBOL(kmem_cache_destroy); | ||
| 549 | 550 | ||
| 550 | void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | 551 | void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) |
| 551 | { | 552 | { |
| @@ -613,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c) | |||
| 613 | } | 614 | } |
| 614 | EXPORT_SYMBOL(kmem_cache_size); | 615 | EXPORT_SYMBOL(kmem_cache_size); |
| 615 | 616 | ||
| 617 | int __kmem_cache_shutdown(struct kmem_cache *c) | ||
| 618 | { | ||
| 619 | /* No way to check for remaining objects */ | ||
| 620 | return 0; | ||
| 621 | } | ||
| 622 | |||
| 616 | int kmem_cache_shrink(struct kmem_cache *d) | 623 | int kmem_cache_shrink(struct kmem_cache *d) |
| 617 | { | 624 | { |
| 618 | return 0; | 625 | return 0; |
| 619 | } | 626 | } |
| 620 | EXPORT_SYMBOL(kmem_cache_shrink); | 627 | EXPORT_SYMBOL(kmem_cache_shrink); |
| 621 | 628 | ||
| 629 | struct kmem_cache kmem_cache_boot = { | ||
| 630 | .name = "kmem_cache", | ||
| 631 | .size = sizeof(struct kmem_cache), | ||
| 632 | .flags = SLAB_PANIC, | ||
| 633 | .align = ARCH_KMALLOC_MINALIGN, | ||
| 634 | }; | ||
| 635 | |||
| 622 | void __init kmem_cache_init(void) | 636 | void __init kmem_cache_init(void) |
| 623 | { | 637 | { |
| 638 | kmem_cache = &kmem_cache_boot; | ||
| 624 | slab_state = UP; | 639 | slab_state = UP; |
| 625 | } | 640 | } |
| 626 | 641 | ||
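
For SLOB, __kmem_cache_create() no longer allocates anything: common code hands it a zeroed struct kmem_cache with name, size, align and ctor already filled in, and the hook only grows the size for the RCU footer and settles the effective alignment, while the statically defined kmem_cache_boot breaks the chicken-and-egg problem of needing a cache of struct kmem_cache before any cache exists. Below is a hedged, standalone C model of that three-way alignment clamp; the constants are stand-ins for the per-arch kernel values, and the requested value is passed as an explicit parameter rather than read out of the kmem_cache fields:

#include <stdio.h>

/* Stand-in constants for the model; the real kernel values are per-arch. */
#define MODEL_SLOB_ALIGN        8UL     /* used when SLAB_HWCACHE_ALIGN is set */
#define MODEL_ARCH_MINALIGN     4UL
#define MODEL_HWCACHE_ALIGN     0x2000UL

/*
 * Model of the alignment selection: the requested value is ignored unless
 * alignment is forced by SLAB_HWCACHE_ALIGN, then clamped up by the
 * architecture minimum and by the caller's value.
 */
static unsigned long pick_align(unsigned long requested, unsigned long flags)
{
        unsigned long align = (flags & MODEL_HWCACHE_ALIGN) ? MODEL_SLOB_ALIGN : 0;

        if (align < MODEL_ARCH_MINALIGN)
                align = MODEL_ARCH_MINALIGN;
        if (align < requested)
                align = requested;
        return align;
}

int main(void)
{
        printf("%lu\n", pick_align(0, 0));                      /* 4 */
        printf("%lu\n", pick_align(0, MODEL_HWCACHE_ALIGN));    /* 8 */
        printf("%lu\n", pick_align(16, 0));                     /* 16 */
        return 0;
}
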
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
| @@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *); | |||
| 210 | static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } | 210 | static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } |
| 211 | static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) | 211 | static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) |
| 212 | { return 0; } | 212 | { return 0; } |
| 213 | static inline void sysfs_slab_remove(struct kmem_cache *s) | 213 | static inline void sysfs_slab_remove(struct kmem_cache *s) { } |
| 214 | { | ||
| 215 | kfree(s->name); | ||
| 216 | kfree(s); | ||
| 217 | } | ||
| 218 | 214 | ||
| 219 | #endif | 215 | #endif |
| 220 | 216 | ||
| @@ -568,6 +564,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...) | |||
| 568 | printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); | 564 | printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); |
| 569 | printk(KERN_ERR "----------------------------------------" | 565 | printk(KERN_ERR "----------------------------------------" |
| 570 | "-------------------------------------\n\n"); | 566 | "-------------------------------------\n\n"); |
| 567 | |||
| 568 | add_taint(TAINT_BAD_PAGE); | ||
| 571 | } | 569 | } |
| 572 | 570 | ||
| 573 | static void slab_fix(struct kmem_cache *s, char *fmt, ...) | 571 | static void slab_fix(struct kmem_cache *s, char *fmt, ...) |
| @@ -624,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page, | |||
| 624 | print_trailer(s, page, object); | 622 | print_trailer(s, page, object); |
| 625 | } | 623 | } |
| 626 | 624 | ||
| 627 | static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) | 625 | static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...) |
| 628 | { | 626 | { |
| 629 | va_list args; | 627 | va_list args; |
| 630 | char buf[100]; | 628 | char buf[100]; |
| @@ -1069,13 +1067,13 @@ bad: | |||
| 1069 | return 0; | 1067 | return 0; |
| 1070 | } | 1068 | } |
| 1071 | 1069 | ||
| 1072 | static noinline int free_debug_processing(struct kmem_cache *s, | 1070 | static noinline struct kmem_cache_node *free_debug_processing( |
| 1073 | struct page *page, void *object, unsigned long addr) | 1071 | struct kmem_cache *s, struct page *page, void *object, |
| 1072 | unsigned long addr, unsigned long *flags) | ||
| 1074 | { | 1073 | { |
| 1075 | unsigned long flags; | 1074 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
| 1076 | int rc = 0; | ||
| 1077 | 1075 | ||
| 1078 | local_irq_save(flags); | 1076 | spin_lock_irqsave(&n->list_lock, *flags); |
| 1079 | slab_lock(page); | 1077 | slab_lock(page); |
| 1080 | 1078 | ||
| 1081 | if (!check_slab(s, page)) | 1079 | if (!check_slab(s, page)) |
| @@ -1113,15 +1111,19 @@ static noinline int free_debug_processing(struct kmem_cache *s, | |||
| 1113 | set_track(s, object, TRACK_FREE, addr); | 1111 | set_track(s, object, TRACK_FREE, addr); |
| 1114 | trace(s, page, object, 0); | 1112 | trace(s, page, object, 0); |
| 1115 | init_object(s, object, SLUB_RED_INACTIVE); | 1113 | init_object(s, object, SLUB_RED_INACTIVE); |
| 1116 | rc = 1; | ||
| 1117 | out: | 1114 | out: |
| 1118 | slab_unlock(page); | 1115 | slab_unlock(page); |
| 1119 | local_irq_restore(flags); | 1116 | /* |
| 1120 | return rc; | 1117 | * Keep node_lock to preserve integrity |
| 1118 | * until the object is actually freed | ||
| 1119 | */ | ||
| 1120 | return n; | ||
| 1121 | 1121 | ||
| 1122 | fail: | 1122 | fail: |
| 1123 | slab_unlock(page); | ||
| 1124 | spin_unlock_irqrestore(&n->list_lock, *flags); | ||
| 1123 | slab_fix(s, "Object at 0x%p not freed", object); | 1125 | slab_fix(s, "Object at 0x%p not freed", object); |
| 1124 | goto out; | 1126 | return NULL; |
| 1125 | } | 1127 | } |
| 1126 | 1128 | ||
| 1127 | static int __init setup_slub_debug(char *str) | 1129 | static int __init setup_slub_debug(char *str) |
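
free_debug_processing() now takes the per-node list_lock itself and, on success, returns the kmem_cache_node with that lock still held, leaving the release to the caller once the object is actually off the slab; the failure path unlocks before returning NULL. A hedged sketch of this lock hand-off pattern with illustrative types (node_state, validate_and_lock() and free_one() are made up):

#include <linux/spinlock.h>
#include <linux/types.h>

struct node_state {
        spinlock_t list_lock;
        /* ... per-node lists ... */
};

static struct node_state *validate_and_lock(struct node_state *n, bool ok,
                                            unsigned long *flags)
{
        spin_lock_irqsave(&n->list_lock, *flags);
        if (!ok) {
                spin_unlock_irqrestore(&n->list_lock, *flags);
                return NULL;            /* failure path: lock already dropped */
        }
        return n;                       /* success path: still locked */
}

static void free_one(struct node_state *n, bool ok)
{
        unsigned long flags;
        struct node_state *locked = validate_and_lock(n, ok, &flags);

        if (!locked)
                return;
        /* ... detach the object while the lock is held ... */
        spin_unlock_irqrestore(&locked->list_lock, flags);
}
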
| @@ -1214,8 +1216,9 @@ static inline void setup_object_debug(struct kmem_cache *s, | |||
| 1214 | static inline int alloc_debug_processing(struct kmem_cache *s, | 1216 | static inline int alloc_debug_processing(struct kmem_cache *s, |
| 1215 | struct page *page, void *object, unsigned long addr) { return 0; } | 1217 | struct page *page, void *object, unsigned long addr) { return 0; } |
| 1216 | 1218 | ||
| 1217 | static inline int free_debug_processing(struct kmem_cache *s, | 1219 | static inline struct kmem_cache_node *free_debug_processing( |
| 1218 | struct page *page, void *object, unsigned long addr) { return 0; } | 1220 | struct kmem_cache *s, struct page *page, void *object, |
| 1221 | unsigned long addr, unsigned long *flags) { return NULL; } | ||
| 1219 | 1222 | ||
| 1220 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) | 1223 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) |
| 1221 | { return 1; } | 1224 | { return 1; } |
| @@ -1714,7 +1717,7 @@ static inline void note_cmpxchg_failure(const char *n, | |||
| 1714 | stat(s, CMPXCHG_DOUBLE_CPU_FAIL); | 1717 | stat(s, CMPXCHG_DOUBLE_CPU_FAIL); |
| 1715 | } | 1718 | } |
| 1716 | 1719 | ||
| 1717 | void init_kmem_cache_cpus(struct kmem_cache *s) | 1720 | static void init_kmem_cache_cpus(struct kmem_cache *s) |
| 1718 | { | 1721 | { |
| 1719 | int cpu; | 1722 | int cpu; |
| 1720 | 1723 | ||
| @@ -1939,7 +1942,7 @@ static void unfreeze_partials(struct kmem_cache *s) | |||
| 1939 | * If we did not find a slot then simply move all the partials to the | 1942 | * If we did not find a slot then simply move all the partials to the |
| 1940 | * per node partial list. | 1943 | * per node partial list. |
| 1941 | */ | 1944 | */ |
| 1942 | int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | 1945 | static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) |
| 1943 | { | 1946 | { |
| 1944 | struct page *oldpage; | 1947 | struct page *oldpage; |
| 1945 | int pages; | 1948 | int pages; |
| @@ -1962,6 +1965,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | |||
| 1962 | local_irq_save(flags); | 1965 | local_irq_save(flags); |
| 1963 | unfreeze_partials(s); | 1966 | unfreeze_partials(s); |
| 1964 | local_irq_restore(flags); | 1967 | local_irq_restore(flags); |
| 1968 | oldpage = NULL; | ||
| 1965 | pobjects = 0; | 1969 | pobjects = 0; |
| 1966 | pages = 0; | 1970 | pages = 0; |
| 1967 | stat(s, CPU_PARTIAL_DRAIN); | 1971 | stat(s, CPU_PARTIAL_DRAIN); |
| @@ -2310,7 +2314,7 @@ new_slab: | |||
| 2310 | * | 2314 | * |
| 2311 | * Otherwise we can simply pick the next object from the lockless free list. | 2315 | * Otherwise we can simply pick the next object from the lockless free list. |
| 2312 | */ | 2316 | */ |
| 2313 | static __always_inline void *slab_alloc(struct kmem_cache *s, | 2317 | static __always_inline void *slab_alloc_node(struct kmem_cache *s, |
| 2314 | gfp_t gfpflags, int node, unsigned long addr) | 2318 | gfp_t gfpflags, int node, unsigned long addr) |
| 2315 | { | 2319 | { |
| 2316 | void **object; | 2320 | void **object; |
| @@ -2380,9 +2384,15 @@ redo: | |||
| 2380 | return object; | 2384 | return object; |
| 2381 | } | 2385 | } |
| 2382 | 2386 | ||
| 2387 | static __always_inline void *slab_alloc(struct kmem_cache *s, | ||
| 2388 | gfp_t gfpflags, unsigned long addr) | ||
| 2389 | { | ||
| 2390 | return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); | ||
| 2391 | } | ||
| 2392 | |||
| 2383 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) | 2393 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) |
| 2384 | { | 2394 | { |
| 2385 | void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); | 2395 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); |
| 2386 | 2396 | ||
| 2387 | trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); | 2397 | trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); |
| 2388 | 2398 | ||
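
The rename brings SLUB in line with SLAB's naming: slab_alloc_node() is the real fast path taking an explicit node, and slab_alloc() is just the NUMA_NO_NODE shorthand used by the non-node entry points, so callers see no change. A short caller-side sketch with an illustrative helper and cache:

#include <linux/slab.h>
#include <linux/numa.h>

/*
 * Under SLUB both branches now funnel into slab_alloc_node();
 * kmem_cache_alloc() just goes through the slab_alloc() shorthand.
 */
static void *session_new(struct kmem_cache *session_cachep, int nid)
{
        if (nid == NUMA_NO_NODE)
                return kmem_cache_alloc(session_cachep, GFP_KERNEL);

        return kmem_cache_alloc_node(session_cachep, GFP_KERNEL, nid);
}
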
| @@ -2393,7 +2403,7 @@ EXPORT_SYMBOL(kmem_cache_alloc); | |||
| 2393 | #ifdef CONFIG_TRACING | 2403 | #ifdef CONFIG_TRACING |
| 2394 | void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) | 2404 | void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) |
| 2395 | { | 2405 | { |
| 2396 | void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); | 2406 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); |
| 2397 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); | 2407 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); |
| 2398 | return ret; | 2408 | return ret; |
| 2399 | } | 2409 | } |
| @@ -2411,7 +2421,7 @@ EXPORT_SYMBOL(kmalloc_order_trace); | |||
| 2411 | #ifdef CONFIG_NUMA | 2421 | #ifdef CONFIG_NUMA |
| 2412 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) | 2422 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) |
| 2413 | { | 2423 | { |
| 2414 | void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); | 2424 | void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); |
| 2415 | 2425 | ||
| 2416 | trace_kmem_cache_alloc_node(_RET_IP_, ret, | 2426 | trace_kmem_cache_alloc_node(_RET_IP_, ret, |
| 2417 | s->object_size, s->size, gfpflags, node); | 2427 | s->object_size, s->size, gfpflags, node); |
| @@ -2425,7 +2435,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
| 2425 | gfp_t gfpflags, | 2435 | gfp_t gfpflags, |
| 2426 | int node, size_t size) | 2436 | int node, size_t size) |
| 2427 | { | 2437 | { |
| 2428 | void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); | 2438 | void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); |
| 2429 | 2439 | ||
| 2430 | trace_kmalloc_node(_RET_IP_, ret, | 2440 | trace_kmalloc_node(_RET_IP_, ret, |
| 2431 | size, s->size, gfpflags, node); | 2441 | size, s->size, gfpflags, node); |
| @@ -2457,7 +2467,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2457 | 2467 | ||
| 2458 | stat(s, FREE_SLOWPATH); | 2468 | stat(s, FREE_SLOWPATH); |
| 2459 | 2469 | ||
| 2460 | if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) | 2470 | if (kmem_cache_debug(s) && |
| 2471 | !(n = free_debug_processing(s, page, x, addr, &flags))) | ||
| 2461 | return; | 2472 | return; |
| 2462 | 2473 | ||
| 2463 | do { | 2474 | do { |
| @@ -2612,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
| 2612 | 2623 | ||
| 2613 | page = virt_to_head_page(x); | 2624 | page = virt_to_head_page(x); |
| 2614 | 2625 | ||
| 2626 | if (kmem_cache_debug(s) && page->slab != s) { | ||
| 2627 | pr_err("kmem_cache_free: Wrong slab cache. %s but object" | ||
| 2628 | " is from %s\n", page->slab->name, s->name); | ||
| 2629 | WARN_ON_ONCE(1); | ||
| 2630 | return; | ||
| 2631 | } | ||
| 2632 | |||
| 2615 | slab_free(s, page, x, _RET_IP_); | 2633 | slab_free(s, page, x, _RET_IP_); |
| 2616 | 2634 | ||
| 2617 | trace_kmem_cache_free(_RET_IP_, x); | 2635 | trace_kmem_cache_free(_RET_IP_, x); |
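
The new check in kmem_cache_free() catches objects being returned to a cache other than the one they were allocated from; with debugging enabled it now logs the mismatch and bails out rather than corrupting the other cache. A sketch of the pairing rule it enforces, with made-up cache names:

#include <linux/slab.h>

static struct kmem_cache *req_cachep;   /* illustrative caches */
static struct kmem_cache *rsp_cachep;

static void roundtrip(void)
{
        void *req = kmem_cache_alloc(req_cachep, GFP_KERNEL);

        if (!req)
                return;
        /*
         * Correct: the object goes back to the cache it came from.
         * The buggy variant kmem_cache_free(rsp_cachep, req) is what the
         * kmem_cache_debug() check above catches: it logs "Wrong slab
         * cache", fires WARN_ON_ONCE() and leaves the object unfreed
         * instead of corrupting rsp_cachep's slabs.
         */
        kmem_cache_free(req_cachep, req);
}
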
| @@ -3026,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) | |||
| 3026 | 3044 | ||
| 3027 | } | 3045 | } |
| 3028 | 3046 | ||
| 3029 | static int kmem_cache_open(struct kmem_cache *s, | 3047 | static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) |
| 3030 | const char *name, size_t size, | ||
| 3031 | size_t align, unsigned long flags, | ||
| 3032 | void (*ctor)(void *)) | ||
| 3033 | { | 3048 | { |
| 3034 | memset(s, 0, kmem_size); | 3049 | s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); |
| 3035 | s->name = name; | ||
| 3036 | s->ctor = ctor; | ||
| 3037 | s->object_size = size; | ||
| 3038 | s->align = align; | ||
| 3039 | s->flags = kmem_cache_flags(size, flags, name, ctor); | ||
| 3040 | s->reserved = 0; | 3050 | s->reserved = 0; |
| 3041 | 3051 | ||
| 3042 | if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) | 3052 | if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) |
| @@ -3098,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s, | |||
| 3098 | else | 3108 | else |
| 3099 | s->cpu_partial = 30; | 3109 | s->cpu_partial = 30; |
| 3100 | 3110 | ||
| 3101 | s->refcount = 1; | ||
| 3102 | #ifdef CONFIG_NUMA | 3111 | #ifdef CONFIG_NUMA |
| 3103 | s->remote_node_defrag_ratio = 1000; | 3112 | s->remote_node_defrag_ratio = 1000; |
| 3104 | #endif | 3113 | #endif |
| @@ -3106,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s, | |||
| 3106 | goto error; | 3115 | goto error; |
| 3107 | 3116 | ||
| 3108 | if (alloc_kmem_cache_cpus(s)) | 3117 | if (alloc_kmem_cache_cpus(s)) |
| 3109 | return 1; | 3118 | return 0; |
| 3110 | 3119 | ||
| 3111 | free_kmem_cache_nodes(s); | 3120 | free_kmem_cache_nodes(s); |
| 3112 | error: | 3121 | error: |
| 3113 | if (flags & SLAB_PANIC) | 3122 | if (flags & SLAB_PANIC) |
| 3114 | panic("Cannot create slab %s size=%lu realsize=%u " | 3123 | panic("Cannot create slab %s size=%lu realsize=%u " |
| 3115 | "order=%u offset=%u flags=%lx\n", | 3124 | "order=%u offset=%u flags=%lx\n", |
| 3116 | s->name, (unsigned long)size, s->size, oo_order(s->oo), | 3125 | s->name, (unsigned long)s->size, s->size, oo_order(s->oo), |
| 3117 | s->offset, flags); | 3126 | s->offset, flags); |
| 3118 | return 0; | 3127 | return -EINVAL; |
| 3119 | } | 3128 | } |
| 3120 | 3129 | ||
| 3121 | /* | 3130 | /* |
| @@ -3137,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, | |||
| 3137 | sizeof(long), GFP_ATOMIC); | 3146 | sizeof(long), GFP_ATOMIC); |
| 3138 | if (!map) | 3147 | if (!map) |
| 3139 | return; | 3148 | return; |
| 3140 | slab_err(s, page, "%s", text); | 3149 | slab_err(s, page, text, s->name); |
| 3141 | slab_lock(page); | 3150 | slab_lock(page); |
| 3142 | 3151 | ||
| 3143 | get_map(s, page, map); | 3152 | get_map(s, page, map); |
| @@ -3169,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) | |||
| 3169 | discard_slab(s, page); | 3178 | discard_slab(s, page); |
| 3170 | } else { | 3179 | } else { |
| 3171 | list_slab_objects(s, page, | 3180 | list_slab_objects(s, page, |
| 3172 | "Objects remaining on kmem_cache_close()"); | 3181 | "Objects remaining in %s on kmem_cache_close()"); |
| 3173 | } | 3182 | } |
| 3174 | } | 3183 | } |
| 3175 | } | 3184 | } |
| @@ -3182,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s) | |||
| 3182 | int node; | 3191 | int node; |
| 3183 | 3192 | ||
| 3184 | flush_all(s); | 3193 | flush_all(s); |
| 3185 | free_percpu(s->cpu_slab); | ||
| 3186 | /* Attempt to free all objects */ | 3194 | /* Attempt to free all objects */ |
| 3187 | for_each_node_state(node, N_NORMAL_MEMORY) { | 3195 | for_each_node_state(node, N_NORMAL_MEMORY) { |
| 3188 | struct kmem_cache_node *n = get_node(s, node); | 3196 | struct kmem_cache_node *n = get_node(s, node); |
| @@ -3191,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s) | |||
| 3191 | if (n->nr_partial || slabs_node(s, node)) | 3199 | if (n->nr_partial || slabs_node(s, node)) |
| 3192 | return 1; | 3200 | return 1; |
| 3193 | } | 3201 | } |
| 3202 | free_percpu(s->cpu_slab); | ||
| 3194 | free_kmem_cache_nodes(s); | 3203 | free_kmem_cache_nodes(s); |
| 3195 | return 0; | 3204 | return 0; |
| 3196 | } | 3205 | } |
| 3197 | 3206 | ||
| 3198 | /* | 3207 | int __kmem_cache_shutdown(struct kmem_cache *s) |
| 3199 | * Close a cache and release the kmem_cache structure | ||
| 3200 | * (must be used for caches created using kmem_cache_create) | ||
| 3201 | */ | ||
| 3202 | void kmem_cache_destroy(struct kmem_cache *s) | ||
| 3203 | { | 3208 | { |
| 3204 | mutex_lock(&slab_mutex); | 3209 | int rc = kmem_cache_close(s); |
| 3205 | s->refcount--; | 3210 | |
| 3206 | if (!s->refcount) { | 3211 | if (!rc) |
| 3207 | list_del(&s->list); | ||
| 3208 | mutex_unlock(&slab_mutex); | ||
| 3209 | if (kmem_cache_close(s)) { | ||
| 3210 | printk(KERN_ERR "SLUB %s: %s called for cache that " | ||
| 3211 | "still has objects.\n", s->name, __func__); | ||
| 3212 | dump_stack(); | ||
| 3213 | } | ||
| 3214 | if (s->flags & SLAB_DESTROY_BY_RCU) | ||
| 3215 | rcu_barrier(); | ||
| 3216 | sysfs_slab_remove(s); | 3212 | sysfs_slab_remove(s); |
| 3217 | } else | 3213 | |
| 3218 | mutex_unlock(&slab_mutex); | 3214 | return rc; |
| 3219 | } | 3215 | } |
| 3220 | EXPORT_SYMBOL(kmem_cache_destroy); | ||
| 3221 | 3216 | ||
| 3222 | /******************************************************************** | 3217 | /******************************************************************** |
| 3223 | * Kmalloc subsystem | 3218 | * Kmalloc subsystem |
| @@ -3226,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy); | |||
| 3226 | struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; | 3221 | struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; |
| 3227 | EXPORT_SYMBOL(kmalloc_caches); | 3222 | EXPORT_SYMBOL(kmalloc_caches); |
| 3228 | 3223 | ||
| 3229 | static struct kmem_cache *kmem_cache; | ||
| 3230 | |||
| 3231 | #ifdef CONFIG_ZONE_DMA | 3224 | #ifdef CONFIG_ZONE_DMA |
| 3232 | static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; | 3225 | static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; |
| 3233 | #endif | 3226 | #endif |
| @@ -3273,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name, | |||
| 3273 | { | 3266 | { |
| 3274 | struct kmem_cache *s; | 3267 | struct kmem_cache *s; |
| 3275 | 3268 | ||
| 3276 | s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); | 3269 | s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); |
| 3270 | |||
| 3271 | s->name = name; | ||
| 3272 | s->size = s->object_size = size; | ||
| 3273 | s->align = ARCH_KMALLOC_MINALIGN; | ||
| 3277 | 3274 | ||
| 3278 | /* | 3275 | /* |
| 3279 | * This function is called with IRQs disabled during early-boot on | 3276 | * This function is called with IRQs disabled during early-boot on |
| 3280 | * single CPU so there's no need to take slab_mutex here. | 3277 | * single CPU so there's no need to take slab_mutex here. |
| 3281 | */ | 3278 | */ |
| 3282 | if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, | 3279 | if (kmem_cache_open(s, flags)) |
| 3283 | flags, NULL)) | ||
| 3284 | goto panic; | 3280 | goto panic; |
| 3285 | 3281 | ||
| 3286 | list_add(&s->list, &slab_caches); | 3282 | list_add(&s->list, &slab_caches); |
| @@ -3362,7 +3358,7 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
| 3362 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3358 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 3363 | return s; | 3359 | return s; |
| 3364 | 3360 | ||
| 3365 | ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); | 3361 | ret = slab_alloc(s, flags, _RET_IP_); |
| 3366 | 3362 | ||
| 3367 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); | 3363 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); |
| 3368 | 3364 | ||
| @@ -3405,7 +3401,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 3405 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3401 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 3406 | return s; | 3402 | return s; |
| 3407 | 3403 | ||
| 3408 | ret = slab_alloc(s, flags, node, _RET_IP_); | 3404 | ret = slab_alloc_node(s, flags, node, _RET_IP_); |
| 3409 | 3405 | ||
| 3410 | trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); | 3406 | trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); |
| 3411 | 3407 | ||
| @@ -3482,7 +3478,7 @@ void kfree(const void *x) | |||
| 3482 | if (unlikely(!PageSlab(page))) { | 3478 | if (unlikely(!PageSlab(page))) { |
| 3483 | BUG_ON(!PageCompound(page)); | 3479 | BUG_ON(!PageCompound(page)); |
| 3484 | kmemleak_free(x); | 3480 | kmemleak_free(x); |
| 3485 | put_page(page); | 3481 | __free_pages(page, compound_order(page)); |
| 3486 | return; | 3482 | return; |
| 3487 | } | 3483 | } |
| 3488 | slab_free(page->slab, page, object, _RET_IP_); | 3484 | slab_free(page->slab, page, object, _RET_IP_); |
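
kfree() of an allocation that bypassed the kmalloc caches now releases the backing compound page with __free_pages() at its compound order instead of put_page(), matching how the pages were handed out. Caller-visible behaviour is unchanged; a hedged sketch (the size threshold in the comment is an assumption about this era's SLUB, and big_buffer_roundtrip() is a made-up helper):

#include <linux/slab.h>
#include <linux/mm.h>

/*
 * A request larger than the biggest kmalloc cache (roughly two pages for
 * SLUB at this point) is served straight from the page allocator as a
 * compound page; kfree() then takes the !PageSlab branch shown above.
 */
static void big_buffer_roundtrip(void)
{
        void *buf = kmalloc(4 * PAGE_SIZE, GFP_KERNEL);

        kfree(buf);     /* kfree(NULL) is a no-op, so no check needed */
}
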
| @@ -3719,12 +3715,12 @@ void __init kmem_cache_init(void) | |||
| 3719 | slub_max_order = 0; | 3715 | slub_max_order = 0; |
| 3720 | 3716 | ||
| 3721 | kmem_size = offsetof(struct kmem_cache, node) + | 3717 | kmem_size = offsetof(struct kmem_cache, node) + |
| 3722 | nr_node_ids * sizeof(struct kmem_cache_node *); | 3718 | nr_node_ids * sizeof(struct kmem_cache_node *); |
| 3723 | 3719 | ||
| 3724 | /* Allocate two kmem_caches from the page allocator */ | 3720 | /* Allocate two kmem_caches from the page allocator */ |
| 3725 | kmalloc_size = ALIGN(kmem_size, cache_line_size()); | 3721 | kmalloc_size = ALIGN(kmem_size, cache_line_size()); |
| 3726 | order = get_order(2 * kmalloc_size); | 3722 | order = get_order(2 * kmalloc_size); |
| 3727 | kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); | 3723 | kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order); |
| 3728 | 3724 | ||
| 3729 | /* | 3725 | /* |
| 3730 | * Must first have the slab cache available for the allocations of the | 3726 | * Must first have the slab cache available for the allocations of the |
| @@ -3733,9 +3729,10 @@ void __init kmem_cache_init(void) | |||
| 3733 | */ | 3729 | */ |
| 3734 | kmem_cache_node = (void *)kmem_cache + kmalloc_size; | 3730 | kmem_cache_node = (void *)kmem_cache + kmalloc_size; |
| 3735 | 3731 | ||
| 3736 | kmem_cache_open(kmem_cache_node, "kmem_cache_node", | 3732 | kmem_cache_node->name = "kmem_cache_node"; |
| 3737 | sizeof(struct kmem_cache_node), | 3733 | kmem_cache_node->size = kmem_cache_node->object_size = |
| 3738 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | 3734 | sizeof(struct kmem_cache_node); |
| 3735 | kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC); | ||
| 3739 | 3736 | ||
| 3740 | hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); | 3737 | hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); |
| 3741 | 3738 | ||
| @@ -3743,8 +3740,10 @@ void __init kmem_cache_init(void) | |||
| 3743 | slab_state = PARTIAL; | 3740 | slab_state = PARTIAL; |
| 3744 | 3741 | ||
| 3745 | temp_kmem_cache = kmem_cache; | 3742 | temp_kmem_cache = kmem_cache; |
| 3746 | kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, | 3743 | kmem_cache->name = "kmem_cache"; |
| 3747 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | 3744 | kmem_cache->size = kmem_cache->object_size = kmem_size; |
| 3745 | kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC); | ||
| 3746 | |||
| 3748 | kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); | 3747 | kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); |
| 3749 | memcpy(kmem_cache, temp_kmem_cache, kmem_size); | 3748 | memcpy(kmem_cache, temp_kmem_cache, kmem_size); |
| 3750 | 3749 | ||
| @@ -3933,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size, | |||
| 3933 | return NULL; | 3932 | return NULL; |
| 3934 | } | 3933 | } |
| 3935 | 3934 | ||
| 3936 | struct kmem_cache *__kmem_cache_create(const char *name, size_t size, | 3935 | struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, |
| 3937 | size_t align, unsigned long flags, void (*ctor)(void *)) | 3936 | size_t align, unsigned long flags, void (*ctor)(void *)) |
| 3938 | { | 3937 | { |
| 3939 | struct kmem_cache *s; | 3938 | struct kmem_cache *s; |
| 3940 | char *n; | ||
| 3941 | 3939 | ||
| 3942 | s = find_mergeable(size, align, flags, name, ctor); | 3940 | s = find_mergeable(size, align, flags, name, ctor); |
| 3943 | if (s) { | 3941 | if (s) { |
| @@ -3951,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, | |||
| 3951 | 3949 | ||
| 3952 | if (sysfs_slab_alias(s, name)) { | 3950 | if (sysfs_slab_alias(s, name)) { |
| 3953 | s->refcount--; | 3951 | s->refcount--; |
| 3954 | return NULL; | 3952 | s = NULL; |
| 3955 | } | 3953 | } |
| 3956 | return s; | ||
| 3957 | } | 3954 | } |
| 3958 | 3955 | ||
| 3959 | n = kstrdup(name, GFP_KERNEL); | 3956 | return s; |
| 3960 | if (!n) | 3957 | } |
| 3961 | return NULL; | ||
| 3962 | 3958 | ||
| 3963 | s = kmalloc(kmem_size, GFP_KERNEL); | 3959 | int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) |
| 3964 | if (s) { | 3960 | { |
| 3965 | if (kmem_cache_open(s, n, | 3961 | int err; |
| 3966 | size, align, flags, ctor)) { | ||
| 3967 | int r; | ||
| 3968 | 3962 | ||
| 3969 | list_add(&s->list, &slab_caches); | 3963 | err = kmem_cache_open(s, flags); |
| 3970 | mutex_unlock(&slab_mutex); | 3964 | if (err) |
| 3971 | r = sysfs_slab_add(s); | 3965 | return err; |
| 3972 | mutex_lock(&slab_mutex); | ||
| 3973 | 3966 | ||
| 3974 | if (!r) | 3967 | mutex_unlock(&slab_mutex); |
| 3975 | return s; | 3968 | err = sysfs_slab_add(s); |
| 3969 | mutex_lock(&slab_mutex); | ||
| 3976 | 3970 | ||
| 3977 | list_del(&s->list); | 3971 | if (err) |
| 3978 | kmem_cache_close(s); | 3972 | kmem_cache_close(s); |
| 3979 | } | 3973 | |
| 3980 | kfree(s); | 3974 | return err; |
| 3981 | } | ||
| 3982 | kfree(n); | ||
| 3983 | return NULL; | ||
| 3984 | } | 3975 | } |
| 3985 | 3976 | ||
| 3986 | #ifdef CONFIG_SMP | 3977 | #ifdef CONFIG_SMP |
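
SLUB's part of cache creation is now split in two: __kmem_cache_alias() tries to merge the request into an existing compatible cache (bumping its refcount and, if needed, its object_size), and __kmem_cache_create() merely opens the cache that common code already allocated and registers it in sysfs. A side effect of merging is that two compatible caches can end up sharing one slab, with the second name appearing as a sysfs alias; a hedged sketch with made-up names:

#include <linux/module.h>
#include <linux/slab.h>

/* Two unrelated but compatible objects: same size, no ctor, no debug flags. */
struct a_obj { unsigned long v[4]; };
struct b_obj { unsigned long w[4]; };

static struct kmem_cache *a_cachep, *b_cachep;

static int __init alias_demo_init(void)
{
        a_cachep = kmem_cache_create("a_cache", sizeof(struct a_obj), 0, 0, NULL);
        b_cachep = kmem_cache_create("b_cache", sizeof(struct b_obj), 0, 0, NULL);

        /*
         * Under SLUB without slub_debug, __kmem_cache_alias() typically
         * merges these, so both pointers refer to the same kmem_cache and
         * sysfs_slab_alias() exposes one name as a symlink alias of the
         * other under /sys/kernel/slab.
         */
        return (a_cachep && b_cachep) ? 0 : -ENOMEM;
}

module_init(alias_demo_init);
MODULE_LICENSE("GPL");
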
| @@ -4033,7 +4024,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | |||
| 4033 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 4024 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 4034 | return s; | 4025 | return s; |
| 4035 | 4026 | ||
| 4036 | ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); | 4027 | ret = slab_alloc(s, gfpflags, caller); |
| 4037 | 4028 | ||
| 4038 | /* Honor the call site pointer we received. */ | 4029 | /* Honor the call site pointer we received. */ |
| 4039 | trace_kmalloc(caller, ret, size, s->size, gfpflags); | 4030 | trace_kmalloc(caller, ret, size, s->size, gfpflags); |
| @@ -4063,7 +4054,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
| 4063 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 4054 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 4064 | return s; | 4055 | return s; |
| 4065 | 4056 | ||
| 4066 | ret = slab_alloc(s, gfpflags, node, caller); | 4057 | ret = slab_alloc_node(s, gfpflags, node, caller); |
| 4067 | 4058 | ||
| 4068 | /* Honor the call site pointer we received. */ | 4059 | /* Honor the call site pointer we received. */ |
| 4069 | trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); | 4060 | trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); |
| @@ -5210,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj, | |||
| 5210 | return err; | 5201 | return err; |
| 5211 | } | 5202 | } |
| 5212 | 5203 | ||
| 5213 | static void kmem_cache_release(struct kobject *kobj) | ||
| 5214 | { | ||
| 5215 | struct kmem_cache *s = to_slab(kobj); | ||
| 5216 | |||
| 5217 | kfree(s->name); | ||
| 5218 | kfree(s); | ||
| 5219 | } | ||
| 5220 | |||
| 5221 | static const struct sysfs_ops slab_sysfs_ops = { | 5204 | static const struct sysfs_ops slab_sysfs_ops = { |
| 5222 | .show = slab_attr_show, | 5205 | .show = slab_attr_show, |
| 5223 | .store = slab_attr_store, | 5206 | .store = slab_attr_store, |
| @@ -5225,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = { | |||
| 5225 | 5208 | ||
| 5226 | static struct kobj_type slab_ktype = { | 5209 | static struct kobj_type slab_ktype = { |
| 5227 | .sysfs_ops = &slab_sysfs_ops, | 5210 | .sysfs_ops = &slab_sysfs_ops, |
| 5228 | .release = kmem_cache_release | ||
| 5229 | }; | 5211 | }; |
| 5230 | 5212 | ||
| 5231 | static int uevent_filter(struct kset *kset, struct kobject *kobj) | 5213 | static int uevent_filter(struct kset *kset, struct kobject *kobj) |
diff --git a/mm/util.c b/mm/util.c --- a/mm/util.c +++ b/mm/util.c | |||
| @@ -105,6 +105,25 @@ void *memdup_user(const void __user *src, size_t len) | |||
| 105 | } | 105 | } |
| 106 | EXPORT_SYMBOL(memdup_user); | 106 | EXPORT_SYMBOL(memdup_user); |
| 107 | 107 | ||
| 108 | static __always_inline void *__do_krealloc(const void *p, size_t new_size, | ||
| 109 | gfp_t flags) | ||
| 110 | { | ||
| 111 | void *ret; | ||
| 112 | size_t ks = 0; | ||
| 113 | |||
| 114 | if (p) | ||
| 115 | ks = ksize(p); | ||
| 116 | |||
| 117 | if (ks >= new_size) | ||
| 118 | return (void *)p; | ||
| 119 | |||
| 120 | ret = kmalloc_track_caller(new_size, flags); | ||
| 121 | if (ret && p) | ||
| 122 | memcpy(ret, p, ks); | ||
| 123 | |||
| 124 | return ret; | ||
| 125 | } | ||
| 126 | |||
| 108 | /** | 127 | /** |
| 109 | * __krealloc - like krealloc() but don't free @p. | 128 | * __krealloc - like krealloc() but don't free @p. |
| 110 | * @p: object to reallocate memory for. | 129 | * @p: object to reallocate memory for. |
| @@ -117,23 +136,11 @@ EXPORT_SYMBOL(memdup_user); | |||
| 117 | */ | 136 | */ |
| 118 | void *__krealloc(const void *p, size_t new_size, gfp_t flags) | 137 | void *__krealloc(const void *p, size_t new_size, gfp_t flags) |
| 119 | { | 138 | { |
| 120 | void *ret; | ||
| 121 | size_t ks = 0; | ||
| 122 | |||
| 123 | if (unlikely(!new_size)) | 139 | if (unlikely(!new_size)) |
| 124 | return ZERO_SIZE_PTR; | 140 | return ZERO_SIZE_PTR; |
| 125 | 141 | ||
| 126 | if (p) | 142 | return __do_krealloc(p, new_size, flags); |
| 127 | ks = ksize(p); | ||
| 128 | 143 | ||
| 129 | if (ks >= new_size) | ||
| 130 | return (void *)p; | ||
| 131 | |||
| 132 | ret = kmalloc_track_caller(new_size, flags); | ||
| 133 | if (ret && p) | ||
| 134 | memcpy(ret, p, ks); | ||
| 135 | |||
| 136 | return ret; | ||
| 137 | } | 144 | } |
| 138 | EXPORT_SYMBOL(__krealloc); | 145 | EXPORT_SYMBOL(__krealloc); |
| 139 | 146 | ||
| @@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) | |||
| 157 | return ZERO_SIZE_PTR; | 164 | return ZERO_SIZE_PTR; |
| 158 | } | 165 | } |
| 159 | 166 | ||
| 160 | ret = __krealloc(p, new_size, flags); | 167 | ret = __do_krealloc(p, new_size, flags); |
| 161 | if (ret && p != ret) | 168 | if (ret && p != ret) |
| 162 | kfree(p); | 169 | kfree(p); |
| 163 | 170 | ||
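
The mm/util.c hunks move the grow-and-copy logic into __do_krealloc() so that krealloc() no longer funnels through the exported __krealloc(), and kmalloc_track_caller() attributes the new buffer to krealloc()'s caller. The user-visible contract stays the same: krealloc() frees the old buffer when it had to move it, __krealloc() never frees it. A typical caller-side sketch with a made-up helper:

#include <linux/slab.h>

/*
 * On success the old buffer has either been grown in place or copied and
 * freed; on failure NULL comes back and the old buffer is untouched, so
 * keep the original pointer until the call succeeds.
 */
static int grow_log(char **log, size_t *cap, size_t want)
{
        char *nbuf;

        if (want <= *cap)
                return 0;

        nbuf = krealloc(*log, want, GFP_KERNEL);
        if (!nbuf)
                return -ENOMEM;         /* *log is still valid */

        *log = nbuf;
        *cap = want;
        return 0;
}
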
