| author | Pekka Enberg <penberg@kernel.org> | 2012-10-03 02:56:12 -0400 |
|---|---|---|
| committer | Pekka Enberg <penberg@kernel.org> | 2012-10-03 02:56:12 -0400 |
| commit | 023dc70470502f41b285112d4840f35d9075b767 (patch) | |
| tree | f2f06d54be9583d9b1b2abae4c76722c5453df83 | |
| parent | a0d271cbfed1dd50278c6b06bead3d00ba0a88f9 (diff) | |
| parent | 608da7e3fc7259eca0d983b31bc8915af14cf15e (diff) | |
Merge branch 'slab/next' into slab/for-linus
| -rw-r--r-- | include/linux/slab.h | 6 |
| -rw-r--r-- | include/linux/slab_def.h | 13 |
| -rw-r--r-- | include/linux/slob_def.h | 6 |
| -rw-r--r-- | mm/slab.c | 95 |
| -rw-r--r-- | mm/slab_common.c | 97 |
| -rw-r--r-- | mm/slob.c | 33 |
| -rw-r--r-- | mm/slub.c | 63 |
| -rw-r--r-- | mm/util.c | 35 |
8 files changed, 186 insertions(+), 162 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h index 0dd2dfa7beca..83d1a1454b7e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, | |||
| 321 | * request comes from. | 321 | * request comes from. |
| 322 | */ | 322 | */ |
| 323 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ | 323 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ |
| 324 | (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) | 324 | (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ |
| 325 | (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) | ||
| 325 | extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); | 326 | extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); |
| 326 | #define kmalloc_track_caller(size, flags) \ | 327 | #define kmalloc_track_caller(size, flags) \ |
| 327 | __kmalloc_track_caller(size, flags, _RET_IP_) | 328 | __kmalloc_track_caller(size, flags, _RET_IP_) |
| @@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); | |||
| 340 | * allocation request comes from. | 341 | * allocation request comes from. |
| 341 | */ | 342 | */ |
| 342 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ | 343 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ |
| 343 | (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) | 344 | (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ |
| 345 | (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) | ||
| 344 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); | 346 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); |
| 345 | #define kmalloc_node_track_caller(size, flags, node) \ | 347 | #define kmalloc_node_track_caller(size, flags, node) \ |
| 346 | __kmalloc_node_track_caller(size, flags, node, \ | 348 | __kmalloc_node_track_caller(size, flags, node, \ |
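The two hunks above extend the `*_track_caller` machinery to SLOB when CONFIG_TRACING is set. A minimal sketch of why a generic wrapper wants these variants; `my_zalloc` is an illustrative name, not part of this patch:

```c
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Illustrative wrapper: with kmalloc_track_caller(), debug/tracing
 * builds record my_zalloc()'s *caller* as the allocation site, instead
 * of every allocation in the system appearing to come from my_zalloc()
 * itself.
 */
static void *my_zalloc(size_t size, gfp_t flags)
{
	void *p = kmalloc_track_caller(size, flags);

	if (p)
		memset(p, 0, size);
	return p;
}
```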
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 0c634fa376c9..e98caebdd0bc 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
| @@ -45,7 +45,6 @@ struct kmem_cache { | |||
| 45 | unsigned int colour_off; /* colour offset */ | 45 | unsigned int colour_off; /* colour offset */ |
| 46 | struct kmem_cache *slabp_cache; | 46 | struct kmem_cache *slabp_cache; |
| 47 | unsigned int slab_size; | 47 | unsigned int slab_size; |
| 48 | unsigned int dflags; /* dynamic flags */ | ||
| 49 | 48 | ||
| 50 | /* constructor func */ | 49 | /* constructor func */ |
| 51 | void (*ctor)(void *obj); | 50 | void (*ctor)(void *obj); |
| @@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | |||
| 112 | void *__kmalloc(size_t size, gfp_t flags); | 111 | void *__kmalloc(size_t size, gfp_t flags); |
| 113 | 112 | ||
| 114 | #ifdef CONFIG_TRACING | 113 | #ifdef CONFIG_TRACING |
| 115 | extern void *kmem_cache_alloc_trace(size_t size, | 114 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); |
| 116 | struct kmem_cache *cachep, gfp_t flags); | ||
| 117 | extern size_t slab_buffer_size(struct kmem_cache *cachep); | ||
| 118 | #else | 115 | #else |
| 119 | static __always_inline void * | 116 | static __always_inline void * |
| 120 | kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags) | 117 | kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) |
| 121 | { | 118 | { |
| 122 | return kmem_cache_alloc(cachep, flags); | 119 | return kmem_cache_alloc(cachep, flags); |
| 123 | } | 120 | } |
| 124 | static inline size_t slab_buffer_size(struct kmem_cache *cachep) | ||
| 125 | { | ||
| 126 | return 0; | ||
| 127 | } | ||
| 128 | #endif | 121 | #endif |
| 129 | 122 | ||
| 130 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | 123 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
| @@ -154,7 +147,7 @@ found: | |||
| 154 | #endif | 147 | #endif |
| 155 | cachep = malloc_sizes[i].cs_cachep; | 148 | cachep = malloc_sizes[i].cs_cachep; |
| 156 | 149 | ||
| 157 | ret = kmem_cache_alloc_trace(size, cachep, flags); | 150 | ret = kmem_cache_alloc_trace(cachep, flags, size); |
| 158 | 151 | ||
| 159 | return ret; | 152 | return ret; |
| 160 | } | 153 | } |
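The `size` argument that `kmem_cache_alloc_trace()` now takes last is the caller's *requested* size; the size-class cache may hand out a larger object. A sketch of observing the difference with `ksize()` (the 128-byte figure is a typical outcome for common configs, not guaranteed):

```c
#include <linux/slab.h>

/* Sketch: requested vs. allocated size for a kmalloc() request. */
static size_t usable_size_demo(void)
{
	void *p = kmalloc(100, GFP_KERNEL);
	size_t n = p ? ksize(p) : 0;	/* often 128: the class size */

	kfree(p);
	return n;
}
```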
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 0ec00b39d006..f28e14a12e3f 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
| @@ -1,12 +1,14 @@ | |||
| 1 | #ifndef __LINUX_SLOB_DEF_H | 1 | #ifndef __LINUX_SLOB_DEF_H |
| 2 | #define __LINUX_SLOB_DEF_H | 2 | #define __LINUX_SLOB_DEF_H |
| 3 | 3 | ||
| 4 | #include <linux/numa.h> | ||
| 5 | |||
| 4 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 6 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
| 5 | 7 | ||
| 6 | static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, | 8 | static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, |
| 7 | gfp_t flags) | 9 | gfp_t flags) |
| 8 | { | 10 | { |
| 9 | return kmem_cache_alloc_node(cachep, flags, -1); | 11 | return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE); |
| 10 | } | 12 | } |
| 11 | 13 | ||
| 12 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 14 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
| @@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 26 | */ | 28 | */ |
| 27 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | 29 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
| 28 | { | 30 | { |
| 29 | return __kmalloc_node(size, flags, -1); | 31 | return __kmalloc_node(size, flags, NUMA_NO_NODE); |
| 30 | } | 32 | } |
| 31 | 33 | ||
| 32 | static __always_inline void *__kmalloc(size_t size, gfp_t flags) | 34 | static __always_inline void *__kmalloc(size_t size, gfp_t flags) |
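These hunks replace the bare `-1` with `NUMA_NO_NODE` from `<linux/numa.h>` (which is defined as -1), making the "no placement preference" case self-documenting. A short sketch of the two call styles; `numa_node_id()` is the usual "current node" helper:

```c
#include <linux/numa.h>		/* NUMA_NO_NODE */
#include <linux/slab.h>
#include <linux/topology.h>	/* numa_node_id() */

static void *alloc_demo(struct kmem_cache *cachep)
{
	/* No placement preference: any node may satisfy the request. */
	void *anywhere = kmem_cache_alloc_node(cachep, GFP_KERNEL,
					       NUMA_NO_NODE);
	if (anywhere)
		kmem_cache_free(cachep, anywhere);

	/* Prefer memory local to the executing CPU's node. */
	return kmem_cache_alloc_node(cachep, GFP_KERNEL, numa_node_id());
}
```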
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
| @@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp) | |||
| 498 | 498 | ||
| 499 | #endif | 499 | #endif |
| 500 | 500 | ||
| 501 | #ifdef CONFIG_TRACING | ||
| 502 | size_t slab_buffer_size(struct kmem_cache *cachep) | ||
| 503 | { | ||
| 504 | return cachep->size; | ||
| 505 | } | ||
| 506 | EXPORT_SYMBOL(slab_buffer_size); | ||
| 507 | #endif | ||
| 508 | |||
| 509 | /* | 501 | /* |
| 510 | * Do not go above this order unless 0 objects fit into the slab or | 502 | * Do not go above this order unless 0 objects fit into the slab or |
| 511 | * overridden on the command line. | 503 | * overridden on the command line. |
| @@ -515,13 +507,6 @@ EXPORT_SYMBOL(slab_buffer_size); | |||
| 515 | static int slab_max_order = SLAB_MAX_ORDER_LO; | 507 | static int slab_max_order = SLAB_MAX_ORDER_LO; |
| 516 | static bool slab_max_order_set __initdata; | 508 | static bool slab_max_order_set __initdata; |
| 517 | 509 | ||
| 518 | static inline struct kmem_cache *page_get_cache(struct page *page) | ||
| 519 | { | ||
| 520 | page = compound_head(page); | ||
| 521 | BUG_ON(!PageSlab(page)); | ||
| 522 | return page->slab_cache; | ||
| 523 | } | ||
| 524 | |||
| 525 | static inline struct kmem_cache *virt_to_cache(const void *obj) | 510 | static inline struct kmem_cache *virt_to_cache(const void *obj) |
| 526 | { | 511 | { |
| 527 | struct page *page = virt_to_head_page(obj); | 512 | struct page *page = virt_to_head_page(obj); |
| @@ -818,6 +803,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, | |||
| 818 | printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", | 803 | printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", |
| 819 | function, cachep->name, msg); | 804 | function, cachep->name, msg); |
| 820 | dump_stack(); | 805 | dump_stack(); |
| 806 | add_taint(TAINT_BAD_PAGE); | ||
| 821 | } | 807 | } |
| 822 | 808 | ||
| 823 | /* | 809 | /* |
| @@ -1781,9 +1767,6 @@ void __init kmem_cache_init_late(void) | |||
| 1781 | 1767 | ||
| 1782 | slab_state = UP; | 1768 | slab_state = UP; |
| 1783 | 1769 | ||
| 1784 | /* Annotate slab for lockdep -- annotate the malloc caches */ | ||
| 1785 | init_lock_keys(); | ||
| 1786 | |||
| 1787 | /* 6) resize the head arrays to their final sizes */ | 1770 | /* 6) resize the head arrays to their final sizes */ |
| 1788 | mutex_lock(&slab_mutex); | 1771 | mutex_lock(&slab_mutex); |
| 1789 | list_for_each_entry(cachep, &slab_caches, list) | 1772 | list_for_each_entry(cachep, &slab_caches, list) |
| @@ -1791,6 +1774,9 @@ void __init kmem_cache_init_late(void) | |||
| 1791 | BUG(); | 1774 | BUG(); |
| 1792 | mutex_unlock(&slab_mutex); | 1775 | mutex_unlock(&slab_mutex); |
| 1793 | 1776 | ||
| 1777 | /* Annotate slab for lockdep -- annotate the malloc caches */ | ||
| 1778 | init_lock_keys(); | ||
| 1779 | |||
| 1794 | /* Done! */ | 1780 | /* Done! */ |
| 1795 | slab_state = FULL; | 1781 | slab_state = FULL; |
| 1796 | 1782 | ||
| @@ -2506,8 +2492,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align, | |||
| 2506 | } | 2492 | } |
| 2507 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 2493 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
| 2508 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size | 2494 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size |
| 2509 | && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { | 2495 | && cachep->object_size > cache_line_size() |
| 2510 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); | 2496 | && ALIGN(size, cachep->align) < PAGE_SIZE) { |
| 2497 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); | ||
| 2511 | size = PAGE_SIZE; | 2498 | size = PAGE_SIZE; |
| 2512 | } | 2499 | } |
| 2513 | #endif | 2500 | #endif |
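The fix above aligns with `cachep->align` (the alignment the cache actually uses) rather than the raw `align` argument. A worked example of the arithmetic with assumed numbers:

```c
#include <linux/kernel.h>	/* ALIGN() */

/*
 * Assumed numbers: size = 700, cachep->align = 64, 4K pages.
 * ALIGN(x, a) rounds x up to the next multiple of the power-of-two a,
 * effectively ((x) + (a) - 1) & ~((a) - 1):
 *
 *	ALIGN(700, 64)	= (700 + 63) & ~63	= 704
 *	obj_offset	+= 4096 - 704		= +3392
 *	size		= PAGE_SIZE
 *
 * The object now ends exactly at the page boundary, so an overrun runs
 * straight into the unmapped guard page that DEBUG_PAGEALLOC sets up.
 */
```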
| @@ -3098,7 +3085,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) | |||
| 3098 | } | 3085 | } |
| 3099 | 3086 | ||
| 3100 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | 3087 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, |
| 3101 | void *caller) | 3088 | unsigned long caller) |
| 3102 | { | 3089 | { |
| 3103 | struct page *page; | 3090 | struct page *page; |
| 3104 | unsigned int objnr; | 3091 | unsigned int objnr; |
| @@ -3118,7 +3105,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |||
| 3118 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | 3105 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; |
| 3119 | } | 3106 | } |
| 3120 | if (cachep->flags & SLAB_STORE_USER) | 3107 | if (cachep->flags & SLAB_STORE_USER) |
| 3121 | *dbg_userword(cachep, objp) = caller; | 3108 | *dbg_userword(cachep, objp) = (void *)caller; |
| 3122 | 3109 | ||
| 3123 | objnr = obj_to_index(cachep, slabp, objp); | 3110 | objnr = obj_to_index(cachep, slabp, objp); |
| 3124 | 3111 | ||
| @@ -3131,7 +3118,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |||
| 3131 | if (cachep->flags & SLAB_POISON) { | 3118 | if (cachep->flags & SLAB_POISON) { |
| 3132 | #ifdef CONFIG_DEBUG_PAGEALLOC | 3119 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 3133 | if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { | 3120 | if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { |
| 3134 | store_stackinfo(cachep, objp, (unsigned long)caller); | 3121 | store_stackinfo(cachep, objp, caller); |
| 3135 | kernel_map_pages(virt_to_page(objp), | 3122 | kernel_map_pages(virt_to_page(objp), |
| 3136 | cachep->size / PAGE_SIZE, 0); | 3123 | cachep->size / PAGE_SIZE, 0); |
| 3137 | } else { | 3124 | } else { |
| @@ -3285,7 +3272,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, | |||
| 3285 | 3272 | ||
| 3286 | #if DEBUG | 3273 | #if DEBUG |
| 3287 | static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | 3274 | static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, |
| 3288 | gfp_t flags, void *objp, void *caller) | 3275 | gfp_t flags, void *objp, unsigned long caller) |
| 3289 | { | 3276 | { |
| 3290 | if (!objp) | 3277 | if (!objp) |
| 3291 | return objp; | 3278 | return objp; |
| @@ -3302,7 +3289,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
| 3302 | poison_obj(cachep, objp, POISON_INUSE); | 3289 | poison_obj(cachep, objp, POISON_INUSE); |
| 3303 | } | 3290 | } |
| 3304 | if (cachep->flags & SLAB_STORE_USER) | 3291 | if (cachep->flags & SLAB_STORE_USER) |
| 3305 | *dbg_userword(cachep, objp) = caller; | 3292 | *dbg_userword(cachep, objp) = (void *)caller; |
| 3306 | 3293 | ||
| 3307 | if (cachep->flags & SLAB_RED_ZONE) { | 3294 | if (cachep->flags & SLAB_RED_ZONE) { |
| 3308 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || | 3295 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || |
| @@ -3576,8 +3563,8 @@ done: | |||
| 3576 | * Fallback to other node is possible if __GFP_THISNODE is not set. | 3563 | * Fallback to other node is possible if __GFP_THISNODE is not set. |
| 3577 | */ | 3564 | */ |
| 3578 | static __always_inline void * | 3565 | static __always_inline void * |
| 3579 | __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | 3566 | slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, |
| 3580 | void *caller) | 3567 | unsigned long caller) |
| 3581 | { | 3568 | { |
| 3582 | unsigned long save_flags; | 3569 | unsigned long save_flags; |
| 3583 | void *ptr; | 3570 | void *ptr; |
| @@ -3663,7 +3650,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |||
| 3663 | #endif /* CONFIG_NUMA */ | 3650 | #endif /* CONFIG_NUMA */ |
| 3664 | 3651 | ||
| 3665 | static __always_inline void * | 3652 | static __always_inline void * |
| 3666 | __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) | 3653 | slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) |
| 3667 | { | 3654 | { |
| 3668 | unsigned long save_flags; | 3655 | unsigned long save_flags; |
| 3669 | void *objp; | 3656 | void *objp; |
| @@ -3799,7 +3786,7 @@ free_done: | |||
| 3799 | * be in this state _before_ it is released. Called with disabled ints. | 3786 | * be in this state _before_ it is released. Called with disabled ints. |
| 3800 | */ | 3787 | */ |
| 3801 | static inline void __cache_free(struct kmem_cache *cachep, void *objp, | 3788 | static inline void __cache_free(struct kmem_cache *cachep, void *objp, |
| 3802 | void *caller) | 3789 | unsigned long caller) |
| 3803 | { | 3790 | { |
| 3804 | struct array_cache *ac = cpu_cache_get(cachep); | 3791 | struct array_cache *ac = cpu_cache_get(cachep); |
| 3805 | 3792 | ||
| @@ -3839,7 +3826,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, | |||
| 3839 | */ | 3826 | */ |
| 3840 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 3827 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) |
| 3841 | { | 3828 | { |
| 3842 | void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); | 3829 | void *ret = slab_alloc(cachep, flags, _RET_IP_); |
| 3843 | 3830 | ||
| 3844 | trace_kmem_cache_alloc(_RET_IP_, ret, | 3831 | trace_kmem_cache_alloc(_RET_IP_, ret, |
| 3845 | cachep->object_size, cachep->size, flags); | 3832 | cachep->object_size, cachep->size, flags); |
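The conversion from `void *caller` plus `__builtin_return_address(0)` to `unsigned long caller` plus `_RET_IP_` leans on the existing helper from `<linux/kernel.h>`, which already produces the integer form the tracing hooks expect:

```c
/* From <linux/kernel.h>: the caller's return address as an integer,
 * matching what trace_kmalloc() and friends take directly. */
#define _RET_IP_	(unsigned long)__builtin_return_address(0)
```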
| @@ -3850,14 +3837,14 @@ EXPORT_SYMBOL(kmem_cache_alloc); | |||
| 3850 | 3837 | ||
| 3851 | #ifdef CONFIG_TRACING | 3838 | #ifdef CONFIG_TRACING |
| 3852 | void * | 3839 | void * |
| 3853 | kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags) | 3840 | kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) |
| 3854 | { | 3841 | { |
| 3855 | void *ret; | 3842 | void *ret; |
| 3856 | 3843 | ||
| 3857 | ret = __cache_alloc(cachep, flags, __builtin_return_address(0)); | 3844 | ret = slab_alloc(cachep, flags, _RET_IP_); |
| 3858 | 3845 | ||
| 3859 | trace_kmalloc(_RET_IP_, ret, | 3846 | trace_kmalloc(_RET_IP_, ret, |
| 3860 | size, slab_buffer_size(cachep), flags); | 3847 | size, cachep->size, flags); |
| 3861 | return ret; | 3848 | return ret; |
| 3862 | } | 3849 | } |
| 3863 | EXPORT_SYMBOL(kmem_cache_alloc_trace); | 3850 | EXPORT_SYMBOL(kmem_cache_alloc_trace); |
| @@ -3866,8 +3853,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace); | |||
| 3866 | #ifdef CONFIG_NUMA | 3853 | #ifdef CONFIG_NUMA |
| 3867 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 3854 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) |
| 3868 | { | 3855 | { |
| 3869 | void *ret = __cache_alloc_node(cachep, flags, nodeid, | 3856 | void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); |
| 3870 | __builtin_return_address(0)); | ||
| 3871 | 3857 | ||
| 3872 | trace_kmem_cache_alloc_node(_RET_IP_, ret, | 3858 | trace_kmem_cache_alloc_node(_RET_IP_, ret, |
| 3873 | cachep->object_size, cachep->size, | 3859 | cachep->object_size, cachep->size, |
| @@ -3878,17 +3864,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
| 3878 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 3864 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
| 3879 | 3865 | ||
| 3880 | #ifdef CONFIG_TRACING | 3866 | #ifdef CONFIG_TRACING |
| 3881 | void *kmem_cache_alloc_node_trace(size_t size, | 3867 | void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, |
| 3882 | struct kmem_cache *cachep, | ||
| 3883 | gfp_t flags, | 3868 | gfp_t flags, |
| 3884 | int nodeid) | 3869 | int nodeid, |
| 3870 | size_t size) | ||
| 3885 | { | 3871 | { |
| 3886 | void *ret; | 3872 | void *ret; |
| 3887 | 3873 | ||
| 3888 | ret = __cache_alloc_node(cachep, flags, nodeid, | 3874 | ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); |
| 3889 | __builtin_return_address(0)); | 3875 | |
| 3890 | trace_kmalloc_node(_RET_IP_, ret, | 3876 | trace_kmalloc_node(_RET_IP_, ret, |
| 3891 | size, slab_buffer_size(cachep), | 3877 | size, cachep->size, |
| 3892 | flags, nodeid); | 3878 | flags, nodeid); |
| 3893 | return ret; | 3879 | return ret; |
| 3894 | } | 3880 | } |
| @@ -3896,34 +3882,33 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace); | |||
| 3896 | #endif | 3882 | #endif |
| 3897 | 3883 | ||
| 3898 | static __always_inline void * | 3884 | static __always_inline void * |
| 3899 | __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) | 3885 | __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) |
| 3900 | { | 3886 | { |
| 3901 | struct kmem_cache *cachep; | 3887 | struct kmem_cache *cachep; |
| 3902 | 3888 | ||
| 3903 | cachep = kmem_find_general_cachep(size, flags); | 3889 | cachep = kmem_find_general_cachep(size, flags); |
| 3904 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | 3890 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) |
| 3905 | return cachep; | 3891 | return cachep; |
| 3906 | return kmem_cache_alloc_node_trace(size, cachep, flags, node); | 3892 | return kmem_cache_alloc_node_trace(cachep, flags, node, size); |
| 3907 | } | 3893 | } |
| 3908 | 3894 | ||
| 3909 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) | 3895 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) |
| 3910 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 3896 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
| 3911 | { | 3897 | { |
| 3912 | return __do_kmalloc_node(size, flags, node, | 3898 | return __do_kmalloc_node(size, flags, node, _RET_IP_); |
| 3913 | __builtin_return_address(0)); | ||
| 3914 | } | 3899 | } |
| 3915 | EXPORT_SYMBOL(__kmalloc_node); | 3900 | EXPORT_SYMBOL(__kmalloc_node); |
| 3916 | 3901 | ||
| 3917 | void *__kmalloc_node_track_caller(size_t size, gfp_t flags, | 3902 | void *__kmalloc_node_track_caller(size_t size, gfp_t flags, |
| 3918 | int node, unsigned long caller) | 3903 | int node, unsigned long caller) |
| 3919 | { | 3904 | { |
| 3920 | return __do_kmalloc_node(size, flags, node, (void *)caller); | 3905 | return __do_kmalloc_node(size, flags, node, caller); |
| 3921 | } | 3906 | } |
| 3922 | EXPORT_SYMBOL(__kmalloc_node_track_caller); | 3907 | EXPORT_SYMBOL(__kmalloc_node_track_caller); |
| 3923 | #else | 3908 | #else |
| 3924 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 3909 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
| 3925 | { | 3910 | { |
| 3926 | return __do_kmalloc_node(size, flags, node, NULL); | 3911 | return __do_kmalloc_node(size, flags, node, 0); |
| 3927 | } | 3912 | } |
| 3928 | EXPORT_SYMBOL(__kmalloc_node); | 3913 | EXPORT_SYMBOL(__kmalloc_node); |
| 3929 | #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ | 3914 | #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ |
| @@ -3936,7 +3921,7 @@ EXPORT_SYMBOL(__kmalloc_node); | |||
| 3936 | * @caller: function caller for debug tracking of the caller | 3921 | * @caller: function caller for debug tracking of the caller |
| 3937 | */ | 3922 | */ |
| 3938 | static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | 3923 | static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, |
| 3939 | void *caller) | 3924 | unsigned long caller) |
| 3940 | { | 3925 | { |
| 3941 | struct kmem_cache *cachep; | 3926 | struct kmem_cache *cachep; |
| 3942 | void *ret; | 3927 | void *ret; |
| @@ -3949,9 +3934,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |||
| 3949 | cachep = __find_general_cachep(size, flags); | 3934 | cachep = __find_general_cachep(size, flags); |
| 3950 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | 3935 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) |
| 3951 | return cachep; | 3936 | return cachep; |
| 3952 | ret = __cache_alloc(cachep, flags, caller); | 3937 | ret = slab_alloc(cachep, flags, caller); |
| 3953 | 3938 | ||
| 3954 | trace_kmalloc((unsigned long) caller, ret, | 3939 | trace_kmalloc(caller, ret, |
| 3955 | size, cachep->size, flags); | 3940 | size, cachep->size, flags); |
| 3956 | 3941 | ||
| 3957 | return ret; | 3942 | return ret; |
| @@ -3961,20 +3946,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |||
| 3961 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) | 3946 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) |
| 3962 | void *__kmalloc(size_t size, gfp_t flags) | 3947 | void *__kmalloc(size_t size, gfp_t flags) |
| 3963 | { | 3948 | { |
| 3964 | return __do_kmalloc(size, flags, __builtin_return_address(0)); | 3949 | return __do_kmalloc(size, flags, _RET_IP_); |
| 3965 | } | 3950 | } |
| 3966 | EXPORT_SYMBOL(__kmalloc); | 3951 | EXPORT_SYMBOL(__kmalloc); |
| 3967 | 3952 | ||
| 3968 | void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) | 3953 | void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) |
| 3969 | { | 3954 | { |
| 3970 | return __do_kmalloc(size, flags, (void *)caller); | 3955 | return __do_kmalloc(size, flags, caller); |
| 3971 | } | 3956 | } |
| 3972 | EXPORT_SYMBOL(__kmalloc_track_caller); | 3957 | EXPORT_SYMBOL(__kmalloc_track_caller); |
| 3973 | 3958 | ||
| 3974 | #else | 3959 | #else |
| 3975 | void *__kmalloc(size_t size, gfp_t flags) | 3960 | void *__kmalloc(size_t size, gfp_t flags) |
| 3976 | { | 3961 | { |
| 3977 | return __do_kmalloc(size, flags, NULL); | 3962 | return __do_kmalloc(size, flags, 0); |
| 3978 | } | 3963 | } |
| 3979 | EXPORT_SYMBOL(__kmalloc); | 3964 | EXPORT_SYMBOL(__kmalloc); |
| 3980 | #endif | 3965 | #endif |
| @@ -3995,7 +3980,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |||
| 3995 | debug_check_no_locks_freed(objp, cachep->object_size); | 3980 | debug_check_no_locks_freed(objp, cachep->object_size); |
| 3996 | if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) | 3981 | if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) |
| 3997 | debug_check_no_obj_freed(objp, cachep->object_size); | 3982 | debug_check_no_obj_freed(objp, cachep->object_size); |
| 3998 | __cache_free(cachep, objp, __builtin_return_address(0)); | 3983 | __cache_free(cachep, objp, _RET_IP_); |
| 3999 | local_irq_restore(flags); | 3984 | local_irq_restore(flags); |
| 4000 | 3985 | ||
| 4001 | trace_kmem_cache_free(_RET_IP_, objp); | 3986 | trace_kmem_cache_free(_RET_IP_, objp); |
| @@ -4026,7 +4011,7 @@ void kfree(const void *objp) | |||
| 4026 | debug_check_no_locks_freed(objp, c->object_size); | 4011 | debug_check_no_locks_freed(objp, c->object_size); |
| 4027 | 4012 | ||
| 4028 | debug_check_no_obj_freed(objp, c->object_size); | 4013 | debug_check_no_obj_freed(objp, c->object_size); |
| 4029 | __cache_free(c, (void *)objp, __builtin_return_address(0)); | 4014 | __cache_free(c, (void *)objp, _RET_IP_); |
| 4030 | local_irq_restore(flags); | 4015 | local_irq_restore(flags); |
| 4031 | } | 4016 | } |
| 4032 | EXPORT_SYMBOL(kfree); | 4017 | EXPORT_SYMBOL(kfree); |
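kfree() bails out early on ZERO_OR_NULL_PTR(), so NULL and the kmalloc(0) sentinel ZERO_SIZE_PTR are both accepted, and error paths can free unconditionally. A usage sketch (illustrative names):

```c
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_error_path(size_t a_len, size_t b_len)
{
	char *a = kmalloc(a_len, GFP_KERNEL);
	char *b = kmalloc(b_len, GFP_KERNEL);

	if (!a || !b) {
		kfree(a);	/* safe even when a is NULL */
		kfree(b);
		return -ENOMEM;
	}
	/* ... use a and b ... */
	kfree(a);
	kfree(b);
	return 0;
}
```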
diff --git a/mm/slab_common.c b/mm/slab_common.c index aa3ca5bb01b5..8cf8b4962d6c 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
| @@ -23,49 +23,17 @@ enum slab_state slab_state; | |||
| 23 | LIST_HEAD(slab_caches); | 23 | LIST_HEAD(slab_caches); |
| 24 | DEFINE_MUTEX(slab_mutex); | 24 | DEFINE_MUTEX(slab_mutex); |
| 25 | 25 | ||
| 26 | /* | 26 | #ifdef CONFIG_DEBUG_VM |
| 27 | * kmem_cache_create - Create a cache. | 27 | static int kmem_cache_sanity_check(const char *name, size_t size) |
| 28 | * @name: A string which is used in /proc/slabinfo to identify this cache. | ||
| 29 | * @size: The size of objects to be created in this cache. | ||
| 30 | * @align: The required alignment for the objects. | ||
| 31 | * @flags: SLAB flags | ||
| 32 | * @ctor: A constructor for the objects. | ||
| 33 | * | ||
| 34 | * Returns a ptr to the cache on success, NULL on failure. | ||
| 35 | * Cannot be called within a interrupt, but can be interrupted. | ||
| 36 | * The @ctor is run when new pages are allocated by the cache. | ||
| 37 | * | ||
| 38 | * The flags are | ||
| 39 | * | ||
| 40 | * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) | ||
| 41 | * to catch references to uninitialised memory. | ||
| 42 | * | ||
| 43 | * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check | ||
| 44 | * for buffer overruns. | ||
| 45 | * | ||
| 46 | * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware | ||
| 47 | * cacheline. This can be beneficial if you're counting cycles as closely | ||
| 48 | * as davem. | ||
| 49 | */ | ||
| 50 | |||
| 51 | struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, | ||
| 52 | unsigned long flags, void (*ctor)(void *)) | ||
| 53 | { | 28 | { |
| 54 | struct kmem_cache *s = NULL; | 29 | struct kmem_cache *s = NULL; |
| 55 | 30 | ||
| 56 | #ifdef CONFIG_DEBUG_VM | ||
| 57 | if (!name || in_interrupt() || size < sizeof(void *) || | 31 | if (!name || in_interrupt() || size < sizeof(void *) || |
| 58 | size > KMALLOC_MAX_SIZE) { | 32 | size > KMALLOC_MAX_SIZE) { |
| 59 | printk(KERN_ERR "kmem_cache_create(%s) integrity check" | 33 | pr_err("kmem_cache_create(%s) integrity check failed\n", name); |
| 60 | " failed\n", name); | 34 | return -EINVAL; |
| 61 | goto out; | ||
| 62 | } | 35 | } |
| 63 | #endif | ||
| 64 | 36 | ||
| 65 | get_online_cpus(); | ||
| 66 | mutex_lock(&slab_mutex); | ||
| 67 | |||
| 68 | #ifdef CONFIG_DEBUG_VM | ||
| 69 | list_for_each_entry(s, &slab_caches, list) { | 37 | list_for_each_entry(s, &slab_caches, list) { |
| 70 | char tmp; | 38 | char tmp; |
| 71 | int res; | 39 | int res; |
| @@ -77,36 +45,67 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align | |||
| 77 | */ | 45 | */ |
| 78 | res = probe_kernel_address(s->name, tmp); | 46 | res = probe_kernel_address(s->name, tmp); |
| 79 | if (res) { | 47 | if (res) { |
| 80 | printk(KERN_ERR | 48 | pr_err("Slab cache with size %d has lost its name\n", |
| 81 | "Slab cache with size %d has lost its name\n", | ||
| 82 | s->object_size); | 49 | s->object_size); |
| 83 | continue; | 50 | continue; |
| 84 | } | 51 | } |
| 85 | 52 | ||
| 86 | if (!strcmp(s->name, name)) { | 53 | if (!strcmp(s->name, name)) { |
| 87 | printk(KERN_ERR "kmem_cache_create(%s): Cache name" | 54 | pr_err("%s (%s): Cache name already exists.\n", |
| 88 | " already exists.\n", | 55 | __func__, name); |
| 89 | name); | ||
| 90 | dump_stack(); | 56 | dump_stack(); |
| 91 | s = NULL; | 57 | s = NULL; |
| 92 | goto oops; | 58 | return -EINVAL; |
| 93 | } | 59 | } |
| 94 | } | 60 | } |
| 95 | 61 | ||
| 96 | WARN_ON(strchr(name, ' ')); /* It confuses parsers */ | 62 | WARN_ON(strchr(name, ' ')); /* It confuses parsers */ |
| 63 | return 0; | ||
| 64 | } | ||
| 65 | #else | ||
| 66 | static inline int kmem_cache_sanity_check(const char *name, size_t size) | ||
| 67 | { | ||
| 68 | return 0; | ||
| 69 | } | ||
| 97 | #endif | 70 | #endif |
| 98 | 71 | ||
| 99 | s = __kmem_cache_create(name, size, align, flags, ctor); | 72 | /* |
| 73 | * kmem_cache_create - Create a cache. | ||
| 74 | * @name: A string which is used in /proc/slabinfo to identify this cache. | ||
| 75 | * @size: The size of objects to be created in this cache. | ||
| 76 | * @align: The required alignment for the objects. | ||
| 77 | * @flags: SLAB flags | ||
| 78 | * @ctor: A constructor for the objects. | ||
| 79 | * | ||
| 80 | * Returns a ptr to the cache on success, NULL on failure. | ||
| 81 | * Cannot be called within a interrupt, but can be interrupted. | ||
| 82 | * The @ctor is run when new pages are allocated by the cache. | ||
| 83 | * | ||
| 84 | * The flags are | ||
| 85 | * | ||
| 86 | * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) | ||
| 87 | * to catch references to uninitialised memory. | ||
| 88 | * | ||
| 89 | * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check | ||
| 90 | * for buffer overruns. | ||
| 91 | * | ||
| 92 | * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware | ||
| 93 | * cacheline. This can be beneficial if you're counting cycles as closely | ||
| 94 | * as davem. | ||
| 95 | */ | ||
| 96 | |||
| 97 | struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, | ||
| 98 | unsigned long flags, void (*ctor)(void *)) | ||
| 99 | { | ||
| 100 | struct kmem_cache *s = NULL; | ||
| 100 | 101 | ||
| 101 | #ifdef CONFIG_DEBUG_VM | 102 | get_online_cpus(); |
| 102 | oops: | 103 | mutex_lock(&slab_mutex); |
| 103 | #endif | 104 | if (kmem_cache_sanity_check(name, size) == 0) |
| 105 | s = __kmem_cache_create(name, size, align, flags, ctor); | ||
| 104 | mutex_unlock(&slab_mutex); | 106 | mutex_unlock(&slab_mutex); |
| 105 | put_online_cpus(); | 107 | put_online_cpus(); |
| 106 | 108 | ||
| 107 | #ifdef CONFIG_DEBUG_VM | ||
| 108 | out: | ||
| 109 | #endif | ||
| 110 | if (!s && (flags & SLAB_PANIC)) | 109 | if (!s && (flags & SLAB_PANIC)) |
| 111 | panic("kmem_cache_create: Failed to create slab '%s'\n", name); | 110 | panic("kmem_cache_create: Failed to create slab '%s'\n", name); |
| 112 | 111 | ||
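Illustrative use of the API documented in the relocated comment above; the `foo` names are examples, not part of this patch:

```c
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int value;
};

static struct kmem_cache *foo_cache;

static int __init foo_setup(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void foo_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	if (f) {
		f->value = 42;
		kmem_cache_free(foo_cache, f);
	}
}
```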
diff --git a/mm/slob.c b/mm/slob.c --- a/mm/slob.c +++ b/mm/slob.c | |||
| @@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) | |||
| 194 | void *page; | 194 | void *page; |
| 195 | 195 | ||
| 196 | #ifdef CONFIG_NUMA | 196 | #ifdef CONFIG_NUMA |
| 197 | if (node != -1) | 197 | if (node != NUMA_NO_NODE) |
| 198 | page = alloc_pages_exact_node(node, gfp, order); | 198 | page = alloc_pages_exact_node(node, gfp, order); |
| 199 | else | 199 | else |
| 200 | #endif | 200 | #endif |
| @@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) | |||
| 290 | * If there's a node specification, search for a partial | 290 | * If there's a node specification, search for a partial |
| 291 | * page with a matching node id in the freelist. | 291 | * page with a matching node id in the freelist. |
| 292 | */ | 292 | */ |
| 293 | if (node != -1 && page_to_nid(sp) != node) | 293 | if (node != NUMA_NO_NODE && page_to_nid(sp) != node) |
| 294 | continue; | 294 | continue; |
| 295 | #endif | 295 | #endif |
| 296 | /* Enough room on this page? */ | 296 | /* Enough room on this page? */ |
| @@ -425,7 +425,8 @@ out: | |||
| 425 | * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. | 425 | * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. |
| 426 | */ | 426 | */ |
| 427 | 427 | ||
| 428 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | 428 | static __always_inline void * |
| 429 | __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) | ||
| 429 | { | 430 | { |
| 430 | unsigned int *m; | 431 | unsigned int *m; |
| 431 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | 432 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); |
| @@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
| 446 | *m = size; | 447 | *m = size; |
| 447 | ret = (void *)m + align; | 448 | ret = (void *)m + align; |
| 448 | 449 | ||
| 449 | trace_kmalloc_node(_RET_IP_, ret, | 450 | trace_kmalloc_node(caller, ret, |
| 450 | size, size + align, gfp, node); | 451 | size, size + align, gfp, node); |
| 451 | } else { | 452 | } else { |
| 452 | unsigned int order = get_order(size); | 453 | unsigned int order = get_order(size); |
| @@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
| 460 | page->private = size; | 461 | page->private = size; |
| 461 | } | 462 | } |
| 462 | 463 | ||
| 463 | trace_kmalloc_node(_RET_IP_, ret, | 464 | trace_kmalloc_node(caller, ret, |
| 464 | size, PAGE_SIZE << order, gfp, node); | 465 | size, PAGE_SIZE << order, gfp, node); |
| 465 | } | 466 | } |
| 466 | 467 | ||
| 467 | kmemleak_alloc(ret, size, 1, gfp); | 468 | kmemleak_alloc(ret, size, 1, gfp); |
| 468 | return ret; | 469 | return ret; |
| 469 | } | 470 | } |
| 471 | |||
| 472 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | ||
| 473 | { | ||
| 474 | return __do_kmalloc_node(size, gfp, node, _RET_IP_); | ||
| 475 | } | ||
| 470 | EXPORT_SYMBOL(__kmalloc_node); | 476 | EXPORT_SYMBOL(__kmalloc_node); |
| 471 | 477 | ||
| 478 | #ifdef CONFIG_TRACING | ||
| 479 | void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) | ||
| 480 | { | ||
| 481 | return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); | ||
| 482 | } | ||
| 483 | |||
| 484 | #ifdef CONFIG_NUMA | ||
| 485 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, | ||
| 486 | int node, unsigned long caller) | ||
| 487 | { | ||
| 488 | return __do_kmalloc_node(size, gfp, node, caller); | ||
| 489 | } | ||
| 490 | #endif | ||
| 491 | #endif | ||
| 492 | |||
| 472 | void kfree(const void *block) | 493 | void kfree(const void *block) |
| 473 | { | 494 | { |
| 474 | struct page *sp; | 495 | struct page *sp; |
| @@ -514,7 +535,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size, | |||
| 514 | struct kmem_cache *c; | 535 | struct kmem_cache *c; |
| 515 | 536 | ||
| 516 | c = slob_alloc(sizeof(struct kmem_cache), | 537 | c = slob_alloc(sizeof(struct kmem_cache), |
| 517 | GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1); | 538 | GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE); |
| 518 | 539 | ||
| 519 | if (c) { | 540 | if (c) { |
| 520 | c->name = name; | 541 | c->name = name; |
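A sketch of the small-object layout that `__do_kmalloc_node()` above produces: the requested size is stashed in an `align`-sized header just below the returned pointer, which is how kfree()/ksize() later recover it. The code fragment is condensed from the context lines above and assumes `size`, `gfp`, `align` and `node` are in scope as in that function:

```c
/*
 *   m                          ret = (void *)m + align
 *   |<-------- align -------->|<-------- size -------->|
 *   [ header storing "size"   ][ caller-visible data   ]
 */
void *ret = NULL;
unsigned int *m = slob_alloc(size + align, gfp, align, node);

if (m) {
	*m = size;			/* record the requested size */
	ret = (void *)m + align;	/* user pointer, past the header */
}
```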
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
| @@ -568,6 +568,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...) | |||
| 568 | printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); | 568 | printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); |
| 569 | printk(KERN_ERR "----------------------------------------" | 569 | printk(KERN_ERR "----------------------------------------" |
| 570 | "-------------------------------------\n\n"); | 570 | "-------------------------------------\n\n"); |
| 571 | |||
| 572 | add_taint(TAINT_BAD_PAGE); | ||
| 571 | } | 573 | } |
| 572 | 574 | ||
| 573 | static void slab_fix(struct kmem_cache *s, char *fmt, ...) | 575 | static void slab_fix(struct kmem_cache *s, char *fmt, ...) |
| @@ -1069,13 +1071,13 @@ bad: | |||
| 1069 | return 0; | 1071 | return 0; |
| 1070 | } | 1072 | } |
| 1071 | 1073 | ||
| 1072 | static noinline int free_debug_processing(struct kmem_cache *s, | 1074 | static noinline struct kmem_cache_node *free_debug_processing( |
| 1073 | struct page *page, void *object, unsigned long addr) | 1075 | struct kmem_cache *s, struct page *page, void *object, |
| 1076 | unsigned long addr, unsigned long *flags) | ||
| 1074 | { | 1077 | { |
| 1075 | unsigned long flags; | 1078 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
| 1076 | int rc = 0; | ||
| 1077 | 1079 | ||
| 1078 | local_irq_save(flags); | 1080 | spin_lock_irqsave(&n->list_lock, *flags); |
| 1079 | slab_lock(page); | 1081 | slab_lock(page); |
| 1080 | 1082 | ||
| 1081 | if (!check_slab(s, page)) | 1083 | if (!check_slab(s, page)) |
| @@ -1113,15 +1115,19 @@ static noinline int free_debug_processing(struct kmem_cache *s, | |||
| 1113 | set_track(s, object, TRACK_FREE, addr); | 1115 | set_track(s, object, TRACK_FREE, addr); |
| 1114 | trace(s, page, object, 0); | 1116 | trace(s, page, object, 0); |
| 1115 | init_object(s, object, SLUB_RED_INACTIVE); | 1117 | init_object(s, object, SLUB_RED_INACTIVE); |
| 1116 | rc = 1; | ||
| 1117 | out: | 1118 | out: |
| 1118 | slab_unlock(page); | 1119 | slab_unlock(page); |
| 1119 | local_irq_restore(flags); | 1120 | /* |
| 1120 | return rc; | 1121 | * Keep node_lock to preserve integrity |
| 1122 | * until the object is actually freed | ||
| 1123 | */ | ||
| 1124 | return n; | ||
| 1121 | 1125 | ||
| 1122 | fail: | 1126 | fail: |
| 1127 | slab_unlock(page); | ||
| 1128 | spin_unlock_irqrestore(&n->list_lock, *flags); | ||
| 1123 | slab_fix(s, "Object at 0x%p not freed", object); | 1129 | slab_fix(s, "Object at 0x%p not freed", object); |
| 1124 | goto out; | 1130 | return NULL; |
| 1125 | } | 1131 | } |
| 1126 | 1132 | ||
| 1127 | static int __init setup_slub_debug(char *str) | 1133 | static int __init setup_slub_debug(char *str) |
| @@ -1214,8 +1220,9 @@ static inline void setup_object_debug(struct kmem_cache *s, | |||
| 1214 | static inline int alloc_debug_processing(struct kmem_cache *s, | 1220 | static inline int alloc_debug_processing(struct kmem_cache *s, |
| 1215 | struct page *page, void *object, unsigned long addr) { return 0; } | 1221 | struct page *page, void *object, unsigned long addr) { return 0; } |
| 1216 | 1222 | ||
| 1217 | static inline int free_debug_processing(struct kmem_cache *s, | 1223 | static inline struct kmem_cache_node *free_debug_processing( |
| 1218 | struct page *page, void *object, unsigned long addr) { return 0; } | 1224 | struct kmem_cache *s, struct page *page, void *object, |
| 1225 | unsigned long addr, unsigned long *flags) { return NULL; } | ||
| 1219 | 1226 | ||
| 1220 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) | 1227 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) |
| 1221 | { return 1; } | 1228 | { return 1; } |
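With this change, `free_debug_processing()` returns the `kmem_cache_node` with `n->list_lock` still held on success, and NULL with the lock already dropped on failure. A sketch of the resulting calling convention (as used by the `__slab_free()` hunk further down; assumes the mm/slub.c context):

```c
static void free_path_sketch(struct kmem_cache *s, struct page *page,
			     void *object, unsigned long addr)
{
	struct kmem_cache_node *n;
	unsigned long flags;

	n = free_debug_processing(s, page, object, addr, &flags);
	if (!n)
		return;	/* checks failed; the lock was already released */

	/* ... safely update n's partial/full lists here ... */

	spin_unlock_irqrestore(&n->list_lock, flags);
}
```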
| @@ -1714,7 +1721,7 @@ static inline void note_cmpxchg_failure(const char *n, | |||
| 1714 | stat(s, CMPXCHG_DOUBLE_CPU_FAIL); | 1721 | stat(s, CMPXCHG_DOUBLE_CPU_FAIL); |
| 1715 | } | 1722 | } |
| 1716 | 1723 | ||
| 1717 | void init_kmem_cache_cpus(struct kmem_cache *s) | 1724 | static void init_kmem_cache_cpus(struct kmem_cache *s) |
| 1718 | { | 1725 | { |
| 1719 | int cpu; | 1726 | int cpu; |
| 1720 | 1727 | ||
| @@ -1939,7 +1946,7 @@ static void unfreeze_partials(struct kmem_cache *s) | |||
| 1939 | * If we did not find a slot then simply move all the partials to the | 1946 | * If we did not find a slot then simply move all the partials to the |
| 1940 | * per node partial list. | 1947 | * per node partial list. |
| 1941 | */ | 1948 | */ |
| 1942 | int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | 1949 | static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) |
| 1943 | { | 1950 | { |
| 1944 | struct page *oldpage; | 1951 | struct page *oldpage; |
| 1945 | int pages; | 1952 | int pages; |
| @@ -1962,6 +1969,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | |||
| 1962 | local_irq_save(flags); | 1969 | local_irq_save(flags); |
| 1963 | unfreeze_partials(s); | 1970 | unfreeze_partials(s); |
| 1964 | local_irq_restore(flags); | 1971 | local_irq_restore(flags); |
| 1972 | oldpage = NULL; | ||
| 1965 | pobjects = 0; | 1973 | pobjects = 0; |
| 1966 | pages = 0; | 1974 | pages = 0; |
| 1967 | stat(s, CPU_PARTIAL_DRAIN); | 1975 | stat(s, CPU_PARTIAL_DRAIN); |
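The added `oldpage = NULL` matters because `put_cpu_partial()` publishes the page with a `this_cpu_cmpxchg()` retry loop: once `unfreeze_partials()` has flushed the old chain to the node lists, the new page must start an empty chain rather than link to drained pages. A condensed fragment (the drain condition is simplified to a hypothetical `drain_wanted`; `s` and `page` are assumed in scope):

```c
do {
	oldpage = this_cpu_read(s->cpu_slab->partial);

	if (oldpage && drain_wanted) {	/* hypothetical predicate */
		unfreeze_partials(s);	/* old chain moved to node lists */
		oldpage = NULL;		/* the fix: rebuild from empty */
	}

	page->next = oldpage;		/* chain behind the new head */
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
	 != oldpage);
```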
| @@ -2310,7 +2318,7 @@ new_slab: | |||
| 2310 | * | 2318 | * |
| 2311 | * Otherwise we can simply pick the next object from the lockless free list. | 2319 | * Otherwise we can simply pick the next object from the lockless free list. |
| 2312 | */ | 2320 | */ |
| 2313 | static __always_inline void *slab_alloc(struct kmem_cache *s, | 2321 | static __always_inline void *slab_alloc_node(struct kmem_cache *s, |
| 2314 | gfp_t gfpflags, int node, unsigned long addr) | 2322 | gfp_t gfpflags, int node, unsigned long addr) |
| 2315 | { | 2323 | { |
| 2316 | void **object; | 2324 | void **object; |
| @@ -2380,9 +2388,15 @@ redo: | |||
| 2380 | return object; | 2388 | return object; |
| 2381 | } | 2389 | } |
| 2382 | 2390 | ||
| 2391 | static __always_inline void *slab_alloc(struct kmem_cache *s, | ||
| 2392 | gfp_t gfpflags, unsigned long addr) | ||
| 2393 | { | ||
| 2394 | return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); | ||
| 2395 | } | ||
| 2396 | |||
| 2383 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) | 2397 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) |
| 2384 | { | 2398 | { |
| 2385 | void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); | 2399 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); |
| 2386 | 2400 | ||
| 2387 | trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); | 2401 | trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); |
| 2388 | 2402 | ||
| @@ -2393,7 +2407,7 @@ EXPORT_SYMBOL(kmem_cache_alloc); | |||
| 2393 | #ifdef CONFIG_TRACING | 2407 | #ifdef CONFIG_TRACING |
| 2394 | void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) | 2408 | void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) |
| 2395 | { | 2409 | { |
| 2396 | void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); | 2410 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); |
| 2397 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); | 2411 | trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); |
| 2398 | return ret; | 2412 | return ret; |
| 2399 | } | 2413 | } |
| @@ -2411,7 +2425,7 @@ EXPORT_SYMBOL(kmalloc_order_trace); | |||
| 2411 | #ifdef CONFIG_NUMA | 2425 | #ifdef CONFIG_NUMA |
| 2412 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) | 2426 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) |
| 2413 | { | 2427 | { |
| 2414 | void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); | 2428 | void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); |
| 2415 | 2429 | ||
| 2416 | trace_kmem_cache_alloc_node(_RET_IP_, ret, | 2430 | trace_kmem_cache_alloc_node(_RET_IP_, ret, |
| 2417 | s->object_size, s->size, gfpflags, node); | 2431 | s->object_size, s->size, gfpflags, node); |
| @@ -2425,7 +2439,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
| 2425 | gfp_t gfpflags, | 2439 | gfp_t gfpflags, |
| 2426 | int node, size_t size) | 2440 | int node, size_t size) |
| 2427 | { | 2441 | { |
| 2428 | void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); | 2442 | void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); |
| 2429 | 2443 | ||
| 2430 | trace_kmalloc_node(_RET_IP_, ret, | 2444 | trace_kmalloc_node(_RET_IP_, ret, |
| 2431 | size, s->size, gfpflags, node); | 2445 | size, s->size, gfpflags, node); |
| @@ -2457,7 +2471,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2457 | 2471 | ||
| 2458 | stat(s, FREE_SLOWPATH); | 2472 | stat(s, FREE_SLOWPATH); |
| 2459 | 2473 | ||
| 2460 | if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) | 2474 | if (kmem_cache_debug(s) && |
| 2475 | !(n = free_debug_processing(s, page, x, addr, &flags))) | ||
| 2461 | return; | 2476 | return; |
| 2462 | 2477 | ||
| 2463 | do { | 2478 | do { |
| @@ -3362,7 +3377,7 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
| 3362 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3377 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 3363 | return s; | 3378 | return s; |
| 3364 | 3379 | ||
| 3365 | ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); | 3380 | ret = slab_alloc(s, flags, _RET_IP_); |
| 3366 | 3381 | ||
| 3367 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); | 3382 | trace_kmalloc(_RET_IP_, ret, size, s->size, flags); |
| 3368 | 3383 | ||
| @@ -3405,7 +3420,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 3405 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3420 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 3406 | return s; | 3421 | return s; |
| 3407 | 3422 | ||
| 3408 | ret = slab_alloc(s, flags, node, _RET_IP_); | 3423 | ret = slab_alloc_node(s, flags, node, _RET_IP_); |
| 3409 | 3424 | ||
| 3410 | trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); | 3425 | trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); |
| 3411 | 3426 | ||
| @@ -3482,7 +3497,7 @@ void kfree(const void *x) | |||
| 3482 | if (unlikely(!PageSlab(page))) { | 3497 | if (unlikely(!PageSlab(page))) { |
| 3483 | BUG_ON(!PageCompound(page)); | 3498 | BUG_ON(!PageCompound(page)); |
| 3484 | kmemleak_free(x); | 3499 | kmemleak_free(x); |
| 3485 | put_page(page); | 3500 | __free_pages(page, compound_order(page)); |
| 3486 | return; | 3501 | return; |
| 3487 | } | 3502 | } |
| 3488 | slab_free(page->slab, page, object, _RET_IP_); | 3503 | slab_free(page->slab, page, object, _RET_IP_); |
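Large kmalloc()s in SLUB are compound pages from the page allocator, not slab objects, and the kfree() hunk above now frees them directly with `__free_pages()` instead of `put_page()`. A hedged sketch of the matching alloc/free pair (condensed; the real allocation path goes through kmalloc_large()):

```c
#include <linux/gfp.h>
#include <linux/mm.h>

static void *large_alloc_sketch(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	struct page *page = alloc_pages(flags | __GFP_COMP, order);

	return page ? page_address(page) : NULL;
}

static void large_free_sketch(const void *x)
{
	struct page *page = virt_to_head_page(x);

	BUG_ON(!PageCompound(page));
	__free_pages(page, compound_order(page));
}
```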
| @@ -4033,7 +4048,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | |||
| 4033 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 4048 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 4034 | return s; | 4049 | return s; |
| 4035 | 4050 | ||
| 4036 | ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); | 4051 | ret = slab_alloc(s, gfpflags, caller); |
| 4037 | 4052 | ||
| 4038 | /* Honor the call site pointer we received. */ | 4053 | /* Honor the call site pointer we received. */ |
| 4039 | trace_kmalloc(caller, ret, size, s->size, gfpflags); | 4054 | trace_kmalloc(caller, ret, size, s->size, gfpflags); |
| @@ -4063,7 +4078,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
| 4063 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 4078 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
| 4064 | return s; | 4079 | return s; |
| 4065 | 4080 | ||
| 4066 | ret = slab_alloc(s, gfpflags, node, caller); | 4081 | ret = slab_alloc_node(s, gfpflags, node, caller); |
| 4067 | 4082 | ||
| 4068 | /* Honor the call site pointer we received. */ | 4083 | /* Honor the call site pointer we received. */ |
| 4069 | trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); | 4084 | trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); |
diff --git a/mm/util.c b/mm/util.c --- a/mm/util.c +++ b/mm/util.c | |||
| @@ -105,6 +105,25 @@ void *memdup_user(const void __user *src, size_t len) | |||
| 105 | } | 105 | } |
| 106 | EXPORT_SYMBOL(memdup_user); | 106 | EXPORT_SYMBOL(memdup_user); |
| 107 | 107 | ||
| 108 | static __always_inline void *__do_krealloc(const void *p, size_t new_size, | ||
| 109 | gfp_t flags) | ||
| 110 | { | ||
| 111 | void *ret; | ||
| 112 | size_t ks = 0; | ||
| 113 | |||
| 114 | if (p) | ||
| 115 | ks = ksize(p); | ||
| 116 | |||
| 117 | if (ks >= new_size) | ||
| 118 | return (void *)p; | ||
| 119 | |||
| 120 | ret = kmalloc_track_caller(new_size, flags); | ||
| 121 | if (ret && p) | ||
| 122 | memcpy(ret, p, ks); | ||
| 123 | |||
| 124 | return ret; | ||
| 125 | } | ||
| 126 | |||
| 108 | /** | 127 | /** |
| 109 | * __krealloc - like krealloc() but don't free @p. | 128 | * __krealloc - like krealloc() but don't free @p. |
| 110 | * @p: object to reallocate memory for. | 129 | * @p: object to reallocate memory for. |
| @@ -117,23 +136,11 @@ EXPORT_SYMBOL(memdup_user); | |||
| 117 | */ | 136 | */ |
| 118 | void *__krealloc(const void *p, size_t new_size, gfp_t flags) | 137 | void *__krealloc(const void *p, size_t new_size, gfp_t flags) |
| 119 | { | 138 | { |
| 120 | void *ret; | ||
| 121 | size_t ks = 0; | ||
| 122 | |||
| 123 | if (unlikely(!new_size)) | 139 | if (unlikely(!new_size)) |
| 124 | return ZERO_SIZE_PTR; | 140 | return ZERO_SIZE_PTR; |
| 125 | 141 | ||
| 126 | if (p) | 142 | return __do_krealloc(p, new_size, flags); |
| 127 | ks = ksize(p); | ||
| 128 | 143 | ||
| 129 | if (ks >= new_size) | ||
| 130 | return (void *)p; | ||
| 131 | |||
| 132 | ret = kmalloc_track_caller(new_size, flags); | ||
| 133 | if (ret && p) | ||
| 134 | memcpy(ret, p, ks); | ||
| 135 | |||
| 136 | return ret; | ||
| 137 | } | 144 | } |
| 138 | EXPORT_SYMBOL(__krealloc); | 145 | EXPORT_SYMBOL(__krealloc); |
| 139 | 146 | ||
| @@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) | |||
| 157 | return ZERO_SIZE_PTR; | 164 | return ZERO_SIZE_PTR; |
| 158 | } | 165 | } |
| 159 | 166 | ||
| 160 | ret = __krealloc(p, new_size, flags); | 167 | ret = __do_krealloc(p, new_size, flags); |
| 161 | if (ret && p != ret) | 168 | if (ret && p != ret) |
| 162 | kfree(p); | 169 | kfree(p); |
| 163 | 170 | ||
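`__do_krealloc()` above never frees `p`; `krealloc()` frees it only on success. Callers therefore must not overwrite their only pointer with the return value. A usage sketch (illustrative helper name):

```c
#include <linux/errno.h>
#include <linux/slab.h>

static int grow_buffer(char **bufp, size_t new_len)
{
	char *tmp = krealloc(*bufp, new_len, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is untouched and still valid */

	*bufp = tmp;		/* old buffer already freed by krealloc() */
	return 0;
}
```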
