 include/linux/slab.h | 23 +++++++++
 mm/slab.c            | 55 ++++++++++++-----
 2 files changed, 65 insertions(+), 13 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c4947b8a2c03..66c4640d3656 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -236,7 +236,25 @@ found:
 	}
 	return __kmalloc_node(size, flags, node);
 }
+
+/*
+ * kmalloc_node_track_caller is a special version of kmalloc_node that
+ * records the calling function of the routine calling it for slab leak
+ * tracking instead of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc_node comes from a widely-used
+ * standard allocator where we care about the real place the memory
+ * allocation request comes from.
+ */
+#ifndef CONFIG_DEBUG_SLAB
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node(size, flags, node)
 #else
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node_track_caller(size, flags, node, \
+			__builtin_return_address(0))
+#endif
+#else /* CONFIG_NUMA */
 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
 {
 	return kmem_cache_alloc(cachep, flags);
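[Usage note, added commentary -- not part of the patch. The comment block above describes where the new macro is meant to be used. A minimal sketch, with a hypothetical wrapper my_alloc_on_node() standing in for a widely-used helper such as a buffer duplicator:]

	/* Hypothetical helper, illustration only -- not patch text. */
	static void *my_alloc_on_node(size_t size, gfp_t flags, int node)
	{
		/*
		 * Under CONFIG_DEBUG_SLAB this expands to
		 * __kmalloc_node_track_caller(size, flags, node,
		 * __builtin_return_address(0)). The builtin is evaluated
		 * here, so the recorded "caller" is whoever called
		 * my_alloc_on_node(), not the helper itself.
		 */
		return kmalloc_node_track_caller(size, flags, node);
	}

[Without this, every allocation made through the helper would be charged to the helper's own kmalloc_node() call site, making leak reports useless for finding the real origin.]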
@@ -245,6 +263,9 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return kmalloc(size, flags);
 }
+
+#define kmalloc_node_track_caller(size, flags, node) \
+	kmalloc_track_caller(size, flags)
 #endif
 
 extern int FASTCALL(kmem_cache_reap(int));
@@ -283,6 +304,8 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 #define kzalloc(s, f) __kzalloc(s, f)
 #define kmalloc_track_caller kmalloc
 
+#define kmalloc_node_track_caller kmalloc_node
+
 #endif /* CONFIG_SLOB */
 
 /* System wide caches */
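[For reference, added commentary -- the macro now resolves differently in each configuration. A summary of the effective mappings introduced by the three hunks above:]

	/*
	 * kmalloc_node_track_caller(size, flags, node) resolves to:
	 *
	 *   CONFIG_NUMA + CONFIG_DEBUG_SLAB:
	 *       __kmalloc_node_track_caller(size, flags, node,
	 *                                   __builtin_return_address(0))
	 *   CONFIG_NUMA, no CONFIG_DEBUG_SLAB:
	 *       __kmalloc_node(size, flags, node)
	 *   no CONFIG_NUMA:
	 *       kmalloc_track_caller(size, flags)   (node is ignored)
	 *   CONFIG_SLOB:
	 *       kmalloc_node(size, flags, node)     (no tracking)
	 */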
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1015,7 +1015,7 @@ static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 	return NULL;
 }
 
-static inline void *__cache_alloc_node(struct kmem_cache *cachep,
+static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 		 gfp_t flags, int nodeid)
 {
 	return NULL;
@@ -1023,7 +1023,7 @@ static inline void *__cache_alloc_node(struct kmem_cache *cachep,
 
 #else	/* CONFIG_NUMA */
 
-static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
@@ -3130,10 +3130,10 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 	objp = ____cache_alloc(cachep, flags);
 	/*
 	 * We may just have run out of memory on the local node.
-	 * __cache_alloc_node() knows how to locate memory on other nodes
+	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
 	if (NUMA_BUILD && !objp)
-		objp = __cache_alloc_node(cachep, flags, numa_node_id());
+		objp = ____cache_alloc_node(cachep, flags, numa_node_id());
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 					    caller);
@@ -3160,7 +3160,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
 	if (nid_alloc != nid_here)
-		return __cache_alloc_node(cachep, flags, nid_alloc);
+		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
 }
 
@@ -3183,7 +3183,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 		if (zone_idx(*z) <= ZONE_NORMAL &&
 				cpuset_zone_allowed(*z, flags) &&
 				cache->nodelists[nid])
-			obj = __cache_alloc_node(cache,
+			obj = ____cache_alloc_node(cache,
 				flags | __GFP_THISNODE, nid);
 	}
 	return obj;
@@ -3192,7 +3192,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
 	struct list_head *entry;
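[Naming note, added commentary: the rename to ____cache_alloc_node() mirrors ____cache_alloc() on the local-node fast path and frees the __cache_alloc_node name for the caller-aware wrapper introduced in the next hunk. The resulting NUMA call graph, sketched as a comment:]

	/*
	 * kmem_cache_alloc_node() / __kmalloc_node()        (exported)
	 *     -> __cache_alloc_node(cachep, flags, nodeid, caller)
	 *            -> ____cache_alloc(cachep, flags)       (local node)
	 *            -> ____cache_alloc_node(cachep, flags, nodeid)
	 */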
@@ -3465,7 +3465,9 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+		int nodeid, void *caller)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -3477,17 +3479,23 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			!cachep->nodelists[nodeid])
 		ptr = ____cache_alloc(cachep, flags);
 	else
-		ptr = __cache_alloc_node(cachep, flags, nodeid);
+		ptr = ____cache_alloc_node(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
-			__builtin_return_address(0));
+	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
 	return ptr;
 }
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+			__builtin_return_address(0));
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
 
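[Added commentary: the two hunks above apply to the node-aware path the same caller-threading pattern that __do_kmalloc() already uses on the UMA path: an __always_inline core takes an explicit caller argument, and thin exported wrappers capture __builtin_return_address(0). A standalone sketch of the pattern, compilable with GCC or Clang -- hypothetical names, illustration only:]

	#include <stdio.h>
	#include <stdlib.h>

	/* Inlined core: receives the caller instead of looking it up. */
	static inline void *do_alloc(size_t size, void *caller)
	{
		void *p = malloc(size);
		printf("allocated %zu bytes for caller %p\n", size, caller);
		return p;
	}

	/* Exported entry point: captures its own return address, i.e.
	 * the address of whoever called public_alloc(). */
	void *public_alloc(size_t size)
	{
		return do_alloc(size, __builtin_return_address(0));
	}

[Because the core is always inlined into each wrapper, capturing the return address in the wrapper adds no extra call level and keeps a single copy of the allocation logic.]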
@@ -3496,8 +3504,29 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 		return NULL;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
+
+#ifdef CONFIG_DEBUG_SLAB
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __do_kmalloc_node(size, flags, node,
+			__builtin_return_address(0));
+}
 EXPORT_SYMBOL(__kmalloc_node);
-#endif
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
+		int node, void *caller)
+{
+	return __do_kmalloc_node(size, flags, node, caller);
+}
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
+#else
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __do_kmalloc_node(size, flags, node, NULL);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_NUMA */
 
 /**
  * __do_kmalloc - allocate memory
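[Added commentary: the final hunk splits __kmalloc_node() on CONFIG_DEBUG_SLAB. With debugging on, both __kmalloc_node() and the new __kmalloc_node_track_caller() are real exported functions feeding __do_kmalloc_node(); with debugging off, only __kmalloc_node() is built and it passes NULL, since nothing consumes the caller pointer. The shape of the split, reduced to a sketch with hypothetical names -- not patch text:]

	#ifdef DEBUG
	void *xalloc(size_t size)
	{
		return do_alloc(size, __builtin_return_address(0));
	}
	void *xalloc_track_caller(size_t size, void *caller)
	{
		return do_alloc(size, caller);	/* caller supplied by macro */
	}
	#else
	void *xalloc(size_t size)
	{
		return do_alloc(size, NULL);	/* no consumer of caller */
	}
	#endif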