 mm/slob.c | 7 +++++++
 1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -67,6 +67,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
 #include <asm/atomic.h>
 
 /*
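The new include pulls in the kmemleak hook declarations used throughout this patch. For reference, <linux/kmemleak.h> declares them along these lines (when CONFIG_DEBUG_KMEMLEAK is disabled they compile down to empty inline stubs, so the hooks below cost nothing in production builds):

	void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			    gfp_t gfp);
	void kmemleak_free(const void *ptr);
	void kmemleak_alloc_recursive(const void *ptr, size_t size,
				      int min_count, unsigned long flags,
				      gfp_t gfp);
	void kmemleak_free_recursive(const void *ptr, unsigned long flags);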
@@ -509,6 +510,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
+	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
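kmemleak_alloc() registers the returned block with the leak tracker; min_count = 1 means the object is reported as a leak if a scan finds fewer than one reference to it. With this hook in place, a dropped kmalloc() pointer under SLOB becomes visible to the scanner. A deliberately leaky sketch (module and function names are hypothetical, for illustration only):

	#include <linux/module.h>
	#include <linux/slab.h>

	static int __init leaky_init(void)
	{
		/* The only reference lives in a local that dies when init
		 * returns; 'echo scan > /sys/kernel/debug/kmemleak' should
		 * then report this allocation as unreferenced. */
		void *p = kmalloc(64, GFP_KERNEL);

		return p ? 0 : -ENOMEM;
	}
	module_init(leaky_init);
	MODULE_LICENSE("GPL");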
@@ -521,6 +523,7 @@ void kfree(const void *block)
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
+	kmemleak_free(block);
 
 	sp = slob_page(block);
 	if (is_slob_page(sp)) {
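Note the placement: the hook sits after the ZERO_OR_NULL_PTR() bail-out, so NULL and zero-size pointers are never handed to kmemleak_free(), and before the block is actually released, so the object leaves the tracker before its memory can be reused. ZERO_OR_NULL_PTR() catches both cases at once because ZERO_SIZE_PTR is a small non-NULL constant; <linux/slab.h> defines it roughly as:

	#define ZERO_SIZE_PTR ((void *)16)
	#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				     (unsigned long)ZERO_SIZE_PTR)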
@@ -584,12 +587,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	} else if (flags & SLAB_PANIC)
 		panic("Cannot create slab cache %s\n", name);
 
+	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
 	return c;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *c)
 {
+	kmemleak_free(c);
 	slob_free(c, sizeof(struct kmem_cache));
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
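SLOB allocates struct kmem_cache itself via slob_alloc() rather than kmalloc(), so without the explicit kmemleak_alloc() here the cache descriptor would be invisible to the scanner, and any object referenced only from it would show up as a false positive. The kmemleak_free() in kmem_cache_destroy() keeps the pair balanced. The hooks are transparent to callers of the usual cache API, e.g. (struct and cache names hypothetical):

	struct kmem_cache *cache;

	cache = kmem_cache_create("my_objs", sizeof(struct my_obj),
				  0, 0, NULL);
	/* ... allocate and free objects from the cache ... */
	kmem_cache_destroy(cache);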
@@ -613,6 +618,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	if (c->ctor)
 		c->ctor(b);
 
+	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
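The _recursive variant exists because kmemleak allocates its own metadata from slab caches; those caches are created with SLAB_NOLEAKTRACE, and the wrapper skips them so the tracker does not recurse into itself. <linux/kmemleak.h> implements it roughly as:

	static inline void kmemleak_alloc_recursive(const void *ptr,
						    size_t size, int min_count,
						    unsigned long flags,
						    gfp_t gfp)
	{
		/* Skip caches flagged SLAB_NOLEAKTRACE (used for kmemleak's
		 * own metadata) to avoid infinite recursion. */
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_alloc(ptr, size, min_count, gfp);
	}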
@@ -635,6 +641,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
+	kmemleak_free_recursive(b, c->flags);
 	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
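The matching free-side hook runs before the SLAB_DESTROY_BY_RCU check, so the object is dropped from tracking at logical-free time even when the memory itself is only reclaimed after an RCU grace period. The wrapper mirrors the allocation side:

	static inline void kmemleak_free_recursive(const void *ptr,
						   unsigned long flags)
	{
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_free(ptr);
	}

Together these six call sites bring SLOB in line with the kmemleak hooks already present in the SLAB and SLUB allocators.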
