Diffstat (limited to 'mm/slob.c')
 mm/slob.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
@@ -46,7 +46,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
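(Annotation, not part of the patch: the rename matters because alloc_pages_node() quietly substitutes the current node when passed a negative node id, a check SLOB has already made by the time it calls the allocator, while alloc_pages_exact_node() assumes a valid node id and skips the fallback. The sketch below paraphrases the two helpers as they looked around this kernel release; the sketch_ prefix marks them as illustrations, not the real definitions in include/linux/gfp.h.)

static inline struct page *sketch_alloc_pages_node(int nid, gfp_t gfp_mask,
                                                   unsigned int order)
{
        /* Unknown (negative) node means the current node */
        if (nid < 0)
                nid = numa_node_id();

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *sketch_alloc_pages_exact_node(int nid, gfp_t gfp_mask,
                                                          unsigned int order)
{
        /* Caller guarantees a valid node id; no silent fallback */
        VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}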
@@ -66,7 +66,8 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
 #include <asm/atomic.h>
 
 /*
@@ -132,17 +133,17 @@ static LIST_HEAD(free_slob_large);
  */
 static inline int is_slob_page(struct slob_page *sp)
 {
-	return PageSlobPage((struct page *)sp);
+	return PageSlab((struct page *)sp);
 }
 
 static inline void set_slob_page(struct slob_page *sp)
 {
-	__SetPageSlobPage((struct page *)sp);
+	__SetPageSlab((struct page *)sp);
 }
 
 static inline void clear_slob_page(struct slob_page *sp)
 {
-	__ClearPageSlobPage((struct page *)sp);
+	__ClearPageSlab((struct page *)sp);
 }
 
 static inline struct slob_page *slob_page(const void *addr)
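(Annotation: this hunk retires SLOB's private PG_slob_page flag in favour of the generic PG_slab bit, freeing up a page flag and letting the rest of the kernel recognise SLOB pages as slab pages. Roughly, the helpers behave as sketched below; the real ones are macro-generated in include/linux/page-flags.h, and the sketch_ names are illustrative only.)

static inline int sketch_PageSlab(struct page *page)
{
        return test_bit(PG_slab, &page->flags);
}

static inline void sketch_SetPageSlab(struct page *page)
{
        /* The double-underscore variants use non-atomic bit ops;
         * the caller must have exclusive access to the page. */
        __set_bit(PG_slab, &page->flags);
}

static inline void sketch_ClearPageSlab(struct page *page)
{
        __clear_bit(PG_slab, &page->flags);
}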
@@ -243,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 #ifdef CONFIG_NUMA
 	if (node != -1)
-		page = alloc_pages_node(node, gfp, order);
+		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
 	page = alloc_pages(gfp, order);
@@ -509,6 +510,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
+	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
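(Annotation: kmemleak_alloc(ptr, size, min_count, gfp) registers an object with the kernel's leak detector, which then scans memory for references to it; objects with fewer than min_count references are reported as leaks, and kmemleak_free() unregisters an object before its memory is recycled. A minimal usage sketch, assuming a hypothetical backend allocator low_level_alloc()/low_level_free():)

#include <linux/kmemleak.h>

static void *my_alloc(size_t size, gfp_t gfp)
{
        void *p = low_level_alloc(size, gfp);   /* hypothetical backend */

        if (p)
                /* min_count = 1: report a leak once no references remain */
                kmemleak_alloc(p, size, 1, gfp);
        return p;
}

static void my_free(void *p)
{
        kmemleak_free(p);       /* unregister before the memory is reused */
        low_level_free(p);      /* hypothetical backend */
}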
@@ -521,6 +523,7 @@ void kfree(const void *block)
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
+	kmemleak_free(block);
 
 	sp = slob_page(block);
 	if (is_slob_page(sp)) {
@@ -584,12 +587,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	} else if (flags & SLAB_PANIC)
 		panic("Cannot create slab cache %s\n", name);
 
+	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
 	return c;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *c)
 {
+	kmemleak_free(c);
 	slob_free(c, sizeof(struct kmem_cache));
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -613,6 +618,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	if (c->ctor)
 		c->ctor(b);
 
+	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
@@ -635,6 +641,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
+	kmemleak_free_recursive(b, c->flags);
 	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
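(Annotation: the _recursive variants used in the cache paths are thin wrappers that consult the cache's flags, so allocations from caches marked SLAB_NOLEAKTRACE, notably kmemleak's own internal objects, do not re-enter the leak detector. A sketch paraphrasing include/linux/kmemleak.h from this era; the sketch_ names are illustrative only.)

static inline void sketch_kmemleak_alloc_recursive(const void *ptr, size_t size,
                                                   int min_count,
                                                   unsigned long flags, gfp_t gfp)
{
        /* Skip caches that must not be tracked, to avoid recursion */
        if (!(flags & SLAB_NOLEAKTRACE))
                kmemleak_alloc(ptr, size, min_count, gfp);
}

static inline void sketch_kmemleak_free_recursive(const void *ptr,
                                                  unsigned long flags)
{
        if (!(flags & SLAB_NOLEAKTRACE))
                kmemleak_free(ptr);
}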