Diffstat (limited to 'mm/slob.c'):

 mm/slob.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)
@@ -46,7 +46,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
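
This comment change tracks the code change in slob_new_pages() further down. For background, the two page-allocator helpers differed at the time roughly as follows (a paraphrase of include/linux/gfp.h, not part of this patch; exact bodies may differ): alloc_pages_node() treats a negative node id as "use the current node", while alloc_pages_exact_node() assumes the caller has already validated the node id and skips that fallback. SLOB only takes this path when node != -1, so the extra check was redundant.

	/* Paraphrased helper definitions, for illustration only. */
	static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						    unsigned int order)
	{
		if (nid < 0)		/* unknown node: fall back to local node */
			nid = numa_node_id();
		return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
	}

	static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
							  unsigned int order)
	{
		/* Caller guarantees a valid node id; no fallback needed. */
		VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
		return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
	}
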
@@ -60,12 +60,14 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
 #include <asm/atomic.h>
 
 /*
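
Three include changes, each tied to a later hunk: <linux/swap.h> supplies struct reclaim_state, used in slob_free_pages() below; the kmemtrace header is now found under include/linux/ rather than include/trace/; and <linux/kmemleak.h> declares the kmemleak_* hooks added throughout the rest of this patch.
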
@@ -242,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 #ifdef CONFIG_NUMA
 	if (node != -1)
-		page = alloc_pages_node(node, gfp, order);
+		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
 		page = alloc_pages(gfp, order);
@@ -255,6 +257,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 static void slob_free_pages(void *b, int order)
 {
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += 1 << order;
 	free_pages((unsigned long)b, order);
 }
 
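
This is the substantive reclaim change: when pages are freed while a reclaim is in flight, they are credited to current->reclaim_state so the VM can count them as reclaim progress. A sketch of the consuming side, paraphrased from the mm/vmscan.c of the same era (details may differ):

	struct reclaim_state reclaim_state = { .reclaimed_slab = 0 };

	current->reclaim_state = &reclaim_state;	/* kswapd / direct reclaim */
	/* ... shrinkers run; slob_free_pages() bumps reclaimed_slab ... */
	sc->nr_reclaimed += reclaim_state.reclaimed_slab;
	current->reclaim_state = NULL;

The slob_free() hunk below reroutes its direct free_page() call through this helper so that path is credited as well.
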
@@ -407,7 +411,7 @@ static void slob_free(void *block, int size)
 		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
-		free_page((unsigned long)b);
+		slob_free_pages(b, 0);
 		return;
 	}
 
@@ -506,6 +510,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
+	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
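
kmemleak_alloc(ptr, size, min_count, gfp) registers a freshly allocated object with the kmemleak scanner; min_count = 1 means the object is reported as a possible leak if scanning finds no remaining reference to it. The hook sits at the common exit of __kmalloc_node(), so both the slob_alloc()-backed small-object path and the page-backed large-object path are tracked. The general pattern for any custom allocator looks like this (my_pool_alloc/my_pool_free are hypothetical names, shown only to illustrate the pairing):

	obj = my_pool_alloc(pool, size);		/* hypothetical allocator */
	kmemleak_alloc(obj, size, 1, GFP_KERNEL);	/* start tracking */
	/* ... object in use ... */
	kmemleak_free(obj);				/* stop tracking first */
	my_pool_free(pool, obj);

The matching kmemleak_free() for this path is the one added to kfree() in the next hunk.
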
@@ -518,6 +523,7 @@ void kfree(const void *block)
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
+	kmemleak_free(block);
 
 	sp = slob_page(block);
 	if (is_slob_page(sp)) {
@@ -581,12 +587,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	} else if (flags & SLAB_PANIC)
 		panic("Cannot create slab cache %s\n", name);
 
+	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
 	return c;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *c)
 {
+	kmemleak_free(c);
 	slob_free(c, sizeof(struct kmem_cache));
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
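
The struct kmem_cache itself is carved out of a SLOB page via slob_alloc(), which kmemleak does not see, so the cache object is registered and unregistered by hand around its lifetime; the size and GFP_KERNEL arguments mirror the slob_alloc() call that created c.
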
@@ -610,6 +618,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	if (c->ctor)
 		c->ctor(b);
 
+	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
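
kmemleak_alloc_recursive() is the variant meant for use inside slab allocators: it consults the cache's flags so that caches marked SLAB_NOLEAKTRACE (those used by kmemleak internally) do not feed back into the tracker. Roughly, paraphrased from include/linux/kmemleak.h:

	static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
						    int min_count,
						    unsigned long flags, gfp_t gfp)
	{
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_alloc(ptr, size, min_count, gfp);
	}
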
@@ -632,6 +641,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
+	kmemleak_free_recursive(b, c->flags);
 	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
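
kmemleak_free_recursive() is the symmetric unregister hook, again gated on SLAB_NOLEAKTRACE. It runs before the SLAB_DESTROY_BY_RCU branch, so the object stops being tracked as soon as it is logically freed, even when the actual release is deferred past an RCU grace period.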