author     Linus Torvalds <torvalds@linux-foundation.org>  2012-01-14 21:11:11 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-14 21:11:11 -0500
commit     892d208bcf79e4e1058707786a7b6d486697cd78 (patch)
tree       15446e4d19f7f98078344ab0bc37ae6af4898c6d /mm/percpu.c
parent     dca88ad6915b65f6e037f8c3e632fcd92a70bd88 (diff)
parent     029aeff5db879afd7760f11214b6fea45f76b58e (diff)
Merge tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux
Kmemleak patches

Main features:
- Handle percpu memory allocations (only scanning them, not actually reporting).
- Memory hotplug support.

Usability improvements:
- Show the origin of early allocations.
- Report previously found leaks even if kmemleak has been disabled by some error.

* tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux:
  kmemleak: Add support for memory hotplug
  kmemleak: Handle percpu memory allocation
  kmemleak: Report previously found leaks even after an error
  kmemleak: When the early log buffer is exceeded, report the actual number
  kmemleak: Show where early_log issues come from
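As context for the percpu change below, here is a minimal caller-side sketch (not part of this merge; the my_stats names are hypothetical). Dynamic percpu objects obtained through alloc_percpu()/free_percpu() are what pcpu_alloc() and free_percpu() now report to kmemleak, so their per-CPU areas get scanned for references, although leaks in them are not yet reported:

#include <linux/percpu.h>
#include <linux/errno.h>

/* Hypothetical example object; any dynamic percpu allocation behaves the same. */
struct my_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

static struct my_stats __percpu *stats;

static int my_stats_init(void)
{
	/* ends up in pcpu_alloc(), which now calls kmemleak_alloc_percpu() */
	stats = alloc_percpu(struct my_stats);
	if (!stats)
		return -ENOMEM;
	return 0;
}

static void my_stats_exit(void)
{
	/* free_percpu() now calls kmemleak_free_percpu() before releasing the area */
	free_percpu(stats);
}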
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c  12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 716eb4acf2fc..f47af9123af7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	const char *err;
 	int slot, off, new_alloc;
 	unsigned long flags;
+	void __percpu *ptr;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ area_found:
 	mutex_unlock(&pcpu_alloc_mutex);
 
 	/* return address relative to base address */
-	return __addr_to_pcpu_ptr(chunk->base_addr + off);
+	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+	kmemleak_alloc_percpu(ptr, size);
+	return ptr;
 
 fail_unlock:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
 	if (!ptr)
 		return;
 
+	kmemleak_free_percpu(ptr);
+
 	addr = __pcpu_ptr_to_addr(ptr);
 
 	spin_lock_irqsave(&pcpu_lock, flags);
@@ -1639,6 +1645,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 			rc = -ENOMEM;
 			goto out_free_areas;
 		}
+		/* kmemleak tracks the percpu allocations separately */
+		kmemleak_free(ptr);
 		areas[group] = ptr;
 
 		base = min(ptr, base);
@@ -1753,6 +1761,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
1753 "for cpu%u\n", psize_str, cpu); 1761 "for cpu%u\n", psize_str, cpu);
1754 goto enomem; 1762 goto enomem;
1755 } 1763 }
1764 /* kmemleak tracks the percpu allocations separately */
1765 kmemleak_free(ptr);
1756 pages[j++] = virt_to_page(ptr); 1766 pages[j++] = virt_to_page(ptr);
1757 } 1767 }
1758 1768
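The two kmemleak_free() calls added to the first-chunk setup above follow a general pattern: when a block obtained from one tracked allocator is handed over to the percpu allocator, the kmemleak object covering the backing block is dropped so the memory is not accounted twice; only the per-object registrations made later via kmemleak_alloc_percpu() in pcpu_alloc() remain. A rough sketch under that assumption (my_backing_alloc() and my_first_chunk_area() are hypothetical stand-ins, not the actual first-chunk callbacks):

#include <linux/kmemleak.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical stand-in for a first-chunk allocation callback (alloc_fn). */
static void *my_backing_alloc(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}

static int my_first_chunk_area(size_t group_size, void **areap)
{
	void *ptr = my_backing_alloc(group_size);

	if (!ptr)
		return -ENOMEM;
	/*
	 * The percpu allocator will carve objects out of this area and
	 * register each of them with kmemleak_alloc_percpu(), so drop the
	 * kmemleak object created for the backing allocation itself to
	 * avoid tracking the same memory twice.
	 */
	kmemleak_free(ptr);
	*areap = ptr;
	return 0;
}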