author     Catalin Marinas <catalin.marinas@arm.com>  2011-09-26 12:12:53 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>  2011-12-02 11:12:42 -0500
commit     f528f0b8e53d73b18be71e96693cfab9322f33c7
tree       5a25d6ac1c2f18561e554074aa7c06bf3d899183
parent     74341703edca6bc68a165a18453071b097828407
kmemleak: Handle percpu memory allocation
This patch adds kmemleak callbacks from the percpu allocator, reducing a
number of false positives caused by kmemleak not scanning such memory
blocks. The percpu chunks are never reported as leaks because of current
kmemleak limitations with the __percpu pointer not pointing directly to
the actual chunks.
Reported-by: Huajun Li <huajun.li.lee@gmail.com>
Acked-by: Christoph Lameter <cl@gentwo.org>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
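
To make the false positive concrete: the snippet below is a minimal, hypothetical sketch (struct and variable names invented for illustration, not taken from this patch) of an object whose only reference lives in dynamically allocated percpu memory. Before this patch, kmemleak never scanned the percpu area holding the pointer, so the object was wrongly reported as a leak.

	#include <linux/percpu.h>
	#include <linux/slab.h>

	struct foo {
		int counter;
	};

	/* Dynamically allocated percpu array of pointers (hypothetical). */
	static struct foo * __percpu *foo_ptrs;

	static int foo_init(void)
	{
		unsigned int cpu;

		foo_ptrs = alloc_percpu(struct foo *);
		if (!foo_ptrs)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct foo *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

			if (!obj)
				return -ENOMEM;	/* error unwinding elided */
			/*
			 * The only reference to obj lives in percpu memory.
			 * Without this patch kmemleak never scans the percpu
			 * area, so obj is falsely reported as a leak; with the
			 * new callbacks the per-CPU areas are scanned and the
			 * reference is found.
			 */
			*per_cpu_ptr(foo_ptrs, cpu) = obj;
		}
		return 0;
	}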
Diffstat (limited to 'mm')
 mm/kmemleak.c | 72
 mm/percpu.c   | 12
 2 files changed, 83 insertions(+), 1 deletion(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index b4f4e6021c1b..15c50302ff93 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -230,8 +230,10 @@ static int kmemleak_skip_disable;
 /* kmemleak operation type for early logging */
 enum {
 	KMEMLEAK_ALLOC,
+	KMEMLEAK_ALLOC_PERCPU,
 	KMEMLEAK_FREE,
 	KMEMLEAK_FREE_PART,
+	KMEMLEAK_FREE_PERCPU,
 	KMEMLEAK_NOT_LEAK,
 	KMEMLEAK_IGNORE,
 	KMEMLEAK_SCAN_AREA,
@@ -852,6 +854,20 @@ out:
 	rcu_read_unlock();
 }
 
+/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc_percpu(struct early_log *log)
+{
+	unsigned int cpu;
+	const void __percpu *ptr = log->ptr;
+
+	for_each_possible_cpu(cpu) {
+		log->ptr = per_cpu_ptr(ptr, cpu);
+		early_alloc(log);
+	}
+}
+
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr:	pointer to beginning of the object
@@ -879,6 +895,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
 /**
+ * kmemleak_alloc_percpu - register a newly allocated __percpu object
+ * @ptr:	__percpu pointer to beginning of the object
+ * @size:	size of the object
+ *
+ * This function is called from the kernel percpu allocator when a new object
+ * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+ * allocation.
+ */
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+	unsigned int cpu;
+
+	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+
+	/*
+	 * Percpu allocations are only scanned and not reported as leaks
+	 * (min_count is set to 0).
+	 */
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		for_each_possible_cpu(cpu)
+			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+				      size, 0, GFP_KERNEL);
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+
+/**
  * kmemleak_free - unregister a previously registered object
  * @ptr:	pointer to beginning of the object
  *
@@ -917,6 +961,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
 /**
+ * kmemleak_free_percpu - unregister a previously registered __percpu object
+ * @ptr:	__percpu pointer to beginning of the object
+ *
+ * This function is called from the kernel percpu allocator when an object
+ * (memory block) is freed (free_percpu).
+ */
+void __ref kmemleak_free_percpu(const void __percpu *ptr)
+{
+	unsigned int cpu;
+
+	pr_debug("%s(0x%p)\n", __func__, ptr);
+
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		for_each_possible_cpu(cpu)
+			delete_object_full((unsigned long)per_cpu_ptr(ptr,
+								      cpu));
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
+
+/**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr:	pointer to beginning of the object
  *
@@ -1727,12 +1793,18 @@ void __init kmemleak_init(void)
 		case KMEMLEAK_ALLOC:
 			early_alloc(log);
 			break;
+		case KMEMLEAK_ALLOC_PERCPU:
+			early_alloc_percpu(log);
+			break;
 		case KMEMLEAK_FREE:
 			kmemleak_free(log->ptr);
 			break;
 		case KMEMLEAK_FREE_PART:
 			kmemleak_free_part(log->ptr, log->size);
 			break;
+		case KMEMLEAK_FREE_PERCPU:
+			kmemleak_free_percpu(log->ptr);
+			break;
 		case KMEMLEAK_NOT_LEAK:
 			kmemleak_not_leak(log->ptr);
 			break;
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a72006..86c5bdbdc370 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	const char *err;
 	int slot, off, new_alloc;
 	unsigned long flags;
+	void __percpu *ptr;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ area_found:
 	mutex_unlock(&pcpu_alloc_mutex);
 
 	/* return address relative to base address */
-	return __addr_to_pcpu_ptr(chunk->base_addr + off);
+	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+	kmemleak_alloc_percpu(ptr, size);
+	return ptr;
 
 fail_unlock:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
 	if (!ptr)
 		return;
 
+	kmemleak_free_percpu(ptr);
+
 	addr = __pcpu_ptr_to_addr(ptr);
 
 	spin_lock_irqsave(&pcpu_lock, flags);
@@ -1637,6 +1643,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 			rc = -ENOMEM;
 			goto out_free_areas;
 		}
+		/* kmemleak tracks the percpu allocations separately */
+		kmemleak_free(ptr);
 		areas[group] = ptr;
 
 		base = min(ptr, base);
@@ -1751,6 +1759,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 			       "for cpu%u\n", psize_str, cpu);
 				goto enomem;
 			}
+			/* kmemleak tracks the percpu allocations separately */
+			kmemleak_free(ptr);
 			pages[j++] = virt_to_page(ptr);
 		}
 
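
Callers need no changes to benefit: the hooks sit inside pcpu_alloc() and free_percpu(), so every alloc_percpu() user is covered automatically, roughly as in this hypothetical sketch:

	#include <linux/percpu.h>

	/* Hypothetical example names. */
	struct foo_stats {
		u64 rx_packets;
		u64 tx_packets;
	};

	static struct foo_stats __percpu *stats;

	static int stats_init(void)
	{
		/* pcpu_alloc() now ends with kmemleak_alloc_percpu(ptr, size). */
		stats = alloc_percpu(struct foo_stats);
		return stats ? 0 : -ENOMEM;
	}

	static void stats_exit(void)
	{
		/* free_percpu() now calls kmemleak_free_percpu(ptr) up front. */
		free_percpu(stats);
	}

The kmemleak_free(ptr) calls added in the first-chunk paths appear to serve the same model: those backing blocks are already registered with kmemleak by the underlying early allocator, and the per-CPU objects later created inside the chunk by kmemleak_alloc_percpu() would overlap them, so the whole-block registration is dropped, as the "tracked separately" comments indicate.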