 Documentation/kmemleak.txt |  3
 include/linux/kmemleak.h   |  8
 mm/kmemleak.c              | 72
 mm/percpu.c                | 12
 4 files changed, 94 insertions(+), 1 deletion(-)
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 51063e681ca4..b6e39739a36d 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -127,7 +127,10 @@ See the include/linux/kmemleak.h header for the functions prototype.
 
 kmemleak_init - initialize kmemleak
 kmemleak_alloc - notify of a memory block allocation
+kmemleak_alloc_percpu - notify of a percpu memory block allocation
 kmemleak_free - notify of a memory block freeing
+kmemleak_free_part - notify of a partial memory block freeing
+kmemleak_free_percpu - notify of a percpu memory block freeing
 kmemleak_not_leak - mark an object as not a leak
 kmemleak_ignore - do not scan or report an object as leak
 kmemleak_scan_area - add scan areas inside a memory block
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 99d9a6766f7e..2a5e5548a1d2 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -26,8 +26,10 @@
 extern void kmemleak_init(void) __ref;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
                            gfp_t gfp) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
 extern void kmemleak_free(const void *ptr) __ref;
 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
                              size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
@@ -68,6 +70,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                             gfp_t gfp)
 {
 }
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+}
 static inline void kmemleak_free(const void *ptr)
 {
 }
@@ -77,6 +82,9 @@ static inline void kmemleak_free_part(const void *ptr, size_t size)
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
 {
 }
+static inline void kmemleak_free_percpu(const void __percpu *ptr)
+{
+}
 static inline void kmemleak_not_leak(const void *ptr)
 {
 }
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index b4f4e6021c1b..15c50302ff93 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -230,8 +230,10 @@ static int kmemleak_skip_disable;
 /* kmemleak operation type for early logging */
 enum {
         KMEMLEAK_ALLOC,
+        KMEMLEAK_ALLOC_PERCPU,
         KMEMLEAK_FREE,
         KMEMLEAK_FREE_PART,
+        KMEMLEAK_FREE_PERCPU,
         KMEMLEAK_NOT_LEAK,
         KMEMLEAK_IGNORE,
         KMEMLEAK_SCAN_AREA,
@@ -852,6 +854,20 @@ out:
         rcu_read_unlock();
 }
 
+/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc_percpu(struct early_log *log)
+{
+        unsigned int cpu;
+        const void __percpu *ptr = log->ptr;
+
+        for_each_possible_cpu(cpu) {
+                log->ptr = per_cpu_ptr(ptr, cpu);
+                early_alloc(log);
+        }
+}
+
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr: pointer to beginning of the object
@@ -879,6 +895,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
 /**
+ * kmemleak_alloc_percpu - register a newly allocated __percpu object
+ * @ptr: __percpu pointer to beginning of the object
+ * @size: size of the object
+ *
+ * This function is called from the kernel percpu allocator when a new object
+ * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+ * allocation.
+ */
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+        unsigned int cpu;
+
+        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+
+        /*
+         * Percpu allocations are only scanned and not reported as leaks
+         * (min_count is set to 0).
+         */
+        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+                for_each_possible_cpu(cpu)
+                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+                                      size, 0, GFP_KERNEL);
+        else if (atomic_read(&kmemleak_early_log))
+                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+
+/**
  * kmemleak_free - unregister a previously registered object
  * @ptr: pointer to beginning of the object
  *
@@ -917,6 +961,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
 /**
+ * kmemleak_free_percpu - unregister a previously registered __percpu object
+ * @ptr: __percpu pointer to beginning of the object
+ *
+ * This function is called from the kernel percpu allocator when an object
+ * (memory block) is freed (free_percpu).
+ */
+void __ref kmemleak_free_percpu(const void __percpu *ptr)
+{
+        unsigned int cpu;
+
+        pr_debug("%s(0x%p)\n", __func__, ptr);
+
+        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+                for_each_possible_cpu(cpu)
+                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
+                                                                      cpu));
+        else if (atomic_read(&kmemleak_early_log))
+                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
+
+/**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr: pointer to beginning of the object
  *
@@ -1727,12 +1793,18 @@ void __init kmemleak_init(void)
                 case KMEMLEAK_ALLOC:
                         early_alloc(log);
                         break;
+                case KMEMLEAK_ALLOC_PERCPU:
+                        early_alloc_percpu(log);
+                        break;
                 case KMEMLEAK_FREE:
                         kmemleak_free(log->ptr);
                         break;
                 case KMEMLEAK_FREE_PART:
                         kmemleak_free_part(log->ptr, log->size);
                         break;
+                case KMEMLEAK_FREE_PERCPU:
+                        kmemleak_free_percpu(log->ptr);
+                        break;
                 case KMEMLEAK_NOT_LEAK:
                         kmemleak_not_leak(log->ptr);
                         break;
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a72006..86c5bdbdc370 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
         const char *err;
         int slot, off, new_alloc;
         unsigned long flags;
+        void __percpu *ptr;
 
         if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                 WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ area_found:
         mutex_unlock(&pcpu_alloc_mutex);
 
         /* return address relative to base address */
-        return __addr_to_pcpu_ptr(chunk->base_addr + off);
+        ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+        kmemleak_alloc_percpu(ptr, size);
+        return ptr;
 
 fail_unlock:
         spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
         if (!ptr)
                 return;
 
+        kmemleak_free_percpu(ptr);
+
         addr = __pcpu_ptr_to_addr(ptr);
 
         spin_lock_irqsave(&pcpu_lock, flags);
@@ -1637,6 +1643,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                 rc = -ENOMEM;
                 goto out_free_areas;
         }
+        /* kmemleak tracks the percpu allocations separately */
+        kmemleak_free(ptr);
         areas[group] = ptr;
 
         base = min(ptr, base);
@@ -1751,6 +1759,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
                                "for cpu%u\n", psize_str, cpu);
                         goto enomem;
                 }
+                /* kmemleak tracks the percpu allocations separately */
+                kmemleak_free(ptr);
                 pages[j++] = virt_to_page(ptr);
         }
 
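
As a rough illustration of how the new hooks are exercised (not part of the patch; the module and symbol names below are made up), an ordinary alloc_percpu()/free_percpu() user now has each of its per-CPU areas registered with kmemleak by pcpu_alloc() and unregistered again by free_percpu(). Because kmemleak_alloc_percpu() passes min_count == 0 to create_object(), the per-CPU areas are scanned for references to other objects but are never themselves reported as leaks.

/* Hypothetical example module, assuming the patch above is applied. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

static u64 __percpu *demo_counters;

static int __init demo_init(void)
{
        /* pcpu_alloc() now calls kmemleak_alloc_percpu(ptr, size) */
        demo_counters = alloc_percpu(u64);
        if (!demo_counters)
                return -ENOMEM;
        return 0;
}

static void __exit demo_exit(void)
{
        /* free_percpu() now calls kmemleak_free_percpu(ptr) before freeing */
        free_percpu(demo_counters);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");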