author     Linus Torvalds <torvalds@linux-foundation.org>	2012-01-14 21:11:11 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2012-01-14 21:11:11 -0500
commit     892d208bcf79e4e1058707786a7b6d486697cd78
tree       15446e4d19f7f98078344ab0bc37ae6af4898c6d
parent     dca88ad6915b65f6e037f8c3e632fcd92a70bd88
parent     029aeff5db879afd7760f11214b6fea45f76b58e
Merge tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux
Kmemleak patches

Main features:
- Handle percpu memory allocations (only scanning them, not actually
  reporting).
- Memory hotplug support.

Usability improvements:
- Show the origin of early allocations.
- Report previously found leaks even if kmemleak has been disabled by
  some error.

* tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux:
kmemleak: Add support for memory hotplug
kmemleak: Handle percpu memory allocation
kmemleak: Report previously found leaks even after an error
kmemleak: When the early log buffer is exceeded, report the actual number
kmemleak: Show where early_log issues come from
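
To make the "only scanning, not reporting" behaviour concrete: percpu areas are registered with min_count 0, so kmemleak follows pointers stored inside them but never flags the areas themselves as leaks. Below is a minimal sketch contrasting the two cases in a hypothetical test module (the leaktest_* names are invented; the debugfs scan workflow is the one described in Documentation/kmemleak.txt):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

static int __init leaktest_init(void)
{
	/* Deliberately leaked kmalloc() block: after
	 * `echo scan > /sys/kernel/debug/kmemleak` this should be
	 * reported as an unreferenced object. */
	kmalloc(64, GFP_KERNEL);

	/* Equally unreferenced percpu block: registered by
	 * kmemleak_alloc_percpu() with min_count 0, so it is scanned
	 * for pointers but never reported as a leak. */
	alloc_percpu(unsigned long);

	return 0;
}
module_init(leaktest_init);

MODULE_LICENSE("GPL");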
 Documentation/kmemleak.txt |   3
 include/linux/kmemleak.h   |   8
 lib/Kconfig.debug          |   2
 mm/kmemleak.c              | 158
 mm/percpu.c                |  12
 5 files changed, 155 insertions(+), 28 deletions(-)
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 51063e681ca4..b6e39739a36d 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -127,7 +127,10 @@ See the include/linux/kmemleak.h header for the functions prototype.
 
 kmemleak_init		 - initialize kmemleak
 kmemleak_alloc		 - notify of a memory block allocation
+kmemleak_alloc_percpu	 - notify of a percpu memory block allocation
 kmemleak_free		 - notify of a memory block freeing
+kmemleak_free_part	 - notify of a partial memory block freeing
+kmemleak_free_percpu	 - notify of a percpu memory block freeing
 kmemleak_not_leak	 - mark an object as not a leak
 kmemleak_ignore		 - do not scan or report an object as leak
 kmemleak_scan_area	 - add scan areas inside a memory block
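
For reference, the kmemleak_alloc()/kmemleak_free() pair listed above is the hook API an allocator uses to keep kmemleak's object database in sync. A hedged sketch of a custom pool allocator using it (the my_pool_* names are invented; raw page allocations are used as backing here on the assumption that kmemleak does not track them by itself):

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>

/* Hypothetical pool allocator that tells kmemleak about its blocks. */
static void *my_pool_alloc(size_t size, gfp_t gfp)
{
	void *ptr = (void *)__get_free_pages(gfp, get_order(size));

	/* min_count 1: report the block if no reference to it is found
	 * during scanning; a min_count of 0 would make it scan-only. */
	if (ptr)
		kmemleak_alloc(ptr, size, 1, gfp);
	return ptr;
}

static void my_pool_free(void *ptr, size_t size)
{
	/* Remove the block from kmemleak before the memory is reused. */
	kmemleak_free(ptr);
	free_pages((unsigned long)ptr, get_order(size));
}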
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 99d9a6766f7e..2a5e5548a1d2 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -26,8 +26,10 @@
 extern void kmemleak_init(void) __ref;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			    gfp_t gfp) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
 extern void kmemleak_free(const void *ptr) __ref;
 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
			     size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
@@ -68,6 +70,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
					    gfp_t gfp)
 {
 }
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+}
 static inline void kmemleak_free(const void *ptr)
 {
 }
@@ -77,6 +82,9 @@ static inline void kmemleak_free_part(const void *ptr, size_t size)
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
 {
 }
+static inline void kmemleak_free_percpu(const void __percpu *ptr)
+{
+}
 static inline void kmemleak_not_leak(const void *ptr)
 {
 }
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 82928f5ea049..8745ac7d1f75 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -414,7 +414,7 @@ config SLUB_STATS
 
 config DEBUG_KMEMLEAK
	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
+	depends on DEBUG_KERNEL && EXPERIMENTAL && \
		(X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
 
	select DEBUG_FS
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f3b2a00fe9c1..c833addd94d7 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -100,6 +100,7 @@
 
 #include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
+#include <linux/memory_hotplug.h>
 
 /*
  * Kmemleak configuration and common defines.
@@ -196,7 +197,9 @@ static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
 static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
 /* enables or disables early logging of the memory operations */
 static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
-/* set if a fata kmemleak error has occurred */
+/* set if a kmemleak warning was issued */
+static atomic_t kmemleak_warning = ATOMIC_INIT(0);
+/* set if a fatal kmemleak error has occurred */
 static atomic_t kmemleak_error = ATOMIC_INIT(0);
 
 /* minimum and maximum address that may be valid pointers */
@@ -228,8 +231,10 @@ static int kmemleak_skip_disable;
 /* kmemleak operation type for early logging */
 enum {
	KMEMLEAK_ALLOC,
+	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
+	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
@@ -259,9 +264,10 @@ static void kmemleak_disable(void);
 /*
  * Print a warning and dump the stack trace.
  */
 #define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
+	atomic_set(&kmemleak_warning, 1);	\
 } while (0)
 
 /*
@@ -403,8 +409,8 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
-			pr_warning("Found object by alias at 0x%08lx\n", ptr);
-			dump_stack();
+			kmemleak_warn("Found object by alias at 0x%08lx\n",
+				      ptr);
			dump_object_info(object);
			object = NULL;
		}
@@ -794,9 +800,13 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
	unsigned long flags;
	struct early_log *log;
 
+	if (atomic_read(&kmemleak_error)) {
+		/* kmemleak stopped recording, just count the requests */
+		crt_early_log++;
+		return;
+	}
+
	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		pr_warning("Early log buffer exceeded, "
-			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
		kmemleak_disable();
		return;
	}
@@ -811,8 +821,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
-	if (op_type == KMEMLEAK_ALLOC)
-		log->trace_len = __save_stack_trace(log->trace);
+	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
 }
@@ -846,6 +855,20 @@ out:
	rcu_read_unlock();
 }
 
+/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc_percpu(struct early_log *log)
+{
+	unsigned int cpu;
+	const void __percpu *ptr = log->ptr;
+
+	for_each_possible_cpu(cpu) {
+		log->ptr = per_cpu_ptr(ptr, cpu);
+		early_alloc(log);
+	}
+}
+
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr: pointer to beginning of the object
@@ -873,6 +896,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
 /**
+ * kmemleak_alloc_percpu - register a newly allocated __percpu object
+ * @ptr:	__percpu pointer to beginning of the object
+ * @size:	size of the object
+ *
+ * This function is called from the kernel percpu allocator when a new object
+ * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+ * allocation.
+ */
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+	unsigned int cpu;
+
+	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+
+	/*
+	 * Percpu allocations are only scanned and not reported as leaks
+	 * (min_count is set to 0).
+	 */
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		for_each_possible_cpu(cpu)
+			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+				      size, 0, GFP_KERNEL);
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+
+/**
  * kmemleak_free - unregister a previously registered object
  * @ptr: pointer to beginning of the object
  *
@@ -911,6 +962,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
 /**
+ * kmemleak_free_percpu - unregister a previously registered __percpu object
+ * @ptr:	__percpu pointer to beginning of the object
+ *
+ * This function is called from the kernel percpu allocator when an object
+ * (memory block) is freed (free_percpu).
+ */
+void __ref kmemleak_free_percpu(const void __percpu *ptr)
+{
+	unsigned int cpu;
+
+	pr_debug("%s(0x%p)\n", __func__, ptr);
+
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		for_each_possible_cpu(cpu)
+			delete_object_full((unsigned long)per_cpu_ptr(ptr,
+								      cpu));
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
+
+/**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr: pointer to beginning of the object
  *
@@ -1220,9 +1293,9 @@ static void kmemleak_scan(void)
 #endif
 
	/*
-	 * Struct page scanning for each node. The code below is not yet safe
-	 * with MEMORY_HOTPLUG.
+	 * Struct page scanning for each node.
	 */
+	lock_memory_hotplug();
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
@@ -1241,6 +1314,7 @@ static void kmemleak_scan(void)
			scan_block(page, page + 1, NULL, 1);
		}
	}
+	unlock_memory_hotplug();
 
	/*
	 * Scanning the task stacks (may introduce false negatives).
@@ -1467,9 +1541,6 @@ static const struct seq_operations kmemleak_seq_ops = {
 
 static int kmemleak_open(struct inode *inode, struct file *file)
 {
-	if (!atomic_read(&kmemleak_enabled))
-		return -EBUSY;
-
	return seq_open(file, &kmemleak_seq_ops);
 }
 
@@ -1543,6 +1614,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
	int buf_size;
	int ret;
 
+	if (!atomic_read(&kmemleak_enabled))
+		return -EBUSY;
+
	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
@@ -1602,20 +1676,24 @@ static const struct file_operations kmemleak_fops = {
 };
 
 /*
- * Perform the freeing of the kmemleak internal objects after waiting for any
- * current memory scan to complete.
+ * Stop the memory scanning thread and free the kmemleak internal objects if
+ * no previous scan thread (otherwise, kmemleak may still have some useful
+ * information on memory leaks).
 */
 static void kmemleak_do_cleanup(struct work_struct *work)
 {
	struct kmemleak_object *object;
+	bool cleanup = scan_thread == NULL;
 
	mutex_lock(&scan_mutex);
	stop_scan_thread();
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(object, &object_list, object_list)
-		delete_object_full(object->pointer);
-	rcu_read_unlock();
+	if (cleanup) {
+		rcu_read_lock();
+		list_for_each_entry_rcu(object, &object_list, object_list)
+			delete_object_full(object->pointer);
+		rcu_read_unlock();
+	}
	mutex_unlock(&scan_mutex);
 }
 
@@ -1632,7 +1710,6 @@ static void kmemleak_disable(void)
		return;
 
	/* stop any memory operation tracing */
-	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);
 
	/* check whether it is too early for a kernel thread */
@@ -1659,6 +1736,17 @@ static int kmemleak_boot_config(char *str)
 }
 early_param("kmemleak", kmemleak_boot_config);
 
+static void __init print_log_trace(struct early_log *log)
+{
+	struct stack_trace trace;
+
+	trace.nr_entries = log->trace_len;
+	trace.entries = log->trace;
+
+	pr_notice("Early log backtrace:\n");
+	print_stack_trace(&trace, 2);
+}
+
 /*
  * Kmemleak initialization.
 */
@@ -1681,12 +1769,18 @@ void __init kmemleak_init(void)
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);
 
+	if (crt_early_log >= ARRAY_SIZE(early_log))
+		pr_warning("Early log buffer exceeded (%d), please increase "
+			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
+
	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
-	if (!atomic_read(&kmemleak_error)) {
+	atomic_set(&kmemleak_early_log, 0);
+	if (atomic_read(&kmemleak_error)) {
+		local_irq_restore(flags);
+		return;
+	} else
		atomic_set(&kmemleak_enabled, 1);
-		atomic_set(&kmemleak_early_log, 0);
-	}
	local_irq_restore(flags);
 
	/*
@@ -1701,12 +1795,18 @@ void __init kmemleak_init(void)
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
+		case KMEMLEAK_ALLOC_PERCPU:
+			early_alloc_percpu(log);
+			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
+		case KMEMLEAK_FREE_PERCPU:
+			kmemleak_free_percpu(log->ptr);
+			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
@@ -1720,7 +1820,13 @@ void __init kmemleak_init(void)
			kmemleak_no_scan(log->ptr);
			break;
		default:
-			WARN_ON(1);
+			kmemleak_warn("Unknown early log operation: %d\n",
+				      log->op_type);
+		}
+
+		if (atomic_read(&kmemleak_warning)) {
+			print_log_trace(log);
+			atomic_set(&kmemleak_warning, 0);
		}
	}
 }
diff --git a/mm/percpu.c b/mm/percpu.c
index 716eb4acf2fc..f47af9123af7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;
+	void __percpu *ptr;
 
	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ area_found:
	mutex_unlock(&pcpu_alloc_mutex);
 
	/* return address relative to base address */
-	return __addr_to_pcpu_ptr(chunk->base_addr + off);
+	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+	kmemleak_alloc_percpu(ptr, size);
+	return ptr;
 
 fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
	if (!ptr)
		return;
 
+	kmemleak_free_percpu(ptr);
+
	addr = __pcpu_ptr_to_addr(ptr);
 
	spin_lock_irqsave(&pcpu_lock, flags);
@@ -1639,6 +1645,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
			rc = -ENOMEM;
			goto out_free_areas;
		}
+		/* kmemleak tracks the percpu allocations separately */
+		kmemleak_free(ptr);
		areas[group] = ptr;
 
		base = min(ptr, base);
@@ -1753,6 +1761,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
				"for cpu%u\n", psize_str, cpu);
			goto enomem;
		}
+		/* kmemleak tracks the percpu allocations separately */
+		kmemleak_free(ptr);
		pages[j++] = virt_to_page(ptr);
	}
 
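
Taken together, the mm/percpu.c hooks above mean ordinary driver code gets percpu leak tracking without any changes. A hedged usage sketch of the lifecycle as wired up by this merge (the foo_* names are invented):

#include <linux/module.h>
#include <linux/types.h>
#include <linux/percpu.h>

struct foo_stats {
	u64 packets;
	u64 bytes;
};

static struct foo_stats __percpu *foo_stats;

static int __init foo_init(void)
{
	/* pcpu_alloc() now calls kmemleak_alloc_percpu(ptr, size),
	 * creating one scan-only kmemleak object per possible CPU. */
	foo_stats = alloc_percpu(struct foo_stats);
	if (!foo_stats)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* free_percpu() calls kmemleak_free_percpu(ptr), which removes
	 * the per-CPU objects via delete_object_full(). */
	free_percpu(foo_stats);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");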