diff options
author | Catalin Marinas <catalin.marinas@arm.com> | 2015-06-24 19:58:26 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2015-07-21 13:10:03 -0400 |
commit | 3baf726f001b69454f3eb18a589c508992622be9 (patch) | |
tree | 6ceba78f44f3d17d0f757a1c40cf973af5169dc0 /mm/kmemleak.c | |
parent | e3334dca73de24e5798759b14ed9e4f58e241fbd (diff) |
mm: kmemleak: allow safe memory scanning during kmemleak disabling
commit c5f3b1a51a591c18c8b33983908e7fdda6ae417e upstream.
The kmemleak scanning thread can run for minutes. Callbacks like
kmemleak_free() are allowed during this time, the race being taken care
of by the object->lock spinlock. Such lock also prevents a memory block
from being freed or unmapped while it is being scanned by blocking the
kmemleak_free() -> ... -> __delete_object() function until the lock is
released in scan_object().
When a kmemleak error occurs (e.g. it fails to allocate its metadata),
kmemleak_enabled is cleared (set to 0) and __delete_object() is no longer
called on freed objects. If kmemleak_scan is running at the same time,
kmemleak_free() no longer waits for the object scanning to complete,
allowing the corresponding memory block to be freed or unmapped (in the
case of vfree()). This leads to kmemleak_scan potentially triggering a
page fault.
This patch separates the kmemleak_free() enabling/disabling from the
overall kmemleak_enabled knob so that we can defer the disabling of the
object freeing tracking until the scanning thread has completed. The
kmemleak_free_part() is deliberately ignored by this patch since this is
only called during boot before the scanning thread started.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Vignesh Radhakrishnan <vigneshr@codeaurora.org>
Tested-by: Vignesh Radhakrishnan <vigneshr@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm/kmemleak.c')
-rw-r--r-- | mm/kmemleak.c | 19 |
1 file changed, 16 insertions, 3 deletions
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index f0fe4f2c1fa7..41df5b8efd25 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -195,6 +195,8 @@ static struct kmem_cache *scan_area_cache; | |||
195 | 195 | ||
196 | /* set if tracing memory operations is enabled */ | 196 | /* set if tracing memory operations is enabled */ |
197 | static int kmemleak_enabled; | 197 | static int kmemleak_enabled; |
198 | /* same as above but only for the kmemleak_free() callback */ | ||
199 | static int kmemleak_free_enabled; | ||
198 | /* set in the late_initcall if there were no errors */ | 200 | /* set in the late_initcall if there were no errors */ |
199 | static int kmemleak_initialized; | 201 | static int kmemleak_initialized; |
200 | /* enables or disables early logging of the memory operations */ | 202 | /* enables or disables early logging of the memory operations */ |
@@ -942,7 +944,7 @@ void __ref kmemleak_free(const void *ptr) | |||
942 | { | 944 | { |
943 | pr_debug("%s(0x%p)\n", __func__, ptr); | 945 | pr_debug("%s(0x%p)\n", __func__, ptr); |
944 | 946 | ||
945 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) | 947 | if (kmemleak_free_enabled && ptr && !IS_ERR(ptr)) |
946 | delete_object_full((unsigned long)ptr); | 948 | delete_object_full((unsigned long)ptr); |
947 | else if (kmemleak_early_log) | 949 | else if (kmemleak_early_log) |
948 | log_early(KMEMLEAK_FREE, ptr, 0, 0); | 950 | log_early(KMEMLEAK_FREE, ptr, 0, 0); |
@@ -982,7 +984,7 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr) | |||
982 | 984 | ||
983 | pr_debug("%s(0x%p)\n", __func__, ptr); | 985 | pr_debug("%s(0x%p)\n", __func__, ptr); |
984 | 986 | ||
985 | if (kmemleak_enabled && ptr && !IS_ERR(ptr)) | 987 | if (kmemleak_free_enabled && ptr && !IS_ERR(ptr)) |
986 | for_each_possible_cpu(cpu) | 988 | for_each_possible_cpu(cpu) |
987 | delete_object_full((unsigned long)per_cpu_ptr(ptr, | 989 | delete_object_full((unsigned long)per_cpu_ptr(ptr, |
988 | cpu)); | 990 | cpu)); |
@@ -1750,6 +1752,13 @@ static void kmemleak_do_cleanup(struct work_struct *work) | |||
1750 | mutex_lock(&scan_mutex); | 1752 | mutex_lock(&scan_mutex); |
1751 | stop_scan_thread(); | 1753 | stop_scan_thread(); |
1752 | 1754 | ||
1755 | /* | ||
1756 | * Once the scan thread has stopped, it is safe to no longer track | ||
1757 | * object freeing. Ordering of the scan thread stopping and the memory | ||
1758 | * accesses below is guaranteed by the kthread_stop() function. | ||
1759 | */ | ||
1760 | kmemleak_free_enabled = 0; | ||
1761 | |||
1753 | if (!kmemleak_found_leaks) | 1762 | if (!kmemleak_found_leaks) |
1754 | __kmemleak_do_cleanup(); | 1763 | __kmemleak_do_cleanup(); |
1755 | else | 1764 | else |
@@ -1776,6 +1785,8 @@ static void kmemleak_disable(void) | |||
1776 | /* check whether it is too early for a kernel thread */ | 1785 | /* check whether it is too early for a kernel thread */ |
1777 | if (kmemleak_initialized) | 1786 | if (kmemleak_initialized) |
1778 | schedule_work(&cleanup_work); | 1787 | schedule_work(&cleanup_work); |
1788 | else | ||
1789 | kmemleak_free_enabled = 0; | ||
1779 | 1790 | ||
1780 | pr_info("Kernel memory leak detector disabled\n"); | 1791 | pr_info("Kernel memory leak detector disabled\n"); |
1781 | } | 1792 | } |
@@ -1840,8 +1851,10 @@ void __init kmemleak_init(void) | |||
1840 | if (kmemleak_error) { | 1851 | if (kmemleak_error) { |
1841 | local_irq_restore(flags); | 1852 | local_irq_restore(flags); |
1842 | return; | 1853 | return; |
1843 | } else | 1854 | } else { |
1844 | kmemleak_enabled = 1; | 1855 | kmemleak_enabled = 1; |
1856 | kmemleak_free_enabled = 1; | ||
1857 | } | ||
1845 | local_irq_restore(flags); | 1858 | local_irq_restore(flags); |
1846 | 1859 | ||
1847 | /* | 1860 | /* |