author:    Li Zefan <lizefan@huawei.com>                  2014-04-03 17:46:29 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2014-04-03 19:20:50 -0400
commit:    8910ae896c8c961ef9c7d309262730bd2859e747 (patch)
tree:      a79a4b4052817edf2f7270f96e5b9ef994a1e8ea /mm/kmemleak.c
parent:    5f3bf19aeb8ed5cef0926bc10c80b6a50ac6bdeb (diff)
kmemleak: change some global variables to int
They don't have to be atomic_t, because they are simple boolean toggles.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
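
Why this is safe: each of these flags is only ever read whole or stored as 0/1, and the one place that must win a race — the first caller of kmemleak_disable() recording the error — keeps its atomicity, with atomic_cmpxchg() on the atomic_t becoming cmpxchg() on the plain int (see the hunk at line 1726 below). A minimal userspace sketch of that idiom follows; it is illustrative only, not part of the patch, and uses GCC's __sync_val_compare_and_swap() builtin in place of the kernel's cmpxchg():

#include <stdio.h>

static int error_flag;          /* stands in for kmemleak_error */
static int enabled = 1;         /* stands in for kmemleak_enabled */

static void disable(void)
{
        /*
         * __sync_val_compare_and_swap() returns the old value: 0 means
         * we won the race and proceed; 1 means another caller already
         * disabled us, mirroring cmpxchg(&kmemleak_error, 0, 1).
         */
        if (__sync_val_compare_and_swap(&error_flag, 0, 1))
                return;

        /* a one-way boolean toggle needs only a plain store */
        enabled = 0;
        printf("disabled\n");
}

int main(void)
{
        disable();
        disable();      /* no-op: already disabled */
        return 0;
}

Compiled with gcc, the second disable() call returns early, so the cleanup body runs exactly once — the same first-caller-wins guarantee the patch preserves while dropping atomic_t everywhere else. (Plain int accesses are not formally atomic in C, but aligned word-sized loads and stores do not tear on the architectures Linux supports, which is what makes the conversion sound.)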
Diffstat (limited to 'mm/kmemleak.c')
-rw-r--r--  mm/kmemleak.c | 80
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 6f90d003830a..91d67eaee050 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -192,15 +192,15 @@ static struct kmem_cache *object_cache;
 static struct kmem_cache *scan_area_cache;
 
 /* set if tracing memory operations is enabled */
-static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
+static int kmemleak_enabled;
 /* set in the late_initcall if there were no errors */
-static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
+static int kmemleak_initialized;
 /* enables or disables early logging of the memory operations */
-static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
+static int kmemleak_early_log = 1;
 /* set if a kmemleak warning was issued */
-static atomic_t kmemleak_warning = ATOMIC_INIT(0);
+static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
-static atomic_t kmemleak_error = ATOMIC_INIT(0);
+static int kmemleak_error;
 
 /* minimum and maximum address that may be valid pointers */
 static unsigned long min_addr = ULONG_MAX;
@@ -268,7 +268,7 @@ static void kmemleak_disable(void);
 #define kmemleak_warn(x...)    do {             \
         pr_warning(x);                          \
         dump_stack();                           \
-        atomic_set(&kmemleak_warning, 1);       \
+        kmemleak_warning = 1;                   \
 } while (0)
 
 /*
@@ -806,7 +806,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
         unsigned long flags;
         struct early_log *log;
 
-        if (atomic_read(&kmemleak_error)) {
+        if (kmemleak_error) {
                 /* kmemleak stopped recording, just count the requests */
                 crt_early_log++;
                 return;
@@ -841,7 +841,7 @@ static void early_alloc(struct early_log *log)
         unsigned long flags;
         int i;
 
-        if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+        if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
                 return;
 
         /*
@@ -894,9 +894,9 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 {
         pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 create_object((unsigned long)ptr, size, min_count, gfp);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -920,11 +920,11 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
          * Percpu allocations are only scanned and not reported as leaks
          * (min_count is set to 0).
          */
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 for_each_possible_cpu(cpu)
                         create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                       size, 0, GFP_KERNEL);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -940,9 +940,9 @@ void __ref kmemleak_free(const void *ptr)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 delete_object_full((unsigned long)ptr);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -960,9 +960,9 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 delete_object_part((unsigned long)ptr, size);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -980,11 +980,11 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
 
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 for_each_possible_cpu(cpu)
                         delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                       cpu));
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -1000,9 +1000,9 @@ void __ref kmemleak_not_leak(const void *ptr)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 make_gray_object((unsigned long)ptr);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
@@ -1020,9 +1020,9 @@ void __ref kmemleak_ignore(const void *ptr)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 make_black_object((unsigned long)ptr);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
@@ -1042,9 +1042,9 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                 add_scan_area((unsigned long)ptr, size, gfp);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
@@ -1062,9 +1062,9 @@ void __ref kmemleak_no_scan(const void *ptr)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
-        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                 object_no_scan((unsigned long)ptr);
-        else if (atomic_read(&kmemleak_early_log))
+        else if (kmemleak_early_log)
                 log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
@@ -1089,7 +1089,7 @@ static bool update_checksum(struct kmemleak_object *object)
  */
 static int scan_should_stop(void)
 {
-        if (!atomic_read(&kmemleak_enabled))
+        if (!kmemleak_enabled)
                 return 1;
 
         /*
@@ -1630,14 +1630,14 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
                 return ret;
 
         if (strncmp(buf, "clear", 5) == 0) {
-                if (atomic_read(&kmemleak_enabled))
+                if (kmemleak_enabled)
                         kmemleak_clear();
                 else
                         __kmemleak_do_cleanup();
                 goto out;
         }
 
-        if (!atomic_read(&kmemleak_enabled)) {
+        if (!kmemleak_enabled) {
                 ret = -EBUSY;
                 goto out;
         }
@@ -1726,14 +1726,14 @@ static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
 static void kmemleak_disable(void)
 {
         /* atomically check whether it was already invoked */
-        if (atomic_cmpxchg(&kmemleak_error, 0, 1))
+        if (cmpxchg(&kmemleak_error, 0, 1))
                 return;
 
         /* stop any memory operation tracing */
-        atomic_set(&kmemleak_enabled, 0);
+        kmemleak_enabled = 0;
 
         /* check whether it is too early for a kernel thread */
-        if (atomic_read(&kmemleak_initialized))
+        if (kmemleak_initialized)
                 schedule_work(&cleanup_work);
 
         pr_info("Kernel memory leak detector disabled\n");
@@ -1775,9 +1775,10 @@ void __init kmemleak_init(void)
         int i;
         unsigned long flags;
 
+        kmemleak_early_log = 0;
+
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
         if (!kmemleak_skip_disable) {
-                atomic_set(&kmemleak_early_log, 0);
                 kmemleak_disable();
                 return;
         }
@@ -1795,12 +1796,11 @@ void __init kmemleak_init(void)
 
         /* the kernel is still in UP mode, so disabling the IRQs is enough */
         local_irq_save(flags);
-        atomic_set(&kmemleak_early_log, 0);
-        if (atomic_read(&kmemleak_error)) {
+        if (kmemleak_error) {
                 local_irq_restore(flags);
                 return;
         } else
-                atomic_set(&kmemleak_enabled, 1);
+                kmemleak_enabled = 1;
         local_irq_restore(flags);
 
         /*
@@ -1844,9 +1844,9 @@ void __init kmemleak_init(void)
                                        log->op_type);
                 }
 
-                if (atomic_read(&kmemleak_warning)) {
+                if (kmemleak_warning) {
                         print_log_trace(log);
-                        atomic_set(&kmemleak_warning, 0);
+                        kmemleak_warning = 0;
                 }
         }
 }
@@ -1858,9 +1858,9 @@ static int __init kmemleak_late_init(void)
 {
         struct dentry *dentry;
 
-        atomic_set(&kmemleak_initialized, 1);
+        kmemleak_initialized = 1;
 
-        if (atomic_read(&kmemleak_error)) {
+        if (kmemleak_error) {
                 /*
                  * Some error occurred and kmemleak was disabled. There is a
                  * small chance that kmemleak_disable() was called immediately