Diffstat (limited to 'mm/kmemleak.c')
 mm/kmemleak.c | 193 ++++++++++++++++++++++++++++----------------------
 1 file changed, 103 insertions(+), 90 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 8bf765c4f58d..2c0d032ac898 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -72,7 +72,6 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/prio_tree.h>
-#include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -93,6 +92,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/workqueue.h>
+#include <linux/crc32.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
@@ -108,7 +108,6 @@
 #define MSECS_MIN_AGE 5000	/* minimum object age for reporting */
 #define SECS_FIRST_SCAN 60	/* delay before the first scan */
 #define SECS_SCAN_WAIT 600	/* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES 25	/* maximum number of gray list scans */
 #define MAX_SCAN_SIZE 4096	/* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER sizeof(void *)
@@ -119,8 +118,8 @@
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
 	struct hlist_node node;
-	unsigned long offset;
-	size_t length;
+	unsigned long start;
+	size_t size;
 };
 
 #define KMEMLEAK_GREY 0
@@ -149,6 +148,8 @@ struct kmemleak_object {
 	int min_count;
 	/* the total number of pointers found pointing to this object */
 	int count;
+	/* checksum for detecting modified objects */
+	u32 checksum;
 	/* memory ranges to be scanned inside an object (empty for all) */
 	struct hlist_head area_list;
 	unsigned long trace[MAX_TRACE];
@@ -164,8 +165,6 @@ struct kmemleak_object {
 #define OBJECT_REPORTED (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW (1 << 3)
 
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE 16
@@ -241,8 +240,6 @@ struct early_log {
 	const void *ptr;		/* allocated/freed memory block */
 	size_t size;			/* memory block size */
 	int min_count;			/* minimum reference count */
-	unsigned long offset;		/* scan area offset */
-	size_t length;			/* scan area length */
 	unsigned long trace[MAX_TRACE];	/* stack trace */
 	unsigned int trace_len;		/* stack trace length */
 };
@@ -323,11 +320,6 @@ static bool color_gray(const struct kmemleak_object *object)
 		object->count >= object->min_count;
 }
 
-static bool color_black(const struct kmemleak_object *object)
-{
-	return object->min_count == KMEMLEAK_BLACK;
-}
-
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
@@ -335,7 +327,7 @@ static bool color_black(const struct kmemleak_object *object)
  */
 static bool unreferenced_object(struct kmemleak_object *object)
 {
-	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 		time_before_eq(object->jiffies + jiffies_min_age,
 			       jiffies_last_scan);
 }
@@ -348,11 +340,13 @@ static void print_unreferenced(struct seq_file *seq,
 			       struct kmemleak_object *object)
 {
 	int i;
+	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
 	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 		   object->pointer, object->size);
-	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
-		   object->comm, object->pid, object->jiffies);
+	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+		   object->comm, object->pid, object->jiffies,
+		   msecs_age / 1000, msecs_age % 1000);
 	hex_dump_object(seq, object);
 	seq_printf(seq, "  backtrace:\n");
 
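The new "(age %d.%03ds)" field splits a millisecond delta into whole seconds and a three-digit remainder. A minimal standalone sketch of the same arithmetic, with the millisecond count passed in directly rather than derived from jiffies_to_msecs():

#include <stdio.h>

/* stand-in for jiffies_to_msecs(jiffies - object->jiffies) */
static void print_age(unsigned int msecs_age)
{
	printf("(age %u.%03us)\n", msecs_age / 1000, msecs_age % 1000);
}

int main(void)
{
	print_age(12345);	/* prints "(age 12.345s)" */
	return 0;
}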
@@ -381,6 +375,7 @@ static void dump_object_info(struct kmemleak_object *object)
 	pr_notice("  min_count = %d\n", object->min_count);
 	pr_notice("  count = %d\n", object->count);
 	pr_notice("  flags = 0x%lx\n", object->flags);
+	pr_notice("  checksum = %d\n", object->checksum);
 	pr_notice("  backtrace:\n");
 	print_stack_trace(&trace, 4);
 }
@@ -522,12 +517,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	INIT_HLIST_HEAD(&object->area_list);
 	spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
 	object->size = size;
 	object->min_count = min_count;
-	object->count = -1;			/* no color initially */
+	object->count = 0;			/* white color initially */
 	object->jiffies = jiffies;
+	object->checksum = 0;
 
 	/* task information */
 	if (in_irq()) {
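With OBJECT_NEW gone, a freshly created object starts out white (count == 0) rather than colourless (count == -1, which equals KMEMLEAK_BLACK and matched neither predicate). For reference, the colour predicates this initialisation feeds into, paraphrased from the unchanged parts of mm/kmemleak.c, so treat the exact bodies as an assumption:

/* white: too few references found so far -> leak candidate */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

/* gray: known referenced; its contents will be scanned for pointers */
static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}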
@@ -720,14 +716,13 @@ static void make_black_object(unsigned long ptr)
  * Add a scanning area to the object. If at least one such area is added,
  * kmemleak will only scan these ranges rather than the whole memory block.
  */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-			  size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 	struct kmemleak_scan_area *area;
 
-	object = find_and_get_object(ptr, 0);
+	object = find_and_get_object(ptr, 1);
 	if (!object) {
 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 			      ptr);
@@ -741,7 +736,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
-	if (offset + length > object->size) {
+	if (ptr + size > object->pointer + object->size) {
 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
@@ -749,8 +744,8 @@
 	}
 
 	INIT_HLIST_NODE(&area->node);
-	area->offset = offset;
-	area->length = length;
+	area->start = ptr;
+	area->size = size;
 
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
@@ -786,7 +781,7 @@ static void object_no_scan(unsigned long ptr)
  * processed later once kmemleak is fully initialized.
  */
 static void __init log_early(int op_type, const void *ptr, size_t size,
-			     int min_count, unsigned long offset, size_t length)
+			     int min_count)
 {
 	unsigned long flags;
 	struct early_log *log;
@@ -808,8 +803,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	log->ptr = ptr;
 	log->size = size;
 	log->min_count = min_count;
-	log->offset = offset;
-	log->length = length;
 	if (op_type == KMEMLEAK_ALLOC)
 		log->trace_len = __save_stack_trace(log->trace);
 	crt_early_log++;
@@ -858,7 +851,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		create_object((unsigned long)ptr, size, min_count, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +866,7 @@ void __ref kmemleak_free(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_full((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +881,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_part((unsigned long)ptr, size);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +896,7 @@ void __ref kmemleak_not_leak(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_gray_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +912,21 @@ void __ref kmemleak_ignore(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_black_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-			      size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-		add_scan_area((unsigned long)ptr, offset, length, gfp);
+		add_scan_area((unsigned long)ptr, size, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
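kmemleak_scan_area() now takes the absolute start address and size of the area instead of an (offset, length) pair relative to the enclosing object; the enclosing object is resolved from the pointer itself, which is why add_scan_area() above now calls find_and_get_object(ptr, 1) to allow an address inside the object. A sketch of the caller-side conversion, with struct foo and obj as hypothetical names:

/* before this patch: offset/length relative to the object's base */
kmemleak_scan_area(obj, offsetof(struct foo, list),
		   sizeof(((struct foo *)0)->list), GFP_KERNEL);

/* after this patch: absolute start address and size */
kmemleak_scan_area(&obj->list, sizeof(obj->list), GFP_KERNEL);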
@@ -948,11 +940,25 @@ void __ref kmemleak_no_scan(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		object_no_scan((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
 /*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+	u32 old_csum = object->checksum;
+
+	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+		return false;
+
+	object->checksum = crc32(0, (void *)object->pointer, object->size);
+	return object->checksum != old_csum;
+}
+
+/*
  * Memory scanning is a long process and it needs to be interruptable. This
  * function checks whether such interrupt condition occured.
  */
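update_checksum() is the heart of the new false-positive handling: it folds the object's current contents through crc32() and reports whether they changed since the previous scan, so a white object whose memory was written to gets another chance to be found referenced. A minimal userspace sketch of the same idiom, assuming zlib's crc32() in place of the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <zlib.h>		/* link with -lz */

struct tracked {
	const unsigned char *ptr;	/* start of the tracked block */
	size_t size;
	unsigned long checksum;		/* value from the previous scan */
};

/* returns true if the block's contents changed since the last call */
static bool update_checksum(struct tracked *t)
{
	unsigned long old_csum = t->checksum;

	t->checksum = crc32(0L, t->ptr, t->size);
	return t->checksum != old_csum;
}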
@@ -1031,11 +1037,14 @@ static void scan_block(void *_start, void *_end,
 		 * added to the gray_list.
 		 */
 		object->count++;
-		if (color_gray(object))
+		if (color_gray(object)) {
 			list_add_tail(&object->gray_list, &gray_list);
-		else
-			put_object(object);
+			spin_unlock_irqrestore(&object->lock, flags);
+			continue;
+		}
+
 		spin_unlock_irqrestore(&object->lock, flags);
+		put_object(object);
 	}
 }
 
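The rewritten branch also reorders the cleanup: gray objects keep the reference taken earlier (it is dropped later by the gray-list walk), and non-gray objects are released only after object->lock has been dropped. That ordering matters whenever put_object() could free the object, since the lock lives inside it. A pthread-flavoured sketch of the rule, with all names hypothetical and the refcount simplified to non-atomic:

#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int use_count;			/* simplified, not atomic */
};

/* drop a reference; frees the object when the count reaches zero */
static void put_obj(struct obj *o)
{
	if (--o->use_count == 0) {
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
}

static void inspect_and_release(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	/* ... examine the object ... */
	pthread_mutex_unlock(&o->lock);	/* unlock first ... */
	put_obj(o);			/* ... then drop the reference */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	o->use_count = 1;
	inspect_and_release(o);		/* frees o */
	return 0;
}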
@@ -1050,8 +1059,8 @@ static void scan_object(struct kmemleak_object *object)
 	unsigned long flags;
 
 	/*
-	 * Once the object->lock is aquired, the corresponding memory block
-	 * cannot be freed (the same lock is aquired in delete_object).
+	 * Once the object->lock is acquired, the corresponding memory block
+	 * cannot be freed (the same lock is acquired in delete_object).
 	 */
 	spin_lock_irqsave(&object->lock, flags);
 	if (object->flags & OBJECT_NO_SCAN)
@@ -1075,14 +1084,47 @@ static void scan_object(struct kmemleak_object *object)
 		}
 	} else
 		hlist_for_each_entry(area, elem, &object->area_list, node)
-			scan_block((void *)(object->pointer + area->offset),
-				   (void *)(object->pointer + area->offset
-					    + area->length), object, 0);
+			scan_block((void *)area->start,
+				   (void *)(area->start + area->size),
+				   object, 0);
 out:
 	spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+	struct kmemleak_object *object, *tmp;
+
+	/*
+	 * The list traversal is safe for both tail additions and removals
+	 * from inside the loop. The kmemleak objects cannot be freed from
+	 * outside the loop because their use_count was incremented.
+	 */
+	object = list_entry(gray_list.next, typeof(*object), gray_list);
+	while (&object->gray_list != &gray_list) {
+		cond_resched();
+
+		/* may add new objects to the list */
+		if (!scan_should_stop())
+			scan_object(object);
+
+		tmp = list_entry(object->gray_list.next, typeof(*object),
+				 gray_list);
+
+		/* remove the object from the list and release it */
+		list_del(&object->gray_list);
+		put_object(object);
+
+		object = tmp;
+	}
+	WARN_ON(!list_empty(&gray_list));
+}
+
+/*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
  * scan_mutex held.
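scan_gray_list() deliberately walks the list by hand instead of using list_for_each_entry_safe(): scan_object() may append new gray objects at the tail, and because the next pointer is read only after the current object has been scanned, those additions are visited in the same pass. A standalone sketch of that traversal idiom on a miniature circular list (names are illustrative):

#include <stdio.h>

struct node {
	struct node *next, *prev;
	int value;
};

static void list_add_tail_node(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	struct node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
	struct node c = { NULL, NULL, 3 };
	struct node *n, *tmp;

	list_add_tail_node(&a, &head);
	list_add_tail_node(&b, &head);

	n = head.next;
	while (n != &head) {
		printf("scanning %d\n", n->value);
		if (n->value == 2)	/* "scanning" grows the list... */
			list_add_tail_node(&c, &head);
		tmp = n->next;		/* ...but next is re-read here */
		/* the real code does list_del() + put_object() here */
		n = tmp;
	}
	return 0;
}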
@@ -1090,10 +1132,9 @@ out:
 static void kmemleak_scan(void)
 {
 	unsigned long flags;
-	struct kmemleak_object *object, *tmp;
+	struct kmemleak_object *object;
 	int i;
 	int new_leaks = 0;
-	int gray_list_pass = 0;
 
 	jiffies_last_scan = jiffies;
 
@@ -1114,7 +1155,6 @@ static void kmemleak_scan(void)
 #endif
 		/* reset the reference count (whiten the object) */
 		object->count = 0;
-		object->flags &= ~OBJECT_NEW;
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
@@ -1172,62 +1212,36 @@
 
 	/*
 	 * Scan the objects already referenced from the sections scanned
-	 * above. More objects will be referenced and, if there are no memory
-	 * leaks, all the objects will be scanned. The list traversal is safe
-	 * for both tail additions and removals from inside the loop. The
-	 * kmemleak objects cannot be freed from outside the loop because their
-	 * use_count was increased.
+	 * above.
 	 */
-repeat:
-	object = list_entry(gray_list.next, typeof(*object), gray_list);
-	while (&object->gray_list != &gray_list) {
-		cond_resched();
-
-		/* may add new objects to the list */
-		if (!scan_should_stop())
-			scan_object(object);
-
-		tmp = list_entry(object->gray_list.next, typeof(*object),
-				 gray_list);
-
-		/* remove the object from the list and release it */
-		list_del(&object->gray_list);
-		put_object(object);
-
-		object = tmp;
-	}
-
-	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
-		goto scan_end;
+	scan_gray_list();
 
 	/*
-	 * Check for new objects allocated during this scanning and add them
-	 * to the gray list.
+	 * Check for new or unreferenced objects modified since the previous
+	 * scan and color them gray until the next scan.
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
 		spin_lock_irqsave(&object->lock, flags);
-		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
-		    get_object(object)) {
-			object->flags &= ~OBJECT_NEW;
+		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+		    && update_checksum(object) && get_object(object)) {
+			/* color it gray temporarily */
+			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
 		spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
-	if (!list_empty(&gray_list))
-		goto repeat;
-
-scan_end:
-	WARN_ON(!list_empty(&gray_list));
+	/*
+	 * Re-scan the gray list for modified unreferenced objects.
+	 */
+	scan_gray_list();
 
 	/*
-	 * If scanning was stopped or new objects were being allocated at a
-	 * higher rate than gray list scanning, do not report any new
-	 * unreferenced objects.
+	 * If scanning was stopped do not report any new unreferenced objects.
 	 */
-	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+	if (scan_should_stop())
 		return;
 
 	/*
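Taken together, these hunks replace the old bounded repeat (up to GRAY_LIST_PASSES full passes chasing objects allocated mid-scan) with exactly two passes: one full scan, then one extra pass restricted to white, still-allocated objects whose checksum changed. A toy model of that policy, with illustrative names rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum color { WHITE, GRAY };

struct toy_obj {
	enum color color;
	unsigned int checksum;		/* value from the previous scan */
	unsigned int live_csum;		/* what the contents hash to now */
};

static bool update_checksum(struct toy_obj *o)
{
	unsigned int old = o->checksum;

	o->checksum = o->live_csum;
	return o->checksum != old;
}

int main(void)
{
	struct toy_obj objs[] = {
		{ GRAY,  0, 0 },	/* referenced from the roots */
		{ WHITE, 0, 0 },	/* untouched: stays a candidate */
		{ WHITE, 0, 7 },	/* written to since the last scan */
	};
	int i;

	/* pass 1: scan_gray_list() would walk the gray objects here */

	for (i = 0; i < 3; i++)		/* re-queue modified white objects */
		if (objs[i].color == WHITE && update_checksum(&objs[i]))
			objs[i].color = GRAY;	/* gray temporarily */

	/* pass 2: scan_gray_list() again, only the re-queued objects */

	for (i = 0; i < 3; i++)
		if (objs[i].color == WHITE)
			printf("object %d: leak candidate\n", i);
	return 0;
}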
@@ -1642,8 +1656,7 @@ void __init kmemleak_init(void)
 			kmemleak_ignore(log->ptr);
 			break;
 		case KMEMLEAK_SCAN_AREA:
-			kmemleak_scan_area(log->ptr, log->offset, log->length,
-					   GFP_KERNEL);
+			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
 			break;
 		case KMEMLEAK_NO_SCAN:
 			kmemleak_no_scan(log->ptr);