author	Catalin Marinas <catalin.marinas@arm.com>	2009-10-28 09:33:12 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2009-10-28 13:07:54 -0400
commit	04609ccc40c4e8f3eabe8894eb0de881c8b984fd (patch)
tree	bb48d91643e04b49312bc12a0a55ffa4ca2e4baa
parent	fefdd336b2a2f7617e0c8a0777c731d9ed6454ae (diff)
kmemleak: Reduce the false positives by checking for modified objects
If an object was modified since it was previously suspected as leak, do
not report it. The modification check is done by calculating the
checksum (CRC32) of such object.

Several false positives are caused by objects being removed from linked
lists (e.g. allocation pools) and temporarily breaking the reference
chain since kmemleak runs concurrently with such list mutation
primitives.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--	mm/kmemleak.c	124
1 file changed, 70 insertions(+), 54 deletions(-)
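The idea of the patch in brief: record a CRC32 of each leak candidate and, on the next scan, report the object only if that checksum is unchanged. The following standalone userspace sketch illustrates the mechanism; the names tracked_object, object_modified and crc32_buf are illustrative stand-ins, not the kernel's kmemleak_object, update_checksum() or crc32() API, and the kmemcheck initialization guard from the real patch is omitted since userspace memory here is always initialized.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tracked_object {
	void *pointer;     /* start of the tracked memory block */
	size_t size;       /* size of the block in bytes */
	uint32_t checksum; /* CRC32 recorded at the previous scan */
};

/* Bitwise CRC-32 (polynomial 0xedb88320); stand-in for the kernel's crc32(). */
static uint32_t crc32_buf(uint32_t crc, const void *buf, size_t len)
{
	const unsigned char *p = buf;
	int k;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return ~crc;
}

/* Recompute the checksum; return 1 if the object changed since the last scan. */
static int object_modified(struct tracked_object *obj)
{
	uint32_t old_csum = obj->checksum;

	obj->checksum = crc32_buf(0, obj->pointer, obj->size);
	return obj->checksum != old_csum;
}

int main(void)
{
	char block[32] = "initial contents";
	struct tracked_object obj = { block, sizeof(block), 0 };

	object_modified(&obj);	/* first scan: record the checksum */

	/* unchanged since the last scan: a leak report would stand */
	printf("modified: %d\n", object_modified(&obj));	/* prints 0 */

	/* the block changed: kmemleak would defer reporting it */
	strcpy(block, "different contents");
	printf("modified: %d\n", object_modified(&obj));	/* prints 1 */
	return 0;
}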
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index ce79d91eeef7..002adc3cf3a1 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -93,6 +93,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/workqueue.h>
+#include <linux/crc32.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
@@ -108,7 +109,6 @@
 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
 #define SECS_FIRST_SCAN 60 /* delay before the first scan */
 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
 #define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER sizeof(void *)
@@ -149,6 +149,8 @@ struct kmemleak_object {
 	int min_count;
 	/* the total number of pointers found pointing to this object */
 	int count;
+	/* checksum for detecting modified objects */
+	u32 checksum;
 	/* memory ranges to be scanned inside an object (empty for all) */
 	struct hlist_head area_list;
 	unsigned long trace[MAX_TRACE];
@@ -164,8 +166,6 @@ struct kmemleak_object {
 #define OBJECT_REPORTED (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW (1 << 3)
 
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE 16
@@ -321,11 +321,6 @@ static bool color_gray(const struct kmemleak_object *object)
 		object->count >= object->min_count;
 }
 
-static bool color_black(const struct kmemleak_object *object)
-{
-	return object->min_count == KMEMLEAK_BLACK;
-}
-
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
@@ -333,7 +328,7 @@ static bool color_black(const struct kmemleak_object *object)
  */
 static bool unreferenced_object(struct kmemleak_object *object)
 {
-	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 		time_before_eq(object->jiffies + jiffies_min_age,
 			       jiffies_last_scan);
 }
@@ -381,6 +376,7 @@ static void dump_object_info(struct kmemleak_object *object)
 	pr_notice("  min_count = %d\n", object->min_count);
 	pr_notice("  count = %d\n", object->count);
 	pr_notice("  flags = 0x%lx\n", object->flags);
+	pr_notice("  checksum = %d\n", object->checksum);
 	pr_notice("  backtrace:\n");
 	print_stack_trace(&trace, 4);
 }
@@ -522,12 +518,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	INIT_HLIST_HEAD(&object->area_list);
 	spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
 	object->size = size;
 	object->min_count = min_count;
-	object->count = -1;	/* no color initially */
+	object->count = 0;	/* white color initially */
 	object->jiffies = jiffies;
+	object->checksum = 0;
 
 	/* task information */
 	if (in_irq()) {
@@ -949,6 +946,20 @@ void __ref kmemleak_no_scan(const void *ptr)
 EXPORT_SYMBOL(kmemleak_no_scan);
 
 /*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+	u32 old_csum = object->checksum;
+
+	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+		return false;
+
+	object->checksum = crc32(0, (void *)object->pointer, object->size);
+	return object->checksum != old_csum;
+}
+
+/*
  * Memory scanning is a long process and it needs to be interruptable. This
  * function checks whether such interrupt condition occured.
  */
@@ -1082,6 +1093,39 @@ out:
 }
 
 /*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+	struct kmemleak_object *object, *tmp;
+
+	/*
+	 * The list traversal is safe for both tail additions and removals
+	 * from inside the loop. The kmemleak objects cannot be freed from
+	 * outside the loop because their use_count was incremented.
+	 */
+	object = list_entry(gray_list.next, typeof(*object), gray_list);
+	while (&object->gray_list != &gray_list) {
+		cond_resched();
+
+		/* may add new objects to the list */
+		if (!scan_should_stop())
+			scan_object(object);
+
+		tmp = list_entry(object->gray_list.next, typeof(*object),
+				 gray_list);
+
+		/* remove the object from the list and release it */
+		list_del(&object->gray_list);
+		put_object(object);
+
+		object = tmp;
+	}
+	WARN_ON(!list_empty(&gray_list));
+}
+
+/*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
  * scan_mutex held.
@@ -1089,10 +1133,9 @@ out:
 static void kmemleak_scan(void)
 {
 	unsigned long flags;
-	struct kmemleak_object *object, *tmp;
+	struct kmemleak_object *object;
 	int i;
 	int new_leaks = 0;
-	int gray_list_pass = 0;
 
 	jiffies_last_scan = jiffies;
 
@@ -1113,7 +1156,6 @@ static void kmemleak_scan(void)
 #endif
 		/* reset the reference count (whiten the object) */
 		object->count = 0;
-		object->flags &= ~OBJECT_NEW;
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
@@ -1171,62 +1213,36 @@ static void kmemleak_scan(void)
 
 	/*
 	 * Scan the objects already referenced from the sections scanned
-	 * above. More objects will be referenced and, if there are no memory
-	 * leaks, all the objects will be scanned. The list traversal is safe
-	 * for both tail additions and removals from inside the loop. The
-	 * kmemleak objects cannot be freed from outside the loop because their
-	 * use_count was increased.
+	 * above.
 	 */
-repeat:
-	object = list_entry(gray_list.next, typeof(*object), gray_list);
-	while (&object->gray_list != &gray_list) {
-		cond_resched();
-
-		/* may add new objects to the list */
-		if (!scan_should_stop())
-			scan_object(object);
-
-		tmp = list_entry(object->gray_list.next, typeof(*object),
-				 gray_list);
-
-		/* remove the object from the list and release it */
-		list_del(&object->gray_list);
-		put_object(object);
-
-		object = tmp;
-	}
-
-	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
-		goto scan_end;
+	scan_gray_list();
 
 	/*
-	 * Check for new objects allocated during this scanning and add them
-	 * to the gray list.
+	 * Check for new or unreferenced objects modified since the previous
+	 * scan and color them gray until the next scan.
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
 		spin_lock_irqsave(&object->lock, flags);
-		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
-		    get_object(object)) {
-			object->flags &= ~OBJECT_NEW;
+		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+		    && update_checksum(object) && get_object(object)) {
+			/* color it gray temporarily */
+			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
 		spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
-	if (!list_empty(&gray_list))
-		goto repeat;
-
-scan_end:
-	WARN_ON(!list_empty(&gray_list));
+	/*
+	 * Re-scan the gray list for modified unreferenced objects.
+	 */
+	scan_gray_list();
 
 	/*
-	 * If scanning was stopped or new objects were being allocated at a
-	 * higher rate than gray list scanning, do not report any new
-	 * unreferenced objects.
+	 * If scanning was stopped do not report any new unreferenced objects.
 	 */
-	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+	if (scan_should_stop())
 		return;
 
 	/*