Diffstat (limited to 'mm/kmemleak.c')
 -rw-r--r--  mm/kmemleak.c | 166
 1 file changed, 70 insertions(+), 96 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c96f2c8700aa..eeece2deace2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -48,10 +48,10 @@
  * scanned. This list is only modified during a scanning episode when the
  * scan_mutex is held. At the end of a scan, the gray_list is always empty.
  * Note that the kmemleak_object.use_count is incremented when an object is
- * added to the gray_list and therefore cannot be freed
- * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
- *   file together with modifications to the memory scanning parameters
- *   including the scan_thread pointer
+ * added to the gray_list and therefore cannot be freed. This mutex also
+ *   prevents multiple users of the "kmemleak" debugfs file together with
+ *   modifications to the memory scanning parameters including the scan_thread
+ *   pointer
  *
  * The kmemleak_object structures have a use_count incremented or decremented
  * using the get_object()/put_object() functions. When the use_count becomes
@@ -190,15 +190,15 @@ static unsigned long max_addr;
 static unsigned long next_scan_yield;
 static struct task_struct *scan_thread;
 static unsigned long jiffies_scan_yield;
+/* used to avoid reporting of recently allocated objects */
 static unsigned long jiffies_min_age;
+static unsigned long jiffies_last_scan;
 /* delay between automatic memory scannings */
 static signed long jiffies_scan_wait;
 /* enables or disables the task stacks scanning */
-static int kmemleak_stack_scan;
-/* mutex protecting the memory scanning */
+static int kmemleak_stack_scan = 1;
+/* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
-/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
-static DEFINE_MUTEX(kmemleak_mutex);
 
 /* number of leaks reported (for limitation purposes) */
 static int reported_leaks;
@@ -235,7 +235,7 @@ struct early_log {
 };
 
 /* early logging buffer and current position */
-static struct early_log early_log[200];
+static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
 static int crt_early_log;
 
 static void kmemleak_disable(void);
@@ -279,15 +279,6 @@ static int color_gray(const struct kmemleak_object *object)
 }
 
 /*
- * Objects are considered referenced if their color is gray and they have not
- * been deleted.
- */
-static int referenced_object(struct kmemleak_object *object)
-{
-	return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
-}
-
-/*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
  * pointers temporarily stored in CPU registers.
@@ -295,42 +286,28 @@ static int referenced_object(struct kmemleak_object *object)
 static int unreferenced_object(struct kmemleak_object *object)
 {
 	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
-		time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
+		time_before_eq(object->jiffies + jiffies_min_age,
+			       jiffies_last_scan);
 }
 
 /*
- * Printing of the (un)referenced objects information, either to the seq file
- * or to the kernel log. The print_referenced/print_unreferenced functions
- * must be called with the object->lock held.
+ * Printing of the unreferenced objects information to the seq file. The
+ * print_unreferenced function must be called with the object->lock held.
  */
-#define print_helper(seq, x...) do {	\
-	struct seq_file *s = (seq);	\
-	if (s)				\
-		seq_printf(s, x);	\
-	else				\
-		pr_info(x);		\
-} while (0)
-
-static void print_referenced(struct kmemleak_object *object)
-{
-	pr_info("referenced object 0x%08lx (size %zu)\n",
-		object->pointer, object->size);
-}
-
 static void print_unreferenced(struct seq_file *seq,
 			       struct kmemleak_object *object)
 {
 	int i;
 
-	print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
-		     object->pointer, object->size);
-	print_helper(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
-		     object->comm, object->pid, object->jiffies);
-	print_helper(seq, "  backtrace:\n");
+	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+		   object->pointer, object->size);
+	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
+		   object->comm, object->pid, object->jiffies);
+	seq_printf(seq, "  backtrace:\n");
 
 	for (i = 0; i < object->trace_len; i++) {
 		void *ptr = (void *)object->trace[i];
-		print_helper(seq, "    [<%p>] %pS\n", ptr, ptr);
+		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 	}
 }
 
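Side note, not part of the patch: unreferenced_object() now compares the object's age against jiffies_last_scan, the timestamp recorded when the current scan started, rather than against the live jiffies counter, so objects allocated shortly before or during a scan are not reported by that scan. A minimal user-space sketch of the comparison, assuming the usual signed-difference form of the kernel's time_before_eq() macro (the macro copy below is illustrative, not the kernel header):

/* wrap_demo.c - illustration only */
#include <stdio.h>

/* assumed user-space equivalent of the kernel's time_before_eq() */
#define time_before_eq(a, b)  ((long)((a) - (b)) <= 0)

int main(void)
{
    unsigned long jiffies_last_scan = 1000UL; /* snapshot taken at scan start */
    unsigned long jiffies_min_age = 200UL;    /* minimum object age */
    unsigned long old_obj = 700UL;            /* allocated long before the scan */
    unsigned long new_obj = 950UL;            /* allocated just before the scan */

    /* 700 + 200 <= 1000: old enough, eligible for reporting */
    printf("old object reportable: %d\n",
           time_before_eq(old_obj + jiffies_min_age, jiffies_last_scan));
    /* 950 + 200 > 1000: still too young, skipped this scan */
    printf("new object reportable: %d\n",
           time_before_eq(new_obj + jiffies_min_age, jiffies_last_scan));
    return 0;
}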
@@ -554,8 +531,10 @@ static void delete_object(unsigned long ptr)
 	write_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, 0);
 	if (!object) {
+#ifdef DEBUG
 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 			      ptr);
+#endif
 		write_unlock_irqrestore(&kmemleak_lock, flags);
 		return;
 	}
@@ -571,8 +550,6 @@ static void delete_object(unsigned long ptr)
 	 * cannot be freed when it is being scanned.
 	 */
 	spin_lock_irqsave(&object->lock, flags);
-	if (object->flags & OBJECT_REPORTED)
-		print_referenced(object);
 	object->flags &= ~OBJECT_ALLOCATED;
 	spin_unlock_irqrestore(&object->lock, flags);
 	put_object(object);
@@ -696,7 +673,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
 	struct early_log *log;
 
 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
-		kmemleak_stop("Early log buffer exceeded\n");
+		pr_warning("Early log buffer exceeded\n");
+		kmemleak_disable();
 		return;
 	}
 
@@ -952,6 +930,9 @@ static void kmemleak_scan(void)
 	struct kmemleak_object *object, *tmp;
 	struct task_struct *task;
 	int i;
+	int new_leaks = 0;
+
+	jiffies_last_scan = jiffies;
 
 	/* prepare the kmemleak_object's */
 	rcu_read_lock();
@@ -1049,6 +1030,32 @@ static void kmemleak_scan(void)
 		object = tmp;
 	}
 	WARN_ON(!list_empty(&gray_list));
+
+	/*
+	 * If scanning was stopped do not report any new unreferenced objects.
+	 */
+	if (scan_should_stop())
+		return;
+
+	/*
+	 * Scanning result reporting.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(object, &object_list, object_list) {
+		spin_lock_irqsave(&object->lock, flags);
+		if (unreferenced_object(object) &&
+		    !(object->flags & OBJECT_REPORTED)) {
+			object->flags |= OBJECT_REPORTED;
+			new_leaks++;
+		}
+		spin_unlock_irqrestore(&object->lock, flags);
+	}
+	rcu_read_unlock();
+
+	if (new_leaks)
+		pr_info("%d new suspected memory leaks (see "
+			"/sys/kernel/debug/kmemleak)\n", new_leaks);
+
 }
 
 /*
@@ -1070,36 +1077,12 @@ static int kmemleak_scan_thread(void *arg)
 	}
 
 	while (!kthread_should_stop()) {
-		struct kmemleak_object *object;
 		signed long timeout = jiffies_scan_wait;
 
 		mutex_lock(&scan_mutex);
-
 		kmemleak_scan();
-		reported_leaks = 0;
-
-		rcu_read_lock();
-		list_for_each_entry_rcu(object, &object_list, object_list) {
-			unsigned long flags;
-
-			if (reported_leaks >= REPORTS_NR)
-				break;
-			spin_lock_irqsave(&object->lock, flags);
-			if (!(object->flags & OBJECT_REPORTED) &&
-			    unreferenced_object(object)) {
-				print_unreferenced(NULL, object);
-				object->flags |= OBJECT_REPORTED;
-				reported_leaks++;
-			} else if ((object->flags & OBJECT_REPORTED) &&
-				   referenced_object(object)) {
-				print_referenced(object);
-				object->flags &= ~OBJECT_REPORTED;
-			}
-			spin_unlock_irqrestore(&object->lock, flags);
-		}
-		rcu_read_unlock();
-
 		mutex_unlock(&scan_mutex);
+
 		/* wait before the next scan */
 		while (timeout && !kthread_should_stop())
 			timeout = schedule_timeout_interruptible(timeout);
@@ -1112,7 +1095,7 @@ static int kmemleak_scan_thread(void *arg)
 
 /*
  * Start the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
  */
 void start_scan_thread(void)
 {
@@ -1127,7 +1110,7 @@ void start_scan_thread(void)
 
 /*
  * Stop the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
  */
 void stop_scan_thread(void)
 {
@@ -1147,10 +1130,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
 	struct kmemleak_object *object;
 	loff_t n = *pos;
 
-	if (!n) {
-		kmemleak_scan();
+	if (!n)
 		reported_leaks = 0;
-	}
 	if (reported_leaks >= REPORTS_NR)
 		return NULL;
 
@@ -1211,11 +1192,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
 	unsigned long flags;
 
 	spin_lock_irqsave(&object->lock, flags);
-	if (!unreferenced_object(object))
-		goto out;
-	print_unreferenced(seq, object);
-	reported_leaks++;
-out:
+	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+		print_unreferenced(seq, object);
+		reported_leaks++;
+	}
 	spin_unlock_irqrestore(&object->lock, flags);
 	return 0;
 }
@@ -1234,13 +1214,10 @@ static int kmemleak_open(struct inode *inode, struct file *file)
 	if (!atomic_read(&kmemleak_enabled))
 		return -EBUSY;
 
-	ret = mutex_lock_interruptible(&kmemleak_mutex);
+	ret = mutex_lock_interruptible(&scan_mutex);
 	if (ret < 0)
 		goto out;
 	if (file->f_mode & FMODE_READ) {
-		ret = mutex_lock_interruptible(&scan_mutex);
-		if (ret < 0)
-			goto kmemleak_unlock;
 		ret = seq_open(file, &kmemleak_seq_ops);
 		if (ret < 0)
 			goto scan_unlock;
@@ -1249,8 +1226,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)
 
 scan_unlock:
 	mutex_unlock(&scan_mutex);
-kmemleak_unlock:
-	mutex_unlock(&kmemleak_mutex);
 out:
 	return ret;
 }
@@ -1259,11 +1234,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
 {
 	int ret = 0;
 
-	if (file->f_mode & FMODE_READ) {
+	if (file->f_mode & FMODE_READ)
 		seq_release(inode, file);
 	mutex_unlock(&scan_mutex);
-	}
-	mutex_unlock(&kmemleak_mutex);
 
 	return ret;
 }
@@ -1278,6 +1251,7 @@ static int kmemleak_release(struct inode *inode, struct file *file)
  *   scan=off - stop the automatic memory scanning thread
  *   scan=... - set the automatic memory scanning period in seconds (0 to
  *              disable it)
+ *   scan     - trigger a memory scan
  */
 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 			      size_t size, loff_t *ppos)
@@ -1315,7 +1289,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
 			start_scan_thread();
 		}
-	} else
+	} else if (strncmp(buf, "scan", 4) == 0)
+		kmemleak_scan();
+	else
 		return -EINVAL;
 
 	/* ignore the rest of the buffer, only one command at a time */
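Usage note, not part of the patch: with kmemleak_scan() removed from kmemleak_seq_start(), reading debug/kmemleak no longer triggers a scan by itself; the new "scan" write command is how a scan is requested on demand. A hedged user-space sketch of that workflow follows (the file path comes from the patch, everything else is illustrative and assumes debugfs is mounted at /sys/kernel/debug with kmemleak enabled):

/* trigger_scan.c - hypothetical helper, equivalent to
 * "echo scan > /sys/kernel/debug/kmemleak" followed by reading the file */
#include <stdio.h>

int main(void)
{
    const char *path = "/sys/kernel/debug/kmemleak";
    char line[256];
    FILE *f;

    /* request an immediate memory scan */
    f = fopen(path, "w");
    if (!f) {
        perror(path);
        return 1;
    }
    fputs("scan", f);
    fclose(f);

    /* read back the unreferenced-object reports produced so far */
    f = fopen(path, "r");
    if (!f) {
        perror(path);
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}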
@@ -1340,11 +1316,9 @@ static int kmemleak_cleanup_thread(void *arg)
 {
 	struct kmemleak_object *object;
 
-	mutex_lock(&kmemleak_mutex);
+	mutex_lock(&scan_mutex);
 	stop_scan_thread();
-	mutex_unlock(&kmemleak_mutex);
 
-	mutex_lock(&scan_mutex);
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list)
 		delete_object(object->pointer);
@@ -1486,9 +1460,9 @@ static int __init kmemleak_late_init(void)
 				     &kmemleak_fops);
 	if (!dentry)
 		pr_warning("Failed to create the debugfs kmemleak file\n");
-	mutex_lock(&kmemleak_mutex);
+	mutex_lock(&scan_mutex);
 	start_scan_thread();
-	mutex_unlock(&kmemleak_mutex);
+	mutex_unlock(&scan_mutex);
 
 	pr_info("Kernel memory leak detector initialized\n");
 