Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig     |   1
-rw-r--r--  mm/kmemleak.c  | 188
-rw-r--r--  mm/readahead.c |  12
-rw-r--r--  mm/shmem.c     |   2
-rw-r--r--  mm/slab.c      |  10
5 files changed, 122 insertions(+), 91 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 43ea8c3a2bbf..ee9f3e0f2b69 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -221,6 +221,7 @@ config KSM
 
 config DEFAULT_MMAP_MIN_ADDR
         int "Low address space to protect from user allocation"
+        depends on MMU
         default 4096
         help
           This is the portion of low virtual memory which should be protected
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 13f33b3081ec..5b069e4f5e48 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -93,6 +93,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/workqueue.h>
+#include <linux/crc32.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
@@ -108,7 +109,6 @@
 #define MSECS_MIN_AGE 5000      /* minimum object age for reporting */
 #define SECS_FIRST_SCAN 60      /* delay before the first scan */
 #define SECS_SCAN_WAIT 600      /* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES 25     /* maximum number of gray list scans */
 #define MAX_SCAN_SIZE 4096      /* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER sizeof(void *)
@@ -119,8 +119,8 @@
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
         struct hlist_node node;
-        unsigned long offset;
-        size_t length;
+        unsigned long start;
+        size_t size;
 };
 
 #define KMEMLEAK_GREY 0
@@ -149,6 +149,8 @@ struct kmemleak_object {
         int min_count;
         /* the total number of pointers found pointing to this object */
         int count;
+        /* checksum for detecting modified objects */
+        u32 checksum;
         /* memory ranges to be scanned inside an object (empty for all) */
         struct hlist_head area_list;
         unsigned long trace[MAX_TRACE];
@@ -164,8 +166,6 @@ struct kmemleak_object {
 #define OBJECT_REPORTED (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW (1 << 3)
 
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE 16
@@ -241,8 +241,6 @@ struct early_log {
         const void *ptr;                /* allocated/freed memory block */
         size_t size;                    /* memory block size */
         int min_count;                  /* minimum reference count */
-        unsigned long offset;           /* scan area offset */
-        size_t length;                  /* scan area length */
         unsigned long trace[MAX_TRACE]; /* stack trace */
         unsigned int trace_len;         /* stack trace length */
 };
@@ -323,11 +321,6 @@ static bool color_gray(const struct kmemleak_object *object)
                 object->count >= object->min_count;
 }
 
-static bool color_black(const struct kmemleak_object *object)
-{
-        return object->min_count == KMEMLEAK_BLACK;
-}
-
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not been deleted and have a minimum age to avoid false positives caused by
@@ -335,7 +328,7 @@ static bool color_black(const struct kmemleak_object *object)
  */
 static bool unreferenced_object(struct kmemleak_object *object)
 {
-        return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                 time_before_eq(object->jiffies + jiffies_min_age,
                                jiffies_last_scan);
 }
@@ -348,11 +341,13 @@ static void print_unreferenced(struct seq_file *seq,
                                struct kmemleak_object *object)
 {
         int i;
+        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
         seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                    object->pointer, object->size);
-        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
-                   object->comm, object->pid, object->jiffies);
+        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+                   object->comm, object->pid, object->jiffies,
+                   msecs_age / 1000, msecs_age % 1000);
         hex_dump_object(seq, object);
         seq_printf(seq, "  backtrace:\n");
 
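For illustration, the added age suffix makes a report read as follows (hypothetical object, PID and timings; the layout follows the seq_printf() format strings above):

    unreferenced object 0x083a8f00 (size 32):
      comm "modprobe", pid 1537, jiffies 4294740723 (age 120.504s)
      hex dump (first 32 bytes):
        ...
      backtrace:
        ...
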
@@ -381,6 +376,7 @@ static void dump_object_info(struct kmemleak_object *object)
         pr_notice("  min_count = %d\n", object->min_count);
         pr_notice("  count = %d\n", object->count);
         pr_notice("  flags = 0x%lx\n", object->flags);
+        pr_notice("  checksum = %d\n", object->checksum);
         pr_notice("  backtrace:\n");
         print_stack_trace(&trace, 4);
 }
@@ -522,12 +518,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
         INIT_HLIST_HEAD(&object->area_list);
         spin_lock_init(&object->lock);
         atomic_set(&object->use_count, 1);
-        object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+        object->flags = OBJECT_ALLOCATED;
         object->pointer = ptr;
         object->size = size;
         object->min_count = min_count;
-        object->count = -1;                     /* no color initially */
+        object->count = 0;                      /* white color initially */
         object->jiffies = jiffies;
+        object->checksum = 0;
 
         /* task information */
         if (in_irq()) {
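The switch from count = -1 ("no color") to count = 0 means a fresh object is white from the start, which is what the checksum pass further down relies on when it re-grays modified white objects. A minimal user-space sketch of the coloring rules, assuming the color_white()/color_gray() helpers as they appear elsewhere in kmemleak.c (quoted from the surrounding file, not part of this diff):

    #include <stdbool.h>
    #include <stdio.h>

    #define KMEMLEAK_GREY   0
    #define KMEMLEAK_BLACK  -1

    struct obj { int count; int min_count; };

    /* white: fewer references were found than required -> leak candidate */
    static bool color_white(const struct obj *o)
    {
        return o->count != KMEMLEAK_BLACK && o->count < o->min_count;
    }

    /* gray: referenced enough -> its own contents get scanned as well */
    static bool color_gray(const struct obj *o)
    {
        return o->min_count != KMEMLEAK_BLACK && o->count >= o->min_count;
    }

    int main(void)
    {
        struct obj fresh = { .count = 0, .min_count = 1 }; /* as in create_object() */
        printf("fresh: white=%d gray=%d\n",
               color_white(&fresh), color_gray(&fresh));
        return 0;
    }
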
@@ -720,14 +717,13 @@ static void make_black_object(unsigned long ptr)
  * Add a scanning area to the object. If at least one such area is added,
  * kmemleak will only scan these ranges rather than the whole memory block.
  */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-                          size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
         unsigned long flags;
         struct kmemleak_object *object;
         struct kmemleak_scan_area *area;
 
-        object = find_and_get_object(ptr, 0);
+        object = find_and_get_object(ptr, 1);
         if (!object) {
                 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                               ptr);
@@ -741,7 +737,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
         }
 
         spin_lock_irqsave(&object->lock, flags);
-        if (offset + length > object->size) {
+        if (ptr + size > object->pointer + object->size) {
                 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                 dump_object_info(object);
                 kmem_cache_free(scan_area_cache, area);
@@ -749,8 +745,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
         }
 
         INIT_HLIST_NODE(&area->node);
-        area->offset = offset;
-        area->length = length;
+        area->start = ptr;
+        area->size = size;
 
         hlist_add_head(&area->node, &object->area_list);
 out_unlock:
@@ -786,7 +782,7 @@ static void object_no_scan(unsigned long ptr)
  * processed later once kmemleak is fully initialized.
  */
 static void __init log_early(int op_type, const void *ptr, size_t size,
-                             int min_count, unsigned long offset, size_t length)
+                             int min_count)
 {
         unsigned long flags;
         struct early_log *log;
@@ -808,8 +804,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
         log->ptr = ptr;
         log->size = size;
         log->min_count = min_count;
-        log->offset = offset;
-        log->length = length;
         if (op_type == KMEMLEAK_ALLOC)
                 log->trace_len = __save_stack_trace(log->trace);
         crt_early_log++;
@@ -858,7 +852,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 create_object((unsigned long)ptr, size, min_count, gfp);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +867,7 @@ void __ref kmemleak_free(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 delete_object_full((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +882,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 delete_object_part((unsigned long)ptr, size);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +897,7 @@ void __ref kmemleak_not_leak(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 make_gray_object((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +913,21 @@ void __ref kmemleak_ignore(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 make_black_object((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-                              size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-                add_scan_area((unsigned long)ptr, offset, length, gfp);
+                add_scan_area((unsigned long)ptr, size, gfp);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
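The caller-visible half of the rework: a scan area is now named by the pointer at which scanning should start rather than by an offset into the enclosing object. Schematically, using the mm/slab.c conversion at the end of this diff (slabp and local_flags as used there):

    /* before: enclosing object plus offset/length into it */
    kmemleak_scan_area(slabp, offsetof(struct slab, list),
                       sizeof(struct list_head), local_flags);

    /* after: start pointer plus size; add_scan_area() looks up the
     * enclosing object itself via find_and_get_object(ptr, 1) */
    kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
                       local_flags);
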
@@ -948,11 +941,25 @@ void __ref kmemleak_no_scan(const void *ptr)
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                 object_no_scan((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
-                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
 /*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+        u32 old_csum = object->checksum;
+
+        if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+                return false;
+
+        object->checksum = crc32(0, (void *)object->pointer, object->size);
+        return object->checksum != old_csum;
+}
+
+/*
  * Memory scanning is a long process and it needs to be interruptible. This
  * function checks whether such an interrupt condition occurred.
  */
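The idea behind update_checksum(), reduced to user space: hash the object's payload on every scan and treat a changed hash as "this block was written to since the last scan". A minimal sketch with zlib's crc32() standing in for the kernel helper (struct and names invented for illustration; build with cc demo.c -lz):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <zlib.h>

    struct tracked { const unsigned char *ptr; size_t size; uint32_t checksum; };

    /* returns true if the block changed since the previous call */
    static bool update_checksum(struct tracked *t)
    {
        uint32_t old = t->checksum;

        t->checksum = crc32(0L, t->ptr, t->size);
        return t->checksum != old;
    }

    int main(void)
    {
        unsigned char buf[16] = "leak candidate";
        struct tracked t = { buf, sizeof(buf), 0 };

        update_checksum(&t);                             /* prime the checksum */
        printf("unchanged: %d\n", update_checksum(&t));  /* prints 0 */
        buf[0] = 'L';                                    /* simulate a store into the block */
        printf("changed:   %d\n", update_checksum(&t));  /* prints 1 */
        return 0;
    }
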
@@ -1031,11 +1038,14 @@ static void scan_block(void *_start, void *_end,
                  * added to the gray_list.
                  */
                 object->count++;
-                if (color_gray(object))
+                if (color_gray(object)) {
                         list_add_tail(&object->gray_list, &gray_list);
-                else
-                        put_object(object);
+                        spin_unlock_irqrestore(&object->lock, flags);
+                        continue;
+                }
+
                 spin_unlock_irqrestore(&object->lock, flags);
+                put_object(object);
         }
 }
 
@@ -1075,14 +1085,47 @@ static void scan_object(struct kmemleak_object *object)
                 }
         } else
                 hlist_for_each_entry(area, elem, &object->area_list, node)
-                        scan_block((void *)(object->pointer + area->offset),
-                                   (void *)(object->pointer + area->offset
-                                            + area->length), object, 0);
+                        scan_block((void *)area->start,
+                                   (void *)(area->start + area->size),
+                                   object, 0);
 out:
         spin_unlock_irqrestore(&object->lock, flags);
 }
 
 /*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+        struct kmemleak_object *object, *tmp;
+
+        /*
+         * The list traversal is safe for both tail additions and removals
+         * from inside the loop. The kmemleak objects cannot be freed from
+         * outside the loop because their use_count was incremented.
+         */
+        object = list_entry(gray_list.next, typeof(*object), gray_list);
+        while (&object->gray_list != &gray_list) {
+                cond_resched();
+
+                /* may add new objects to the list */
+                if (!scan_should_stop())
+                        scan_object(object);
+
+                tmp = list_entry(object->gray_list.next, typeof(*object),
+                                 gray_list);
+
+                /* remove the object from the list and release it */
+                list_del(&object->gray_list);
+                put_object(object);
+
+                object = tmp;
+        }
+        WARN_ON(!list_empty(&gray_list));
+}
+
+/*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
  * scan_mutex held.
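The traversal in scan_gray_list() is worth isolating: the cursor grabs the next node before the current one is unlinked, so the walk survives both the removal of the current node and tail appends made by scan_object(). A self-contained user-space miniature (toy list type, not the kernel's struct list_head):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next, *prev; int id; };

    static struct node gray = { &gray, &gray, -1 };      /* circular sentinel head */

    static void add_tail(struct node *n)
    {
        n->prev = gray.prev;
        n->next = &gray;
        gray.prev->next = n;
        gray.prev = n;
    }

    static void del(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            add_tail(n);
        }

        struct node *obj = gray.next, *tmp;
        while (obj != &gray) {
            printf("scanning %d\n", obj->id);
            if (obj->id == 1) {              /* scanning may append new work */
                struct node *n = malloc(sizeof(*n));
                n->id = 99;
                add_tail(n);
            }
            tmp = obj->next;                 /* capture next before unlinking */
            del(obj);
            free(obj);
            obj = tmp;
        }
        return 0;                            /* visits 0, 1, 2, then 99 */
    }
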
@@ -1090,10 +1133,9 @@ out:
 static void kmemleak_scan(void)
 {
         unsigned long flags;
-        struct kmemleak_object *object, *tmp;
+        struct kmemleak_object *object;
         int i;
         int new_leaks = 0;
-        int gray_list_pass = 0;
 
         jiffies_last_scan = jiffies;
 
@@ -1114,7 +1156,6 @@ static void kmemleak_scan(void)
 #endif
                 /* reset the reference count (whiten the object) */
                 object->count = 0;
-                object->flags &= ~OBJECT_NEW;
                 if (color_gray(object) && get_object(object))
                         list_add_tail(&object->gray_list, &gray_list);
 
@@ -1172,62 +1213,36 @@ static void kmemleak_scan(void)
 
         /*
          * Scan the objects already referenced from the sections scanned
-         * above. More objects will be referenced and, if there are no memory
-         * leaks, all the objects will be scanned. The list traversal is safe
-         * for both tail additions and removals from inside the loop. The
-         * kmemleak objects cannot be freed from outside the loop because their
-         * use_count was increased.
+         * above.
          */
-repeat:
-        object = list_entry(gray_list.next, typeof(*object), gray_list);
-        while (&object->gray_list != &gray_list) {
-                cond_resched();
-
-                /* may add new objects to the list */
-                if (!scan_should_stop())
-                        scan_object(object);
-
-                tmp = list_entry(object->gray_list.next, typeof(*object),
-                                 gray_list);
-
-                /* remove the object from the list and release it */
-                list_del(&object->gray_list);
-                put_object(object);
-
-                object = tmp;
-        }
-
-        if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
-                goto scan_end;
+        scan_gray_list();
 
         /*
-         * Check for new objects allocated during this scanning and add them
-         * to the gray list.
+         * Check for new or unreferenced objects modified since the previous
+         * scan and color them gray until the next scan.
          */
         rcu_read_lock();
         list_for_each_entry_rcu(object, &object_list, object_list) {
                 spin_lock_irqsave(&object->lock, flags);
-                if ((object->flags & OBJECT_NEW) && !color_black(object) &&
-                    get_object(object)) {
-                        object->flags &= ~OBJECT_NEW;
+                if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+                    && update_checksum(object) && get_object(object)) {
+                        /* color it gray temporarily */
+                        object->count = object->min_count;
                         list_add_tail(&object->gray_list, &gray_list);
                 }
                 spin_unlock_irqrestore(&object->lock, flags);
         }
         rcu_read_unlock();
 
-        if (!list_empty(&gray_list))
-                goto repeat;
-
-scan_end:
-        WARN_ON(!list_empty(&gray_list));
+        /*
+         * Re-scan the gray list for modified unreferenced objects.
+         */
+        scan_gray_list();
 
         /*
-         * If scanning was stopped or new objects were being allocated at a
-         * higher rate than gray list scanning, do not report any new
-         * unreferenced objects.
+         * If scanning was stopped do not report any new unreferenced objects.
          */
-        if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+        if (scan_should_stop())
                 return;
 
         /*
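Condensed, the scanning phase now reads (paraphrased shorthand of the hunk above; locking, get_object()/put_object() and RCU details omitted, so this is not the literal kernel code):

    scan_gray_list();            /* pass 1: follow references from the roots */

    /* a white object whose CRC changed was written to after it was scanned,
     * so a pointer into it may have been missed: gray it for one more pass */
    list_for_each_entry_rcu(object, &object_list, object_list)
        if (color_white(object) && (object->flags & OBJECT_ALLOCATED) &&
            update_checksum(object)) {
            object->count = object->min_count;
            list_add_tail(&object->gray_list, &gray_list);
        }

    scan_gray_list();            /* pass 2: scan only the modified objects */

Two bounded passes replace the old repeat loop, which could spin up to GRAY_LIST_PASSES (25) times when allocations outpaced the gray-list scan.
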
@@ -1642,8 +1657,7 @@ void __init kmemleak_init(void)
                         kmemleak_ignore(log->ptr);
                         break;
                 case KMEMLEAK_SCAN_AREA:
-                        kmemleak_scan_area(log->ptr, log->offset, log->length,
-                                           GFP_KERNEL);
+                        kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
                         break;
                 case KMEMLEAK_NO_SCAN:
                         kmemleak_no_scan(log->ptr);
diff --git a/mm/readahead.c b/mm/readahead.c
index aa1aa2345235..033bc135a41f 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -547,5 +547,17 @@ page_cache_async_readahead(struct address_space *mapping,
 
         /* do read-ahead */
         ondemand_readahead(mapping, ra, filp, true, offset, req_size);
+
+#ifdef CONFIG_BLOCK
+        /*
+         * Normally the current page is !uptodate and lock_page() will be
+         * immediately called to implicitly unplug the device. However this
+         * is not always true for RAID configurations, where data arrives
+         * not strictly in its submission order. In this case we need to
+         * explicitly kick off the IO.
+         */
+        if (PageUptodate(page))
+                blk_run_backing_dev(mapping->backing_dev_info, NULL);
+#endif
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
diff --git a/mm/shmem.c b/mm/shmem.c
index f8485062f3ba..eef4ebea5158 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1830,6 +1830,8 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
                 iput(inode);
                 return error;
         }
+#else
+        error = 0;
 #endif
         if (dir->i_mode & S_ISGID) {
                 inode->i_gid = dir->i_gid;
diff --git a/mm/slab.c b/mm/slab.c
index 7560eb00637c..7d41f15b48d3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,9 +2275,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         /*
          * Determine if the slab management is 'on' or 'off' slab.
          * (bootstrapping cannot cope with offslab caches so don't do
-         * it too early on.)
+         * it too early on. Always use on-slab management when
+         * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak.)
          */
-        if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+        if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+            !(flags & SLAB_NOLEAKTRACE))
                 /*
                  * Size is large, assume best to place the slab management obj
                  * off-slab (should allow better packing of objs).
@@ -2596,8 +2598,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                  * kmemleak does not treat the ->s_mem pointer as a reference
                  * to the object. Otherwise we will not report the leak.
                  */
-                kmemleak_scan_area(slabp, offsetof(struct slab, list),
-                                   sizeof(struct list_head), local_flags);
+                kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+                                   local_flags);
                 if (!slabp)
                         return NULL;
         } else {