Diffstat (limited to 'mm/kmemleak.c')
 mm/kmemleak.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index bd9bc214091b..aacee45616fc 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -113,7 +113,9 @@
 #define BYTES_PER_POINTER       sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define GFP_KMEMLEAK_MASK       (GFP_KERNEL | GFP_ATOMIC)
+#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
+                                 __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
@@ -263,7 +265,7 @@ static void kmemleak_disable(void);
 } while (0)
 
 /*
- * Macro invoked when a serious kmemleak condition occured and cannot be
+ * Macro invoked when a serious kmemleak condition occurred and cannot be
  * recovered from. Kmemleak will be disabled and further allocation/freeing
  * tracing no longer available.
  */
@@ -511,9 +513,10 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
         struct kmemleak_object *object;
         struct prio_tree_node *node;
 
-        object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
+        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
         if (!object) {
-                kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
+                pr_warning("Cannot allocate a kmemleak_object structure\n");
+                kmemleak_disable();
                 return NULL;
         }
 
@@ -734,9 +737,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                 return;
         }
 
-        area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
+        area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
         if (!area) {
-                kmemleak_warn("Cannot allocate a scan area\n");
+                pr_warning("Cannot allocate a scan area\n");
                 goto out;
         }
 
@@ -1003,7 +1006,7 @@ static bool update_checksum(struct kmemleak_object *object)
 
 /*
  * Memory scanning is a long process and it needs to be interruptable. This
- * function checks whether such interrupt condition occured.
+ * function checks whether such interrupt condition occurred.
  */
 static int scan_should_stop(void)
 {
@@ -1411,9 +1414,12 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
         ++(*pos);
 
         list_for_each_continue_rcu(n, &object_list) {
-                next_obj = list_entry(n, struct kmemleak_object, object_list);
-                if (get_object(next_obj))
+                struct kmemleak_object *obj =
+                        list_entry(n, struct kmemleak_object, object_list);
+                if (get_object(obj)) {
+                        next_obj = obj;
                         break;
+                }
         }
 
         put_object(prev_obj);
@@ -1730,7 +1736,7 @@ static int __init kmemleak_late_init(void)
 
         if (atomic_read(&kmemleak_error)) {
                 /*
-                 * Some error occured and kmemleak was disabled. There is a
+                 * Some error occurred and kmemleak was disabled. There is a
                  * small chance that kmemleak_disable() was called immediately
                  * after setting kmemleak_initialized and we may end up with
                  * two clean-up threads but serialized by scan_mutex.
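
As a rough illustration of the central change: the new gfp_kmemleak_mask() helper keeps only the GFP_KERNEL/GFP_ATOMIC bits supplied by the caller and unconditionally ORs in __GFP_NORETRY, __GFP_NOMEMALLOC and __GFP_NOWARN, so kmemleak's own metadata allocations are allowed to fail quietly instead of retrying hard or warning. Below is a minimal user-space sketch of that masking logic only; the flag bit values are placeholders for illustration, not the real kernel definitions.

/*
 * Sketch of the gfp_kmemleak_mask() bit logic in isolation.
 * The flag values are placeholders, not the kernel's GFP definitions.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define GFP_KERNEL        0x01u  /* placeholder value */
#define GFP_ATOMIC        0x02u  /* placeholder value */
#define __GFP_NORETRY     0x10u  /* placeholder value */
#define __GFP_NOMEMALLOC  0x20u  /* placeholder value */
#define __GFP_NOWARN      0x40u  /* placeholder value */
#define __GFP_HIGHMEM     0x80u  /* placeholder for an unrelated caller flag */

/* Same shape as the macro introduced by this patch */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)

int main(void)
{
        /* A caller passing GFP_ATOMIC plus an unrelated flag... */
        gfp_t caller = GFP_ATOMIC | __GFP_HIGHMEM;

        /*
         * ...keeps only the GFP_ATOMIC bit and gains the three
         * "allowed to fail quietly" flags.
         */
        printf("caller=%#x kmemleak=%#x\n", caller, gfp_kmemleak_mask(caller));
        return 0;
}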