aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-06-17 13:42:21 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-17 13:42:21 -0400
commit3fe0344faf7fdcb158bd5c1a9aec960a8d70c8e8 (patch)
tree30d5b5fd277b43f2b33276c3b879e4f4b1918aaa /mm
parentc30938d59e7468259855da91a885b19e8044b5f4 (diff)
parent2030117d2761c4c955e1a0683fa96ab62e4b197b (diff)
Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6
* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Fix some typos in comments
  kmemleak: Rename kmemleak_panic to kmemleak_stop
  kmemleak: Only use GFP_KERNEL|GFP_ATOMIC for the internal allocations
Diffstat (limited to 'mm')
-rw-r--r--mm/kmemleak.c27
1 file changed, 15 insertions, 12 deletions
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 58ec86c9e58a..ec759b60077a 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -109,6 +109,9 @@
109 109
110#define BYTES_PER_POINTER sizeof(void *) 110#define BYTES_PER_POINTER sizeof(void *)
111 111
112/* GFP bitmask for kmemleak internal allocations */
113#define GFP_KMEMLEAK_MASK (GFP_KERNEL | GFP_ATOMIC)
114
112/* scanning area inside a memory block */ 115/* scanning area inside a memory block */
113struct kmemleak_scan_area { 116struct kmemleak_scan_area {
114 struct hlist_node node; 117 struct hlist_node node;
@@ -199,9 +202,9 @@ static DEFINE_MUTEX(kmemleak_mutex);
199static int reported_leaks; 202static int reported_leaks;
200 203
201/* 204/*
202 * Early object allocation/freeing logging. Kkmemleak is initialized after the 205 * Early object allocation/freeing logging. Kmemleak is initialized after the
203 * kernel allocator. However, both the kernel allocator and kmemleak may 206 * kernel allocator. However, both the kernel allocator and kmemleak may
204 * allocate memory blocks which need to be tracked. Kkmemleak defines an 207 * allocate memory blocks which need to be tracked. Kmemleak defines an
205 * arbitrary buffer to hold the allocation/freeing information before it is 208 * arbitrary buffer to hold the allocation/freeing information before it is
206 * fully initialized. 209 * fully initialized.
207 */ 210 */
@@ -245,10 +248,10 @@ static void kmemleak_disable(void);
245 248
246/* 249/*
247 * Macro invoked when a serious kmemleak condition occured and cannot be 250 * Macro invoked when a serious kmemleak condition occured and cannot be
248 * recovered from. Kkmemleak will be disabled and further allocation/freeing 251 * recovered from. Kmemleak will be disabled and further allocation/freeing
249 * tracing no longer available. 252 * tracing no longer available.
250 */ 253 */
251#define kmemleak_panic(x...) do { \ 254#define kmemleak_stop(x...) do { \
252 kmemleak_warn(x); \ 255 kmemleak_warn(x); \
253 kmemleak_disable(); \ 256 kmemleak_disable(); \
254} while (0) 257} while (0)
@@ -462,10 +465,10 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
462 struct prio_tree_node *node; 465 struct prio_tree_node *node;
463 struct stack_trace trace; 466 struct stack_trace trace;
464 467
465 object = kmem_cache_alloc(object_cache, gfp & ~GFP_SLAB_BUG_MASK); 468 object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
466 if (!object) { 469 if (!object) {
467 kmemleak_panic("kmemleak: Cannot allocate a kmemleak_object " 470 kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object "
468 "structure\n"); 471 "structure\n");
469 return; 472 return;
470 } 473 }
471 474
@@ -524,8 +527,8 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
524 if (node != &object->tree_node) { 527 if (node != &object->tree_node) {
525 unsigned long flags; 528 unsigned long flags;
526 529
527 kmemleak_panic("kmemleak: Cannot insert 0x%lx into the object " 530 kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object "
528 "search tree (already existing)\n", ptr); 531 "search tree (already existing)\n", ptr);
529 object = lookup_object(ptr, 1); 532 object = lookup_object(ptr, 1);
530 spin_lock_irqsave(&object->lock, flags); 533 spin_lock_irqsave(&object->lock, flags);
531 dump_object_info(object); 534 dump_object_info(object);
@@ -636,7 +639,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
636 return; 639 return;
637 } 640 }
638 641
639 area = kmem_cache_alloc(scan_area_cache, gfp & ~GFP_SLAB_BUG_MASK); 642 area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
640 if (!area) { 643 if (!area) {
641 kmemleak_warn("kmemleak: Cannot allocate a scan area\n"); 644 kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
642 goto out; 645 goto out;
@@ -696,7 +699,7 @@ static void log_early(int op_type, const void *ptr, size_t size,
696 struct early_log *log; 699 struct early_log *log;
697 700
698 if (crt_early_log >= ARRAY_SIZE(early_log)) { 701 if (crt_early_log >= ARRAY_SIZE(early_log)) {
699 kmemleak_panic("kmemleak: Early log buffer exceeded\n"); 702 kmemleak_stop("kmemleak: Early log buffer exceeded\n");
700 return; 703 return;
701 } 704 }
702 705
@@ -1404,7 +1407,7 @@ static int kmemleak_boot_config(char *str)
1404early_param("kmemleak", kmemleak_boot_config); 1407early_param("kmemleak", kmemleak_boot_config);
1405 1408
1406/* 1409/*
1407 * Kkmemleak initialization. 1410 * Kmemleak initialization.
1408 */ 1411 */
1409void __init kmemleak_init(void) 1412void __init kmemleak_init(void)
1410{ 1413{