about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorCatalin Marinas <catalin.marinas@arm.com>2009-07-07 05:32:56 -0400
committerCatalin Marinas <catalin.marinas@arm.com>2009-07-07 05:32:56 -0400
commit4b8a96744c0c27ab94fb4e8155d4384c3b399e27 (patch)
treee769e01395a3a13e28bf146fce20a3f3f2a95750 /mm
parentbf2a76b317c6ccc6f7b6b1dc09664c5b6a155c61 (diff)
kmemleak: Add more cond_resched() calls in the scanning thread
Following a recent fix to no longer reschedule in the scan_block() function, the system may become unresponsive with !PREEMPT. This patch re-adds the cond_resched() call to scan_block() but conditioned by the allow_resched parameter.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r-- mm/kmemleak.c | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 60065531f60c..93f14818e901 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -807,7 +807,7 @@ static int scan_should_stop(void)
807 * found to the gray list. 807 * found to the gray list.
808 */ 808 */
809static void scan_block(void *_start, void *_end, 809static void scan_block(void *_start, void *_end,
810 struct kmemleak_object *scanned) 810 struct kmemleak_object *scanned, int allow_resched)
811{ 811{
812 unsigned long *ptr; 812 unsigned long *ptr;
813 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); 813 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
@@ -818,6 +818,8 @@ static void scan_block(void *_start, void *_end,
818 unsigned long pointer = *ptr; 818 unsigned long pointer = *ptr;
819 struct kmemleak_object *object; 819 struct kmemleak_object *object;
820 820
821 if (allow_resched)
822 cond_resched();
821 if (scan_should_stop()) 823 if (scan_should_stop())
822 break; 824 break;
823 825
@@ -881,12 +883,12 @@ static void scan_object(struct kmemleak_object *object)
881 goto out; 883 goto out;
882 if (hlist_empty(&object->area_list)) 884 if (hlist_empty(&object->area_list))
883 scan_block((void *)object->pointer, 885 scan_block((void *)object->pointer,
884 (void *)(object->pointer + object->size), object); 886 (void *)(object->pointer + object->size), object, 0);
885 else 887 else
886 hlist_for_each_entry(area, elem, &object->area_list, node) 888 hlist_for_each_entry(area, elem, &object->area_list, node)
887 scan_block((void *)(object->pointer + area->offset), 889 scan_block((void *)(object->pointer + area->offset),
888 (void *)(object->pointer + area->offset 890 (void *)(object->pointer + area->offset
889 + area->length), object); 891 + area->length), object, 0);
890out: 892out:
891 spin_unlock_irqrestore(&object->lock, flags); 893 spin_unlock_irqrestore(&object->lock, flags);
892} 894}
@@ -931,14 +933,14 @@ static void kmemleak_scan(void)
931 rcu_read_unlock(); 933 rcu_read_unlock();
932 934
933 /* data/bss scanning */ 935 /* data/bss scanning */
934 scan_block(_sdata, _edata, NULL); 936 scan_block(_sdata, _edata, NULL, 1);
935 scan_block(__bss_start, __bss_stop, NULL); 937 scan_block(__bss_start, __bss_stop, NULL, 1);
936 938
937#ifdef CONFIG_SMP 939#ifdef CONFIG_SMP
938 /* per-cpu sections scanning */ 940 /* per-cpu sections scanning */
939 for_each_possible_cpu(i) 941 for_each_possible_cpu(i)
940 scan_block(__per_cpu_start + per_cpu_offset(i), 942 scan_block(__per_cpu_start + per_cpu_offset(i),
941 __per_cpu_end + per_cpu_offset(i), NULL); 943 __per_cpu_end + per_cpu_offset(i), NULL, 1);
942#endif 944#endif
943 945
944 /* 946 /*
@@ -960,7 +962,7 @@ static void kmemleak_scan(void)
960 /* only scan if page is in use */ 962 /* only scan if page is in use */
961 if (page_count(page) == 0) 963 if (page_count(page) == 0)
962 continue; 964 continue;
963 scan_block(page, page + 1, NULL); 965 scan_block(page, page + 1, NULL, 1);
964 } 966 }
965 } 967 }
966 968
@@ -972,7 +974,8 @@ static void kmemleak_scan(void)
972 read_lock(&tasklist_lock); 974 read_lock(&tasklist_lock);
973 for_each_process(task) 975 for_each_process(task)
974 scan_block(task_stack_page(task), 976 scan_block(task_stack_page(task),
975 task_stack_page(task) + THREAD_SIZE, NULL); 977 task_stack_page(task) + THREAD_SIZE,
978 NULL, 0);
976 read_unlock(&tasklist_lock); 979 read_unlock(&tasklist_lock);
977 } 980 }
978 981