path: root/mm/slub.c
author      Vegard Nossum <vegard.nossum@gmail.com>   2008-11-25 10:55:53 -0500
committer   Vegard Nossum <vegard.nossum@gmail.com>   2009-06-15 09:48:33 -0400
commit      b1eeab67682a5e397aecf172046b3a8bd4808ae4
tree        c357b6ac1945dc8beecc2f8c4d84660ad8d35aae /mm/slub.c
parent      9b5cab31897e9e89e36c0c2a89b16b93ff1a971a
kmemcheck: add hooks for the page allocator
This adds support for tracking the initializedness of memory that was
allocated with the page allocator. Highmem requests are not tracked.

Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
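The distinction the patch draws between "unallocated" and "uninitialized" pages can be pictured with a small userspace model: every tracked byte carries a shadow state, an allocation flips it from unallocated to uninitialized, a store makes it initialized, and a load from anything else is reported. This is only a sketch of the idea, not kmemcheck itself; all names in it (toy_alloc, toy_write, toy_read, the static arena) are invented for illustration.

/*
 * Toy userspace model of the shadow-memory idea described above.
 * NOT the kernel API: names and the single static arena are made up.
 */
#include <stdio.h>
#include <stddef.h>

enum shadow_state { SH_UNALLOCATED, SH_UNINITIALIZED, SH_INITIALIZED };

#define ARENA_SIZE 64

static unsigned char arena[ARENA_SIZE];        /* the "real" memory  */
static enum shadow_state shadow[ARENA_SIZE];   /* one state per byte */

/* Hand out a region and mark its shadow "allocated but uninitialized". */
static unsigned char *toy_alloc(size_t off, size_t len)
{
        for (size_t i = off; i < off + len; i++)
                shadow[i] = SH_UNINITIALIZED;
        return &arena[off];
}

/* A store makes the byte defined. */
static void toy_write(size_t off, unsigned char val)
{
        arena[off] = val;
        shadow[off] = SH_INITIALIZED;
}

/* A load from anything other than initialized memory is reported. */
static unsigned char toy_read(size_t off)
{
        if (shadow[off] != SH_INITIALIZED)
                fprintf(stderr, "warning: read of %s byte at offset %zu\n",
                        shadow[off] == SH_UNALLOCATED ? "unallocated"
                                                      : "uninitialized", off);
        return arena[off];
}

int main(void)
{
        toy_alloc(0, 8);     /* bytes 0..7 become uninitialized        */
        toy_write(0, 42);    /* byte 0 becomes initialized             */
        toy_read(0);         /* fine                                   */
        toy_read(1);         /* warns: uninitialized                   */
        toy_read(20);        /* warns: unallocated (never handed out)  */
        return 0;
}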
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 1cebaa747ad3..898fb5047dcc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1066,6 +1066,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
         int order = oo_order(oo);
 
+        flags |= __GFP_NOTRACK;
+
         if (node == -1)
                 return alloc_pages(flags, order);
         else
@@ -1097,7 +1099,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         if (kmemcheck_enabled
                 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
         {
-                kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+                int pages = 1 << oo_order(oo);
+
+                kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+                /*
+                 * Objects from caches that have a constructor don't get
+                 * cleared when they're allocated, so we need to do it here.
+                 */
+                if (s->ctor)
+                        kmemcheck_mark_uninitialized_pages(page, pages);
+                else
+                        kmemcheck_mark_unallocated_pages(page, pages);
         }
 
         page->objects = oo_objects(oo);
@@ -1173,8 +1186,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                 __ClearPageSlubDebug(page);
         }
 
-        if (kmemcheck_page_is_tracked(page))
-                kmemcheck_free_shadow(s, page, compound_order(page));
+        kmemcheck_free_shadow(page, compound_order(page));
 
         mod_zone_page_state(page_zone(page),
                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -2734,9 +2746,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-        struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-                                             get_order(size));
+        struct page *page;
 
+        flags |= __GFP_COMP | __GFP_NOTRACK;
+        page = alloc_pages_node(node, flags, get_order(size));
         if (page)
                 return page_address(page);
         else
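The hunks above also show an opt-out path: callers that must not be tracked OR __GFP_NOTRACK into the gfp mask (alloc_slab_page, kmalloc_large_node), and caches flagged SLAB_NOTRACK skip the shadow allocation entirely. A minimal sketch of that flag-based opt-out pattern follows; DEMO_NOTRACK and demo_alloc() are invented stand-ins, not kernel code.

/*
 * Sketch of the opt-out pattern: a bit in the allocation flags disables
 * the extra bookkeeping for that particular allocation.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_NOTRACK 0x1u   /* stand-in for __GFP_NOTRACK */

static void *demo_alloc(size_t size, unsigned int flags)
{
        void *p = malloc(size);

        /* Only pay for shadow tracking when the caller did not opt out. */
        if (p && !(flags & DEMO_NOTRACK))
                printf("tracking %zu bytes at %p\n", size, p);
        return p;
}

int main(void)
{
        void *a = demo_alloc(32, 0);            /* tracked                  */
        void *b = demo_alloc(32, DEMO_NOTRACK); /* skipped, like NOTRACK    */

        free(a);
        free(b);
        return 0;
}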