author     Vegard Nossum <vegard.nossum@gmail.com>    2008-11-25 10:55:53 -0500
committer  Vegard Nossum <vegard.nossum@gmail.com>    2009-06-15 09:48:33 -0400
commit     b1eeab67682a5e397aecf172046b3a8bd4808ae4 (patch)
tree       c357b6ac1945dc8beecc2f8c4d84660ad8d35aae /mm
parent     9b5cab31897e9e89e36c0c2a89b16b93ff1a971a (diff)
kmemcheck: add hooks for the page allocator
This adds support for tracking the initialization state of memory
allocated with the page allocator. Highmem requests are not tracked.
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
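
To make the behaviour concrete before the diff: the snippet below is
illustrative only, not part of the patch, and the helper name
kmemcheck_hook_examples() is made up for illustration. It shows how
different GFP flags interact with the new page-allocator hooks.

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Illustrative only: how GFP flags select kmemcheck tracking. */
    static void kmemcheck_hook_examples(void)
    {
            struct page *p;

            /* Tracked: a shadow is allocated and the pages are
             * marked uninitialized. */
            p = alloc_pages(GFP_KERNEL, 2);
            if (p)
                    __free_pages(p, 2); /* shadow freed on the free path */

            /* Tracked, but marked initialized, since the pages are
             * zeroed by the allocator. */
            p = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
            if (p)
                    __free_pages(p, 0);

            /* Opted out: no shadow is allocated. The slab allocators
             * use this for their backing pages, see mm/slab.c and
             * mm/slub.c below. */
            p = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
            if (p)
                    __free_pages(p, 0);

            /* Highmem requests are never tracked. */
            p = alloc_pages(GFP_HIGHUSER, 0);
            if (p)
                    __free_pages(p, 0);
    }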
Diffstat (limited to 'mm')
-rw-r--r--   mm/kmemcheck.c    45
-rw-r--r--   mm/page_alloc.c   18
-rw-r--r--   mm/slab.c         15
-rw-r--r--   mm/slub.c         23
4 files changed, 78 insertions, 23 deletions
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
index eaa41b802611..fd814fd61319 100644
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -1,10 +1,10 @@
+#include <linux/gfp.h>
 #include <linux/mm_types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/kmemcheck.h>
 
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-	struct page *page, int order)
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 	struct page *shadow;
 	int pages;
@@ -16,7 +16,7 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * With kmemcheck enabled, we need to allocate a memory area for the
 	 * shadow bits as well.
 	 */
-	shadow = alloc_pages_node(node, flags, order);
+	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
 	if (!shadow) {
 		if (printk_ratelimit())
 			printk(KERN_ERR "kmemcheck: failed to allocate "
@@ -33,23 +33,17 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * the memory accesses.
 	 */
 	kmemcheck_hide_pages(page, pages);
-
-	/*
-	 * Objects from caches that have a constructor don't get
-	 * cleared when they're allocated, so we need to do it here.
-	 */
-	if (s->ctor)
-		kmemcheck_mark_uninitialized_pages(page, pages);
-	else
-		kmemcheck_mark_unallocated_pages(page, pages);
 }
 
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+void kmemcheck_free_shadow(struct page *page, int order)
 {
 	struct page *shadow;
 	int pages;
 	int i;
 
+	if (!kmemcheck_page_is_tracked(page))
+		return;
+
 	pages = 1 << order;
 
 	kmemcheck_show_pages(page, pages);
@@ -101,3 +95,28 @@ void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
 	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
 		kmemcheck_mark_freed(object, size);
 }
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+			       gfp_t gfpflags)
+{
+	int pages;
+
+	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+		return;
+
+	pages = 1 << order;
+
+	/*
+	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+	 * can become uninitialized by copying uninitialized memory
+	 * into them.
+	 */
+
+	/* XXX: Can use zone->node for node? */
+	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+	if (gfpflags & __GFP_ZERO)
+		kmemcheck_mark_initialized_pages(page, pages);
+	else
+		kmemcheck_mark_uninitialized_pages(page, pages);
+}
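
The signature changes above are what allow the page allocator to share
these helpers with the slab allocators: the struct kmem_cache argument
is gone, the ctor-dependent marking moves out to the slab call sites,
and the tracked-page check moves inside kmemcheck_free_shadow(), so
free paths can now call it unconditionally:

    /* Before: callers had to check for a tracked page themselves. */
    if (kmemcheck_page_is_tracked(page))
            kmemcheck_free_shadow(s, page, order);

    /* After: safe to call on any page, tracked or not. */
    kmemcheck_free_shadow(page, order);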
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17d5f539a9aa..0727896a88ac 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -546,6 +547,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	int i;
 	int bad = 0;
 
+	kmemcheck_free_shadow(page, order);
+
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
 	if (bad)
@@ -994,6 +997,8 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 
+	kmemcheck_free_shadow(page, 0);
+
 	if (PageAnon(page))
 		page->mapping = NULL;
 	if (free_pages_check(page))
@@ -1047,6 +1052,16 @@ void split_page(struct page *page, unsigned int order)
 
 	VM_BUG_ON(PageCompound(page));
 	VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * Split shadow pages too, because free(page[0]) would
+	 * otherwise free the whole shadow.
+	 */
+	if (kmemcheck_page_is_tracked(page))
+		split_page(virt_to_page(page[0].shadow), order);
+#endif
+
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }
@@ -1667,7 +1682,10 @@ nopage:
 		dump_stack();
 		show_mem();
 	}
+	return page;
 got_pg:
+	if (kmemcheck_enabled)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_internal);
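
Two details above are easy to miss. First, the extra "return page;"
before the got_pg: label makes the nopage: path bypass the new hook, so
kmemcheck_pagealloc_alloc() only ever sees successful allocations.
Second, split_page() must split the shadow as well: kmemcheck's shadow
for a 2^order allocation is itself a single 2^order block, so freeing
page[0] alone would otherwise return the entire shadow. In outline,
restating the hunk above (assumes CONFIG_KMEMCHECK):

    /* page[0].shadow holds the kernel address of the shadow block;
     * convert it back to its struct page and split it in lockstep
     * with the data pages. */
    if (kmemcheck_page_is_tracked(page))
            split_page(virt_to_page(page[0].shadow), order);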
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1612,7 +1612,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -1626,8 +1626,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 
-	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
-		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}
 
 	return page_address(page);
 }
@@ -1641,8 +1647,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
-	if (kmemcheck_page_is_tracked(page))
-		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+	kmemcheck_free_shadow(page, cachep->gfporder);
 
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
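
The slab side of the patch follows one pattern: backing pages are taken
out of page-level tracking, and the shadow is then set up explicitly
with slab-specific marking. The point of the opt-out, condensed from
the kmem_getpages() hunks above (not literal kernel code):

    /* Without __GFP_NOTRACK here, the got_pg: hook in page_alloc.c
     * would already have attached a shadow to these pages ... */
    page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);

    /* ... and this explicit call would then attach a second one. */
    kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);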
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1066,6 +1066,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
 	int order = oo_order(oo);
 
+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
@@ -1097,7 +1099,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (kmemcheck_enabled
 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
 	{
-		kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
 	page->objects = oo_objects(oo);
@@ -1173,8 +1186,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}
 
-	if (kmemcheck_page_is_tracked(page))
-		kmemcheck_free_shadow(s, page, compound_order(page));
+	kmemcheck_free_shadow(page, compound_order(page));
 
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -2734,9 +2746,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-						get_order(size));
+	struct page *page;
 
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
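
The same ctor-dependent marking now appears at both slab call sites; it
used to live inside kmemcheck_alloc_shadow() itself, as the
mm/kmemcheck.c hunk shows. The reasoning, restated from the comment
that moved (s->ctor in mm/slub.c, cachep->ctor in mm/slab.c):

    /* Caches with a constructor hand out objects without clearing
     * them, so the new slab must start out "uninitialized"; for
     * ctor-less caches the pages start "unallocated", presumably
     * until kmemcheck's slab hooks mark individual objects at
     * allocation time. */
    if (s->ctor)
            kmemcheck_mark_uninitialized_pages(page, pages);
    else
            kmemcheck_mark_unallocated_pages(page, pages);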