author     Vegard Nossum <vegard.nossum@gmail.com>  2008-11-25 10:55:53 -0500
committer  Vegard Nossum <vegard.nossum@gmail.com>  2009-06-15 09:48:33 -0400
commit     b1eeab67682a5e397aecf172046b3a8bd4808ae4
tree       c357b6ac1945dc8beecc2f8c4d84660ad8d35aae
parent     9b5cab31897e9e89e36c0c2a89b16b93ff1a971a
kmemcheck: add hooks for the page allocator
This adds support for tracking the initialization state of memory
allocated with the page allocator. Highmem requests are not tracked.
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
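
In practice, every non-highmem page allocation now receives kmemcheck
tracking unless the caller opts out with __GFP_NOTRACK. A minimal sketch of
the caller-visible behavior after this patch, assuming kmemcheck is enabled:

        /* Tracked: reads that precede a write are reported as uses of
         * uninitialized memory. */
        struct page *p = alloc_pages(GFP_KERNEL, 0);

        /* Untracked: shadow allocation is skipped entirely. */
        struct page *q = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);

        /* Tracked, but marked initialized up front, since the allocator
         * zeroes the page before returning it. */
        struct page *r = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);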
-rw-r--r--  arch/x86/include/asm/thread_info.h |  4
-rw-r--r--  arch/x86/mm/kmemcheck/shadow.c     |  8
-rw-r--r--  include/linux/gfp.h                |  5
-rw-r--r--  include/linux/kmemcheck.h          | 35
-rw-r--r--  mm/kmemcheck.c                     | 45
-rw-r--r--  mm/page_alloc.c                    | 18
-rw-r--r--  mm/slab.c                          | 15
-rw-r--r--  mm/slub.c                          | 23
8 files changed, 122 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 602c769fc98c..b0783520988b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -154,9 +154,9 @@ struct thread_info {
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
 #endif
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
index e7346d3873b3..e773b6bd0079 100644
--- a/arch/x86/mm/kmemcheck/shadow.c
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -116,6 +116,14 @@ void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
 		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
 }
 
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
+}
+
 enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
 {
 	uint8_t *x;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index daeaa8fe1bbd..3885e7f75562 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -51,7 +51,12 @@ struct vm_area_struct;
 #define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
 #define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
+
+#ifdef CONFIG_KMEMCHECK
 #define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK	((__force gfp_t)0)
+#endif
 
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
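
Defining __GFP_NOTRACK as 0 in the !CONFIG_KMEMCHECK case is what lets the
rest of the patch OR the flag into gfp masks unconditionally, with no
#ifdefs at the call sites. A minimal sketch of the pattern (this exact line
appears in mm/kmemcheck.c below):

        /* With CONFIG_KMEMCHECK unset, __GFP_NOTRACK is 0 and the OR
         * compiles away; with it set, the allocation opts out of
         * tracking. */
        shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);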
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 71f21ae33d1d..093d23969b1b 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -8,13 +8,15 @@
 extern int kmemcheck_enabled;
 
 /* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-			    struct page *page, int order);
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
 			  size_t size);
 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
 
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+			       gfp_t gfpflags);
+
 void kmemcheck_show_pages(struct page *p, unsigned int n);
 void kmemcheck_hide_pages(struct page *p, unsigned int n);
 
@@ -27,6 +29,7 @@ void kmemcheck_mark_freed(void *address, unsigned int n);
 
 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
 
 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
@@ -34,13 +37,12 @@ int kmemcheck_hide_addr(unsigned long address);
 #define kmemcheck_enabled 0
 
 static inline void
-kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-		       struct page *page, int order)
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 }
 
 static inline void
-kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+kmemcheck_free_shadow(struct page *page, int order)
 {
 }
 
@@ -55,6 +57,11 @@ static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
 {
 }
 
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+	unsigned int order, gfp_t gfpflags)
+{
+}
+
 static inline bool kmemcheck_page_is_tracked(struct page *p)
 {
 	return false;
@@ -75,6 +82,22 @@ static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
 {
 }
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+						      unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
 #endif /* CONFIG_KMEMCHECK */
 
 #endif /* LINUX_KMEMCHECK_H */
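
The header keeps the usual kernel convention of pairing every hook with an
empty static-inline stub, so callers such as mm/page_alloc.c compile
unchanged when CONFIG_KMEMCHECK is off. A condensed sketch of the shape,
using one of the newly added hooks:

        #ifdef CONFIG_KMEMCHECK
        void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
        #else
        static inline void
        kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
        {
                /* No-op; the compiler drops the call entirely. */
        }
        #endif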
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
index eaa41b802611..fd814fd61319 100644
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -1,10 +1,10 @@
+#include <linux/gfp.h>
 #include <linux/mm_types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/kmemcheck.h>
 
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-			    struct page *page, int order)
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 	struct page *shadow;
 	int pages;
@@ -16,7 +16,7 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * With kmemcheck enabled, we need to allocate a memory area for the
 	 * shadow bits as well.
 	 */
-	shadow = alloc_pages_node(node, flags, order);
+	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
 	if (!shadow) {
 		if (printk_ratelimit())
 			printk(KERN_ERR "kmemcheck: failed to allocate "
@@ -33,23 +33,17 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
 	 * the memory accesses.
 	 */
 	kmemcheck_hide_pages(page, pages);
-
-	/*
-	 * Objects from caches that have a constructor don't get
-	 * cleared when they're allocated, so we need to do it here.
-	 */
-	if (s->ctor)
-		kmemcheck_mark_uninitialized_pages(page, pages);
-	else
-		kmemcheck_mark_unallocated_pages(page, pages);
 }
 
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+void kmemcheck_free_shadow(struct page *page, int order)
 {
 	struct page *shadow;
 	int pages;
 	int i;
 
+	if (!kmemcheck_page_is_tracked(page))
+		return;
+
 	pages = 1 << order;
 
 	kmemcheck_show_pages(page, pages);
@@ -101,3 +95,28 @@ void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
 	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
 		kmemcheck_mark_freed(object, size);
 }
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+		gfp_t gfpflags)
+{
+	int pages;
+
+	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+		return;
+
+	pages = 1 << order;
+
+	/*
+	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+	 * can become uninitialized by copying uninitialized memory
+	 * into them.
+	 */
+
+	/* XXX: Can use zone->node for node? */
+	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+	if (gfpflags & __GFP_ZERO)
+		kmemcheck_mark_initialized_pages(page, pages);
+	else
+		kmemcheck_mark_uninitialized_pages(page, pages);
+}
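
kmemcheck_pagealloc_alloc() is the page allocator's single entry point into
tracking: highmem and __GFP_NOTRACK requests are skipped, every other
allocation gets a shadow of the same order, and __GFP_ZERO decides whether
the pages start out initialized or uninitialized. A rough illustration of
what the per-byte shadow state means for a caller:

        unsigned char x;
        unsigned char *buf = (unsigned char *)__get_free_page(GFP_KERNEL);

        x = buf[0];     /* read before write: kmemcheck reports a use of
                           uninitialized memory */
        buf[0] = 1;     /* the write marks byte 0 initialized in the shadow */
        x = buf[0];     /* so this read passes silently */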
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17d5f539a9aa..0727896a88ac 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -546,6 +547,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	int i;
 	int bad = 0;
 
+	kmemcheck_free_shadow(page, order);
+
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
 	if (bad)
@@ -994,6 +997,8 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 
+	kmemcheck_free_shadow(page, 0);
+
 	if (PageAnon(page))
 		page->mapping = NULL;
 	if (free_pages_check(page))
@@ -1047,6 +1052,16 @@ void split_page(struct page *page, unsigned int order)
 
 	VM_BUG_ON(PageCompound(page));
 	VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * Split shadow pages too, because free(page[0]) would
+	 * otherwise free the whole shadow.
+	 */
+	if (kmemcheck_page_is_tracked(page))
+		split_page(virt_to_page(page[0].shadow), order);
+#endif
+
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }
@@ -1667,7 +1682,10 @@ nopage:
 		dump_stack();
 		show_mem();
 	}
+	return page;
 got_pg:
+	if (kmemcheck_enabled)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_internal);
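
The split_page() hook deserves the comment it carries: after a split, each
of the 1 << order sub-pages can be freed on its own, so the shadow block
must be split in lockstep or the first free would tear down shadow that the
sibling pages still reference. A sketch of the scenario this guards
against, assuming kmemcheck is enabled and the allocation is tracked:

        /* One order-2 allocation: 4 data pages, 4 shadow pages. */
        struct page *p = alloc_pages(GFP_KERNEL, 2);

        split_page(p, 2);       /* shadow is split into 4 order-0 pieces
                                   alongside the data pages */
        __free_page(p);         /* frees one data page and one shadow
                                   page; p[1]..p[3] remain tracked */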
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1612,7 +1612,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -1626,8 +1626,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 
-	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
-		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}
 
 	return page_address(page);
 }
@@ -1641,8 +1647,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
-	if (kmemcheck_page_is_tracked(page))
-		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+	kmemcheck_free_shadow(page, cachep->gfporder);
 
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
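
Note how the ctor distinction moved here from kmemcheck_alloc_shadow(): the
slab layer is the only place that knows whether a cache has a constructor,
which determines the initial shadow state of freshly allocated slab pages.
A hedged illustration (example_ctor is a made-up name):

        /* Objects in this cache are set up by example_ctor() at page-
         * allocation time rather than zeroed per-allocation, so its
         * pages are marked "uninitialized" (reads are checked) instead
         * of "unallocated" (any access is reported). */
        struct kmem_cache *c = kmem_cache_create("example", 128, 0,
                                                 0, example_ctor);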
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1066,6 +1066,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
 	int order = oo_order(oo);
 
+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
@@ -1097,7 +1099,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (kmemcheck_enabled
 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
 	{
-		kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
 	page->objects = oo_objects(oo);
@@ -1173,8 +1186,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}
 
-	if (kmemcheck_page_is_tracked(page))
-		kmemcheck_free_shadow(s, page, compound_order(page));
+	kmemcheck_free_shadow(page, compound_order(page));
 
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -2734,9 +2746,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-						get_order(size));
+	struct page *page;
 
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
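
Taken together, the slab and slub hunks follow one rule: slab-backed pages
are allocated with __GFP_NOTRACK so the new generic hook in
__alloc_pages_internal() leaves them alone, and the slab layer then
installs shadow itself at slab granularity, avoiding tracking the same
memory twice. Condensed from the hunks above:

        /* SLUB's backing allocation opts out of the page hook... */
        flags |= __GFP_NOTRACK;
        page = alloc_pages(flags, order);

        /* ...and allocate_slab() attaches shadow on its own terms. */
        kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);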