Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig        |   4
-rw-r--r--  mm/Kconfig.debug  |   1
-rw-r--r--  mm/Makefile       |   1
-rw-r--r--  mm/bounce.c       |   1
-rw-r--r--  mm/highmem.c      |   1
-rw-r--r--  mm/kmemcheck.c    | 122
-rw-r--r--  mm/page_alloc.c   |  18
-rw-r--r--  mm/slab.c         | 108
-rw-r--r--  mm/slub.c         |  38
9 files changed, 202 insertions(+), 92 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 97d2c88b745e..c948d4ca8bde 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,11 +128,11 @@ config SPARSEMEM_VMEMMAP
 config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
-	depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG
 	depends on (IA64 || X86 || PPC64 || SUPERH || S390)
 
 comment "Memory hotplug is currently incompatible with Software Suspend"
-	depends on SPARSEMEM && HOTPLUG && HIBERNATION
+	depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390
 
 config MEMORY_HOTPLUG_SPARSE
 	def_bool y
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index bb01e298f260..aa99fd1f7109 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -2,6 +2,7 @@ config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL && ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	depends on !HIBERNATION || !PPC && !SPARC
+	depends on !KMEMCHECK
 	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
diff --git a/mm/Makefile b/mm/Makefile
index cf76be785add..5e0bd6426693 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --git a/mm/bounce.c b/mm/bounce.c
index 4ebe3ea83795..a2b76a588e34 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 #include <trace/events/block.h>
diff --git a/mm/highmem.c b/mm/highmem.c
index 68eb1d9b63fa..25878cc49daa 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,7 +26,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 /*
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
new file mode 100644
index 000000000000..fd814fd61319
--- /dev/null
+++ b/mm/kmemcheck.c
@@ -0,0 +1,122 @@
+#include <linux/gfp.h>
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kmemcheck.h>
+
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	pages = 1 << order;
+
+	/*
+	 * With kmemcheck enabled, we need to allocate a memory area for the
+	 * shadow bits as well.
+	 */
+	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
+	if (!shadow) {
+		if (printk_ratelimit())
+			printk(KERN_ERR "kmemcheck: failed to allocate "
+				"shadow bitmap\n");
+		return;
+	}
+
+	for(i = 0; i < pages; ++i)
+		page[i].shadow = page_address(&shadow[i]);
+
+	/*
+	 * Mark it as non-present for the MMU so that our accesses to
+	 * this memory will trigger a page fault and let us analyze
+	 * the memory accesses.
+	 */
+	kmemcheck_hide_pages(page, pages);
+}
+
+void kmemcheck_free_shadow(struct page *page, int order)
+{
+	struct page *shadow;
+	int pages;
+	int i;
+
+	if (!kmemcheck_page_is_tracked(page))
+		return;
+
+	pages = 1 << order;
+
+	kmemcheck_show_pages(page, pages);
+
+	shadow = virt_to_page(page[0].shadow);
+
+	for(i = 0; i < pages; ++i)
+		page[i].shadow = NULL;
+
+	__free_pages(shadow, order);
+}
+
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size)
+{
+	/*
+	 * Has already been memset(), which initializes the shadow for us
+	 * as well.
+	 */
+	if (gfpflags & __GFP_ZERO)
+		return;
+
+	/* No need to initialize the shadow of a non-tracked slab. */
+	if (s->flags & SLAB_NOTRACK)
+		return;
+
+	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
+		/*
+		 * Allow notracked objects to be allocated from
+		 * tracked caches. Note however that these objects
+		 * will still get page faults on access, they just
+		 * won't ever be flagged as uninitialized. If page
+		 * faults are not acceptable, the slab cache itself
+		 * should be marked NOTRACK.
+		 */
+		kmemcheck_mark_initialized(object, size);
+	} else if (!s->ctor) {
+		/*
+		 * New objects should be marked uninitialized before
+		 * they're returned to the caller.
+		 */
+		kmemcheck_mark_uninitialized(object, size);
+	}
+}
+
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
+{
+	/* TODO: RCU freeing is unsupported for now; hide false positives. */
+	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+		kmemcheck_mark_freed(object, size);
+}
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+			       gfp_t gfpflags)
+{
+	int pages;
+
+	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+		return;
+
+	pages = 1 << order;
+
+	/*
+	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+	 * can become uninitialized by copying uninitialized memory
+	 * into them.
+	 */
+
+	/* XXX: Can use zone->node for node? */
+	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+	if (gfpflags & __GFP_ZERO)
+		kmemcheck_mark_initialized_pages(page, pages);
+	else
+		kmemcheck_mark_uninitialized_pages(page, pages);
+}
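
The new file above wires every tracked data page to a shadow page through the page->shadow pointer. As a rough illustration only (not part of this patch; the helper name is invented, and it assumes CONFIG_KMEMCHECK=y so that struct page has the shadow field, and a lowmem address), resolving the shadow byte for an arbitrary kernel address could look like this:

#include <linux/mm.h>

/* Illustration only: follow page->shadow, as set up by
 * kmemcheck_alloc_shadow() above, from a kernel address to the
 * matching shadow byte. Returns NULL for untracked pages. */
static void *shadow_lookup_example(unsigned long address)
{
	struct page *page;

	if (!virt_addr_valid(address))
		return NULL;

	page = virt_to_page(address);
	if (!page->shadow)
		return NULL;

	/* same offset inside the shadow page as inside the data page */
	return (char *)page->shadow + (address & (PAGE_SIZE - 1));
}
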
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f457a756d46..a5f3c278c573 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -558,6 +559,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	int bad = 0;
 	int clearMlocked = PageMlocked(page);
 
+	kmemcheck_free_shadow(page, order);
+
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
 	if (bad)
@@ -1020,6 +1023,8 @@ static void free_hot_cold_page(struct page *page, int cold)
 	unsigned long flags;
 	int clearMlocked = PageMlocked(page);
 
+	kmemcheck_free_shadow(page, 0);
+
 	if (PageAnon(page))
 		page->mapping = NULL;
 	if (free_pages_check(page))
@@ -1076,6 +1081,16 @@ void split_page(struct page *page, unsigned int order)
 
 	VM_BUG_ON(PageCompound(page));
 	VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * Split shadow pages too, because free(page[0]) would
+	 * otherwise free the whole shadow.
+	 */
+	if (kmemcheck_page_is_tracked(page))
+		split_page(virt_to_page(page[0].shadow), order);
+#endif
+
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }
@@ -1828,7 +1843,10 @@ nopage:
 		dump_stack();
 		show_mem();
 	}
+	return page;
 got_pg:
+	if (kmemcheck_enabled)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 	return page;
 
 }
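
Note from the hunks above that kmemcheck_pagealloc_alloc() skips highmem and __GFP_NOTRACK allocations, and that kmemcheck_free_shadow() returns early for pages it never tracked. A minimal, hypothetical caller-side sketch of opting a hot allocation out of tracking (function names invented for illustration):

#include <linux/gfp.h>

/* Hypothetical example: no shadow pages are set up for this buffer, so
 * accesses to it never take kmemcheck page faults. */
static struct page *alloc_untracked_buffer(unsigned int order)
{
	return alloc_pages(GFP_KERNEL | __GFP_NOTRACK, order);
}

/* Freeing is unchanged; kmemcheck_free_shadow() sees the page is not
 * tracked and returns immediately. */
static void free_untracked_buffer(struct page *page, unsigned int order)
{
	__free_pages(page, order);
}
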
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -380,87 +381,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
@@ -1704,7 +1624,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_exact_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -1717,6 +1637,16 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+		if (cachep->ctor)
+			kmemcheck_mark_uninitialized_pages(page, nr_pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, nr_pages);
+	}
+
 	return page_address(page);
 }
 
@@ -1729,6 +1659,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	kmemcheck_free_shadow(page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3404,6 +3336,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3464,6 +3399,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
@@ -3580,6 +3518,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
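
The SLAB hooks above are skipped entirely for caches created with SLAB_NOTRACK (see the kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK) check in kmem_getpages()). A hypothetical sketch of creating such a cache, assuming CONFIG_KMEMCHECK=y, with the struct and names invented for illustration:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
	int state;
	void *cookie;
};

static struct kmem_cache *foo_cachep;

/* Objects from this cache get no shadow pages and are never reported
 * by kmemcheck, at the cost of losing coverage for them. */
static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_NOTRACK, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}
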
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA)
+		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -1071,6 +1072,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
 	int order = oo_order(oo);
 
+	flags |= __GFP_NOTRACK;
+
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
@@ -1098,6 +1101,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
 	}
+
+	if (kmemcheck_enabled
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+	{
+		int pages = 1 << oo_order(oo);
+
+		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+		/*
+		 * Objects from caches that have a constructor don't get
+		 * cleared when they're allocated, so we need to do it here.
+		 */
+		if (s->ctor)
+			kmemcheck_mark_uninitialized_pages(page, pages);
+		else
+			kmemcheck_mark_unallocated_pages(page, pages);
+	}
+
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -1171,6 +1192,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}
 
+	kmemcheck_free_shadow(page, compound_order(page));
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1626,7 +1649,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
 
+	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
 	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
 	return object;
 }
 
@@ -1759,6 +1784,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	kmemcheck_slab_free(s, object, c->objsize);
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, c->objsize);
@@ -2633,7 +2659,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+			SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+			NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
@@ -2727,9 +2754,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-						get_order(size));
+	struct page *page;
 
+	flags |= __GFP_COMP | __GFP_NOTRACK;
+	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		return page_address(page);
 	else
@@ -4412,6 +4440,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'a';
 	if (s->flags & SLAB_DEBUG_FREE)
 		*p++ = 'F';
+	if (!(s->flags & SLAB_NOTRACK))
+		*p++ = 't';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);
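
Finally, memory that is written by something other than the CPU, for example a device filling a freshly allocated object by DMA, can still look uninitialized to kmemcheck because those stores never go through the page-fault path it watches. The usual remedy is the kmemcheck_mark_initialized() annotation already used by kmemcheck_slab_alloc() above; a hypothetical driver-side sketch (names invented, assuming an existing cache and device transfer):

#include <linux/kmemcheck.h>
#include <linux/slab.h>

/* Hypothetical example: tell kmemcheck that 'len' bytes were filled in
 * by hardware, so later reads are not flagged as uninitialized. */
static void *read_from_device(struct kmem_cache *cachep, size_t len)
{
	void *buf = kmem_cache_alloc(cachep, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* ... device writes 'len' bytes into buf here (not shown) ... */

	kmemcheck_mark_initialized(buf, len);
	return buf;
}
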