Diffstat (limited to 'mm/mempool.c')
-rw-r--r--   mm/mempool.c   117
1 file changed, 115 insertions(+), 2 deletions(-)
diff --git a/mm/mempool.c b/mm/mempool.c
index 949970db2874..2cc08de8b1db 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -6,26 +6,138 @@
  * extreme VM load.
  *
  * started by Ingo Molnar, Copyright (C) 2001
+ * debugging by David Rientjes, Copyright (C) 2015
  */
 
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/kasan.h>
 #include <linux/kmemleak.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include "slab.h"
+
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+static void poison_error(mempool_t *pool, void *element, size_t size,
+			 size_t byte)
+{
+	const int nr = pool->curr_nr;
+	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
+	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
+	int i;
+
+	pr_err("BUG: mempool element poison mismatch\n");
+	pr_err("Mempool %p size %zu\n", pool, size);
+	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
+	for (i = start; i < end; i++)
+		pr_cont("%x ", *(u8 *)(element + i));
+	pr_cont("%s\n", end < size ? "..." : "");
+	dump_stack();
+}
+
+static void __check_element(mempool_t *pool, void *element, size_t size)
+{
+	u8 *obj = element;
+	size_t i;
+
+	for (i = 0; i < size; i++) {
+		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
+
+		if (obj[i] != exp) {
+			poison_error(pool, element, size, i);
+			return;
+		}
+	}
+	memset(obj, POISON_INUSE, size);
+}
+
+static void check_element(mempool_t *pool, void *element)
+{
+	/* Mempools backed by slab allocator */
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+		__check_element(pool, element, ksize(element));
+
+	/* Mempools backed by page allocator */
+	if (pool->free == mempool_free_pages) {
+		int order = (int)(long)pool->pool_data;
+		void *addr = kmap_atomic((struct page *)element);
+
+		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
+		kunmap_atomic(addr);
+	}
+}
+
+static void __poison_element(void *element, size_t size)
+{
+	u8 *obj = element;
+
+	memset(obj, POISON_FREE, size - 1);
+	obj[size - 1] = POISON_END;
+}
+
+static void poison_element(mempool_t *pool, void *element)
+{
+	/* Mempools backed by slab allocator */
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+		__poison_element(element, ksize(element));
+
+	/* Mempools backed by page allocator */
+	if (pool->alloc == mempool_alloc_pages) {
+		int order = (int)(long)pool->pool_data;
+		void *addr = kmap_atomic((struct page *)element);
+
+		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
+		kunmap_atomic(addr);
+	}
+}
+#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+static inline void check_element(mempool_t *pool, void *element)
+{
+}
+static inline void poison_element(mempool_t *pool, void *element)
+{
+}
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+
+static void kasan_poison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_free(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_kfree(element);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_free_pages(element, (unsigned long)pool->pool_data);
+}
+
+static void kasan_unpoison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_alloc(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_krealloc(element, (size_t)pool->pool_data);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+}
 
 static void add_element(mempool_t *pool, void *element)
 {
 	BUG_ON(pool->curr_nr >= pool->min_nr);
+	poison_element(pool, element);
+	kasan_poison_element(pool, element);
 	pool->elements[pool->curr_nr++] = element;
 }
 
 static void *remove_element(mempool_t *pool)
 {
-	BUG_ON(pool->curr_nr <= 0);
-	return pool->elements[--pool->curr_nr];
+	void *element = pool->elements[--pool->curr_nr];
+
+	BUG_ON(pool->curr_nr < 0);
+	check_element(pool, element);
+	kasan_unpoison_element(pool, element);
+	return element;
 }
 
 /**
@@ -334,6 +446,7 @@ EXPORT_SYMBOL(mempool_free);
 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
 {
 	struct kmem_cache *mem = pool_data;
+	VM_BUG_ON(mem->ctor);
 	return kmem_cache_alloc(mem, gfp_mask);
 }
 EXPORT_SYMBOL(mempool_alloc_slab);
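
Usage illustration (not part of this commit): a minimal sketch of a slab-backed mempool of the kind the new poison_element()/check_element() paths cover when CONFIG_DEBUG_SLAB or CONFIG_SLUB_DEBUG_ON is enabled. The foo_* names are hypothetical; the interfaces used (kmem_cache_create(), mempool_create_slab_pool(), mempool_alloc(), mempool_free()) are existing kernel APIs. The cache is created without a constructor, since mempool_alloc_slab() now VM_BUG_ON()s on caches that have one (poisoning idle elements would destroy constructed state).

	/* Hypothetical driver-side usage; foo_ctx, foo_cache and foo_pool
	 * are illustrative names, not part of this patch. */
	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct foo_ctx {
		int state;
		char buf[64];
	};

	static struct kmem_cache *foo_cache;
	static mempool_t *foo_pool;

	static int foo_init(void)
	{
		/* No constructor: mempool_alloc_slab() rejects caches with a ctor. */
		foo_cache = kmem_cache_create("foo_ctx", sizeof(struct foo_ctx),
					      0, 0, NULL);
		if (!foo_cache)
			return -ENOMEM;

		/* Keep at least 4 elements in reserve; these idle elements are
		 * the ones poison_element()/check_element() operate on. */
		foo_pool = mempool_create_slab_pool(4, foo_cache);
		if (!foo_pool) {
			kmem_cache_destroy(foo_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static void foo_use(void)
	{
		struct foo_ctx *ctx = mempool_alloc(foo_pool, GFP_KERNEL);

		if (!ctx)
			return;
		ctx->state = 1;
		/* A write to ctx after this free would be reported on a later
		 * allocation as a "mempool element poison mismatch". */
		mempool_free(ctx, foo_pool);
	}

	static void foo_exit(void)
	{
		mempool_destroy(foo_pool);
		kmem_cache_destroy(foo_cache);
	}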