 Documentation/kernel-parameters.txt |   2 
 include/linux/debugobjects.h        |  90 
 init/main.c                         |   3 
 lib/Kconfig.debug                   |  23 
 lib/Makefile                        |   1 
 lib/debugobjects.c                  | 890 
 mm/page_alloc.c                     |  10 
 mm/slab.c                           |  10 
 mm/slub.c                           |   3 
 mm/vmalloc.c                        |   2 
 10 files changed, 1030 insertions(+), 4 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0ba0861b5d18..a3c35446e755 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -561,6 +561,8 @@ and is between 256 and 4096 characters. It is defined in the file
 			1 will print _a lot_ more information - normally
 			only useful to kernel developers.
 
+	debug_objects	[KNL] Enable object debugging
+
 	decnet.addr=	[HW,NET]
 			Format: <area>[,<node>]
 			See also Documentation/networking/decnet.txt.
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
new file mode 100644
index 000000000000..8c243aaa86a7
--- /dev/null
+++ b/include/linux/debugobjects.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_DEBUGOBJECTS_H
+#define _LINUX_DEBUGOBJECTS_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+enum debug_obj_state {
+	ODEBUG_STATE_NONE,
+	ODEBUG_STATE_INIT,
+	ODEBUG_STATE_INACTIVE,
+	ODEBUG_STATE_ACTIVE,
+	ODEBUG_STATE_DESTROYED,
+	ODEBUG_STATE_NOTAVAILABLE,
+	ODEBUG_STATE_MAX,
+};
+
+struct debug_obj_descr;
+
+/**
+ * struct debug_obj - representation of a tracked object
+ * @node:	hlist node to link the object into the tracker list
+ * @state:	tracked object state
+ * @object:	pointer to the real object
+ * @descr:	pointer to an object type specific debug description structure
+ */
+struct debug_obj {
+	struct hlist_node	node;
+	enum debug_obj_state	state;
+	void			*object;
+	struct debug_obj_descr	*descr;
+};
+
+/**
+ * struct debug_obj_descr - object type specific debug description structure
+ * @name:		name of the object type
+ * @fixup_init:		fixup function, which is called when the init check
+ *			fails
+ * @fixup_activate:	fixup function, which is called when the activate check
+ *			fails
+ * @fixup_destroy:	fixup function, which is called when the destroy check
+ *			fails
+ * @fixup_free:		fixup function, which is called when the free check
+ *			fails
+ */
+struct debug_obj_descr {
+	const char *name;
+
+	int (*fixup_init)	(void *addr, enum debug_obj_state state);
+	int (*fixup_activate)	(void *addr, enum debug_obj_state state);
+	int (*fixup_destroy)	(void *addr, enum debug_obj_state state);
+	int (*fixup_free)	(void *addr, enum debug_obj_state state);
+};
+
+#ifdef CONFIG_DEBUG_OBJECTS
+extern void debug_object_init      (void *addr, struct debug_obj_descr *descr);
+extern void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_activate  (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
+
+extern void debug_objects_early_init(void);
+extern void debug_objects_mem_init(void);
+#else
+static inline void
+debug_object_init      (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_activate  (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_destroy   (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_free      (void *addr, struct debug_obj_descr *descr) { }
+
+static inline void debug_objects_early_init(void) { }
+static inline void debug_objects_mem_init(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+extern void debug_check_no_obj_freed(const void *address, unsigned long size);
+#else
+static inline void
+debug_check_no_obj_freed(const void *address, unsigned long size) { }
+#endif
+
+#endif
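
Illustration (not part of the patch): a subsystem that wants its objects tracked
would declare one debug_obj_descr per object type and call the debug_object_*
hooks from its own lifetime functions. The my_obj type and helpers below are
hypothetical; only struct debug_obj_descr and the debug_object_* calls come from
the header above.

	#include <linux/debugobjects.h>

	/* hypothetical object type of some subsystem */
	struct my_obj {
		int usable;
	};

	static struct debug_obj_descr my_obj_debug_descr = {
		.name = "my_obj",
		/* no fixup callbacks: violations are only reported, not repaired */
	};

	static void my_obj_init(struct my_obj *obj)
	{
		debug_object_init(obj, &my_obj_debug_descr);	/* -> ODEBUG_STATE_INIT */
		obj->usable = 1;
	}

	static void my_obj_start(struct my_obj *obj)
	{
		/* warns (and would call fixup_activate) if obj is already active */
		debug_object_activate(obj, &my_obj_debug_descr);
	}

	static void my_obj_stop(struct my_obj *obj)
	{
		debug_object_deactivate(obj, &my_obj_debug_descr);
	}

	static void my_obj_release(struct my_obj *obj)
	{
		/* drops the tracker entry; complains if the object is still active */
		debug_object_free(obj, &my_obj_debug_descr);
	}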
diff --git a/init/main.c b/init/main.c
index dff253cfcd9f..a87d4ca5c36c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -52,6 +52,7 @@
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
 #include <linux/debug_locks.h>
+#include <linux/debugobjects.h>
 #include <linux/lockdep.h>
 #include <linux/pid_namespace.h>
 #include <linux/device.h>
@@ -543,6 +544,7 @@ asmlinkage void __init start_kernel(void)
 	 */
 	unwind_init();
 	lockdep_init();
+	debug_objects_early_init();
 	cgroup_init_early();
 
 	local_irq_disable();
@@ -638,6 +640,7 @@ asmlinkage void __init start_kernel(void)
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
 	kmem_cache_init();
+	debug_objects_mem_init();
 	idr_init_cache();
 	setup_per_cpu_pageset();
 	numa_policy_init();
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 754cc0027f2a..3e132b0a59cc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -194,6 +194,29 @@ config TIMER_STATS
 	  (it defaults to deactivated on bootup and will only be activated
 	  if some application like powertop activates it explicitly).
 
+config DEBUG_OBJECTS
+	bool "Debug object operations"
+	depends on DEBUG_KERNEL
+	help
+	  If you say Y here, additional code will be inserted into the
+	  kernel to track the lifetime of various objects and validate
+	  the operations on those objects.
+
+config DEBUG_OBJECTS_SELFTEST
+	bool "Debug objects selftest"
+	depends on DEBUG_OBJECTS
+	help
+	  This enables the selftest of the object debug code.
+
+config DEBUG_OBJECTS_FREE
+	bool "Debug objects in freed memory"
+	depends on DEBUG_OBJECTS
+	help
+	  This enables checks whether a kfree/vfree operation frees an
+	  area which contains an object that has not been deactivated
+	  properly. This can make kmalloc/kfree-intensive workloads
+	  much slower.
+
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
 	depends on DEBUG_KERNEL && SLAB
diff --git a/lib/Makefile b/lib/Makefile
index 0ae4eb047aac..74b0cfb1fcc3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
new file mode 100644
index 000000000000..a76a5e122ae1
--- /dev/null
+++ b/lib/debugobjects.c
@@ -0,0 +1,890 @@
+/*
+ * Generic infrastructure for lifetime debugging of objects.
+ *
+ * Started by Thomas Gleixner
+ *
+ * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/debugobjects.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/hash.h>
+
+#define ODEBUG_HASH_BITS	14
+#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
+
+#define ODEBUG_POOL_SIZE	512
+#define ODEBUG_POOL_MIN_LEVEL	256
+
+#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
+#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
+#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
+
+struct debug_bucket {
+	struct hlist_head	list;
+	spinlock_t		lock;
+};
+
+static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
+
+static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];
+
+static DEFINE_SPINLOCK(pool_lock);
+
+static HLIST_HEAD(obj_pool);
+
+static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int			obj_pool_free = ODEBUG_POOL_SIZE;
+static int			obj_pool_used;
+static int			obj_pool_max_used;
+static struct kmem_cache	*obj_cache;
+
+static int			debug_objects_maxchain __read_mostly;
+static int			debug_objects_fixups __read_mostly;
+static int			debug_objects_warnings __read_mostly;
+static int			debug_objects_enabled __read_mostly;
+static struct debug_obj_descr	*descr_test __read_mostly;
+
+static int __init enable_object_debug(char *str)
+{
+	debug_objects_enabled = 1;
+	return 0;
+}
+early_param("debug_objects", enable_object_debug);
+
+static const char *obj_states[ODEBUG_STATE_MAX] = {
+	[ODEBUG_STATE_NONE]		= "none",
+	[ODEBUG_STATE_INIT]		= "initialized",
+	[ODEBUG_STATE_INACTIVE]		= "inactive",
+	[ODEBUG_STATE_ACTIVE]		= "active",
+	[ODEBUG_STATE_DESTROYED]	= "destroyed",
+	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
+};
+
+static int fill_pool(void)
+{
+	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+	struct debug_obj *new;
+
+	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+		return obj_pool_free;
+
+	if (unlikely(!obj_cache))
+		return obj_pool_free;
+
+	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+
+		new = kmem_cache_zalloc(obj_cache, gfp);
+		if (!new)
+			return obj_pool_free;
+
+		spin_lock(&pool_lock);
+		hlist_add_head(&new->node, &obj_pool);
+		obj_pool_free++;
+		spin_unlock(&pool_lock);
+	}
+	return obj_pool_free;
+}
+
+/*
+ * Lookup an object in the hash bucket.
+ */
+static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
+{
+	struct hlist_node *node;
+	struct debug_obj *obj;
+	int cnt = 0;
+
+	hlist_for_each_entry(obj, node, &b->list, node) {
+		cnt++;
+		if (obj->object == addr)
+			return obj;
+	}
+	if (cnt > debug_objects_maxchain)
+		debug_objects_maxchain = cnt;
+
+	return NULL;
+}
+
+/*
+ * Allocate a new object. If the pool is empty and no refill possible,
+ * switch off the debugger.
+ */
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+{
+	struct debug_obj *obj = NULL;
+	int retry = 0;
+
+repeat:
+	spin_lock(&pool_lock);
+	if (obj_pool.first) {
+		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);
+
+		obj->object = addr;
+		obj->descr  = descr;
+		obj->state  = ODEBUG_STATE_NONE;
+		hlist_del(&obj->node);
+
+		hlist_add_head(&obj->node, &b->list);
+
+		obj_pool_used++;
+		if (obj_pool_used > obj_pool_max_used)
+			obj_pool_max_used = obj_pool_used;
+
+		obj_pool_free--;
+		if (obj_pool_free < obj_pool_min_free)
+			obj_pool_min_free = obj_pool_free;
+	}
+	spin_unlock(&pool_lock);
+
+	if (fill_pool() && !obj && !retry++)
+		goto repeat;
+
+	return obj;
+}
+
+/*
+ * Put the object back into the pool or give it back to kmem_cache:
+ */
+static void free_object(struct debug_obj *obj)
+{
+	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+
+	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
+		spin_lock(&pool_lock);
+		hlist_add_head(&obj->node, &obj_pool);
+		obj_pool_free++;
+		obj_pool_used--;
+		spin_unlock(&pool_lock);
+	} else {
+		spin_lock(&pool_lock);
+		obj_pool_used--;
+		spin_unlock(&pool_lock);
+		kmem_cache_free(obj_cache, obj);
+	}
+}
+
+/*
+ * We ran out of memory. That means we probably have tons of objects
+ * allocated.
+ */
+static void debug_objects_oom(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj;
+	unsigned long flags;
+	int i;
+
+	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		spin_lock_irqsave(&db->lock, flags);
+		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+			hlist_del(&obj->node);
+			free_object(obj);
+		}
+		spin_unlock_irqrestore(&db->lock, flags);
+	}
+}
+
+/*
+ * We use the pfn of the address for the hash. That way we can check
+ * for freed objects simply by checking the affected bucket.
+ */
+static struct debug_bucket *get_bucket(unsigned long addr)
+{
+	unsigned long hash;
+
+	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
+	return &obj_hash[hash];
+}
+
+static void debug_print_object(struct debug_obj *obj, char *msg)
+{
+	static int limit;
+
+	if (limit < 5 && obj->descr != descr_test) {
+		limit++;
+		printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+		       obj_states[obj->state], obj->descr->name);
+		WARN_ON(1);
+	}
+	debug_objects_warnings++;
+}
+
+/*
+ * Try to repair the damage, so we have a better chance to get useful
+ * debug output.
+ */
+static void
+debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
+		   void *addr, enum debug_obj_state state)
+{
+	if (fixup)
+		debug_objects_fixups += fixup(addr, state);
+}
+
+static void debug_object_is_on_stack(void *addr, int onstack)
+{
+	void *stack = current->stack;
+	int is_on_stack;
+	static int limit;
+
+	if (limit > 4)
+		return;
+
+	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
+
+	if (is_on_stack == onstack)
+		return;
+
+	limit++;
+	if (is_on_stack)
+		printk(KERN_WARNING
+		       "ODEBUG: object is on stack, but not annotated\n");
+	else
+		printk(KERN_WARNING
+		       "ODEBUG: object is not on stack, but annotated\n");
+	WARN_ON(1);
+}
+
+static void
+__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj) {
+		obj = alloc_object(addr, db, descr);
+		if (!obj) {
+			debug_objects_enabled = 0;
+			spin_unlock_irqrestore(&db->lock, flags);
+			debug_objects_oom();
+			return;
+		}
+		debug_object_is_on_stack(addr, onstack);
+	}
+
+	switch (obj->state) {
+	case ODEBUG_STATE_NONE:
+	case ODEBUG_STATE_INIT:
+	case ODEBUG_STATE_INACTIVE:
+		obj->state = ODEBUG_STATE_INIT;
+		break;
+
+	case ODEBUG_STATE_ACTIVE:
+		debug_print_object(obj, "init");
+		state = obj->state;
+		spin_unlock_irqrestore(&db->lock, flags);
+		debug_object_fixup(descr->fixup_init, addr, state);
+		return;
+
+	case ODEBUG_STATE_DESTROYED:
+		debug_print_object(obj, "init");
+		break;
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_init - debug checks when an object is initialized
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_init(void *addr, struct debug_obj_descr *descr)
+{
+	if (!debug_objects_enabled)
+		return;
+
+	__debug_object_init(addr, descr, 0);
+}
+
+/**
+ * debug_object_init_on_stack - debug checks when an object on stack is
+ *				initialized
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+{
+	if (!debug_objects_enabled)
+		return;
+
+	__debug_object_init(addr, descr, 1);
+}
+
+/**
+ * debug_object_activate - debug checks when an object is activated
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_activate(void *addr, struct debug_obj_descr *descr)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_INIT:
+		case ODEBUG_STATE_INACTIVE:
+			obj->state = ODEBUG_STATE_ACTIVE;
+			break;
+
+		case ODEBUG_STATE_ACTIVE:
+			debug_print_object(obj, "activate");
+			state = obj->state;
+			spin_unlock_irqrestore(&db->lock, flags);
+			debug_object_fixup(descr->fixup_activate, addr, state);
+			return;
+
+		case ODEBUG_STATE_DESTROYED:
+			debug_print_object(obj, "activate");
+			break;
+		default:
+			break;
+		}
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&db->lock, flags);
+	/*
+	 * This happens when a static object is activated. We
+	 * let the type specific code decide whether this is
+	 * true or not.
+	 */
+	debug_object_fixup(descr->fixup_activate, addr,
+			   ODEBUG_STATE_NOTAVAILABLE);
+}
+
+/**
+ * debug_object_deactivate - debug checks when an object is deactivated
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_INIT:
+		case ODEBUG_STATE_INACTIVE:
+		case ODEBUG_STATE_ACTIVE:
+			obj->state = ODEBUG_STATE_INACTIVE;
+			break;
+
+		case ODEBUG_STATE_DESTROYED:
+			debug_print_object(obj, "deactivate");
+			break;
+		default:
+			break;
+		}
+	} else {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		debug_print_object(&o, "deactivate");
+	}
+
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_destroy - debug checks when an object is destroyed
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj)
+		goto out_unlock;
+
+	switch (obj->state) {
+	case ODEBUG_STATE_NONE:
+	case ODEBUG_STATE_INIT:
+	case ODEBUG_STATE_INACTIVE:
+		obj->state = ODEBUG_STATE_DESTROYED;
+		break;
+	case ODEBUG_STATE_ACTIVE:
+		debug_print_object(obj, "destroy");
+		state = obj->state;
+		spin_unlock_irqrestore(&db->lock, flags);
+		debug_object_fixup(descr->fixup_destroy, addr, state);
+		return;
+
+	case ODEBUG_STATE_DESTROYED:
+		debug_print_object(obj, "destroy");
+		break;
+	default:
+		break;
+	}
+out_unlock:
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_free - debug checks when an object is freed
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_free(void *addr, struct debug_obj_descr *descr)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj)
+		goto out_unlock;
+
+	switch (obj->state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_print_object(obj, "free");
+		state = obj->state;
+		spin_unlock_irqrestore(&db->lock, flags);
+		debug_object_fixup(descr->fixup_free, addr, state);
+		return;
+	default:
+		hlist_del(&obj->node);
+		free_object(obj);
+		break;
+	}
+out_unlock:
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+	struct hlist_node *node, *tmp;
+	struct debug_obj_descr *descr;
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	int cnt;
+
+	saddr = (unsigned long) address;
+	eaddr = saddr + size;
+	paddr = saddr & ODEBUG_CHUNK_MASK;
+	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
+	chunks >>= ODEBUG_CHUNK_SHIFT;
+
+	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
+		db = get_bucket(paddr);
+
+repeat:
+		cnt = 0;
+		spin_lock_irqsave(&db->lock, flags);
+		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+			cnt++;
+			oaddr = (unsigned long) obj->object;
+			if (oaddr < saddr || oaddr >= eaddr)
+				continue;
+
+			switch (obj->state) {
+			case ODEBUG_STATE_ACTIVE:
+				debug_print_object(obj, "free");
+				descr = obj->descr;
+				state = obj->state;
+				spin_unlock_irqrestore(&db->lock, flags);
+				debug_object_fixup(descr->fixup_free,
+						   (void *) oaddr, state);
+				goto repeat;
+			default:
+				hlist_del(&obj->node);
+				free_object(obj);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&db->lock, flags);
+		if (cnt > debug_objects_maxchain)
+			debug_objects_maxchain = cnt;
+	}
+}
+
+void debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+	if (debug_objects_enabled)
+		__debug_check_no_obj_freed(address, size);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debug_stats_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
+	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
+	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
+	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
+	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
+	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
+	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	return 0;
+}
+
+static int debug_stats_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, debug_stats_show, NULL);
+}
+
+static const struct file_operations debug_stats_fops = {
+	.open		= debug_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init debug_objects_init_debugfs(void)
+{
+	struct dentry *dbgdir, *dbgstats;
+
+	if (!debug_objects_enabled)
+		return 0;
+
+	dbgdir = debugfs_create_dir("debug_objects", NULL);
+	if (!dbgdir)
+		return -ENOMEM;
+
+	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
+				       &debug_stats_fops);
+	if (!dbgstats)
+		goto err;
+
+	return 0;
+
+err:
+	debugfs_remove(dbgdir);
+
+	return -ENOMEM;
+}
+__initcall(debug_objects_init_debugfs);
+
+#else
+static inline void debug_objects_init_debugfs(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
+
+/* Random data structure for the self test */
+struct self_test {
+	unsigned long	dummy1[6];
+	int		static_init;
+	unsigned long	dummy2[3];
+};
+
+static __initdata struct debug_obj_descr descr_type_test;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int __init fixup_init(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_init(obj, &descr_type_test);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int __init fixup_activate(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_NOTAVAILABLE:
+		if (obj->static_init == 1) {
+			debug_object_init(obj, &descr_type_test);
+			debug_object_activate(obj, &descr_type_test);
+			/*
+			 * Real code should return 0 here! This is
+			 * not a fixup of some bad behaviour. We
+			 * merely call the debug_init function to keep
+			 * track of the object.
+			 */
+			return 1;
+		} else {
+			/* Real code needs to emit a warning here */
+		}
+		return 0;
+
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_activate(obj, &descr_type_test);
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_destroy is called when:
+ * - an active object is destroyed
+ */
+static int __init fixup_destroy(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_destroy(obj, &descr_type_test);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int __init fixup_free(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_free(obj, &descr_type_test);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int
+check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+	int res = -EINVAL;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj && state != ODEBUG_STATE_NONE) {
+		printk(KERN_ERR "ODEBUG: selftest object not found\n");
+		WARN_ON(1);
+		goto out;
+	}
+	if (obj && obj->state != state) {
+		printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+		       obj->state, state);
+		WARN_ON(1);
+		goto out;
+	}
+	if (fixups != debug_objects_fixups) {
+		printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+		       fixups, debug_objects_fixups);
+		WARN_ON(1);
+		goto out;
+	}
+	if (warnings != debug_objects_warnings) {
+		printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+		       warnings, debug_objects_warnings);
+		WARN_ON(1);
+		goto out;
+	}
+	res = 0;
+out:
+	spin_unlock_irqrestore(&db->lock, flags);
+	if (res)
+		debug_objects_enabled = 0;
+	return res;
+}
+
+static __initdata struct debug_obj_descr descr_type_test = {
+	.name			= "selftest",
+	.fixup_init		= fixup_init,
+	.fixup_activate		= fixup_activate,
+	.fixup_destroy		= fixup_destroy,
+	.fixup_free		= fixup_free,
+};
+
+static __initdata struct self_test obj = { .static_init = 0 };
+
+static void __init debug_objects_selftest(void)
+{
+	int fixups, oldfixups, warnings, oldwarnings;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	fixups = oldfixups = debug_objects_fixups;
+	warnings = oldwarnings = debug_objects_warnings;
+	descr_test = &descr_type_test;
+
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
+		goto out;
+	debug_object_deactivate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
+		goto out;
+	debug_object_destroy(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
+		goto out;
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+		goto out;
+	debug_object_deactivate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+		goto out;
+	debug_object_free(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+		goto out;
+
+	obj.static_init = 1;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+		goto out;
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
+		goto out;
+	debug_object_free(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+		goto out;
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+		goto out;
+	__debug_check_no_obj_freed(&obj, sizeof(obj));
+	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
+		goto out;
+#endif
+	printk(KERN_INFO "ODEBUG: selftest passed\n");
+
+out:
+	debug_objects_fixups = oldfixups;
+	debug_objects_warnings = oldwarnings;
+	descr_test = NULL;
+
+	local_irq_restore(flags);
+}
+#else
+static inline void debug_objects_selftest(void) { }
+#endif
+
+/*
+ * Called during early boot to initialize the hash buckets and link
+ * the static object pool objects into the pool list. After this call
+ * the object tracker is fully operational.
+ */
+void __init debug_objects_early_init(void)
+{
+	int i;
+
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
+		spin_lock_init(&obj_hash[i].lock);
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
+		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+}
+
+/*
+ * Called after the kmem_caches are functional to set up a dedicated
+ * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
+ * prevents the debug code from being called on kmem_cache_free() for
+ * the debug tracker objects, which avoids recursive calls.
+ */
+void __init debug_objects_mem_init(void)
+{
+	if (!debug_objects_enabled)
+		return;
+
+	obj_cache = kmem_cache_create("debug_objects_cache",
+				      sizeof (struct debug_obj), 0,
+				      SLAB_DEBUG_OBJECTS, NULL);
+
+	if (!obj_cache)
+		debug_objects_enabled = 0;
+	else
+		debug_objects_selftest();
+}
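
Illustration (again not part of the patch): the fixup callbacks are the extension
point for repairing a detected violation so the kernel can keep running with
useful debug state. The sketch below reuses the hypothetical my_obj helpers from
the sketch after debugobjects.h; the return convention (1 = repaired, counted in
the fixups statistic, 0 = nothing done) follows debug_object_fixup() above.

	/*
	 * Hypothetical fixup_free handler: called when memory that still
	 * contains an ODEBUG_STATE_ACTIVE object is about to be freed.
	 */
	static int my_obj_fixup_free(void *addr, enum debug_obj_state state)
	{
		struct my_obj *obj = addr;

		switch (state) {
		case ODEBUG_STATE_ACTIVE:
			/* shut the object down, then drop the tracker entry */
			my_obj_stop(obj);
			debug_object_free(obj, &my_obj_debug_descr);
			return 1;	/* counted in debug_objects_fixups */
		default:
			return 0;	/* nothing to repair */
		}
	}

It would be hooked up via .fixup_free = my_obj_fixup_free in the descriptor,
mirroring the pattern used by the selftest's fixup_free() above.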
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0a502e99ee22..bdd5c432c426 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -45,6 +45,7 @@
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
 #include <linux/memcontrol.h>
+#include <linux/debugobjects.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -532,8 +533,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	if (reserved)
 		return;
 
-	if (!PageHighMem(page))
+	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+		debug_check_no_obj_freed(page_address(page),
+					   PAGE_SIZE << order);
+	}
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
@@ -995,8 +999,10 @@ static void free_hot_cold_page(struct page *page, int cold)
 	if (free_pages_check(page))
 		return;
 
-	if (!PageHighMem(page))
+	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
+	}
 	arch_free_page(page, 0);
 	kernel_map_pages(page, 1, 0);
 
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -110,6 +110,7 @@
 #include <linux/fault-inject.h>
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
+#include <linux/debugobjects.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -174,12 +175,14 @@
 			 SLAB_CACHE_DMA | \
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 SLAB_DEBUG_OBJECTS)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 SLAB_DEBUG_OBJECTS)
 #endif
 
 /*
@@ -3760,6 +3763,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, obj_size(cachep));
+	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
@@ -3785,6 +3790,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
+	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -19,6 +19,7 @@
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
+#include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 
@@ -1747,6 +1748,8 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(object, s->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e33e0ae69ad1..2a39cf128aba 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
+#include <linux/debugobjects.h>
 #include <linux/vmalloc.h>
 #include <linux/kallsyms.h>
 
@@ -394,6 +395,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	}
 
 	debug_check_no_locks_freed(addr, area->size);
+	debug_check_no_obj_freed(addr, area->size);
 
 	if (deallocate_pages) {
 		int i;