diff options
Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r-- | lib/debugobjects.c | 890 |
1 files changed, 890 insertions, 0 deletions
diff --git a/lib/debugobjects.c b/lib/debugobjects.c new file mode 100644 index 000000000000..a76a5e122ae1 --- /dev/null +++ b/lib/debugobjects.c | |||
@@ -0,0 +1,890 @@ | |||
1 | /* | ||
2 | * Generic infrastructure for lifetime debugging of objects. | ||
3 | * | ||
4 | * Started by Thomas Gleixner | ||
5 | * | ||
6 | * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> | ||
7 | * | ||
 * For licensing details see kernel-base/COPYING
9 | */ | ||
10 | #include <linux/debugobjects.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/hash.h> | ||
15 | |||
/* Hash table geometry: 2^14 buckets, indexed by object address. */
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/*
 * Total size of the static tracker pool and the watermark below which
 * fill_pool() tries to refill from the slab cache.
 */
#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Address-space chunking used for hashing: one chunk per page. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
25 | |||
/* A hash bucket: chain of tracked objects plus its own lock. */
struct debug_bucket {
	struct hlist_head	list;
	spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static tracker pool, usable before the slab allocator is up. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];

/* Protects obj_pool and the pool accounting counters below. */
static DEFINE_SPINLOCK(pool_lock);

/* Free list of tracker objects. */
static HLIST_HEAD(obj_pool);

/* Pool accounting; updated under pool_lock, read locklessly elsewhere. */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics exported via debugfs. */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly;
/* Descriptor used by the selftest; its violations are not reported. */
static struct debug_obj_descr	*descr_test __read_mostly;
50 | |||
51 | static int __init enable_object_debug(char *str) | ||
52 | { | ||
53 | debug_objects_enabled = 1; | ||
54 | return 0; | ||
55 | } | ||
56 | early_param("debug_objects", enable_object_debug); | ||
57 | |||
/* Human readable names for the object states, used by debug_print_object(). */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
66 | |||
/*
 * Refill the free pool from the slab cache up to ODEBUG_POOL_MIN_LEVEL.
 * Returns the (approximate) number of free tracker objects.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;

	/* Lockless read; a stale value only costs an extra refill pass. */
	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	/* Before the cache exists we can only consume the static pool. */
	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		/* Allocation may fail; GFP_ATOMIC with no retry/warn. */
		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		spin_lock(&pool_lock);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		spin_unlock(&pool_lock);
	}
	return obj_pool_free;
}
91 | |||
/*
 * Lookup an object in the hash bucket. Caller must hold b->lock.
 * Returns the tracker for @addr or NULL if it is not tracked.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	/* Record the longest chain seen, for the debugfs statistics. */
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
111 | |||
/*
 * Allocate a new object. If the pool is empty and no refill possible,
 * switch off the debugger.
 *
 * Takes a tracker from the free pool, binds it to @addr/@descr and
 * hashes it into bucket @b. Caller must hold b->lock; pool_lock nests
 * inside it here.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;
	int retry = 0;

repeat:
	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Update high/low watermarks for the statistics. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	spin_unlock(&pool_lock);

	/*
	 * Top up the pool. If it was empty above, retry the allocation
	 * exactly once after a successful refill.
	 */
	if (fill_pool() && !obj && !retry++)
		goto repeat;

	return obj;
}
149 | |||
150 | /* | ||
151 | * Put the object back into the pool or give it back to kmem_cache: | ||
152 | */ | ||
153 | static void free_object(struct debug_obj *obj) | ||
154 | { | ||
155 | unsigned long idx = (unsigned long)(obj - obj_static_pool); | ||
156 | |||
157 | if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { | ||
158 | spin_lock(&pool_lock); | ||
159 | hlist_add_head(&obj->node, &obj_pool); | ||
160 | obj_pool_free++; | ||
161 | obj_pool_used--; | ||
162 | spin_unlock(&pool_lock); | ||
163 | } else { | ||
164 | spin_lock(&pool_lock); | ||
165 | obj_pool_used--; | ||
166 | spin_unlock(&pool_lock); | ||
167 | kmem_cache_free(obj_cache, obj); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * We run out of memory. That means we probably have tons of objects | ||
173 | * allocated. | ||
174 | */ | ||
175 | static void debug_objects_oom(void) | ||
176 | { | ||
177 | struct debug_bucket *db = obj_hash; | ||
178 | struct hlist_node *node, *tmp; | ||
179 | struct debug_obj *obj; | ||
180 | unsigned long flags; | ||
181 | int i; | ||
182 | |||
183 | printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); | ||
184 | |||
185 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | ||
186 | spin_lock_irqsave(&db->lock, flags); | ||
187 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | ||
188 | hlist_del(&obj->node); | ||
189 | free_object(obj); | ||
190 | } | ||
191 | spin_unlock_irqrestore(&db->lock, flags); | ||
192 | } | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * We use the pfn of the address for the hash. That way we can check | ||
197 | * for freed objects simply by checking the affected bucket. | ||
198 | */ | ||
199 | static struct debug_bucket *get_bucket(unsigned long addr) | ||
200 | { | ||
201 | unsigned long hash; | ||
202 | |||
203 | hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); | ||
204 | return &obj_hash[hash]; | ||
205 | } | ||
206 | |||
207 | static void debug_print_object(struct debug_obj *obj, char *msg) | ||
208 | { | ||
209 | static int limit; | ||
210 | |||
211 | if (limit < 5 && obj->descr != descr_test) { | ||
212 | limit++; | ||
213 | printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, | ||
214 | obj_states[obj->state], obj->descr->name); | ||
215 | WARN_ON(1); | ||
216 | } | ||
217 | debug_objects_warnings++; | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * Try to repair the damage, so we have a better chance to get useful | ||
222 | * debug output. | ||
223 | */ | ||
224 | static void | ||
225 | debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), | ||
226 | void * addr, enum debug_obj_state state) | ||
227 | { | ||
228 | if (fixup) | ||
229 | debug_objects_fixups += fixup(addr, state); | ||
230 | } | ||
231 | |||
/*
 * Warn (at most five times) when an object's on-stack annotation
 * (@onstack) does not match its actual location relative to the
 * current task's stack.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	void *stack = current->stack;
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	/* Stack spans [current->stack, current->stack + THREAD_SIZE). */
	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));

	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
255 | |||
/*
 * Core of debug_object_init(): track @addr in the hash, allocating a
 * tracker on first sight, and validate the state transition.
 * @onstack flags whether the caller annotated the object as stack
 * allocated.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted and refill failed: shut down. */
			debug_objects_enabled = 0;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Init of an active object: report it and let the type
		 * specific fixup repair it. The lock is dropped first
		 * because the fixup calls back into this API.
		 */
		debug_print_object(obj, "init");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&db->lock, flags);
}
303 | |||
304 | /** | ||
305 | * debug_object_init - debug checks when an object is initialized | ||
306 | * @addr: address of the object | ||
307 | * @descr: pointer to an object specific debug description structure | ||
308 | */ | ||
309 | void debug_object_init(void *addr, struct debug_obj_descr *descr) | ||
310 | { | ||
311 | if (!debug_objects_enabled) | ||
312 | return; | ||
313 | |||
314 | __debug_object_init(addr, descr, 0); | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * debug_object_init_on_stack - debug checks when an object on stack is | ||
319 | * initialized | ||
320 | * @addr: address of the object | ||
321 | * @descr: pointer to an object specific debug description structure | ||
322 | */ | ||
323 | void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) | ||
324 | { | ||
325 | if (!debug_objects_enabled) | ||
326 | return; | ||
327 | |||
328 | __debug_object_init(addr, descr, 1); | ||
329 | } | ||
330 | |||
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/*
			 * Double activation: report it and run the
			 * fixup with the lock dropped, as it calls
			 * back into this API.
			 */
			debug_print_object(obj, "activate");
			state = obj->state;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}
384 | |||
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/*
		 * Untracked object: report it via a dummy tracker on
		 * the stack, since there is no debug_obj to print from.
		 */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	spin_unlock_irqrestore(&db->lock, flags);
}
428 | |||
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Destroying an active object: report it and run the
		 * fixup with the lock dropped.
		 */
		debug_print_object(obj, "destroy");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}
474 | |||
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Freeing an active object: report it and let the type
		 * specific fixup handle it, with the lock dropped.
		 */
		debug_print_object(obj, "free");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Any other state: stop tracking, memory goes away. */
		hlist_del(&obj->node);
		free_object(obj);
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}
513 | |||
514 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | ||
/*
 * Scan the memory range [@address, @address + @size) that is about to
 * be freed and handle any tracked object found inside it: active
 * objects are reported and handed to their fixup_free callback,
 * everything else is removed from the hash.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Walk the range chunk-wise; each chunk maps to one bucket. */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				/* Lock was dropped: rescan the bucket. */
				goto repeat;
			default:
				hlist_del(&obj->node);
				free_object(obj);
				break;
			}
		}
		spin_unlock_irqrestore(&db->lock, flags);
		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
563 | |||
564 | void debug_check_no_obj_freed(const void *address, unsigned long size) | ||
565 | { | ||
566 | if (debug_objects_enabled) | ||
567 | __debug_check_no_obj_freed(address, size); | ||
568 | } | ||
569 | #endif | ||
570 | |||
571 | #ifdef CONFIG_DEBUG_FS | ||
572 | |||
/* seq_file show handler: dump the tracker statistics. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
584 | |||
/* Open handler for the debugfs "stats" file. */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
596 | |||
597 | static int __init debug_objects_init_debugfs(void) | ||
598 | { | ||
599 | struct dentry *dbgdir, *dbgstats; | ||
600 | |||
601 | if (!debug_objects_enabled) | ||
602 | return 0; | ||
603 | |||
604 | dbgdir = debugfs_create_dir("debug_objects", NULL); | ||
605 | if (!dbgdir) | ||
606 | return -ENOMEM; | ||
607 | |||
608 | dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, | ||
609 | &debug_stats_fops); | ||
610 | if (!dbgstats) | ||
611 | goto err; | ||
612 | |||
613 | return 0; | ||
614 | |||
615 | err: | ||
616 | debugfs_remove(dbgdir); | ||
617 | |||
618 | return -ENOMEM; | ||
619 | } | ||
620 | __initcall(debug_objects_init_debugfs); | ||
621 | |||
622 | #else | ||
623 | static inline void debug_objects_init_debugfs(void) { } | ||
624 | #endif | ||
625 | |||
626 | #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST | ||
627 | |||
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	/* Selftest flag: simulate a statically initialized object. */
	int		static_init;
	unsigned long	dummy2[3];
};

/* Defined below; forward declaration for the fixup callbacks. */
static __initdata struct debug_obj_descr descr_type_test;
636 | |||
637 | /* | ||
638 | * fixup_init is called when: | ||
639 | * - an active object is initialized | ||
640 | */ | ||
641 | static int __init fixup_init(void *addr, enum debug_obj_state state) | ||
642 | { | ||
643 | struct self_test *obj = addr; | ||
644 | |||
645 | switch (state) { | ||
646 | case ODEBUG_STATE_ACTIVE: | ||
647 | debug_object_deactivate(obj, &descr_type_test); | ||
648 | debug_object_init(obj, &descr_type_test); | ||
649 | return 1; | ||
650 | default: | ||
651 | return 0; | ||
652 | } | ||
653 | } | ||
654 | |||
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merely call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
690 | |||
691 | /* | ||
692 | * fixup_destroy is called when: | ||
693 | * - an active object is destroyed | ||
694 | */ | ||
695 | static int __init fixup_destroy(void *addr, enum debug_obj_state state) | ||
696 | { | ||
697 | struct self_test *obj = addr; | ||
698 | |||
699 | switch (state) { | ||
700 | case ODEBUG_STATE_ACTIVE: | ||
701 | debug_object_deactivate(obj, &descr_type_test); | ||
702 | debug_object_destroy(obj, &descr_type_test); | ||
703 | return 1; | ||
704 | default: | ||
705 | return 0; | ||
706 | } | ||
707 | } | ||
708 | |||
709 | /* | ||
710 | * fixup_free is called when: | ||
711 | * - an active object is freed | ||
712 | */ | ||
713 | static int __init fixup_free(void *addr, enum debug_obj_state state) | ||
714 | { | ||
715 | struct self_test *obj = addr; | ||
716 | |||
717 | switch (state) { | ||
718 | case ODEBUG_STATE_ACTIVE: | ||
719 | debug_object_deactivate(obj, &descr_type_test); | ||
720 | debug_object_free(obj, &descr_type_test); | ||
721 | return 1; | ||
722 | default: | ||
723 | return 0; | ||
724 | } | ||
725 | } | ||
726 | |||
/*
 * Selftest helper: verify that the tracker recorded the expected
 * @state for @addr and that the fixup/warning counters match the
 * expected values. On any mismatch the tracker is disabled and
 * -EINVAL is returned.
 */
static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		printk(KERN_ERR "ODEBUG: selftest object not found\n");
		WARN_ON(1);
		goto out;
	}
	if (obj && obj->state != state) {
		printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		WARN_ON(1);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		WARN_ON(1);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		WARN_ON(1);
		goto out;
	}
	res = 0;
out:
	spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
770 | |||
/* Object type description for the selftest, with all fixup callbacks. */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

/* The object the selftest operates on. */
static __initdata struct self_test obj = { .static_init = 0 };
780 | |||
/*
 * Walk the test object through legal and deliberately illegal state
 * transitions and verify after each step that the tracked state and
 * the fixup/warning counters match expectations.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Suppress reporting for our deliberate violations. */
	descr_test = &descr_type_test;

	/* Legal sequence: init -> activate. */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must trigger one fixup and one warning. */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object only warn: */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of a statically initialized object: fixed up. */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory that still holds an active object: */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the test does not skew the stats. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
851 | #else | ||
852 | static inline void debug_objects_selftest(void) { } | ||
853 | #endif | ||
854 | |||
855 | /* | ||
856 | * Called during early boot to initialize the hash buckets and link | ||
857 | * the static object pool objects into the poll list. After this call | ||
858 | * the object tracker is fully operational. | ||
859 | */ | ||
860 | void __init debug_objects_early_init(void) | ||
861 | { | ||
862 | int i; | ||
863 | |||
864 | for (i = 0; i < ODEBUG_HASH_SIZE; i++) | ||
865 | spin_lock_init(&obj_hash[i].lock); | ||
866 | |||
867 | for (i = 0; i < ODEBUG_POOL_SIZE; i++) | ||
868 | hlist_add_head(&obj_static_pool[i].node, &obj_pool); | ||
869 | } | ||
870 | |||
871 | /* | ||
872 | * Called after the kmem_caches are functional to setup a dedicated | ||
873 | * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag | ||
874 | * prevents that the debug code is called on kmem_cache_free() for the | ||
875 | * debug tracker objects to avoid recursive calls. | ||
876 | */ | ||
877 | void __init debug_objects_mem_init(void) | ||
878 | { | ||
879 | if (!debug_objects_enabled) | ||
880 | return; | ||
881 | |||
882 | obj_cache = kmem_cache_create("debug_objects_cache", | ||
883 | sizeof (struct debug_obj), 0, | ||
884 | SLAB_DEBUG_OBJECTS, NULL); | ||
885 | |||
886 | if (!obj_cache) | ||
887 | debug_objects_enabled = 0; | ||
888 | else | ||
889 | debug_objects_selftest(); | ||
890 | } | ||