Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--  lib/debugobjects.c | 321 +++++++++++++++++++++++++++++++++--------
1 file changed, 253 insertions, 68 deletions
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 55437fd5128b..61261195f5b6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -25,16 +25,37 @@
 
 #define ODEBUG_POOL_SIZE	1024
 #define ODEBUG_POOL_MIN_LEVEL	256
+#define ODEBUG_POOL_PERCPU_SIZE	64
+#define ODEBUG_BATCH_SIZE	16
 
 #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
 #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
 #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
 
+/*
+ * We limit the freeing of debug objects via workqueue at a maximum
+ * frequency of 10Hz and about 1024 objects for each freeing operation.
+ * So it is freeing at most 10k debug objects per second.
+ */
+#define ODEBUG_FREE_WORK_MAX	1024
+#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
+
 struct debug_bucket {
 	struct hlist_head	list;
 	raw_spinlock_t		lock;
 };
 
+/*
+ * Debug object percpu free list
+ * Access is protected by disabling irq
+ */
+struct debug_percpu_free {
+	struct hlist_head	free_objs;
+	int			obj_free;
+};
+
+static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
+
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
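
The two ODEBUG_FREE_WORK_* constants added above implement a simple rate cap on the freeing worker: one run at most every HZ/10 ticks, at most 1024 objects per run. The following standalone sketch of that arithmetic is illustration only, not part of the patch; HZ is a kernel build option and the value 250 is just an assumption here.

#include <stdio.h>

/* Illustrative stand-ins for the kernel constants. */
#define HZ			250	/* assumed; configuration dependent */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	((HZ + 9) / 10)	/* DIV_ROUND_UP(HZ, 10) */

int main(void)
{
	/* The worker runs at most every ODEBUG_FREE_WORK_DELAY ticks, i.e.
	 * at most ~10 times per second, freeing at most
	 * ODEBUG_FREE_WORK_MAX objects per run. */
	int runs_per_sec = HZ / ODEBUG_FREE_WORK_DELAY;

	printf("max frees/sec ~= %d\n", runs_per_sec * ODEBUG_FREE_WORK_MAX);
	return 0;
}

With these numbers the cap works out to roughly 10k objects per second, which is what the comment in the patch states.
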
@@ -44,13 +65,20 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
 static HLIST_HEAD(obj_pool);
 static HLIST_HEAD(obj_to_free);
 
+/*
+ * Because of the presence of percpu free pools, obj_pool_free will
+ * under-count those in the percpu free pools. Similarly, obj_pool_used
+ * will over-count those in the percpu free pools. Adjustments will be
+ * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
+ * can be off.
+ */
 static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
 static int			obj_pool_free = ODEBUG_POOL_SIZE;
 static int			obj_pool_used;
 static int			obj_pool_max_used;
+static bool			obj_freeing;
 /* The number of objs on the global free list */
 static int			obj_nr_tofree;
-static struct kmem_cache	*obj_cache;
 
 static int			debug_objects_maxchain __read_mostly;
 static int __maybe_unused	debug_objects_maxchecked __read_mostly;
@@ -63,6 +91,7 @@ static int debug_objects_pool_size __read_mostly
 static int			debug_objects_pool_min_level __read_mostly
 				= ODEBUG_POOL_MIN_LEVEL;
 static struct debug_obj_descr	*descr_test  __read_mostly;
+static struct kmem_cache	*obj_cache __read_mostly;
 
 /*
  * Track numbers of kmem_cache_alloc()/free() calls done.
@@ -71,7 +100,7 @@ static int debug_objects_allocated;
 static int			debug_objects_freed;
 
 static void free_obj_work(struct work_struct *work);
-static DECLARE_WORK(debug_obj_work, free_obj_work);
+static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
 
 static int __init enable_object_debug(char *str)
 {
@@ -100,7 +129,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new, *obj;
+	struct debug_obj *obj;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= debug_objects_pool_min_level))
@@ -116,7 +145,7 @@ static void fill_pool(void)
 	 * Recheck with the lock held as the worker thread might have
 	 * won the race and freed the global free list already.
 	 */
-	if (obj_nr_tofree) {
+	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 		hlist_del(&obj->node);
 		obj_nr_tofree--;
@@ -130,15 +159,23 @@ static void fill_pool(void)
 		return;
 
 	while (obj_pool_free < debug_objects_pool_min_level) {
+		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+		int cnt;
 
-		new = kmem_cache_zalloc(obj_cache, gfp);
-		if (!new)
+		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+			if (!new[cnt])
+				break;
+		}
+		if (!cnt)
 			return;
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&new->node, &obj_pool);
-		debug_objects_allocated++;
-		obj_pool_free++;
+		while (cnt) {
+			hlist_add_head(&new[--cnt]->node, &obj_pool);
+			debug_objects_allocated++;
+			obj_pool_free++;
+		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
 }
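
The reworked fill_pool() above amortizes one pool_lock acquisition over up to ODEBUG_BATCH_SIZE freshly allocated objects. A simplified userspace sketch of the same batching pattern follows, with a pthread mutex standing in for the raw spinlock and calloc() for kmem_cache_zalloc(); it is illustration only and drops all of the kernel's accounting.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 16

struct obj { struct obj *next; };

static struct obj *pool_head;		/* global free pool */
static int pool_free;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Refill the pool until it holds at least min_level objects. Allocation
 * happens outside the lock; insertion is batched under one acquisition. */
static void fill_pool(int min_level)
{
	while (pool_free < min_level) {
		struct obj *new[BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < BATCH_SIZE; cnt++) {
			new[cnt] = calloc(1, sizeof(struct obj));
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		pthread_mutex_lock(&pool_lock);
		while (cnt) {
			struct obj *o = new[--cnt];

			o->next = pool_head;
			pool_head = o;
			pool_free++;
		}
		pthread_mutex_unlock(&pool_lock);
	}
}

int main(void)
{
	fill_pool(256);
	printf("pool_free = %d\n", pool_free);
	return 0;
}

The slow part (allocation) stays outside the lock, and each batch pays for a single lock/unlock round trip instead of one per object.
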
@@ -163,36 +200,81 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
+ * Allocate a new object from the hlist
+ */
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+	struct debug_obj *obj = NULL;
+
+	if (list->first) {
+		obj = hlist_entry(list->first, typeof(*obj), node);
+		hlist_del(&obj->node);
+	}
+
+	return obj;
+}
+
+/*
  * Allocate a new object. If the pool is empty, switch off the debugger.
  * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
-	struct debug_obj *obj = NULL;
+	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+	struct debug_obj *obj;
 
-	raw_spin_lock(&pool_lock);
-	if (obj_pool.first) {
-		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+	if (likely(obj_cache)) {
+		obj = __alloc_object(&percpu_pool->free_objs);
+		if (obj) {
+			percpu_pool->obj_free--;
+			goto init_obj;
+		}
+	}
 
-		obj->object = addr;
-		obj->descr  = descr;
-		obj->state  = ODEBUG_STATE_NONE;
-		obj->astate = 0;
-		hlist_del(&obj->node);
+	raw_spin_lock(&pool_lock);
+	obj = __alloc_object(&obj_pool);
+	if (obj) {
+		obj_pool_used++;
+		obj_pool_free--;
 
-		hlist_add_head(&obj->node, &b->list);
+		/*
+		 * Looking ahead, allocate one batch of debug objects and
+		 * put them into the percpu free pool.
+		 */
+		if (likely(obj_cache)) {
+			int i;
+
+			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+				struct debug_obj *obj2;
+
+				obj2 = __alloc_object(&obj_pool);
+				if (!obj2)
+					break;
+				hlist_add_head(&obj2->node,
+					       &percpu_pool->free_objs);
+				percpu_pool->obj_free++;
+				obj_pool_used++;
+				obj_pool_free--;
+			}
+		}
 
-		obj_pool_used++;
 		if (obj_pool_used > obj_pool_max_used)
 			obj_pool_max_used = obj_pool_used;
 
-		obj_pool_free--;
 		if (obj_pool_free < obj_pool_min_free)
 			obj_pool_min_free = obj_pool_free;
 	}
 	raw_spin_unlock(&pool_lock);
 
+init_obj:
+	if (obj) {
+		obj->object = addr;
+		obj->descr  = descr;
+		obj->state  = ODEBUG_STATE_NONE;
+		obj->astate = 0;
+		hlist_add_head(&obj->node, &b->list);
+	}
 	return obj;
 }
 
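
alloc_object() now tries the irq-protected percpu pool first and only falls back to the pool_lock protected global pool, refilling the percpu pool with a lookahead batch while the lock is already held. A rough userspace analogue, with a per-thread cache in place of the percpu pool and without the kernel's counters or debug state, is sketched below; it is not part of the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 16

struct obj { struct obj *next; };

static _Thread_local struct obj *cache_head;	/* stands in for the percpu pool */
static struct obj *pool_head;			/* global pool */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *pop(struct obj **head)
{
	struct obj *o = *head;

	if (o)
		*head = o->next;
	return o;
}

static void push(struct obj **head, struct obj *o)
{
	o->next = *head;
	*head = o;
}

static struct obj *alloc_object(void)
{
	struct obj *o = pop(&cache_head);

	if (o)					/* fast path: no lock taken */
		return o;

	pthread_mutex_lock(&pool_lock);
	o = pop(&pool_head);
	if (o) {
		/* Look ahead: move one batch into the local cache so the
		 * next allocations avoid the lock entirely. */
		for (int i = 0; i < BATCH_SIZE; i++) {
			struct obj *o2 = pop(&pool_head);

			if (!o2)
				break;
			push(&cache_head, o2);
		}
	}
	pthread_mutex_unlock(&pool_lock);
	return o;
}

int main(void)
{
	for (int i = 0; i < 64; i++)		/* seed the global pool */
		push(&pool_head, calloc(1, sizeof(struct obj)));

	printf("got %p\n", (void *)alloc_object());
	return 0;
}

As in the patch, the common case takes no lock at all, and each miss on the local cache prepays the next BATCH_SIZE allocations.
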
@@ -209,13 +291,19 @@ static void free_obj_work(struct work_struct *work)
 	unsigned long flags;
 	HLIST_HEAD(tofree);
 
+	WRITE_ONCE(obj_freeing, false);
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 		return;
 
+	if (obj_pool_free >= debug_objects_pool_size)
+		goto free_objs;
+
 	/*
 	 * The objs on the pool list might be allocated before the work is
 	 * run, so recheck if pool list it full or not, if not fill pool
-	 * list from the global free list
+	 * list from the global free list. As it is likely that a workload
+	 * may be gearing up to use more and more objects, don't free any
+	 * of them until the next round.
 	 */
 	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
@@ -224,7 +312,10 @@ static void free_obj_work(struct work_struct *work)
 		obj_pool_free++;
 		obj_nr_tofree--;
 	}
+	raw_spin_unlock_irqrestore(&pool_lock, flags);
+	return;
 
+free_objs:
 	/*
 	 * Pool list is already full and there are still objs on the free
 	 * list. Move remaining free objs to a temporary list to free the
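
The restructured free_obj_work() either tops up the global pool from obj_to_free or, once the pool is already full, moves the pending objects to a temporary list and frees them after dropping pool_lock. A minimal sketch of that splice-then-free shape follows; it ignores the refill step, the trylock and the per-run ODEBUG_FREE_WORK_MAX cap, and is not part of the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj { struct obj *next; };

static struct obj *to_free_head;	/* objects queued for deferred freeing */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static bool freeing_scheduled;

/* Deferred worker: detach the whole pending list while holding the lock,
 * then call free() only after dropping it, keeping the critical section
 * short. */
static void free_obj_work(void)
{
	struct obj *tofree;

	freeing_scheduled = false;	/* a later free may rearm the work */

	pthread_mutex_lock(&pool_lock);
	tofree = to_free_head;		/* splice onto a local list head */
	to_free_head = NULL;
	pthread_mutex_unlock(&pool_lock);

	while (tofree) {
		struct obj *o = tofree;

		tofree = o->next;
		free(o);
	}
}

int main(void)
{
	for (int i = 0; i < 8; i++) {	/* queue a few objects */
		struct obj *o = calloc(1, sizeof(*o));

		o->next = to_free_head;
		to_free_head = o;
	}
	free_obj_work();
	return 0;
}
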
@@ -243,24 +334,86 @@
 	}
 }
 
-static bool __free_object(struct debug_obj *obj)
+static void __free_object(struct debug_obj *obj)
 {
+	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
+	struct debug_percpu_free *percpu_pool;
+	int lookahead_count = 0;
 	unsigned long flags;
 	bool work;
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
-	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
+	local_irq_save(flags);
+	if (!obj_cache)
+		goto free_to_obj_pool;
+
+	/*
+	 * Try to free it into the percpu pool first.
+	 */
+	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
+		hlist_add_head(&obj->node, &percpu_pool->free_objs);
+		percpu_pool->obj_free++;
+		local_irq_restore(flags);
+		return;
+	}
+
+	/*
+	 * As the percpu pool is full, look ahead and pull out a batch
+	 * of objects from the percpu pool and free them as well.
+	 */
+	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
+		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
+		if (!objs[lookahead_count])
+			break;
+		percpu_pool->obj_free--;
+	}
+
+free_to_obj_pool:
+	raw_spin_lock(&pool_lock);
+	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
+	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
 	obj_pool_used--;
 
 	if (work) {
 		obj_nr_tofree++;
 		hlist_add_head(&obj->node, &obj_to_free);
+		if (lookahead_count) {
+			obj_nr_tofree += lookahead_count;
+			obj_pool_used -= lookahead_count;
+			while (lookahead_count) {
+				hlist_add_head(&objs[--lookahead_count]->node,
+					       &obj_to_free);
+			}
+		}
+
+		if ((obj_pool_free > debug_objects_pool_size) &&
+		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
+			int i;
+
+			/*
+			 * Free one more batch of objects from obj_pool.
+			 */
+			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+				obj = __alloc_object(&obj_pool);
+				hlist_add_head(&obj->node, &obj_to_free);
+				obj_pool_free--;
+				obj_nr_tofree++;
+			}
+		}
 	} else {
 		obj_pool_free++;
 		hlist_add_head(&obj->node, &obj_pool);
+		if (lookahead_count) {
+			obj_pool_free += lookahead_count;
+			obj_pool_used -= lookahead_count;
+			while (lookahead_count) {
+				hlist_add_head(&objs[--lookahead_count]->node,
+					       &obj_pool);
+			}
+		}
 	}
-	raw_spin_unlock_irqrestore(&pool_lock, flags);
-	return work;
+	raw_spin_unlock(&pool_lock);
+	local_irq_restore(flags);
 }
 
 /*
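
__free_object() now mirrors the allocation side: objects are freed into the percpu pool with only interrupts disabled, and only when that pool is full are this object plus a lookahead batch pushed to the global lists under pool_lock. A condensed userspace sketch of the idea, again with a per-thread cache instead of a percpu pool and a global list that a worker would drain; illustration only.

#include <pthread.h>
#include <stdlib.h>

#define CACHE_MAX  64	/* like ODEBUG_POOL_PERCPU_SIZE */
#define BATCH_SIZE 16	/* like ODEBUG_BATCH_SIZE */

struct obj { struct obj *next; };

static _Thread_local struct obj *cache_head;	/* per-thread free cache */
static _Thread_local int cache_free;

static struct obj *to_free_head;		/* global list, drained elsewhere */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Free into the local cache while there is room; once it is full, push this
 * object plus a lookahead batch from the cache to the global list in one
 * lock/unlock round trip. */
static void free_object(struct obj *obj)
{
	if (cache_free < CACHE_MAX) {		/* fast path, no lock */
		obj->next = cache_head;
		cache_head = obj;
		cache_free++;
		return;
	}

	pthread_mutex_lock(&pool_lock);
	obj->next = to_free_head;
	to_free_head = obj;
	for (int i = 0; i < BATCH_SIZE && cache_head; i++) {
		struct obj *o = cache_head;

		cache_head = o->next;
		cache_free--;
		o->next = to_free_head;
		to_free_head = o;
	}
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		free_object(calloc(1, sizeof(struct obj)));
	return 0;	/* the queued objects intentionally leak in this sketch */
}
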
@@ -269,8 +422,11 @@ static bool __free_object(struct debug_obj *obj)
  */
 static void free_object(struct debug_obj *obj)
 {
-	if (__free_object(obj))
-		schedule_work(&debug_obj_work);
+	__free_object(obj);
+	if (!obj_freeing && obj_nr_tofree) {
+		WRITE_ONCE(obj_freeing, true);
+		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+	}
 }
 
 /*
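
free_object() no longer schedules the worker on every free: the new obj_freeing flag keeps at most one delayed run pending, and free_obj_work() clears it before doing anything so that it can be rearmed. A toy sketch of that arm-at-most-once pattern, with a stub standing in for schedule_delayed_work(); not part of the patch.

#include <stdbool.h>
#include <stdio.h>

static bool obj_freeing;		/* a deferred free run is already pending */
static int  obj_nr_tofree = 3;		/* pretend some objects are queued */

/* Stand-in for schedule_delayed_work(); here it only reports the request. */
static void schedule_free_work(void)
{
	printf("free worker armed\n");
}

/* Arm at most one deferred free run; the worker resets obj_freeing when it
 * starts, so the next free can arm it again. */
static void maybe_schedule_free(void)
{
	if (!obj_freeing && obj_nr_tofree) {
		obj_freeing = true;
		schedule_free_work();
	}
}

int main(void)
{
	maybe_schedule_free();
	maybe_schedule_free();		/* no-op: a run is already pending */
	return 0;
}
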
@@ -372,6 +528,7 @@ static void
 __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 {
 	enum debug_obj_state state;
+	bool check_stack = false;
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
@@ -391,7 +548,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 			debug_objects_oom();
 			return;
 		}
-		debug_object_is_on_stack(addr, onstack);
+		check_stack = true;
 	}
 
 	switch (obj->state) {
@@ -402,20 +559,23 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 		break;
 
 	case ODEBUG_STATE_ACTIVE:
-		debug_print_object(obj, "init");
 		state = obj->state;
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_print_object(obj, "init");
 		debug_object_fixup(descr->fixup_init, addr, state);
 		return;
 
 	case ODEBUG_STATE_DESTROYED:
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 		debug_print_object(obj, "init");
-		break;
+		return;
 	default:
 		break;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (check_stack)
+		debug_object_is_on_stack(addr, onstack);
 }
 
 /**
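
A change that recurs through the rest of the patch is visible above: debug_print_object() and debug_object_is_on_stack() are now called only after the bucket lock has been dropped, with a local flag remembering the decision that was made under the lock. A small standalone illustration of that defer-the-report pattern, using a pthread spinlock and fprintf() in place of the kernel primitives; not part of the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t bucket_lock;
static int obj_state = 2;		/* some unexpected state */

/* Decide under the lock, report after dropping it: printing can be slow and
 * may take other locks, so it must not happen inside the critical section. */
static void check_object(void)
{
	bool print_object = false;
	int state;

	pthread_spin_lock(&bucket_lock);
	state = obj_state;
	if (state != 0)
		print_object = true;
	pthread_spin_unlock(&bucket_lock);

	if (print_object)
		fprintf(stderr, "unexpected state %d\n", state);
}

int main(void)
{
	pthread_spin_init(&bucket_lock, PTHREAD_PROCESS_PRIVATE);
	check_object();
	pthread_spin_destroy(&bucket_lock);
	return 0;
}
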
@@ -473,6 +633,8 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 
 	obj = lookup_object(addr, db);
 	if (obj) {
+		bool print_object = false;
+
 		switch (obj->state) {
 		case ODEBUG_STATE_INIT:
 		case ODEBUG_STATE_INACTIVE:
@@ -481,14 +643,14 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 			break;
 
 		case ODEBUG_STATE_ACTIVE:
-			debug_print_object(obj, "activate");
 			state = obj->state;
 			raw_spin_unlock_irqrestore(&db->lock, flags);
+			debug_print_object(obj, "activate");
 			ret = debug_object_fixup(descr->fixup_activate, addr, state);
 			return ret ? 0 : -EINVAL;
 
 		case ODEBUG_STATE_DESTROYED:
-			debug_print_object(obj, "activate");
+			print_object = true;
 			ret = -EINVAL;
 			break;
 		default:
@@ -496,10 +658,13 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 			break;
 		}
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		if (print_object)
+			debug_print_object(obj, "activate");
 		return ret;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+
 	/*
 	 * We are here when a static object is activated. We
 	 * let the type specific code confirm whether this is
@@ -531,6 +696,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -548,24 +714,27 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 			if (!obj->astate)
 				obj->state = ODEBUG_STATE_INACTIVE;
 			else
-				debug_print_object(obj, "deactivate");
+				print_object = true;
 			break;
 
 		case ODEBUG_STATE_DESTROYED:
-			debug_print_object(obj, "deactivate");
+			print_object = true;
 			break;
 		default:
 			break;
 		}
-	} else {
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (!obj) {
 		struct debug_obj o = { .object = addr,
 				       .state = ODEBUG_STATE_NOTAVAILABLE,
 				       .descr = descr };
 
 		debug_print_object(&o, "deactivate");
+	} else if (print_object) {
+		debug_print_object(obj, "deactivate");
 	}
-
-	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 EXPORT_SYMBOL_GPL(debug_object_deactivate);
 
@@ -580,6 +749,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -599,20 +769,22 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 		obj->state = ODEBUG_STATE_DESTROYED;
 		break;
 	case ODEBUG_STATE_ACTIVE:
-		debug_print_object(obj, "destroy");
 		state = obj->state;
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_print_object(obj, "destroy");
 		debug_object_fixup(descr->fixup_destroy, addr, state);
 		return;
 
 	case ODEBUG_STATE_DESTROYED:
-		debug_print_object(obj, "destroy");
+		print_object = true;
 		break;
 	default:
 		break;
 	}
 out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (print_object)
+		debug_print_object(obj, "destroy");
 }
 EXPORT_SYMBOL_GPL(debug_object_destroy);
 
@@ -641,9 +813,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
 
 	switch (obj->state) {
 	case ODEBUG_STATE_ACTIVE:
-		debug_print_object(obj, "free");
 		state = obj->state;
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_print_object(obj, "free");
 		debug_object_fixup(descr->fixup_free, addr, state);
 		return;
 	default:
@@ -716,6 +888,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -731,22 +904,25 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 			if (obj->astate == expect)
 				obj->astate = next;
 			else
-				debug_print_object(obj, "active_state");
+				print_object = true;
 			break;
 
 		default:
-			debug_print_object(obj, "active_state");
+			print_object = true;
 			break;
 		}
-	} else {
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (!obj) {
 		struct debug_obj o = { .object = addr,
 				       .state = ODEBUG_STATE_NOTAVAILABLE,
 				       .descr = descr };
 
 		debug_print_object(&o, "active_state");
+	} else if (print_object) {
+		debug_print_object(obj, "active_state");
 	}
-
-	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 EXPORT_SYMBOL_GPL(debug_object_active_state);
 
@@ -760,7 +936,6 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 	struct hlist_node *tmp;
 	struct debug_obj *obj;
 	int cnt, objs_checked = 0;
-	bool work = false;
 
 	saddr = (unsigned long) address;
 	eaddr = saddr + size;
@@ -782,16 +957,16 @@ repeat:
 
 		switch (obj->state) {
 		case ODEBUG_STATE_ACTIVE:
-			debug_print_object(obj, "free");
 			descr = obj->descr;
 			state = obj->state;
 			raw_spin_unlock_irqrestore(&db->lock, flags);
+			debug_print_object(obj, "free");
 			debug_object_fixup(descr->fixup_free,
 					   (void *) oaddr, state);
 			goto repeat;
 		default:
 			hlist_del(&obj->node);
-			work |= __free_object(obj);
+			__free_object(obj);
 			break;
 		}
 	}
@@ -807,8 +982,10 @@ repeat:
 		debug_objects_maxchecked = objs_checked;
 
 	/* Schedule work to actually kmem_cache_free() objects */
-	if (work)
-		schedule_work(&debug_obj_work);
+	if (!obj_freeing && obj_nr_tofree) {
+		WRITE_ONCE(obj_freeing, true);
+		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+	}
 }
 
 void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -822,13 +999,19 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
 
 static int debug_stats_show(struct seq_file *m, void *v)
 {
+	int cpu, obj_percpu_free = 0;
+
+	for_each_possible_cpu(cpu)
+		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
+
 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
-	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
+	seq_printf(m, "pool_free     :%d\n", obj_pool_free + obj_percpu_free);
+	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
-	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
+	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
 	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
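
debug_stats_show() compensates for objects parked in the percpu pools: the reported pool_free adds the percpu counts to the global counter and pool_used subtracts them, as described in the comment added near the top of the file. A tiny self-contained example of the adjustment, with fixed arrays and made-up numbers instead of per_cpu() accessors; not part of the patch.

#include <stdio.h>

#define NR_CPUS 4

static int percpu_obj_free[NR_CPUS] = { 5, 12, 0, 7 };
static int obj_pool_free = 700;		/* global counters, as maintained */
static int obj_pool_used = 300;		/* under pool_lock */

int main(void)
{
	int cpu, obj_percpu_free = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		obj_percpu_free += percpu_obj_free[cpu];

	printf("pool_free     :%d\n", obj_pool_free + obj_percpu_free);
	printf("pool_pcp_free :%d\n", obj_percpu_free);
	printf("pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	return 0;
}
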
@@ -850,26 +1033,16 @@ static const struct file_operations debug_stats_fops = {
 
 static int __init debug_objects_init_debugfs(void)
 {
-	struct dentry *dbgdir, *dbgstats;
+	struct dentry *dbgdir;
 
 	if (!debug_objects_enabled)
 		return 0;
 
 	dbgdir = debugfs_create_dir("debug_objects", NULL);
-	if (!dbgdir)
-		return -ENOMEM;
 
-	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
-				       &debug_stats_fops);
-	if (!dbgstats)
-		goto err;
+	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
 
 	return 0;
-
-err:
-	debugfs_remove(dbgdir);
-
-	return -ENOMEM;
 }
 __initcall(debug_objects_init_debugfs);
 
@@ -1175,9 +1348,20 @@ free:
  */
 void __init debug_objects_mem_init(void)
 {
+	int cpu, extras;
+
 	if (!debug_objects_enabled)
 		return;
 
+	/*
+	 * Initialize the percpu object pools
+	 *
+	 * Initialization is not strictly necessary, but was done for
+	 * completeness.
+	 */
+	for_each_possible_cpu(cpu)
+		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
+
 	obj_cache = kmem_cache_create("debug_objects_cache",
 				      sizeof (struct debug_obj), 0,
 				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
@@ -1194,6 +1378,7 @@ void __init debug_objects_mem_init(void)
 	 * Increase the thresholds for allocating and freeing objects
 	 * according to the number of possible CPUs available in the system.
 	 */
-	debug_objects_pool_size += num_possible_cpus() * 32;
-	debug_objects_pool_min_level += num_possible_cpus() * 4;
+	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
+	debug_objects_pool_size += extras;
+	debug_objects_pool_min_level += extras;
 }
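
Finally, the pool thresholds now scale with num_possible_cpus() * ODEBUG_BATCH_SIZE instead of the old hard-coded 32 and 4 per CPU. A quick standalone calculation of the resulting sizes, with sysconf() standing in for num_possible_cpus(); not part of the patch.

#include <stdio.h>
#include <unistd.h>

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_BATCH_SIZE	16

int main(void)
{
	long cpus = sysconf(_SC_NPROCESSORS_CONF);
	long extras;

	if (cpus < 1)
		cpus = 1;
	extras = cpus * ODEBUG_BATCH_SIZE;

	printf("pool_size      = %ld\n", ODEBUG_POOL_SIZE + extras);
	printf("pool_min_level = %ld\n", ODEBUG_POOL_MIN_LEVEL + extras);
	return 0;
}
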