Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--	lib/debugobjects.c	141
1 file changed, 92 insertions(+), 49 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2f5349c6e81a..994be4805cec 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -42,14 +42,18 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 static DEFINE_RAW_SPINLOCK(pool_lock);
 
 static HLIST_HEAD(obj_pool);
+static HLIST_HEAD(obj_to_free);
 
 static int obj_pool_min_free = ODEBUG_POOL_SIZE;
 static int obj_pool_free = ODEBUG_POOL_SIZE;
 static int obj_pool_used;
 static int obj_pool_max_used;
+/* The number of objs on the global free list */
+static int obj_nr_tofree;
 static struct kmem_cache *obj_cache;
 
 static int debug_objects_maxchain __read_mostly;
+static int __maybe_unused debug_objects_maxchecked __read_mostly;
 static int debug_objects_fixups __read_mostly;
 static int debug_objects_warnings __read_mostly;
 static int debug_objects_enabled __read_mostly
@@ -96,12 +100,32 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new;
+	struct debug_obj *new, *obj;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= debug_objects_pool_min_level))
 		return;
 
+	/*
+	 * Reuse objs from the global free list; they will be reinitialized
+	 * when allocating.
+	 */
+	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+		/*
+		 * Recheck with the lock held as the worker thread might have
+		 * won the race and freed the global free list already.
+		 */
+		if (obj_nr_tofree) {
+			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+			hlist_del(&obj->node);
+			obj_nr_tofree--;
+			hlist_add_head(&obj->node, &obj_pool);
+			obj_pool_free++;
+		}
+		raw_spin_unlock_irqrestore(&pool_lock, flags);
+	}
+
 	if (unlikely(!obj_cache))
 		return;
 
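For readers without the rest of the file at hand, here is a minimal, single-threaded userspace sketch of the refill policy this hunk introduces: fill_pool() now recycles entries parked on the global free list before it falls back to the allocator. The struct obj, pool_min_level and the plain malloc() fallback are illustrative stand-ins; none of this is the kernel code, which additionally takes pool_lock and allocates from obj_cache.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
};

static struct obj *obj_pool;	/* free objects ready to hand out */
static struct obj *obj_to_free;	/* objects queued for actual freeing */
static int obj_pool_free, obj_nr_tofree;
static const int pool_min_level = 4;

static void fill_pool(void)
{
	/* Recycle queued-to-free objects first; they are as good as new. */
	while (obj_nr_tofree && obj_pool_free < pool_min_level) {
		struct obj *o = obj_to_free;

		obj_to_free = o->next;
		obj_nr_tofree--;
		o->next = obj_pool;
		obj_pool = o;
		obj_pool_free++;
	}

	/* Fall back to the allocator only if recycling was not enough. */
	while (obj_pool_free < pool_min_level) {
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return;
		o->next = obj_pool;
		obj_pool = o;
		obj_pool_free++;
	}
}

int main(void)
{
	/* Park two objects on the to-free list, then refill the pool. */
	for (int i = 0; i < 2; i++) {
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return 1;
		o->next = obj_to_free;
		obj_to_free = o;
		obj_nr_tofree++;
	}
	fill_pool();
	printf("pool_free=%d on_free_list=%d\n", obj_pool_free, obj_nr_tofree);
	return 0;
}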
@@ -177,62 +201,76 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  * workqueue function to free objects.
  *
  * To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy. We also free
- * the objects in a batch of 4 for each lock/unlock cycle.
+ * debug objects will be delayed if the pool_lock is busy.
  */
-#define ODEBUG_FREE_BATCH	4
-
 static void free_obj_work(struct work_struct *work)
 {
-	struct debug_obj *objs[ODEBUG_FREE_BATCH];
+	struct hlist_node *tmp;
+	struct debug_obj *obj;
 	unsigned long flags;
-	int i;
+	HLIST_HEAD(tofree);
 
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 		return;
-	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
-		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
-			objs[i] = hlist_entry(obj_pool.first,
-					      typeof(*objs[0]), node);
-			hlist_del(&objs[i]->node);
-		}
 
-		obj_pool_free -= ODEBUG_FREE_BATCH;
-		debug_objects_freed += ODEBUG_FREE_BATCH;
-		/*
-		 * We release pool_lock across kmem_cache_free() to
-		 * avoid contention on pool_lock.
-		 */
-		raw_spin_unlock_irqrestore(&pool_lock, flags);
-		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
-			kmem_cache_free(obj_cache, objs[i]);
-		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
-			return;
+	/*
+	 * The objs on the pool list might be allocated before the work is
+	 * run, so recheck if pool list it full or not, if not fill pool
+	 * list from the global free list
+	 */
+	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
+		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		hlist_add_head(&obj->node, &obj_pool);
+		obj_pool_free++;
+		obj_nr_tofree--;
+	}
+
+	/*
+	 * Pool list is already full and there are still objs on the free
+	 * list. Move remaining free objs to a temporary list to free the
+	 * memory outside the pool_lock held region.
+	 */
+	if (obj_nr_tofree) {
+		hlist_move_list(&obj_to_free, &tofree);
+		debug_objects_freed += obj_nr_tofree;
+		obj_nr_tofree = 0;
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
 }
 
-/*
- * Put the object back into the pool and schedule work to free objects
- * if necessary.
- */
-static void free_object(struct debug_obj *obj)
+static bool __free_object(struct debug_obj *obj)
 {
 	unsigned long flags;
-	int sched = 0;
+	bool work;
 
 	raw_spin_lock_irqsave(&pool_lock, flags);
-	/*
-	 * schedule work when the pool is filled and the cache is
-	 * initialized:
-	 */
-	if (obj_pool_free > debug_objects_pool_size && obj_cache)
-		sched = 1;
-	hlist_add_head(&obj->node, &obj_pool);
-	obj_pool_free++;
+	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
 	obj_pool_used--;
+
+	if (work) {
+		obj_nr_tofree++;
+		hlist_add_head(&obj->node, &obj_to_free);
+	} else {
+		obj_pool_free++;
+		hlist_add_head(&obj->node, &obj_pool);
+	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
-	if (sched)
+	return work;
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	if (__free_object(obj))
 		schedule_work(&debug_obj_work);
 }
 
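As a companion to the hunk above, here is a compact userspace model of the reworked free path: __free_object() either returns the object straight to the pool or parks it on the global free list and reports that deferred work is needed, and the drain step (modelled on free_obj_work()) first tops the pool back up from that list and only then hands the surplus back to the allocator. It is single-threaded and uses malloc()/free(); struct obj and pool_size are stand-ins, and the kernel's obj_pool_used accounting and locking are deliberately left out.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
};

static struct obj *obj_pool, *obj_to_free;
static int obj_pool_free, obj_nr_tofree;
static const int pool_size = 2;		/* target number of pooled objects */

static bool __free_object(struct obj *o)
{
	bool work = obj_pool_free > pool_size;

	if (work) {
		/* Pool is full enough: queue for deferred freeing. */
		o->next = obj_to_free;
		obj_to_free = o;
		obj_nr_tofree++;
	} else {
		/* Keep it for reuse. */
		o->next = obj_pool;
		obj_pool = o;
		obj_pool_free++;
	}
	return work;
}

static void free_obj_work(void)
{
	/* Refill the pool from the to-free list before giving memory back. */
	while (obj_nr_tofree && obj_pool_free < pool_size) {
		struct obj *o = obj_to_free;

		obj_to_free = o->next;
		obj_nr_tofree--;
		o->next = obj_pool;
		obj_pool = o;
		obj_pool_free++;
	}

	/* Whatever is still queued really goes back to the allocator. */
	while (obj_to_free) {
		struct obj *o = obj_to_free;

		obj_to_free = o->next;
		obj_nr_tofree--;
		free(o);
	}
}

int main(void)
{
	bool work = false;

	/* Free five objects; the pool keeps a few, the rest get queued. */
	for (int i = 0; i < 5; i++) {
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			break;
		work |= __free_object(o);
	}

	printf("pooled=%d queued=%d work=%d\n",
	       obj_pool_free, obj_nr_tofree, work);
	if (work)
		free_obj_work();
	printf("after drain: pooled=%d queued=%d\n",
	       obj_pool_free, obj_nr_tofree);
	return 0;
}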
@@ -714,13 +752,13 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-	struct hlist_node *tmp;
-	HLIST_HEAD(freelist);
 	struct debug_obj_descr *descr;
 	enum debug_obj_state state;
 	struct debug_bucket *db;
+	struct hlist_node *tmp;
 	struct debug_obj *obj;
-	int cnt;
+	int cnt, objs_checked = 0;
+	bool work = false;
 
 	saddr = (unsigned long) address;
 	eaddr = saddr + size;
@@ -751,21 +789,24 @@ repeat:
 				goto repeat;
 			default:
 				hlist_del(&obj->node);
-				hlist_add_head(&obj->node, &freelist);
+				work |= __free_object(obj);
 				break;
 			}
 		}
 		raw_spin_unlock_irqrestore(&db->lock, flags);
 
-		/* Now free them */
-		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
-			hlist_del(&obj->node);
-			free_object(obj);
-		}
-
 		if (cnt > debug_objects_maxchain)
 			debug_objects_maxchain = cnt;
+
+		objs_checked += cnt;
 	}
+
+	if (objs_checked > debug_objects_maxchecked)
+		debug_objects_maxchecked = objs_checked;
+
+	/* Schedule work to actually kmem_cache_free() objects */
+	if (work)
+		schedule_work(&debug_obj_work);
 }
 
 void debug_check_no_obj_freed(const void *address, unsigned long size)
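The hunk above changes __debug_check_no_obj_freed() from freeing objects through a per-bucket freelist to OR-accumulating the return value of __free_object() and kicking the worker at most once per scan, while also counting how many objects were checked. The sketch below illustrates only that accumulation pattern; buckets[], the trivial __free_object() stub and the printf() stand in for the kernel's hash buckets, real free path and schedule_work(), and are purely hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define NR_BUCKETS 4

static int buckets[NR_BUCKETS] = { 3, 0, 7, 2 };	/* objects per bucket */
static int debug_objects_maxchain, debug_objects_maxchecked;

/* Stub: pretend some objects leave deferred work behind when freed. */
static bool __free_object(int nr)
{
	return nr % 2 == 0;
}

static void check_no_obj_freed(void)
{
	int objs_checked = 0;
	bool work = false;

	for (int b = 0; b < NR_BUCKETS; b++) {
		int cnt = 0;

		for (int i = 0; i < buckets[b]; i++) {
			cnt++;
			/* Accumulate the "needs work" flag, don't act yet. */
			work |= __free_object(i);
		}
		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* One deferred kick for the whole scan instead of one per object. */
	if (work)
		printf("schedule work once for %d checked objects\n", objs_checked);

	printf("max_chain=%d max_checked=%d\n",
	       debug_objects_maxchain, debug_objects_maxchecked);
}

int main(void)
{
	check_no_obj_freed();
	return 0;
}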
@@ -780,12 +821,14 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
 static int debug_stats_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
+	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
 	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;