Diffstat (limited to 'lib/debugobjects.c')
 lib/debugobjects.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index f6d57a11c927..e31273b45da5 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -42,11 +42,14 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 static DEFINE_RAW_SPINLOCK(pool_lock);
 
 static HLIST_HEAD(obj_pool);
+static HLIST_HEAD(obj_to_free);
 
 static int obj_pool_min_free = ODEBUG_POOL_SIZE;
 static int obj_pool_free = ODEBUG_POOL_SIZE;
 static int obj_pool_used;
 static int obj_pool_max_used;
+/* The number of objs on the global free list */
+static int obj_nr_tofree;
 static struct kmem_cache *obj_cache;
 
 static int debug_objects_maxchain __read_mostly;
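For orientation, the new state is just a second list plus a mirror counter sitting next to the existing pool, all guarded by the same pool_lock. The sketch below is a plain userspace C analogue (hypothetical names, a pthread mutex instead of the raw spinlock, a hand-rolled singly linked list instead of hlist); it only illustrates the invariant that each counter tracks the length of its list and is touched only under the lock.

#include <pthread.h>

struct obj { struct obj *next; };

/* One lock guards both lists and both counters, as in the patch. */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *obj_pool;    /* preallocated objects ready for reuse  */
static int obj_pool_free;       /* length of obj_pool                    */

static struct obj *obj_to_free; /* objects queued for the worker to free */
static int obj_nr_tofree;       /* length of obj_to_free                 */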
@@ -97,12 +100,32 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new;
+	struct debug_obj *new, *obj;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= debug_objects_pool_min_level))
 		return;
 
+	/*
+	 * Reuse objs from the global free list; they will be reinitialized
+	 * when allocating.
+	 */
+	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+		/*
+		 * Recheck with the lock held as the worker thread might have
+		 * won the race and freed the global free list already.
+		 */
+		if (obj_nr_tofree) {
+			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+			hlist_del(&obj->node);
+			obj_nr_tofree--;
+			hlist_add_head(&obj->node, &obj_pool);
+			obj_pool_free++;
+		}
+		raw_spin_unlock_irqrestore(&pool_lock, flags);
+	}
+
 	if (unlikely(!obj_cache))
 		return;
 
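The new fill_pool() loop above is the classic check/lock/recheck pattern: obj_nr_tofree is peeked without the lock to keep the common path cheap, and tested again with pool_lock held because the worker may have drained obj_to_free in the meantime. A minimal userspace sketch of the same pattern, with a pthread mutex and a plain singly linked list standing in for the kernel primitives (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *free_list;  /* stands in for obj_to_free */
static int nr_free;
static struct node *pool;       /* stands in for obj_pool    */
static int nr_pool;

/* Recycle nodes from the free list into the pool until the pool
 * reaches 'target' or the free list runs dry. */
static void refill_pool(int target)
{
	while (nr_free && nr_pool < target) {
		pthread_mutex_lock(&pool_lock);
		/* Recheck under the lock: a concurrent worker may have
		 * emptied the free list after the lockless test above. */
		if (nr_free) {
			struct node *n = free_list;

			free_list = n->next;
			nr_free--;
			n->next = pool;
			pool = n;
			nr_pool++;
		}
		pthread_mutex_unlock(&pool_lock);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {	/* seed the free list */
		struct node *n = malloc(sizeof(*n));

		n->next = free_list;
		free_list = n;
		nr_free++;
	}
	refill_pool(3);
	printf("pool=%d free=%d\n", nr_pool, nr_free);
	return 0;
}

Dropping and retaking the lock once per recycled object mirrors the kernel loop and keeps the pool_lock hold time bounded even when many objects are queued.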
@@ -186,11 +209,38 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 static void free_obj_work(struct work_struct *work)
 {
 	struct debug_obj *objs[ODEBUG_FREE_BATCH];
+	struct hlist_node *tmp;
+	struct debug_obj *obj;
 	unsigned long flags;
 	int i;
+	HLIST_HEAD(tofree);
 
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 		return;
+
+	/*
+	 * The objs on the pool list might be allocated before the work is
+	 * run, so recheck if pool list is full or not, if not fill pool
+	 * list from the global free list
+	 */
+	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
+		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		hlist_add_head(&obj->node, &obj_pool);
+		obj_pool_free++;
+		obj_nr_tofree--;
+	}
+
+	/*
+	 * Pool list is already full and there are still objs on the free
+	 * list. Move remaining free objs to a temporary list to free the
+	 * memory outside the pool_lock held region.
+	 */
+	if (obj_nr_tofree) {
+		hlist_move_list(&obj_to_free, &tofree);
+		obj_nr_tofree = 0;
+	}
+
 	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
 		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
 			objs[i] = hlist_entry(obj_pool.first,
@@ -211,6 +261,11 @@ static void free_obj_work(struct work_struct *work)
 			return;
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
 }
 
 /*
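Taken together, the two free_obj_work() hunks implement one flow: try the lock without spinning, top the pool back up to debug_objects_pool_size from the global free list, splice whatever remains onto a local tofree list while still holding pool_lock, and only call kmem_cache_free() after the lock is dropped. A compact userspace analogue of that flow, with pthread_mutex_trylock() and free() standing in for the raw spinlock and kmem_cache_free() (all names and the pool target are illustrative, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool, *to_free;	/* obj_pool / obj_to_free analogues   */
static int nr_pool, nr_tofree;
static const int pool_target = 4;	/* stands in for debug_objects_pool_size */

static void free_work(void)
{
	struct node *local = NULL, *n;

	/* Like raw_spin_trylock_irqsave(): back off instead of contending. */
	if (pthread_mutex_trylock(&pool_lock))
		return;

	/* First top the pool back up from the queued-to-free objects. */
	while (nr_tofree && nr_pool < pool_target) {
		n = to_free;
		to_free = n->next;
		nr_tofree--;
		n->next = pool;
		pool = n;
		nr_pool++;
	}

	/* Whatever is left cannot go back into the pool: detach it onto a
	 * local list so the actual freeing happens outside the lock. */
	if (nr_tofree) {
		local = to_free;
		to_free = NULL;
		nr_tofree = 0;
	}
	pthread_mutex_unlock(&pool_lock);

	/* Release memory only after the lock is dropped, mirroring the
	 * hlist_for_each_entry_safe() + kmem_cache_free() loop above. */
	while (local) {
		n = local;
		local = n->next;
		free(n);
	}
}

int main(void)
{
	/* Queue a few nodes, then let the worker recycle some and free the rest. */
	for (int i = 0; i < 8; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = to_free;
		to_free = n;
		nr_tofree++;
	}
	free_work();
	return 0;
}

The net effect matches the patch: the pool is refilled for free, and the allocator is never called with pool_lock held.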
@@ -793,6 +848,7 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;
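The last hunk only makes the new counter visible in the debugobjects stats file. Assuming debugfs is mounted at /sys/kernel/debug and this file's debugfs setup exposes the stats as debug_objects/stats (treat the path as an assumption and adjust for your kernel), a trivial userspace reader for just the new row might look like:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Path is an assumption: debugfs mount point + debug_objects/stats. */
	FILE *f = fopen("/sys/kernel/debug/debug_objects/stats", "r");
	char line[128];

	if (!f) {
		perror("debug_objects stats");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Print only the row added by this change. */
		if (!strncmp(line, "on_free_list", 12))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}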