Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--    lib/debugobjects.c    59
1 file changed, 47 insertions(+), 12 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 04c1ef717fe0..17afb0430161 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -13,6 +13,7 @@
 #include <linux/debugobjects.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
@@ -52,9 +53,18 @@ static int debug_objects_fixups __read_mostly;
 static int debug_objects_warnings __read_mostly;
 static int debug_objects_enabled __read_mostly
                 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-
+static int debug_objects_pool_size __read_mostly
+                = ODEBUG_POOL_SIZE;
+static int debug_objects_pool_min_level __read_mostly
+                = ODEBUG_POOL_MIN_LEVEL;
 static struct debug_obj_descr *descr_test __read_mostly;
 
+/*
+ * Track numbers of kmem_cache_alloc()/free() calls done.
+ */
+static int debug_objects_allocated;
+static int debug_objects_freed;
+
 static void free_obj_work(struct work_struct *work);
 static DECLARE_WORK(debug_obj_work, free_obj_work);
 
@@ -88,13 +98,13 @@ static void fill_pool(void)
         struct debug_obj *new;
         unsigned long flags;
 
-        if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+        if (likely(obj_pool_free >= debug_objects_pool_min_level))
                 return;
 
         if (unlikely(!obj_cache))
                 return;
 
-        while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+        while (obj_pool_free < debug_objects_pool_min_level) {
 
                 new = kmem_cache_zalloc(obj_cache, gfp);
                 if (!new)
@@ -102,6 +112,7 @@ static void fill_pool(void)
 
                 raw_spin_lock_irqsave(&pool_lock, flags);
                 hlist_add_head(&new->node, &obj_pool);
+                debug_objects_allocated++;
                 obj_pool_free++;
                 raw_spin_unlock_irqrestore(&pool_lock, flags);
         }
@@ -162,24 +173,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 
 /*
  * workqueue function to free objects.
+ *
+ * To reduce contention on the global pool_lock, the actual freeing of
+ * debug objects will be delayed if the pool_lock is busy. We also free
+ * the objects in a batch of 4 for each lock/unlock cycle.
  */
+#define ODEBUG_FREE_BATCH        4
+
 static void free_obj_work(struct work_struct *work)
 {
-        struct debug_obj *obj;
+        struct debug_obj *objs[ODEBUG_FREE_BATCH];
         unsigned long flags;
+        int i;
 
-        raw_spin_lock_irqsave(&pool_lock, flags);
-        while (obj_pool_free > ODEBUG_POOL_SIZE) {
-                obj = hlist_entry(obj_pool.first, typeof(*obj), node);
-                hlist_del(&obj->node);
-                obj_pool_free--;
+        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+                return;
+        while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
+                for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
+                        objs[i] = hlist_entry(obj_pool.first,
+                                              typeof(*objs[0]), node);
+                        hlist_del(&objs[i]->node);
+                }
+
+                obj_pool_free -= ODEBUG_FREE_BATCH;
+                debug_objects_freed += ODEBUG_FREE_BATCH;
                 /*
                  * We release pool_lock across kmem_cache_free() to
                  * avoid contention on pool_lock.
                  */
                 raw_spin_unlock_irqrestore(&pool_lock, flags);
-                kmem_cache_free(obj_cache, obj);
-                raw_spin_lock_irqsave(&pool_lock, flags);
+                for (i = 0; i < ODEBUG_FREE_BATCH; i++)
+                        kmem_cache_free(obj_cache, objs[i]);
+                if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+                        return;
         }
         raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
@@ -198,7 +224,7 @@ static void free_object(struct debug_obj *obj)
          * schedule work when the pool is filled and the cache is
          * initialized:
          */
-        if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+        if (obj_pool_free > debug_objects_pool_size && obj_cache)
                 sched = 1;
         hlist_add_head(&obj->node, &obj_pool);
         obj_pool_free++;
@@ -758,6 +784,8 @@ static int debug_stats_show(struct seq_file *m, void *v)
         seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
         seq_printf(m, "pool_used     :%d\n", obj_pool_used);
         seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
         return 0;
 }
 
@@ -1116,4 +1144,11 @@ void __init debug_objects_mem_init(void)
                 pr_warn("out of memory.\n");
         } else
                 debug_objects_selftest();
+
+        /*
+         * Increase the thresholds for allocating and freeing objects
+         * according to the number of possible CPUs available in the system.
+         */
+        debug_objects_pool_size += num_possible_cpus() * 32;
+        debug_objects_pool_min_level += num_possible_cpus() * 4;
 }