Diffstat (limited to 'lib')
 lib/Kconfig.debug  | 40
 lib/debugobjects.c | 58
 lib/ioremap.c      |  1
 lib/radix-tree.c   |  2
 lib/timerqueue.c   |  3
 5 files changed, 74 insertions(+), 30 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index eb9e9a7870fa..acedbe626d47 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -716,6 +716,19 @@ source "lib/Kconfig.kmemcheck"
 
 source "lib/Kconfig.kasan"
 
+config DEBUG_REFCOUNT
+	bool "Verbose refcount checks"
+	help
+	  Say Y here if you want reference counters (refcount_t and kref) to
+	  generate WARNs on dubious usage. Without this refcount_t will still
+	  be a saturating counter and avoid Use-After-Free by turning it into
+	  a resource leak Denial-Of-Service.
+
+	  Use of this option will increase kernel text size but will alert the
+	  admin of potential abuse.
+
+	  If in doubt, say "N".
+
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
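
The help text above leans on behaviour refcount_t provides even without this option: the counter saturates rather than wrapping, so a counting bug leaks the object instead of freeing it while references remain, and DEBUG_REFCOUNT merely adds the WARNs. A minimal userspace sketch of that saturation idea, using made-up names (this is not the kernel's refcount_t implementation):

/*
 * Toy illustration only -- NOT the kernel's refcount_t code.
 * The counter pins itself at a ceiling on suspicious operations,
 * so a refcounting bug leaks the object instead of freeing it early.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_SATURATED	INT_MAX

struct toy_ref {
	int count;				/* stands in for refcount_t */
};

static void toy_ref_inc(struct toy_ref *r)
{
	if (r->count == REF_SATURATED)
		return;				/* already pinned: stay leaked */
	if (r->count == 0 || r->count == REF_SATURATED - 1) {
		/* increment-from-zero or imminent overflow: saturate and warn */
		r->count = REF_SATURATED;
		fprintf(stderr, "WARN: dubious refcount increment\n");
		return;
	}
	r->count++;
}

/* Returns true only when the caller really dropped the last reference. */
static bool toy_ref_dec_and_test(struct toy_ref *r)
{
	if (r->count == REF_SATURATED) {
		fprintf(stderr, "WARN: decrement of saturated refcount\n");
		return false;			/* never free: leak instead */
	}
	return --r->count == 0;
}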
@@ -980,20 +993,6 @@ config DEBUG_TIMEKEEPING
 
 	  If unsure, say N.
 
-config TIMER_STATS
-	bool "Collect kernel timers statistics"
-	depends on DEBUG_KERNEL && PROC_FS
-	help
-	  If you say Y here, additional code will be inserted into the
-	  timer routines to collect statistics about kernel timers being
-	  reprogrammed. The statistics can be read from /proc/timer_stats.
-	  The statistics collection is started by writing 1 to /proc/timer_stats,
-	  writing 0 stops it. This feature is useful to collect information
-	  about timer usage patterns in kernel and userspace. This feature
-	  is lightweight if enabled in the kernel config but not activated
-	  (it defaults to deactivated on bootup and will only be activated
-	  if some application like powertop activates it explicitly).
-
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
@@ -1180,6 +1179,18 @@ config LOCK_TORTURE_TEST
 	  Say M if you want these torture tests to build as a module.
 	  Say N if you are unsure.
 
+config WW_MUTEX_SELFTEST
+	tristate "Wait/wound mutex selftests"
+	help
+	  This option provides a kernel module that runs tests on the
+	  struct ww_mutex locking API.
+
+	  It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction
+	  with this test harness.
+
+	  Say M if you want these self tests to build as a module.
+	  Say N if you are unsure.
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
@@ -1450,6 +1461,7 @@ config RCU_CPU_STALL_TIMEOUT
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
+	default y if TREE_RCU
 	select TRACE_CLOCK
 	help
 	  This option provides tracing in RCU which presents stats
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 04c1ef717fe0..8c28cbd7e104 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -52,9 +52,18 @@ static int debug_objects_fixups __read_mostly;
 static int debug_objects_warnings __read_mostly;
 static int debug_objects_enabled __read_mostly
 				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-
+static int debug_objects_pool_size __read_mostly
+				= ODEBUG_POOL_SIZE;
+static int debug_objects_pool_min_level __read_mostly
+				= ODEBUG_POOL_MIN_LEVEL;
 static struct debug_obj_descr *descr_test __read_mostly;
 
+/*
+ * Track numbers of kmem_cache_alloc()/free() calls done.
+ */
+static int debug_objects_allocated;
+static int debug_objects_freed;
+
 static void free_obj_work(struct work_struct *work);
 static DECLARE_WORK(debug_obj_work, free_obj_work);
 
@@ -88,13 +97,13 @@ static void fill_pool(void)
 	struct debug_obj *new;
 	unsigned long flags;
 
-	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+	if (likely(obj_pool_free >= debug_objects_pool_min_level))
 		return;
 
 	if (unlikely(!obj_cache))
 		return;
 
-	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+	while (obj_pool_free < debug_objects_pool_min_level) {
 
 		new = kmem_cache_zalloc(obj_cache, gfp);
 		if (!new)
@@ -102,6 +111,7 @@ static void fill_pool(void)
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
+		debug_objects_allocated++;
 		obj_pool_free++;
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -162,24 +172,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 
 /*
  * workqueue function to free objects.
+ *
+ * To reduce contention on the global pool_lock, the actual freeing of
+ * debug objects will be delayed if the pool_lock is busy. We also free
+ * the objects in a batch of 4 for each lock/unlock cycle.
  */
+#define ODEBUG_FREE_BATCH	4
+
 static void free_obj_work(struct work_struct *work)
 {
-	struct debug_obj *obj;
+	struct debug_obj *objs[ODEBUG_FREE_BATCH];
 	unsigned long flags;
+	int i;
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
-	while (obj_pool_free > ODEBUG_POOL_SIZE) {
-		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
-		hlist_del(&obj->node);
-		obj_pool_free--;
+	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+		return;
+	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
+		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
+			objs[i] = hlist_entry(obj_pool.first,
+					      typeof(*objs[0]), node);
+			hlist_del(&objs[i]->node);
+		}
+
+		obj_pool_free -= ODEBUG_FREE_BATCH;
+		debug_objects_freed += ODEBUG_FREE_BATCH;
 		/*
 		 * We release pool_lock across kmem_cache_free() to
 		 * avoid contention on pool_lock.
 		 */
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
-		kmem_cache_free(obj_cache, obj);
-		raw_spin_lock_irqsave(&pool_lock, flags);
+		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
+			kmem_cache_free(obj_cache, objs[i]);
+		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+			return;
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
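
The comment block added above summarises the new strategy: bail out if pool_lock is contended, and otherwise detach objects four at a time, dropping the lock while kmem_cache_free() runs. A standalone userspace sketch of the same pattern, with illustrative names and a pthread mutex standing in for the raw spinlock:

/*
 * Illustrative sketch of the "trylock, detach a batch, free with the
 * lock dropped" pattern. All names here are made up; this is not the
 * debugobjects code itself.
 */
#include <pthread.h>
#include <stdlib.h>

#define FREE_BATCH 4

struct node {
	struct node *next;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pool_head;
static int pool_free;
static int pool_limit = 64;

static void free_excess_objects(void)
{
	struct node *batch[FREE_BATCH];
	int i;

	/* Someone else holds the lock: try again on the next invocation. */
	if (pthread_mutex_trylock(&pool_lock))
		return;

	while (pool_free >= pool_limit + FREE_BATCH) {
		/* Unlink a whole batch while holding the lock... */
		for (i = 0; i < FREE_BATCH; i++) {
			batch[i] = pool_head;
			pool_head = pool_head->next;
		}
		pool_free -= FREE_BATCH;

		/* ...but call free() only after the lock is dropped. */
		pthread_mutex_unlock(&pool_lock);
		for (i = 0; i < FREE_BATCH; i++)
			free(batch[i]);

		if (pthread_mutex_trylock(&pool_lock))
			return;
	}
	pthread_mutex_unlock(&pool_lock);
}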
@@ -198,7 +223,7 @@ static void free_object(struct debug_obj *obj)
 	 * schedule work when the pool is filled and the cache is
 	 * initialized:
 	 */
-	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+	if (obj_pool_free > debug_objects_pool_size && obj_cache)
 		sched = 1;
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
@@ -758,6 +783,8 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;
 }
 
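
For context, the two new counters are appended to the existing debugfs statistics output. Assuming that file still lives at /sys/kernel/debug/debug_objects/stats, the tail of it would now read roughly as follows (the numbers are purely illustrative):

	pool_min_free :256
	pool_used     :37
	pool_max_used :1536
	objs_allocated:20480
	objs_freed    :18944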
@@ -1116,4 +1143,11 @@ void __init debug_objects_mem_init(void)
 		pr_warn("out of memory.\n");
 	} else
 		debug_objects_selftest();
+
+	/*
+	 * Increase the thresholds for allocating and freeing objects
+	 * according to the number of possible CPUs available in the system.
+	 */
+	debug_objects_pool_size += num_possible_cpus() * 32;
+	debug_objects_pool_min_level += num_possible_cpus() * 4;
 }
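
Assuming the existing defaults of ODEBUG_POOL_SIZE = 1024 and ODEBUG_POOL_MIN_LEVEL = 256 are unchanged, a system with 16 possible CPUs ends up with debug_objects_pool_size = 1024 + 16 * 32 = 1536 and debug_objects_pool_min_level = 256 + 16 * 4 = 320, so both the refill and free thresholds grow linearly with the possible CPU count.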
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0b92d605fb69..84812a9fb16f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
 			struct radix_tree_node *old = child;
 			offset = child->offset + 1;
 			child = child->parent;
-			WARN_ON_ONCE(!list_empty(&node->private_list));
+			WARN_ON_ONCE(!list_empty(&old->private_list));
 			radix_tree_node_free(old);
 			if (old == entry_to_node(node))
 				return;
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index adc6ee0a5126..4a720ed4fdaf 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -80,8 +80,7 @@ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 	if (head->next == node) {
 		struct rb_node *rbn = rb_next(&node->node);
 
-		head->next = rbn ?
-			rb_entry(rbn, struct timerqueue_node, node) : NULL;
+		head->next = rb_entry_safe(rbn, struct timerqueue_node, node);
 	}
 	rb_erase(&node->node, &head->head);
 	RB_CLEAR_NODE(&node->node);
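
For reference, rb_entry_safe() folds the NULL check into the container lookup, which is why the open-coded ternary can go away. The helper in include/linux/rbtree.h looks roughly like this (quoted from memory, shown only for context):

	#define rb_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
		})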