Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    40
-rw-r--r--  lib/Makefile          2
-rw-r--r--  lib/debugobjects.c   58
-rw-r--r--  lib/halfmd4.c        67
-rw-r--r--  lib/sbitmap.c       139
-rw-r--r--  lib/timerqueue.c      3
6 files changed, 205 insertions, 104 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index eb9e9a7870fa..acedbe626d47 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -716,6 +716,19 @@ source "lib/Kconfig.kmemcheck"
 
 source "lib/Kconfig.kasan"
 
+config DEBUG_REFCOUNT
+	bool "Verbose refcount checks"
+	help
+	  Say Y here if you want reference counters (refcount_t and kref) to
+	  generate WARNs on dubious usage. Without this refcount_t will still
+	  be a saturating counter and avoid Use-After-Free by turning it into
+	  a resource leak Denial-Of-Service.
+
+	  Use of this option will increase kernel text size but will alert the
+	  admin of potential abuse.
+
+	  If in doubt, say "N".
+
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
@@ -980,20 +993,6 @@ config DEBUG_TIMEKEEPING
 
 	  If unsure, say N.
 
-config TIMER_STATS
-	bool "Collect kernel timers statistics"
-	depends on DEBUG_KERNEL && PROC_FS
-	help
-	  If you say Y here, additional code will be inserted into the
-	  timer routines to collect statistics about kernel timers being
-	  reprogrammed. The statistics can be read from /proc/timer_stats.
-	  The statistics collection is started by writing 1 to /proc/timer_stats,
-	  writing 0 stops it. This feature is useful to collect information
-	  about timer usage patterns in kernel and userspace. This feature
-	  is lightweight if enabled in the kernel config but not activated
-	  (it defaults to deactivated on bootup and will only be activated
-	  if some application like powertop activates it explicitly).
-
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
@@ -1180,6 +1179,18 @@ config LOCK_TORTURE_TEST
 	  Say M if you want these torture tests to build as a module.
 	  Say N if you are unsure.
 
+config WW_MUTEX_SELFTEST
+	tristate "Wait/wound mutex selftests"
+	help
+	  This option provides a kernel module that runs tests on the
+	  struct ww_mutex locking API.
+
+	  It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction
+	  with this test harness.
+
+	  Say M if you want these self tests to build as a module.
+	  Say N if you are unsure.
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
@@ -1450,6 +1461,7 @@ config RCU_CPU_STALL_TIMEOUT
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
+	default y if TREE_RCU
 	select TRACE_CLOCK
 	help
 	  This option provides tracing in RCU which presents stats
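
Note: the DEBUG_REFCOUNT help text above describes the saturating behaviour of refcount_t. Below is a minimal usage sketch, assuming the refcount_t primitives from <linux/refcount.h>; the struct foo, foo_get() and foo_put() names are hypothetical and only illustrate the pattern the checks are meant to guard.

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical refcounted object, for illustration only. */
struct foo {
	refcount_t ref;
};

static struct foo *foo_get(struct foo *f)
{
	/* Saturates near the maximum instead of wrapping back to zero. */
	refcount_inc(&f->ref);
	return f;
}

static void foo_put(struct foo *f)
{
	/*
	 * Never fires again once the counter has saturated, so an extra
	 * reference degrades into a leak rather than a use-after-free.
	 */
	if (refcount_dec_and_test(&f->ref))
		kfree(f);
}

With DEBUG_REFCOUNT enabled, dubious transitions (for example incrementing a counter that is already zero) would additionally emit WARNs, per the help text above.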
diff --git a/lib/Makefile b/lib/Makefile
index bc4073a8cd08..19ea76149a37 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -31,7 +31,7 @@ lib-$(CONFIG_HAS_DMA) += dma-noop.o
 lib-y += kobject.o klist.o
 obj-y += lockref.o
 
-obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 04c1ef717fe0..8c28cbd7e104 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -52,9 +52,18 @@ static int debug_objects_fixups __read_mostly;
 static int			debug_objects_warnings __read_mostly;
 static int			debug_objects_enabled __read_mostly
 				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-
+static int			debug_objects_pool_size __read_mostly
+				= ODEBUG_POOL_SIZE;
+static int			debug_objects_pool_min_level __read_mostly
+				= ODEBUG_POOL_MIN_LEVEL;
 static struct debug_obj_descr	*descr_test  __read_mostly;
 
+/*
+ * Track numbers of kmem_cache_alloc()/free() calls done.
+ */
+static int			debug_objects_allocated;
+static int			debug_objects_freed;
+
 static void free_obj_work(struct work_struct *work);
 static DECLARE_WORK(debug_obj_work, free_obj_work);
 
@@ -88,13 +97,13 @@ static void fill_pool(void)
 	struct debug_obj *new;
 	unsigned long flags;
 
-	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+	if (likely(obj_pool_free >= debug_objects_pool_min_level))
 		return;
 
 	if (unlikely(!obj_cache))
 		return;
 
-	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+	while (obj_pool_free < debug_objects_pool_min_level) {
 
 		new = kmem_cache_zalloc(obj_cache, gfp);
 		if (!new)
@@ -102,6 +111,7 @@ static void fill_pool(void)
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
+		debug_objects_allocated++;
 		obj_pool_free++;
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -162,24 +172,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 
 /*
  * workqueue function to free objects.
+ *
+ * To reduce contention on the global pool_lock, the actual freeing of
+ * debug objects will be delayed if the pool_lock is busy. We also free
+ * the objects in a batch of 4 for each lock/unlock cycle.
  */
+#define ODEBUG_FREE_BATCH	4
+
 static void free_obj_work(struct work_struct *work)
 {
-	struct debug_obj *obj;
+	struct debug_obj *objs[ODEBUG_FREE_BATCH];
 	unsigned long flags;
+	int i;
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
-	while (obj_pool_free > ODEBUG_POOL_SIZE) {
-		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
-		hlist_del(&obj->node);
-		obj_pool_free--;
+	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+		return;
+	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
+		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
+			objs[i] = hlist_entry(obj_pool.first,
+					      typeof(*objs[0]), node);
+			hlist_del(&objs[i]->node);
+		}
+
+		obj_pool_free -= ODEBUG_FREE_BATCH;
+		debug_objects_freed += ODEBUG_FREE_BATCH;
 		/*
 		 * We release pool_lock across kmem_cache_free() to
 		 * avoid contention on pool_lock.
 		 */
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
-		kmem_cache_free(obj_cache, obj);
-		raw_spin_lock_irqsave(&pool_lock, flags);
+		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
+			kmem_cache_free(obj_cache, objs[i]);
+		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+			return;
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
@@ -198,7 +223,7 @@ static void free_object(struct debug_obj *obj)
 	 * schedule work when the pool is filled and the cache is
 	 * initialized:
 	 */
-	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+	if (obj_pool_free > debug_objects_pool_size && obj_cache)
 		sched = 1;
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
@@ -758,6 +783,8 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;
 }
 
763 790
@@ -1116,4 +1143,11 @@ void __init debug_objects_mem_init(void)
 		pr_warn("out of memory.\n");
 	} else
 		debug_objects_selftest();
+
+	/*
+	 * Increase the thresholds for allocating and freeing objects
+	 * according to the number of possible CPUs available in the system.
+	 */
+	debug_objects_pool_size += num_possible_cpus() * 32;
+	debug_objects_pool_min_level += num_possible_cpus() * 4;
 }
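
Note: the debugobjects changes above replace the fixed ODEBUG_POOL_SIZE/ODEBUG_POOL_MIN_LEVEL limits with variables that are scaled by num_possible_cpus() at init time. A small stand-alone sketch of that arithmetic follows; the base values 1024 and 256 are assumed defaults taken as an example, so check them against your tree.

#include <stdio.h>

int main(void)
{
	const int base_size = 1024;	/* assumed ODEBUG_POOL_SIZE */
	const int base_min  = 256;	/* assumed ODEBUG_POOL_MIN_LEVEL */
	const int cpus[] = { 1, 4, 64 };

	for (int i = 0; i < 3; i++) {
		int n = cpus[i];
		printf("possible CPUs=%-3d pool_size=%-5d pool_min_level=%d\n",
		       n, base_size + n * 32, base_min + n * 4);
	}
	return 0;
}

With 64 possible CPUs and those assumed bases, for example, the work item only starts freeing once the pool exceeds 3072 objects and the pool is refilled whenever it drops below 512.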
diff --git a/lib/halfmd4.c b/lib/halfmd4.c
deleted file mode 100644
index 137e861d9690..000000000000
--- a/lib/halfmd4.c
+++ /dev/null
@@ -1,67 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/cryptohash.h>
-#include <linux/bitops.h>
-
-/* F, G and H are basic MD4 functions: selection, majority, parity */
-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
-#define H(x, y, z) ((x) ^ (y) ^ (z))
-
-/*
- * The generic round function. The application is so specific that
- * we don't bother protecting all the arguments with parens, as is generally
- * good macro practice, in favor of extra legibility.
- * Rotation is separate from addition to prevent recomputation
- */
-#define ROUND(f, a, b, c, d, x, s)	\
-	(a += f(b, c, d) + x, a = rol32(a, s))
-#define K1 0
-#define K2 013240474631UL
-#define K3 015666365641UL
-
-/*
- * Basic cut-down MD4 transform. Returns only 32 bits of result.
- */
-__u32 half_md4_transform(__u32 buf[4], __u32 const in[8])
-{
-	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
-
-	/* Round 1 */
-	ROUND(F, a, b, c, d, in[0] + K1, 3);
-	ROUND(F, d, a, b, c, in[1] + K1, 7);
-	ROUND(F, c, d, a, b, in[2] + K1, 11);
-	ROUND(F, b, c, d, a, in[3] + K1, 19);
-	ROUND(F, a, b, c, d, in[4] + K1, 3);
-	ROUND(F, d, a, b, c, in[5] + K1, 7);
-	ROUND(F, c, d, a, b, in[6] + K1, 11);
-	ROUND(F, b, c, d, a, in[7] + K1, 19);
-
-	/* Round 2 */
-	ROUND(G, a, b, c, d, in[1] + K2, 3);
-	ROUND(G, d, a, b, c, in[3] + K2, 5);
-	ROUND(G, c, d, a, b, in[5] + K2, 9);
-	ROUND(G, b, c, d, a, in[7] + K2, 13);
-	ROUND(G, a, b, c, d, in[0] + K2, 3);
-	ROUND(G, d, a, b, c, in[2] + K2, 5);
-	ROUND(G, c, d, a, b, in[4] + K2, 9);
-	ROUND(G, b, c, d, a, in[6] + K2, 13);
-
-	/* Round 3 */
-	ROUND(H, a, b, c, d, in[3] + K3, 3);
-	ROUND(H, d, a, b, c, in[7] + K3, 9);
-	ROUND(H, c, d, a, b, in[2] + K3, 11);
-	ROUND(H, b, c, d, a, in[6] + K3, 15);
-	ROUND(H, a, b, c, d, in[1] + K3, 3);
-	ROUND(H, d, a, b, c, in[5] + K3, 9);
-	ROUND(H, c, d, a, b, in[0] + K3, 11);
-	ROUND(H, b, c, d, a, in[4] + K3, 15);
-
-	buf[0] += a;
-	buf[1] += b;
-	buf[2] += c;
-	buf[3] += d;
-
-	return buf[1]; /* "most hashed" word */
-}
-EXPORT_SYMBOL(half_md4_transform);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2cecf05c82fd..55e11c4b2f3b 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -17,6 +17,7 @@
 
 #include <linux/random.h>
 #include <linux/sbitmap.h>
+#include <linux/seq_file.h>
 
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		      gfp_t flags, int node)
@@ -180,6 +181,62 @@ unsigned int sbitmap_weight(const struct sbitmap *sb)
 }
 EXPORT_SYMBOL_GPL(sbitmap_weight);
 
+void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
+{
+	seq_printf(m, "depth=%u\n", sb->depth);
+	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
+	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
+	seq_printf(m, "map_nr=%u\n", sb->map_nr);
+}
+EXPORT_SYMBOL_GPL(sbitmap_show);
+
+static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
+{
+	if ((offset & 0xf) == 0) {
+		if (offset != 0)
+			seq_putc(m, '\n');
+		seq_printf(m, "%08x:", offset);
+	}
+	if ((offset & 0x1) == 0)
+		seq_putc(m, ' ');
+	seq_printf(m, "%02x", byte);
+}
+
+void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
+{
+	u8 byte = 0;
+	unsigned int byte_bits = 0;
+	unsigned int offset = 0;
+	int i;
+
+	for (i = 0; i < sb->map_nr; i++) {
+		unsigned long word = READ_ONCE(sb->map[i].word);
+		unsigned int word_bits = READ_ONCE(sb->map[i].depth);
+
+		while (word_bits > 0) {
+			unsigned int bits = min(8 - byte_bits, word_bits);
+
+			byte |= (word & (BIT(bits) - 1)) << byte_bits;
+			byte_bits += bits;
+			if (byte_bits == 8) {
+				emit_byte(m, offset, byte);
+				byte = 0;
+				byte_bits = 0;
+				offset++;
+			}
+			word >>= bits;
+			word_bits -= bits;
+		}
+	}
+	if (byte_bits) {
+		emit_byte(m, offset, byte);
+		offset++;
+	}
+	if (offset)
+		seq_putc(m, '\n');
+}
+EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
+
 static unsigned int sbq_calc_wake_batch(unsigned int depth)
 {
 	unsigned int wake_batch;
@@ -239,7 +296,19 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
 
 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
 {
-	sbq->wake_batch = sbq_calc_wake_batch(depth);
+	unsigned int wake_batch = sbq_calc_wake_batch(depth);
+	int i;
+
+	if (sbq->wake_batch != wake_batch) {
+		WRITE_ONCE(sbq->wake_batch, wake_batch);
+		/*
+		 * Pairs with the memory barrier in sbq_wake_up() to ensure that
+		 * the batch size is updated before the wait counts.
+		 */
+		smp_mb__before_atomic();
+		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
+			atomic_set(&sbq->ws[i].wait_cnt, 1);
+	}
 	sbitmap_resize(&sbq->sb, depth);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
@@ -297,20 +366,39 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 static void sbq_wake_up(struct sbitmap_queue *sbq)
 {
 	struct sbq_wait_state *ws;
+	unsigned int wake_batch;
 	int wait_cnt;
 
-	/* Ensure that the wait list checks occur after clear_bit(). */
-	smp_mb();
+	/*
+	 * Pairs with the memory barrier in set_current_state() to ensure the
+	 * proper ordering of clear_bit()/waitqueue_active() in the waker and
+	 * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See
+	 * the comment on waitqueue_active(). This is __after_atomic because we
+	 * just did clear_bit() in the caller.
+	 */
+	smp_mb__after_atomic();
 
 	ws = sbq_wake_ptr(sbq);
 	if (!ws)
 		return;
 
 	wait_cnt = atomic_dec_return(&ws->wait_cnt);
-	if (unlikely(wait_cnt < 0))
-		wait_cnt = atomic_inc_return(&ws->wait_cnt);
-	if (wait_cnt == 0) {
-		atomic_add(sbq->wake_batch, &ws->wait_cnt);
+	if (wait_cnt <= 0) {
+		wake_batch = READ_ONCE(sbq->wake_batch);
+		/*
+		 * Pairs with the memory barrier in sbitmap_queue_resize() to
+		 * ensure that we see the batch size update before the wait
+		 * count is reset.
+		 */
+		smp_mb__before_atomic();
+		/*
+		 * If there are concurrent callers to sbq_wake_up(), the last
+		 * one to decrement the wait count below zero will bump it back
+		 * up. If there is a concurrent resize, the count reset will
+		 * either cause the cmpxchg to fail or overwrite after the
+		 * cmpxchg.
+		 */
+		atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
 		sbq_index_atomic_inc(&sbq->wake_index);
 		wake_up(&ws->wait);
 	}
@@ -331,7 +419,8 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
 	int i, wake_index;
 
 	/*
-	 * Make sure all changes prior to this are visible from other CPUs.
+	 * Pairs with the memory barrier in set_current_state() like in
+	 * sbq_wake_up().
 	 */
 	smp_mb();
 	wake_index = atomic_read(&sbq->wake_index);
@@ -345,3 +434,37 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
 	}
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
+
+void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
+{
+	bool first;
+	int i;
+
+	sbitmap_show(&sbq->sb, m);
+
+	seq_puts(m, "alloc_hint={");
+	first = true;
+	for_each_possible_cpu(i) {
+		if (!first)
+			seq_puts(m, ", ");
+		first = false;
+		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
+	}
+	seq_puts(m, "}\n");
+
+	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
+	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+
+	seq_puts(m, "ws={\n");
+	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+		struct sbq_wait_state *ws = &sbq->ws[i];
+
+		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
+			   atomic_read(&ws->wait_cnt),
+			   waitqueue_active(&ws->wait) ? "active" : "inactive");
+	}
+	seq_puts(m, "}\n");
+
+	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_show);
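
Note: sbitmap_show(), sbitmap_bitmap_show() and sbitmap_queue_show() added above only format state into a struct seq_file; a caller still has to create the file that feeds them. A minimal sketch of how a consumer might wire sbitmap_queue_show() into debugfs, assuming the standard seq_file single_open() helpers; the my_sbq variable and the sbq_debugfs_* names are hypothetical.

#include <linux/debugfs.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static struct sbitmap_queue my_sbq;	/* hypothetical, initialised elsewhere */

static int sbq_debugfs_show(struct seq_file *m, void *data)
{
	/* Dump depth, busy count, per-CPU alloc hints, wake batch, etc. */
	sbitmap_queue_show(&my_sbq, m);
	return 0;
}

static int sbq_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sbq_debugfs_show, inode->i_private);
}

static const struct file_operations sbq_debugfs_fops = {
	.open		= sbq_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

A call such as debugfs_create_file("sbq", 0444, NULL, NULL, &sbq_debugfs_fops) would then expose the formatted output; the file name and location are illustrative only.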
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index adc6ee0a5126..4a720ed4fdaf 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -80,8 +80,7 @@ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 	if (head->next == node) {
 		struct rb_node *rbn = rb_next(&node->node);
 
-		head->next = rbn ?
-			rb_entry(rbn, struct timerqueue_node, node) : NULL;
+		head->next = rb_entry_safe(rbn, struct timerqueue_node, node);
 	}
 	rb_erase(&node->node, &head->head);
 	RB_CLEAR_NODE(&node->node);
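
Note: the timerqueue_del() change relies on rb_entry_safe() from <linux/rbtree.h>, which folds the removed open-coded NULL check into the container_of conversion. A rough equivalent is sketched below only to make the semantics explicit; the macro name here is hypothetical and the kernel's own definition in rbtree.h is authoritative.

/* Roughly what rb_entry_safe() does: NULL in, NULL out; otherwise rb_entry(). */
#define my_rb_entry_safe(ptr, type, member)				\
	({ typeof(ptr) ____ptr = (ptr);					\
	   ____ptr ? rb_entry(____ptr, type, member) : NULL;		\
	})

So head->next becomes NULL when the deleted node has no successor, exactly as the removed conditional did before.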