Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  21
-rw-r--r--  lib/Makefile         |   5
-rw-r--r--  lib/atomic64.c       |   4
-rw-r--r--  lib/atomic64_test.c  | 164
-rw-r--r--  lib/btree.c          |   3
-rw-r--r--  lib/debugobjects.c   |  63
-rw-r--r--  lib/hweight.c        |  19
-rw-r--r--  lib/rbtree.c         |  48
-rw-r--r--  lib/rwsem.c          |   5
9 files changed, 308 insertions(+), 24 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 935248bdbc47..d85be90d5888 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -512,6 +512,18 @@ config PROVE_RCU
 
 	  Say N if you are unsure.
 
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	  By itself, PROVE_RCU will disable checking upon issuing the
+	  first warning (or "splat").  This feature prevents such
+	  disabling, allowing multiple RCU-lockdep warnings to be printed
+	  on a single reboot.
+
+	  Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -793,7 +805,7 @@ config RCU_CPU_STALL_DETECTOR
 config RCU_CPU_STALL_VERBOSE
 	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
 	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
-	default n
+	default y
 	help
 	  This option causes RCU to printk detailed per-task information
 	  for any tasks that are stalling the current RCU grace period.
@@ -1086,6 +1098,13 @@ config DMA_API_DEBUG
 	  This option causes a performance degredation.  Use only if you want
 	  to debug device drivers. If unsure, say N.
 
+config ATOMIC64_SELFTEST
+	bool "Perform an atomic64_t self-test at boot"
+	help
+	  Enable this option to test the atomic64_t functions at boot.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
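The new ATOMIC64_SELFTEST option follows the usual pattern for Kconfig-gated boot-time self-tests: the symbol only controls whether an extra object file is built (see the Makefile hunk below), and that file registers itself as an initcall. A minimal sketch of the pattern, with hypothetical names and not part of this patch:

/* Hypothetical sketch, not part of this patch: a self-test object that is
 * only compiled when its Kconfig symbol is set, so the code itself needs
 * no #ifdef.  BUG_ON() halts the boot if an invariant fails. */
#include <linux/init.h>
#include <linux/kernel.h>

static __init int foo_selftest(void)
{
	BUG_ON(1 + 1 != 2);		/* the real checks go here */
	printk(KERN_INFO "foo selftest passed\n");
	return 0;
}
core_initcall(foo_selftest);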
diff --git a/lib/Makefile b/lib/Makefile
index 0d4015205c64..9e6d3c29d73a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -39,7 +39,10 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+
+CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
@@ -101,6 +104,8 @@ obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
 
+obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 8bee16ec7524..a21c12bc727c 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -162,12 +162,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	spinlock_t *lock = lock_addr(v);
-	int ret = 1;
+	int ret = 0;
 
 	spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
-		ret = 0;
+		ret = 1;
 	}
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
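The atomic64.c change fixes an inverted return value: atomic64_add_unless() is supposed to return nonzero when it performed the addition and zero when the counter already equalled u, matching the 32-bit atomic_add_unless(). A user-space model of the corrected semantics, illustrative only and without any locking or atomicity:

/* User-space model of the corrected atomic64_add_unless() semantics. */
#include <assert.h>

struct atomic64_model { long long counter; };

static int add_unless_model(struct atomic64_model *v, long long a, long long u)
{
	int ret = 0;

	if (v->counter != u) {
		v->counter += a;
		ret = 1;		/* the add happened */
	}
	return ret;			/* 0: counter already equalled u */
}

int main(void)
{
	struct atomic64_model v = { .counter = 5 };

	assert(add_unless_model(&v, 1, 7) && v.counter == 6);	/* added */
	assert(!add_unless_model(&v, 1, 6) && v.counter == 6);	/* skipped */
	return 0;
}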
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
new file mode 100644
index 000000000000..65e482caf5e9
--- /dev/null
+++ b/lib/atomic64_test.c
@@ -0,0 +1,164 @@
+/*
+ * Testsuite for atomic64_t functions
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
+static __init int test_atomic64(void)
+{
+	long long v0 = 0xaaa31337c001d00dLL;
+	long long v1 = 0xdeadbeefdeafcafeLL;
+	long long v2 = 0xfaceabadf00df001LL;
+	long long onestwos = 0x1111111122222222LL;
+	long long one = 1LL;
+
+	atomic64_t v = ATOMIC64_INIT(v0);
+	long long r = v0;
+	BUG_ON(v.counter != r);
+
+	atomic64_set(&v, v1);
+	r = v1;
+	BUG_ON(v.counter != r);
+	BUG_ON(atomic64_read(&v) != r);
+
+	INIT(v0);
+	atomic64_add(onestwos, &v);
+	r += onestwos;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_add(-one, &v);
+	r += -one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += onestwos;
+	BUG_ON(atomic64_add_return(onestwos, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += -one;
+	BUG_ON(atomic64_add_return(-one, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_sub(onestwos, &v);
+	r -= onestwos;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_sub(-one, &v);
+	r -= -one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= onestwos;
+	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= -one;
+	BUG_ON(atomic64_sub_return(-one, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_inc(&v);
+	r += one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += one;
+	BUG_ON(atomic64_inc_return(&v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_dec(&v);
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= one;
+	BUG_ON(atomic64_dec_return(&v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_xchg(&v, v1) != v0);
+	r = v1;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
+	r = v1;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_add_unless(&v, one, v0));
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(!atomic64_add_unless(&v, one, v1));
+	r += one;
+	BUG_ON(v.counter != r);
+
+#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(_ASM_GENERIC_ATOMIC64_H)
+	INIT(onestwos);
+	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(0);
+	BUG_ON(atomic64_dec_if_positive(&v) != -one);
+	BUG_ON(v.counter != r);
+
+	INIT(-one);
+	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
+	BUG_ON(v.counter != r);
+#else
+#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
+#endif
+
+	INIT(onestwos);
+	BUG_ON(!atomic64_inc_not_zero(&v));
+	r += one;
+	BUG_ON(v.counter != r);
+
+	INIT(0);
+	BUG_ON(atomic64_inc_not_zero(&v));
+	BUG_ON(v.counter != r);
+
+	INIT(-one);
+	BUG_ON(!atomic64_inc_not_zero(&v));
+	r += one;
+	BUG_ON(v.counter != r);
+
+#ifdef CONFIG_X86
+	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+#ifdef CONFIG_X86_64
+	       "x86-64",
+#elif defined(CONFIG_X86_CMPXCHG64)
+	       "i586+",
+#else
+	       "i386+",
+#endif
+	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
+	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
+#else
+	printk(KERN_INFO "atomic64 test passed\n");
+#endif
+
+	return 0;
+}
+
+core_initcall(test_atomic64);
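With CONFIG_ATOMIC64_SELFTEST=y the test runs from core_initcall() during boot; every operation is checked against a plain long long reference value r, and any mismatch triggers BUG_ON(). Of the operations covered above, atomic64_dec_if_positive() has the least obvious contract; a user-space model of the behaviour the test expects, illustrative only and not atomic:

/* atomic64_dec_if_positive(): return the decremented value, but only
 * store it when the result is still >= 0. */
#include <assert.h>

static long long dec_if_positive_model(long long *v)
{
	long long res = *v - 1;

	if (res >= 0)
		*v = res;
	return res;
}

int main(void)
{
	long long v = 2;

	assert(dec_if_positive_model(&v) == 1 && v == 1);	/* decremented */
	v = 0;
	assert(dec_if_positive_model(&v) == -1 && v == 0);	/* unchanged */
	v = -1;
	assert(dec_if_positive_model(&v) == -2 && v == -1);	/* unchanged */
	return 0;
}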
diff --git a/lib/btree.c b/lib/btree.c
index 41859a820218..c9c6f0351526 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -95,7 +95,8 @@ static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
 	unsigned long *node;
 
 	node = mempool_alloc(head->mempool, gfp);
-	memset(node, 0, NODESIZE);
+	if (likely(node))
+		memset(node, 0, NODESIZE);
 	return node;
 }
 
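The btree.c change simply guards the memset(): mempool_alloc() can return NULL (for example with a non-sleeping gfp mask when the pool is exhausted and the backing allocator fails), so the node must not be touched unconditionally. The same defensive shape in a stand-alone user-space sketch, with malloc() standing in for mempool_alloc():

#include <stdlib.h>
#include <string.h>

#define NODESIZE 128	/* stand-in for the real btree node size */

static unsigned long *node_alloc(void)
{
	unsigned long *node = malloc(NODESIZE);

	if (node)			/* only touch memory that exists */
		memset(node, 0, NODESIZE);
	return node;			/* caller must handle NULL */
}

int main(void)
{
	unsigned long *n = node_alloc();

	free(n);			/* free(NULL) is a no-op */
	return 0;
}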
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index b862b30369ff..deebcc57d4e6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -141,6 +141,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 	obj->object = addr;
 	obj->descr  = descr;
 	obj->state  = ODEBUG_STATE_NONE;
+	obj->astate = 0;
 	hlist_del(&obj->node);
 
 	hlist_add_head(&obj->node, &b->list);
@@ -252,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
 
 	if (limit < 5 && obj->descr != descr_test) {
 		limit++;
-		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
-		       obj_states[obj->state], obj->descr->name);
+		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
+		       "object type: %s\n",
+			msg, obj_states[obj->state], obj->astate,
+			obj->descr->name);
 	}
 	debug_objects_warnings++;
 }
@@ -447,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 	case ODEBUG_STATE_INIT:
 	case ODEBUG_STATE_INACTIVE:
 	case ODEBUG_STATE_ACTIVE:
-		obj->state = ODEBUG_STATE_INACTIVE;
+		if (!obj->astate)
+			obj->state = ODEBUG_STATE_INACTIVE;
+		else
+			debug_print_object(obj, "deactivate");
 		break;
 
 	case ODEBUG_STATE_DESTROYED:
@@ -553,6 +559,53 @@ out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
+/**
+ * debug_object_active_state - debug checks object usage state machine
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ * @expect:	expected state
+ * @next:	state to move to if expected state is found
+ */
+void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_ACTIVE:
+			if (obj->astate == expect)
+				obj->astate = next;
+			else
+				debug_print_object(obj, "active_state");
+			break;
+
+		default:
+			debug_print_object(obj, "active_state");
+			break;
+		}
+	} else {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		debug_print_object(&o, "active_state");
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
@@ -774,7 +827,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state)
 	}
 }
 
-static int
+static int __init
 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
 {
 	struct debug_bucket *db;
@@ -917,7 +970,7 @@ void __init debug_objects_early_init(void)
 /*
  * Convert the statically allocated objects to dynamic ones:
  */
-static int debug_objects_replace_static_objects(void)
+static int __init debug_objects_replace_static_objects(void)
 {
 	struct debug_bucket *db = obj_hash;
 	struct hlist_node *node, *tmp;
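debugobjects gains a per-object "astate" (active state) that a subsystem can drive through debug_object_active_state() while the object is ODEBUG_STATE_ACTIVE; debug_object_deactivate() now warns if the object is still in a nonzero active state. A hedged sketch of how a caller might use it, assuming the matching declaration lands in <linux/debugobjects.h>; the object, states and descriptor below are hypothetical and not part of this patch:

#include <linux/debugobjects.h>

enum widget_astate {
	WIDGET_IDLE,	/* astate 0: the default after debug_object_activate() */
	WIDGET_QUEUED,	/* astate 1: request handed to hardware */
};

static struct debug_obj_descr widget_debug_descr = {
	.name = "widget",
};

struct widget { int dummy; };

static void widget_queue(struct widget *w)
{
	/* Checked transition IDLE -> QUEUED: if the stored astate is not
	 * WIDGET_IDLE, or the object is not active at all, debugobjects
	 * prints an "active_state" warning instead of silently updating. */
	debug_object_active_state(w, &widget_debug_descr,
				  WIDGET_IDLE, WIDGET_QUEUED);
}

Such a user must move the object back to astate 0 before deactivating it, or the new check in debug_object_deactivate() fires.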
diff --git a/lib/hweight.c b/lib/hweight.c
index 63ee4eb1228d..3c79d50814cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,7 +9,7 @@
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 
-unsigned int hweight32(unsigned int w)
+unsigned int __sw_hweight32(unsigned int w)
 {
 #ifdef ARCH_HAS_FAST_MULTIPLIER
 	w -= (w >> 1) & 0x55555555;
@@ -24,29 +24,30 @@ unsigned int hweight32(unsigned int w)
 	return (res + (res >> 16)) & 0x000000FF;
 #endif
 }
-EXPORT_SYMBOL(hweight32);
+EXPORT_SYMBOL(__sw_hweight32);
 
-unsigned int hweight16(unsigned int w)
+unsigned int __sw_hweight16(unsigned int w)
 {
 	unsigned int res = w - ((w >> 1) & 0x5555);
 	res = (res & 0x3333) + ((res >> 2) & 0x3333);
 	res = (res + (res >> 4)) & 0x0F0F;
 	return (res + (res >> 8)) & 0x00FF;
 }
-EXPORT_SYMBOL(hweight16);
+EXPORT_SYMBOL(__sw_hweight16);
 
-unsigned int hweight8(unsigned int w)
+unsigned int __sw_hweight8(unsigned int w)
 {
 	unsigned int res = w - ((w >> 1) & 0x55);
 	res = (res & 0x33) + ((res >> 2) & 0x33);
 	return (res + (res >> 4)) & 0x0F;
 }
-EXPORT_SYMBOL(hweight8);
+EXPORT_SYMBOL(__sw_hweight8);
 
-unsigned long hweight64(__u64 w)
+unsigned long __sw_hweight64(__u64 w)
 {
 #if BITS_PER_LONG == 32
-	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+	return __sw_hweight32((unsigned int)(w >> 32)) +
+	       __sw_hweight32((unsigned int)w);
 #elif BITS_PER_LONG == 64
 #ifdef ARCH_HAS_FAST_MULTIPLIER
 	w -= (w >> 1) & 0x5555555555555555ul;
@@ -63,4 +64,4 @@ unsigned long hweight64(__u64 w)
 #endif
 #endif
 }
-EXPORT_SYMBOL(hweight64);
+EXPORT_SYMBOL(__sw_hweight64);
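The hweight.c change is a pure rename: the generic C population-count routines become __sw_hweight*() so that an architecture can point hweight*() at a faster implementation (x86's popcnt alternative in this series, built with the ARCH_HWEIGHT_CFLAGS seen in the Makefile hunk) and keep these as the software fallback. The bit-twiddling itself is unchanged; a stand-alone user-space check of the 32-bit variant against a naive loop, for illustration:

#include <assert.h>
#include <stdint.h>

/* SWAR population count, same steps as the kernel's non-fast-multiplier
 * __sw_hweight32(): 2-bit sums, then 4-bit, 8-bit, and a final fold. */
static unsigned int sw_hweight32(uint32_t w)
{
	uint32_t res = w - ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

static unsigned int naive(uint32_t w)
{
	unsigned int n = 0;

	while (w) {
		n += w & 1;
		w >>= 1;
	}
	return n;
}

int main(void)
{
	uint32_t samples[] = { 0, 1, 0xFFFFFFFF, 0xdeadbeef, 0x80000001 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(sw_hweight32(samples[i]) == naive(samples[i]));
	return 0;
}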
diff --git a/lib/rbtree.c b/lib/rbtree.c
index e2aa3be29858..15e10b1afdd2 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -44,6 +44,11 @@ static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
 	else
 		root->rb_node = right;
 	rb_set_parent(node, right);
+
+	if (root->augment_cb) {
+		root->augment_cb(node);
+		root->augment_cb(right);
+	}
 }
 
 static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
@@ -67,12 +72,20 @@ static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
 	else
 		root->rb_node = left;
 	rb_set_parent(node, left);
+
+	if (root->augment_cb) {
+		root->augment_cb(node);
+		root->augment_cb(left);
+	}
 }
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
 	struct rb_node *parent, *gparent;
 
+	if (root->augment_cb)
+		root->augment_cb(node);
+
 	while ((parent = rb_parent(node)) && rb_is_red(parent))
 	{
 		gparent = rb_parent(parent);
@@ -227,12 +240,15 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 	else
 	{
 		struct rb_node *old = node, *left;
+		int old_parent_cb = 0;
+		int successor_parent_cb = 0;
 
 		node = node->rb_right;
 		while ((left = node->rb_left) != NULL)
 			node = left;
 
 		if (rb_parent(old)) {
+			old_parent_cb = 1;
 			if (rb_parent(old)->rb_left == old)
 				rb_parent(old)->rb_left = node;
 			else
@@ -247,8 +263,10 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		if (parent == old) {
 			parent = node;
 		} else {
+			successor_parent_cb = 1;
 			if (child)
 				rb_set_parent(child, parent);
+
 			parent->rb_left = child;
 
 			node->rb_right = old->rb_right;
@@ -259,6 +277,24 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		node->rb_left = old->rb_left;
 		rb_set_parent(old->rb_left, node);
 
+		if (root->augment_cb) {
+			/*
+			 * Here, three different nodes can have new children.
+			 * The parent of the successor node that was selected
+			 * to replace the node to be erased.
+			 * The node that is getting erased and is now replaced
+			 * by its successor.
+			 * The parent of the node getting erased-replaced.
+			 */
+			if (successor_parent_cb)
+				root->augment_cb(parent);
+
+			root->augment_cb(node);
+
+			if (old_parent_cb)
+				root->augment_cb(rb_parent(old));
+		}
+
 		goto color;
 	}
 
@@ -267,15 +303,19 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 
 	if (child)
 		rb_set_parent(child, parent);
-	if (parent)
-	{
+
+	if (parent) {
 		if (parent->rb_left == node)
 			parent->rb_left = child;
 		else
 			parent->rb_right = child;
-	}
-	else
+
+		if (root->augment_cb)
+			root->augment_cb(parent);
+
+	} else {
 		root->rb_node = child;
+	}
 
  color:
 	if (color == RB_BLACK)
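The rbtree changes call an optional per-root callback, root->augment_cb, on every node whose set of children may have changed (insertion, rotations, and the various erase cases). That is what lets a user maintain per-node metadata describing a subtree. A hedged sketch of such a callback; the struct, field names, and the designated-initializer setup are illustrative, not part of this patch:

#include <linux/rbtree.h>

struct range_node {
	struct rb_node rb;
	unsigned long start, end;
	unsigned long max_end;		/* largest 'end' in this subtree */
};

/* Recompute max_end for the node whose children changed, then walk up,
 * since every ancestor's subtree contents changed as well.  (A real
 * implementation could stop early once the stored value no longer
 * changes.) */
static void range_augment_cb(struct rb_node *node)
{
	while (node) {
		struct range_node *rn = rb_entry(node, struct range_node, rb);
		unsigned long max = rn->end;

		if (node->rb_left) {
			struct range_node *l = rb_entry(node->rb_left,
							struct range_node, rb);
			if (l->max_end > max)
				max = l->max_end;
		}
		if (node->rb_right) {
			struct range_node *r = rb_entry(node->rb_right,
							struct range_node, rb);
			if (r->max_end > max)
				max = r->max_end;
		}
		rn->max_end = max;
		node = rb_parent(node);
	}
}

/* Install the callback once on the root; rb_insert_color() and rb_erase()
 * then invoke it automatically on the affected nodes. */
static struct rb_root range_root = {
	.rb_node    = NULL,
	.augment_cb = range_augment_cb,
};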
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 3e3365e5665e..ceba8e28807a 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
  out:
 	return sem;
 
-	/* undo the change to count, but check for a transition 1->0 */
+	/* undo the change to the active count, but check for a transition
+	 * 1->0 */
  undo:
-	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
+	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
 		goto out;
 	goto try_again;
 }
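The rwsem fix matters because the count word packs two things: an active count in the low bits and a bias contributed by sleeping waiters in the high bits. After undoing the speculatively granted active bias, the wakeup path should retry only when the active part has dropped to zero; testing the whole word against zero is wrong as soon as any waiter is queued. A small user-space illustration using the customary 32-bit bias values (quoted for illustration, not taken from this patch):

#include <assert.h>
#include <stdint.h>

#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)

int main(void)
{
	/* One sleeping waiter, plus the active bias we granted
	 * speculatively and now have to take back. */
	int32_t count = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS;

	count -= RWSEM_ACTIVE_BIAS;		/* the undo */

	/* Nobody actually holds the lock any more ... */
	assert((count & RWSEM_ACTIVE_MASK) == 0);
	/* ... yet the raw count is nonzero because of the waiter bias,
	 * which is why the old "!= 0" check bailed out too early. */
	assert(count != 0);
	return 0;
}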