author     David S. Miller <davem@davemloft.net>   2010-02-28 22:23:06 -0500
committer  David S. Miller <davem@davemloft.net>   2010-02-28 22:23:06 -0500
commit     47871889c601d8199c51a4086f77eebd77c29b0b (patch)
tree       40cdcac3bff0ee40cc33dcca61d0577cdf965f77 /lib
parent     c16cc0b464b8876cfd57ce1c1dbcb6f9a6a0bce3 (diff)
parent     30ff056c42c665b9ea535d8515890857ae382540 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts:
drivers/firmware/iscsi_ibft.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug   28
-rw-r--r--  lib/debug_locks.c    1
-rw-r--r--  lib/hweight.c        7
-rw-r--r--  lib/idr.c           12
-rw-r--r--  lib/lmb.c           13
-rw-r--r--  lib/radix-tree.c    24
6 files changed, 64 insertions, 21 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 25c3ed594c54..5e3407d997b2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -355,7 +355,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390)
+		(X86 || ARM || PPC || S390 || SUPERH)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -499,6 +499,18 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/lockdep-design.txt.
 
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	 This feature enables lockdep extensions that check for correct
+	 use of RCU APIs. This is currently under development. Say Y
+	 if you want to debug RCU usage or help work on the PROVE_RCU
+	 feature.
+
+	 Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -765,10 +777,22 @@ config RCU_CPU_STALL_DETECTOR
 	 CPUs are delaying the current grace period, but only when
 	 the grace period extends for excessive time periods.
 
-	 Say Y if you want RCU to perform such checks.
+	 Say N if you want to disable such checks.
+
+	 Say Y if you are unsure.
+
+config RCU_CPU_STALL_VERBOSE
+	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
+	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
+	default n
+	help
+	 This option causes RCU to printk detailed per-task information
+	 for any tasks that are stalling the current RCU grace period.
 
 	 Say N if you are unsure.
 
+	 Say Y if you want to enable such checks.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
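The two new Kconfig entries above hook RCU into the existing lockdep machinery: with PROVE_RCU enabled, RCU accessors can verify at run time that the caller really is in an RCU read-side critical section or holds a lock it has declared, and RCU_CPU_STALL_VERBOSE adds per-task detail to the stall detector. Below is a minimal sketch of the kind of annotation PROVE_RCU checks; the foo/gp/gp_lock names are hypothetical examples, not code from this merge.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct foo { int data; };		/* hypothetical example structure */

static DEFINE_SPINLOCK(gp_lock);	/* protects updates of gp */
static struct foo *gp;			/* RCU-protected pointer  */

static struct foo *get_gp(void)
{
	/*
	 * With CONFIG_PROVE_RCU, lockdep warns unless one of the stated
	 * conditions holds at this dereference: an RCU read-side critical
	 * section, or gp_lock held by the caller.
	 */
	return rcu_dereference_check(gp,
				     rcu_read_lock_held() ||
				     lockdep_is_held(&gp_lock));
}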
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9c..5bf0020b9248 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -23,6 +23,7 @@
  * shut up after that.
  */
 int debug_locks = 1;
+EXPORT_SYMBOL_GPL(debug_locks);
 
 /*
  * The locking-testsuite uses <debug_locks_silent> to get a
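Exporting debug_locks makes the flag visible to GPL modules; debug checks that consult it and end up inlined into modular code could not otherwise link, since references to unexported symbols fail at module load time. A hypothetical module illustrating the dependency:

#include <linux/module.h>
#include <linux/debug_locks.h>

/* Hypothetical demo module; it only shows that the symbol now resolves. */
static int __init debug_locks_demo_init(void)
{
	pr_info("lock debugging currently %s\n",
		debug_locks ? "enabled" : "disabled");
	return 0;
}

static void __exit debug_locks_demo_exit(void)
{
}

module_init(debug_locks_demo_init);
module_exit(debug_locks_demo_exit);
MODULE_LICENSE("GPL");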
diff --git a/lib/hweight.c b/lib/hweight.c
index 389424ecb129..63ee4eb1228d 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,11 +11,18 @@
 
 unsigned int hweight32(unsigned int w)
 {
+#ifdef ARCH_HAS_FAST_MULTIPLIER
+	w -= (w >> 1) & 0x55555555;
+	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
+	w = (w + (w >> 4)) & 0x0f0f0f0f;
+	return (w * 0x01010101) >> 24;
+#else
 	unsigned int res = w - ((w >> 1) & 0x55555555);
 	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
 	res = (res + (res >> 4)) & 0x0F0F0F0F;
 	res = res + (res >> 8);
 	return (res + (res >> 16)) & 0x000000FF;
+#endif
 }
 EXPORT_SYMBOL(hweight32);
 
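The ARCH_HAS_FAST_MULTIPLIER path added above relies on the usual parallel bit-counting steps leaving each byte of w holding the population count of that byte; multiplying by 0x01010101 then sums the four byte counts into the most significant byte, so shifting right by 24 yields the total and two shift/add steps are saved on CPUs with a fast multiplier. Restated as ordinary userspace C for illustration:

#include <assert.h>
#include <stdint.h>

/* The fast-multiplier variant of hweight32, outside the kernel. */
static unsigned int popcount32(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;				/* 2-bit sums        */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);		/* 4-bit sums        */
	w = (w + (w >> 4)) & 0x0f0f0f0f;			/* one count per byte */
	return (w * 0x01010101) >> 24;				/* add bytes, take top */
}

int main(void)
{
	assert(popcount32(0x00000000) == 0);
	assert(popcount32(0xffffffff) == 32);
	assert(popcount32(0x80000001) == 2);
	return 0;
}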
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
 			/* if already at the top layer, we need to grow */
-			if (!(p = pa[l])) {
+			if (id >= 1 << (idp->layers * IDR_BITS)) {
 				*starting_id = id;
 				return IDR_NEED_TO_GROW;
 			}
+			p = pa[l];
+			BUG_ON(!p);
 
 			/* If we need to go up one layer, continue the
 			 * loop; otherwise, restart from the top.
@@ -502,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	p = rcu_dereference(idp->top);
+	p = rcu_dereference_raw(idp->top);
 	if (!p)
 		return NULL;
 	n = (p->layer+1) * IDR_BITS;
@@ -517,7 +519,7 @@ void *idr_find(struct idr *idp, int id)
 	while (n > 0 && p) {
 		n -= IDR_BITS;
 		BUG_ON(n != p->layer*IDR_BITS);
-		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
 }
@@ -550,7 +552,7 @@ int idr_for_each(struct idr *idp,
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
-	p = rcu_dereference(idp->top);
+	p = rcu_dereference_raw(idp->top);
 	max = 1 << n;
 
 	id = 0;
@@ -558,7 +560,7 @@ int idr_for_each(struct idr *idp,
 	while (n > 0 && p) {
 		n -= IDR_BITS;
 		*paa++ = p;
-		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 	}
 
 	if (p) {
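Two distinct changes show up in idr.c: sub_alloc() now decides whether the tree must grow by comparing the candidate id against the current capacity (1 << (idp->layers * IDR_BITS)) rather than by testing whether pa[l] happens to be NULL, and the lookup paths switch from rcu_dereference() to the new rcu_dereference_raw(). The raw variant is used because an idr may legitimately be protected by the caller's own lock instead of rcu_read_lock(), so the lockdep check implied by PROVE_RCU would otherwise raise false positives. A rough restatement of how the two accessors relate, using illustrative my_* names rather than the kernel's exact definitions:

#include <linux/rcupdate.h>

/*
 * Illustrative only (the real macros live in rcupdate.h): the checked
 * variant asserts an RCU read-side critical section via lockdep, while
 * the raw variant keeps the memory-ordering semantics but passes an
 * always-true condition, so lockdep never complains.
 */
#define my_rcu_dereference(p) \
	rcu_dereference_check((p), rcu_read_lock_held())
#define my_rcu_dereference_raw(p) \
	rcu_dereference_check((p), 1)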
diff --git a/lib/lmb.c b/lib/lmb.c
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size)
 
 }
 
-long lmb_remove(u64 base, u64 size)
+static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
 {
-	struct lmb_region *rgn = &(lmb.memory);
 	u64 rgnbegin, rgnend;
 	u64 end = base + size;
 	int i;
@@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size)
 	return lmb_add_region(rgn, end, rgnend - end);
 }
 
+long lmb_remove(u64 base, u64 size)
+{
+	return __lmb_remove(&lmb.memory, base, size);
+}
+
+long __init lmb_free(u64 base, u64 size)
+{
+	return __lmb_remove(&lmb.reserved, base, size);
+}
+
 long __init lmb_reserve(u64 base, u64 size)
 {
 	struct lmb_region *_rgn = &lmb.reserved;
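The lmb change is a refactor plus one new entry point: the range-removal logic moves into __lmb_remove(), which takes the region array as a parameter, so the same code backs both lmb_remove() (carving a range out of lmb.memory) and the new lmb_free() (dropping a range from lmb.reserved, i.e. returning an early reservation to the free pool). A hypothetical early-boot usage sketch; the address and size are made up:

#include <linux/init.h>
#include <linux/lmb.h>

static void __init scratch_buffer_demo(void)
{
	u64 base = 0x1000000;		/* made-up address */
	u64 size = 1024 * 1024;		/* 1 MiB           */

	lmb_reserve(base, size);	/* take the range out of the free pool */
	/* ... use the scratch range during early boot ... */
	lmb_free(base, size);		/* new helper: remove it from lmb.reserved */
}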
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 92cdd9936e3d..6b9670d6bbf9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -364,7 +364,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 	unsigned int height, shift;
 	struct radix_tree_node *node, **slot;
 
-	node = rcu_dereference(root->rnode);
+	node = rcu_dereference_raw(root->rnode);
 	if (node == NULL)
 		return NULL;
 
@@ -384,7 +384,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 	do {
 		slot = (struct radix_tree_node **)
 			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-		node = rcu_dereference(*slot);
+		node = rcu_dereference_raw(*slot);
 		if (node == NULL)
 			return NULL;
 
@@ -568,7 +568,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 	if (!root_tag_get(root, tag))
 		return 0;
 
-	node = rcu_dereference(root->rnode);
+	node = rcu_dereference_raw(root->rnode);
 	if (node == NULL)
 		return 0;
 
@@ -602,7 +602,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 			BUG_ON(ret && saw_unset_tag);
 			return !!ret;
 		}
-		node = rcu_dereference(node->slots[offset]);
+		node = rcu_dereference_raw(node->slots[offset]);
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
@@ -711,7 +711,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
 		}
 
 		shift -= RADIX_TREE_MAP_SHIFT;
-		slot = rcu_dereference(slot->slots[i]);
+		slot = rcu_dereference_raw(slot->slots[i]);
 		if (slot == NULL)
 			goto out;
 	}
@@ -758,7 +758,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 	unsigned long cur_index = first_index;
 	unsigned int ret;
 
-	node = rcu_dereference(root->rnode);
+	node = rcu_dereference_raw(root->rnode);
 	if (!node)
 		return 0;
 
@@ -787,7 +787,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference(slot);
+			results[ret + nr_found] = rcu_dereference_raw(slot);
 			nr_found++;
 		}
 		ret += nr_found;
@@ -826,7 +826,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
 	unsigned long cur_index = first_index;
 	unsigned int ret;
 
-	node = rcu_dereference(root->rnode);
+	node = rcu_dereference_raw(root->rnode);
 	if (!node)
 		return 0;
 
@@ -915,7 +915,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
 			}
 		}
 		shift -= RADIX_TREE_MAP_SHIFT;
-		slot = rcu_dereference(slot->slots[i]);
+		slot = rcu_dereference_raw(slot->slots[i]);
 		if (slot == NULL)
 			break;
 	}
@@ -951,7 +951,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 	if (!root_tag_get(root, tag))
 		return 0;
 
-	node = rcu_dereference(root->rnode);
+	node = rcu_dereference_raw(root->rnode);
 	if (!node)
 		return 0;
 
@@ -980,7 +980,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference(slot);
+			results[ret + nr_found] = rcu_dereference_raw(slot);
 			nr_found++;
 		}
 		ret += nr_found;
@@ -1020,7 +1020,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
 	if (!root_tag_get(root, tag))
 		return 0;
 
-	node = rcu_dereference(root->rnode);
+	node = rcu_dereference_raw(root->rnode);
 	if (!node)
 		return 0;
 
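The radix-tree conversion mirrors the idr one: every lookup-side rcu_dereference() becomes rcu_dereference_raw(), because a radix tree may be protected either by rcu_read_lock() or by the caller's own lock, and the internals cannot assert one particular convention without tripping PROVE_RCU for the other. Both caller patterns are sketched below with a hypothetical tree; neither should now trigger a lockdep-RCU warning.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static RADIX_TREE(demo_tree, GFP_ATOMIC);	/* hypothetical tree  */
static DEFINE_SPINLOCK(demo_tree_lock);		/* its update lock    */

/* Lookup under RCU: what a plain rcu_dereference() would have assumed. */
static bool demo_present_rcu(unsigned long index)
{
	bool present;

	rcu_read_lock();
	present = radix_tree_lookup(&demo_tree, index) != NULL;
	rcu_read_unlock();
	return present;
}

/*
 * Lookup under the tree's own lock: equally valid, but not visible to
 * lockdep as an RCU read-side critical section.
 */
static bool demo_present_locked(unsigned long index)
{
	bool present;

	spin_lock(&demo_tree_lock);
	present = radix_tree_lookup(&demo_tree, index) != NULL;
	spin_unlock(&demo_tree_lock);
	return present;
}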