Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug      5
-rw-r--r--   lib/bug.c              6
-rw-r--r--   lib/kobject_uevent.c   4
-rw-r--r--   lib/list_sort.c        2
-rw-r--r--   lib/radix-tree.c      68
-rw-r--r--   lib/raid6/.gitignore   4
-rw-r--r--   lib/scatterlist.c     14
7 files changed, 77 insertions, 26 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9e06b7f5ecf1..1b4afd2e6ca0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -994,13 +994,16 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 
 config LATENCYTOP
         bool "Latency measuring infrastructure"
+        depends on HAVE_LATENCYTOP_SUPPORT
+        depends on DEBUG_KERNEL
+        depends on STACKTRACE_SUPPORT
+        depends on PROC_FS
         select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
         select KALLSYMS
         select KALLSYMS_ALL
         select STACKTRACE
         select SCHEDSTATS
         select SCHED_DEBUG
-        depends on HAVE_LATENCYTOP_SUPPORT
         help
           Enable this option if you want to use the LatencyTOP tool
           to find out which userspace is blocking on what kernel operations.
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
         return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-                        struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+                         struct module *mod)
 {
         char *secstrings;
         unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
          * could potentially lead to deadlock and thus be counter-productive.
          */
         list_add(&mod->bug_list, &module_bug_list);
-
-        return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index b93579504dfa..70af0a7f97c0 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -123,7 +123,7 @@ static int kobj_usermode_filter(struct kobject *kobj)
  * @kobj: struct kobject that the action is happening to
  * @envp_ext: pointer to environmental data
  *
- * Returns 0 if kobject_uevent() is completed with success or the
+ * Returns 0 if kobject_uevent_env() is completed with success or the
  * corresponding error when it fails.
  */
 int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
@@ -317,7 +317,7 @@ exit:
 EXPORT_SYMBOL_GPL(kobject_uevent_env);
 
 /**
- * kobject_uevent - notify userspace by ending an uevent
+ * kobject_uevent - notify userspace by sending an uevent
 *
  * @action: action that is happening
  * @kobj: struct kobject that the action is happening to
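As the corrected comment notes, kobject_uevent_env() returns 0 on success or an error code, so callers are expected to check it. A minimal hypothetical sketch of such a caller (the helper name, environment string and warning message are invented for illustration, not part of this patch):

#include <linux/kernel.h>
#include <linux/kobject.h>

/* Hypothetical helper: emit a KOBJ_CHANGE uevent with one extra
 * environment variable and propagate any failure to the caller. */
static int notify_userspace_change(struct kobject *kobj)
{
        char *envp[] = { "REASON=threshold_crossed", NULL };
        int err;

        err = kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
        if (err)
                pr_warn("uevent emission failed: %d\n", err);
        return err;
}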
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb794c38b..a7616fa3162e 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
                 * element comparison is needed, so the client's cmp()
                 * routine can invoke cond_resched() periodically.
                 */
-               (*cmp)(priv, tail, tail);
+               (*cmp)(priv, tail->next, tail->next);
 
                tail->next->prev = tail;
                tail = tail->next;
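The preserved comment states the contract that makes this dummy call worthwhile: list_sort() invokes the client's cmp() for every element, so cmp() may yield the CPU itself. A hypothetical client callback written against the cmp() signature used in this tree (struct item and its key field are invented for illustration):

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/sched.h>

/* Hypothetical element type, sorted by 'key'. */
struct item {
        struct list_head list;
        int key;
};

/* Because list_sort() calls cmp() at least once per element, a long sort
 * can periodically give up the CPU here without extra bookkeeping. */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct item *ia = container_of(a, struct item, list);
        struct item *ib = container_of(b, struct item, list);

        cond_resched();
        return ia->key - ib->key;
}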
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e907858498a6..efd16fa80b1c 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -174,14 +174,16 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
         struct radix_tree_node *node =
                         container_of(head, struct radix_tree_node, rcu_head);
+        int i;
 
         /*
          * must only free zeroed nodes into the slab. radix_tree_shrink
          * can leave us with a non-NULL entry in the first slot, so clear
          * that here to make sure.
          */
-        tag_clear(node, 0, 0);
-        tag_clear(node, 1, 0);
+        for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
+                tag_clear(node, i, 0);
+
         node->slots[0] = NULL;
         node->count = 0;
 
@@ -623,17 +625,30 @@ EXPORT_SYMBOL(radix_tree_tag_get);
  * also settag. The function stops either after tagging nr_to_tag items or
  * after reaching last_index.
  *
+ * The tags must be set from the leaf level only and propagated back up the
+ * path to the root. We must do this so that we resolve the full path before
+ * setting any tags on intermediate nodes. If we set tags as we descend, then
+ * we can get to the leaf node and find that the index that has the iftag
+ * set is outside the range we are scanning. This results in dangling tags and
+ * can lead to problems with later tag operations (e.g. livelocks on lookups).
+ *
  * The function returns number of leaves where the tag was set and sets
  * *first_indexp to the first unscanned index.
+ * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
+ * be prepared to handle that.
  */
 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
                 unsigned long *first_indexp, unsigned long last_index,
                 unsigned long nr_to_tag,
                 unsigned int iftag, unsigned int settag)
 {
-        unsigned int height = root->height, shift;
-        unsigned long tagged = 0, index = *first_indexp;
-        struct radix_tree_node *open_slots[height], *slot;
+        unsigned int height = root->height;
+        struct radix_tree_path path[height];
+        struct radix_tree_path *pathp = path;
+        struct radix_tree_node *slot;
+        unsigned int shift;
+        unsigned long tagged = 0;
+        unsigned long index = *first_indexp;
 
         last_index = min(last_index, radix_tree_maxindex(height));
         if (index > last_index)
@@ -653,6 +668,13 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
         shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
         slot = radix_tree_indirect_to_ptr(root->rnode);
 
+        /*
+         * we fill the path from (root->height - 2) to 0, leaving the index at
+         * (root->height - 1) as a terminator. Zero the node in the terminator
+         * so that we can use this to end walk loops back up the path.
+         */
+        path[height - 1].node = NULL;
+
         for (;;) {
                 int offset;
 
@@ -661,21 +683,35 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
                         goto next;
                 if (!tag_get(slot, iftag, offset))
                         goto next;
+                if (height > 1) {
+                        /* Go down one level */
+                        height--;
+                        shift -= RADIX_TREE_MAP_SHIFT;
+                        path[height - 1].node = slot;
+                        path[height - 1].offset = offset;
+                        slot = slot->slots[offset];
+                        continue;
+                }
+
+                /* tag the leaf */
+                tagged++;
                 tag_set(slot, settag, offset);
-                if (height == 1) {
-                        tagged++;
-                        goto next;
+
+                /* walk back up the path tagging interior nodes */
+                pathp = &path[0];
+                while (pathp->node) {
+                        /* stop if we find a node with the tag already set */
+                        if (tag_get(pathp->node, settag, pathp->offset))
+                                break;
+                        tag_set(pathp->node, settag, pathp->offset);
+                        pathp++;
                 }
-                /* Go down one level */
-                height--;
-                shift -= RADIX_TREE_MAP_SHIFT;
-                open_slots[height] = slot;
-                slot = slot->slots[offset];
-                continue;
+
 next:
                 /* Go to next item at level determined by 'shift' */
                 index = ((index >> shift) + 1) << shift;
-                if (index > last_index)
+                /* Overflow can happen when last_index is ~0UL... */
+                if (index > last_index || !index)
                         break;
                 if (tagged >= nr_to_tag)
                         break;
@@ -685,7 +721,7 @@ next:
                  * last_index is guaranteed to be in the tree, what
                  * we do below cannot wander astray.
                  */
-                slot = open_slots[height];
+                slot = path[height - 1].node;
                 height++;
                 shift += RADIX_TREE_MAP_SHIFT;
         }
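The new WARNING in the comment block means callers that scan all the way to ULONG_MAX must expect *first_indexp to wrap back to 0. A hedged caller sketch under that contract (the batch size, function name and tag parameters are placeholders, not code from this patch):

#include <linux/kernel.h>
#include <linux/radix-tree.h>

#define TAG_BATCH 128UL        /* arbitrary batch size for illustration */

/* Hypothetical caller: retag every iftag entry in [0, ULONG_MAX] as settag,
 * TAG_BATCH entries at a time, handling the documented index wrap. */
static void retag_all(struct radix_tree_root *root, unsigned int iftag,
                      unsigned int settag)
{
        unsigned long index = 0;
        unsigned long tagged;

        do {
                tagged = radix_tree_range_tag_if_tagged(root, &index,
                                                        ULONG_MAX, TAG_BATCH,
                                                        iftag, settag);
                /* index may have wrapped to 0 after the last leaf was
                 * tagged; treat that as "scan finished". */
        } while (tagged == TAG_BATCH && index);
}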
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
new file mode 100644
index 000000000000..162becacf97c
--- /dev/null
+++ b/lib/raid6/.gitignore
@@ -0,0 +1,4 @@
+mktables
+altivec*.c
+int*.c
+tables.c
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a5ec42868f99..4ceb05d772ae 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                 left -= sg_size;
 
                 sg = alloc_fn(alloc_size, gfp_mask);
-                if (unlikely(!sg))
-                        return -ENOMEM;
+                if (unlikely(!sg)) {
+                        /*
+                         * Adjust entry count to reflect that the last
+                         * entry of the previous table won't be used for
+                         * linkage.  Without this, sg_kfree() may get
+                         * confused.
+                         */
+                        if (prv)
+                                table->nents = ++table->orig_nents;
+
+                        return -ENOMEM;
+                }
 
                 sg_init_table(sg, alloc_size);
                 table->nents = table->orig_nents += sg_size;
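The comment added in this failure path explains why orig_nents must be adjusted before bailing out: the cleanup that frees the partially built chain relies on an accurate entry count. From a caller's point of view nothing changes; a minimal usage sketch of the sg_alloc_table() wrapper follows (the helper name and the fill step are illustrative only):

#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: build a table of 'nents' entries; if allocation
 * fails midway, the partially built chain has already been torn down. */
static int build_sg_table(struct sg_table *table, unsigned int nents)
{
        int ret;

        ret = sg_alloc_table(table, nents, GFP_KERNEL);
        if (ret)
                return ret;     /* typically -ENOMEM */

        /* ... fill entries with sg_set_page()/sg_set_buf() ... */

        return 0;
}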
