Diffstat (limited to 'lib')

 -rw-r--r--  lib/Kconfig.debug    |  60
 -rw-r--r--  lib/bug.c            |   6
 -rw-r--r--  lib/dma-debug.c      |   1
 -rw-r--r--  lib/dynamic_debug.c  | 140
 -rw-r--r--  lib/idr.c            |  13
 -rw-r--r--  lib/kobject.c        |  39
 -rw-r--r--  lib/kobject_uevent.c |   4
 -rw-r--r--  lib/list_sort.c      |   2
 -rw-r--r--  lib/radix-tree.c     |  70
 -rw-r--r--  lib/raid6/.gitignore |   4
 -rw-r--r--  lib/scatterlist.c    |  14
 -rw-r--r--  lib/swiotlb.c        |  18

12 files changed, 261 insertions(+), 110 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1b4afd2e6ca0..69a32664c289 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order to find ways to optimize the allocator. This should never be
@@ -461,6 +461,15 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
+config BKL
+	bool "Big Kernel Lock" if (SMP || PREEMPT)
+	default y
+	help
+	  This is the traditional lock that is used in old code instead
+	  of proper locking. All drivers that use the BKL should depend
+	  on this symbol.
+	  Say Y here unless you are working on removing the BKL.
+
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -482,6 +491,7 @@ config PROVE_LOCKING
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select DEBUG_LOCK_ALLOC
+	select TRACE_IRQFLAGS
 	default n
 	help
 	  This feature enables the kernel to prove that all locking
@@ -539,6 +549,23 @@ config PROVE_RCU_REPEATEDLY
 	  disabling, allowing multiple RCU-lockdep warnings to be printed
 	  on a single reboot.
 
+	  Say Y to allow multiple RCU-lockdep warnings per boot.
+
+	  Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+	bool "RCU debugging: sparse-based checks for pointer usage"
+	default n
+	help
+	  This feature enables the __rcu sparse annotation for
+	  RCU-protected pointers.  This annotation will cause sparse
+	  to flag any non-RCU use of annotated pointers.  This can be
+	  helpful when debugging RCU usage.  Please note that this feature
+	  is not intended to enforce code cleanliness; it is instead merely
+	  a debugging aid.
+
+	  Say Y to make sparse flag questionable use of RCU-protected pointers.
+
 	  Say N if you are unsure.
 
 config LOCKDEP
@@ -579,11 +606,10 @@ config DEBUG_LOCKDEP
 	  of more runtime overhead.
 
 config TRACE_IRQFLAGS
-	depends on DEBUG_KERNEL
 	bool
-	default y
-	depends on TRACE_IRQFLAGS_SUPPORT
-	depends on PROVE_LOCKING
+	help
+	  Enables hooks to interrupt enabling and disabling for
+	  either tracing or lock debugging.
 
 config DEBUG_SPINLOCK_SLEEP
 	bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -832,6 +858,30 @@ config RCU_CPU_STALL_DETECTOR
 
 	  Say Y if you are unsure.
 
+config RCU_CPU_STALL_TIMEOUT
+	int "RCU CPU stall timeout in seconds"
+	depends on RCU_CPU_STALL_DETECTOR
+	range 3 300
+	default 60
+	help
+	  If a given RCU grace period extends more than the specified
+	  number of seconds, a CPU stall warning is printed.  If the
+	  RCU grace period persists, additional CPU stall warnings are
+	  printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+	bool "RCU CPU stall checking starts automatically at boot"
+	depends on RCU_CPU_STALL_DETECTOR
+	default y
+	help
+	  If set, start checking for RCU CPU stalls immediately on
+	  boot.  Otherwise, RCU CPU stall checking must be manually
+	  enabled.
+
+	  Say Y if you are unsure.
+
+	  Say N if you wish to suppress RCU CPU stall checking during boot.
+
 config RCU_CPU_STALL_VERBOSE
 	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
 	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
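[Review note: as background for the SPARSE_RCU_POINTER option above, a minimal sketch of the __rcu annotation it checks. The struct, field, and function names here are hypothetical, not part of the patch; with the option enabled, a sparse run (something like "make C=1") flags dereferences that bypass the RCU accessors. Requires <linux/rcupdate.h>.]

    struct settings {
    	int timeout;
    };

    struct conf {
    	struct settings __rcu *active;	/* published with rcu_assign_pointer() */
    };

    static int conf_timeout(struct conf *c)
    {
    	int t;

    	rcu_read_lock();
    	t = rcu_dereference(c->active)->timeout;	/* sparse-clean access */
    	rcu_read_unlock();
    	return t;
    }
    /* a plain "c->active->timeout" would be flagged by sparse here */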
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 	return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-			struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+			 struct module *mod)
 {
 	char *secstrings;
 	unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 	 * could potentially lead to deadlock and thus be counter-productive.
 	 */
 	list_add(&mod->bug_list, &module_bug_list);
-
-	return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
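[Review note: the return-type change above reflects that adding the module to module_bug_list cannot fail, so callers no longer need an error path. A hedged sketch of the caller side, paraphrasing rather than quoting the kernel/module.c call site:]

    /* before: if (module_bug_finalize(hdr, sechdrs, mod)) goto cleanup; */
    /* after: the call cannot fail and the unwind path drops out */
    module_bug_finalize(hdr, sechdrs, mod);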
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 01e64270e246..4bfb0471f106 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -590,6 +590,7 @@ out_unlock:
 static const struct file_operations filter_fops = {
 	.read  = filter_read,
 	.write = filter_write,
+	.llseek = default_llseek,
 };
 
 static int dma_debug_fs_init(void)
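[Review note: the one-line addition gives filter_fops an explicit llseek method, in line with the tree-wide effort around the BKL removal to stop relying on an implicit seek default. A sketch of the resulting pattern, with hypothetical handler names:]

    static const struct file_operations example_fops = {
    	.owner	= THIS_MODULE,
    	.read	= example_read,		/* hypothetical read handler */
    	.write	= example_write,	/* hypothetical write handler */
    	.llseek	= default_llseek,	/* explicit: standard f_pos update */
    };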
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc2533728..3094318bfea7 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
 	struct list_head link;
 	char *mod_name;
@@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 }
 
 /*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
-	struct ddebug_table *dt;
-	char table_hash_value;
-
-	list_for_each_entry(dt, &ddebug_tables, link) {
-		if (first_table)
-			table_hash_value = dt->ddebugs->primary_hash;
-		else
-			table_hash_value = dt->ddebugs->secondary_hash;
-		if (dt->num_enabled && (hash == table_hash_value))
-			return 0;
-	}
-	return 1;
-}
-
-/*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them. Tells
  * the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
 				dt->num_enabled++;
 			dp->flags = newflags;
 			if (newflags) {
-				dynamic_debug_enabled |=
-						(1LL << dp->primary_hash);
-				dynamic_debug_enabled2 |=
-						(1LL << dp->secondary_hash);
+				jump_label_enable(&dp->enabled);
 			} else {
-				if (disabled_hash(dp->primary_hash, true))
-					dynamic_debug_enabled &=
-						~(1LL << dp->primary_hash);
-				if (disabled_hash(dp->secondary_hash, false))
-					dynamic_debug_enabled2 &=
-						~(1LL << dp->secondary_hash);
+				jump_label_disable(&dp->enabled);
 			}
 			if (verbose)
 				printk(KERN_INFO
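[Review note: for context on the jump_label_enable()/jump_label_disable() calls above, each pr_debug() call site owns a struct _ddebug descriptor, and enabling now toggles that one site instead of updating the removed global hash bitmasks. A hedged sketch of the call-site shape; this is an illustrative fallback form, not the exact kernel macro:]

    #define dynamic_pr_debug_sketch(descriptor, fmt, ...)		\
    do {								\
    	if (unlikely((descriptor).enabled))				\
    		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
    } while (0)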
@@ -429,6 +393,40 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 	return 0;
 }
 
+static int ddebug_exec_query(char *query_string)
+{
+	unsigned int flags = 0, mask = 0;
+	struct ddebug_query query;
+#define MAXWORDS 9
+	int nwords;
+	char *words[MAXWORDS];
+
+	nwords = ddebug_tokenize(query_string, words, MAXWORDS);
+	if (nwords <= 0)
+		return -EINVAL;
+	if (ddebug_parse_query(words, nwords-1, &query))
+		return -EINVAL;
+	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
+		return -EINVAL;
+
+	/* actually go and implement the change */
+	ddebug_change(&query, flags, mask);
+	return 0;
+}
+
+static __initdata char ddebug_setup_string[1024];
+static __init int ddebug_setup_query(char *str)
+{
+	if (strlen(str) >= 1024) {
+		pr_warning("ddebug boot param string too large\n");
+		return 0;
+	}
+	strcpy(ddebug_setup_string, str);
+	return 1;
+}
+
+__setup("ddebug_query=", ddebug_setup_query);
+
 /*
  * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the
  * command text from userspace, parses and executes it.
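[Review note: ddebug_setup_query() stashes the ddebug_query= kernel parameter so dynamic_debug_init() can apply it long before debugfs exists; the value uses the same query grammar accepted by writes to <debugfs>/dynamic_debug/control. An illustrative boot line, where the file name and line number are hypothetical:]

    ddebug_query="file svcsock.c line 1603 +p"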
@@ -436,12 +434,8 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 				  size_t len, loff_t *offp)
 {
-	unsigned int flags = 0, mask = 0;
-	struct ddebug_query query;
-#define MAXWORDS 9
-	int nwords;
-	char *words[MAXWORDS];
 	char tmpbuf[256];
+	int ret;
 
 	if (len == 0)
 		return 0;
@@ -455,16 +449,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 	printk(KERN_INFO "%s: read %d bytes from userspace\n",
 		__func__, (int)len);
 
-	nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
-	if (nwords <= 0)
-		return -EINVAL;
-	if (ddebug_parse_query(words, nwords-1, &query))
-		return -EINVAL;
-	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
-		return -EINVAL;
-
-	/* actually go and implement the change */
-	ddebug_change(&query, flags, mask);
+	ret = ddebug_exec_query(tmpbuf);
+	if (ret)
+		return ret;
 
 	*offp += len;
 	return len;
@@ -725,13 +712,14 @@ static void ddebug_remove_all_tables(void)
 	mutex_unlock(&ddebug_lock);
 }
 
-static int __init dynamic_debug_init(void)
+static __initdata int ddebug_init_success;
+
+static int __init dynamic_debug_init_debugfs(void)
 {
 	struct dentry *dir, *file;
-	struct _ddebug *iter, *iter_start;
-	const char *modname = NULL;
-	int ret = 0;
-	int n = 0;
+
+	if (!ddebug_init_success)
+		return -ENODEV;
 
 	dir = debugfs_create_dir("dynamic_debug", NULL);
 	if (!dir)
@@ -742,6 +730,16 @@ static int __init dynamic_debug_init(void)
 		debugfs_remove(dir);
 		return -ENOMEM;
 	}
+	return 0;
+}
+
+static int __init dynamic_debug_init(void)
+{
+	struct _ddebug *iter, *iter_start;
+	const char *modname = NULL;
+	int ret = 0;
+	int n = 0;
+
 	if (__start___verbose != __stop___verbose) {
 		iter = __start___verbose;
 		modname = iter->modname;
@@ -759,12 +757,26 @@ static int __init dynamic_debug_init(void)
 		}
 		ret = ddebug_add_module(iter_start, n, modname);
 	}
+
+	/* ddebug_query boot param got passed -> set it up */
+	if (ddebug_setup_string[0] != '\0') {
+		ret = ddebug_exec_query(ddebug_setup_string);
+		if (ret)
+			pr_warning("Invalid ddebug boot param %s",
+				   ddebug_setup_string);
+		else
+			pr_info("ddebug initialized with string %s",
+				ddebug_setup_string);
+	}
+
 out_free:
-	if (ret) {
+	if (ret)
 		ddebug_remove_all_tables();
-		debugfs_remove(dir);
-		debugfs_remove(file);
-	}
+	else
+		ddebug_init_success = 1;
 	return 0;
 }
-module_init(dynamic_debug_init);
+/* Allow early initialization for boot messages via boot param */
+arch_initcall(dynamic_debug_init);
+/* Debugfs setup must be done later */
+module_init(dynamic_debug_init_debugfs);
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -284,7 +284,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  * idr_get_new_above - allocate new idr entry above or equal to a start id
  * @idp: idr handle
  * @ptr: pointer you want associated with the id
- * @start_id: id to start search at
+ * @starting_id: id to start search at
  * @id: pointer to the allocated handle
  *
  * This is the allocate id function. It should be called with any
@@ -479,7 +479,7 @@ EXPORT_SYMBOL(idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
- * idp: idr handle
+ * @idp: idr handle
  */
 void idr_destroy(struct idr *idp)
 {
@@ -586,10 +586,11 @@ EXPORT_SYMBOL(idr_for_each);
 /**
  * idr_get_next - lookup next object of id to given id.
  * @idp: idr handle
- * @id: pointer to lookup key
+ * @nextidp: pointer to lookup key
 *
 * Returns pointer to registered object with id, which is next number to
- * given id.
+ * given id. After being looked up, *@nextidp will be updated for the next
+ * iteration.
 */
 
 void *idr_get_next(struct idr *idp, int *nextidp)
@@ -758,7 +759,7 @@ EXPORT_SYMBOL(ida_pre_get);
 /**
  * ida_get_new_above - allocate new ID above or equal to a start id
  * @ida: ida handle
- * @staring_id: id to start search at
+ * @starting_id: id to start search at
  * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @ida. It should be called with
@@ -912,7 +913,7 @@ EXPORT_SYMBOL(ida_remove);
 
 /**
  * ida_destroy - release all cached layers within an ida tree
- * ida: ida handle
+ * @ida: ida handle
 */
 void ida_destroy(struct ida *ida)
 {
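[Review note: the clarified idr_get_next() kernel-doc above implies the usual iteration idiom; a minimal hedged sketch, where "process" is a hypothetical consumer:]

    void *p;
    int id = 0;

    while ((p = idr_get_next(idp, &id)) != NULL) {
    	process(id, p);	/* hypothetical consumer */
    	id++;		/* step past the id just returned */
    }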
diff --git a/lib/kobject.c b/lib/kobject.c
index f07c57252e82..82dc34c095c2 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,17 +746,56 @@ void kset_unregister(struct kset *k)
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
+	return kset_find_obj_hinted(kset, name, NULL);
+}
+
+/**
+ * kset_find_obj_hinted - search for object in kset given a predecessor hint.
+ * @kset: kset we're looking in.
+ * @name: object's name.
+ * @hint: hint to possible object's predecessor.
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to the behavior of kset_find_obj().  Either way
+ * a reference for the returned object is held and the reference on the
+ * hinted object is released.
+ */
+struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+				     struct kobject *hint)
+{
 	struct kobject *k;
 	struct kobject *ret = NULL;
 
 	spin_lock(&kset->list_lock);
+
+	if (!hint)
+		goto slow_search;
+
+	/* end of list detection */
+	if (hint->entry.next == kset->list.next)
+		goto slow_search;
+
+	k = container_of(hint->entry.next, struct kobject, entry);
+	if (!kobject_name(k) || strcmp(kobject_name(k), name))
+		goto slow_search;
+
+	ret = kobject_get(k);
+	goto unlock_exit;
+
+slow_search:
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
 			ret = kobject_get(k);
 			break;
 		}
 	}
+
+unlock_exit:
 	spin_unlock(&kset->list_lock);
+
+	if (hint)
+		kobject_put(hint);
+
 	return ret;
 }
 
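[Review note: a hedged usage sketch of the hinted lookup. When successive lookups tend to hit neighbouring list entries, such as names resolved in creation order, feeding each result back as the next hint reduces the common case to a single next-pointer check; "names" here is a hypothetical NULL-terminated array:]

    struct kobject *k = NULL;
    const char * const *name;

    for (name = names; *name; name++) {
    	/* the previous result is the hint; its reference is consumed */
    	k = kset_find_obj_hinted(kset, *name, k);
    	if (!k)
    		break;			/* hint reference already dropped */
    }
    if (k)
    	kobject_put(k);			/* drop the final reference */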
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index b93579504dfa..70af0a7f97c0 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -123,7 +123,7 @@ static int kobj_usermode_filter(struct kobject *kobj)
 * @kobj: struct kobject that the action is happening to
 * @envp_ext: pointer to environmental data
 *
- * Returns 0 if kobject_uevent() is completed with success or the
+ * Returns 0 if kobject_uevent_env() is completed with success or the
 * corresponding error when it fails.
 */
 int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
@@ -317,7 +317,7 @@ exit:
 EXPORT_SYMBOL_GPL(kobject_uevent_env);
 
 /**
- * kobject_uevent - notify userspace by ending an uevent
+ * kobject_uevent - notify userspace by sending an uevent
 *
 * @action: action that is happening
 * @kobj: struct kobject that the action is happening to
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb794c38b..a7616fa3162e 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
 		 * element comparison is needed, so the client's cmp()
 		 * routine can invoke cond_resched() periodically.
 		 */
-		(*cmp)(priv, tail, tail);
+		(*cmp)(priv, tail->next, tail->next);
 
 		tail->next->prev = tail;
 		tail = tail->next;
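[Review note: the fix matters because this dummy invocation is the only guarantee the client gets that cmp() runs once per element during the final backlink-restoring pass; with (tail, tail) the same stale element was passed every time. A hedged sketch of a client callback relying on that guarantee, with hypothetical names:]

    struct item {
    	struct list_head list;
    	int key;
    };

    /* may be handed identical (b, b) pairs purely so it can yield the CPU */
    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
    	cond_resched();	/* safe: guaranteed to run for every element */
    	return container_of(a, struct item, list)->key -
    	       container_of(b, struct item, list)->key;
    }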
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e907858498a6..6f412ab4c24f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
 	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
 	struct rcu_head	rcu_head;
-	void		*slots[RADIX_TREE_MAP_SIZE];
+	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
@@ -174,14 +174,16 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
 	struct radix_tree_node *node =
 			container_of(head, struct radix_tree_node, rcu_head);
+	int i;
 
 	/*
 	 * must only free zeroed nodes into the slab. radix_tree_shrink
 	 * can leave us with a non-NULL entry in the first slot, so clear
 	 * that here to make sure.
 	 */
-	tag_clear(node, 0, 0);
-	tag_clear(node, 1, 0);
+	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
+		tag_clear(node, i, 0);
+
 	node->slots[0] = NULL;
 	node->count = 0;
 
@@ -623,17 +625,30 @@ EXPORT_SYMBOL(radix_tree_tag_get);
 * also settag. The function stops either after tagging nr_to_tag items or
 * after reaching last_index.
 *
+ * The tags must be set from the leaf level only and propagated back up the
+ * path to the root. We must do this so that we resolve the full path before
+ * setting any tags on intermediate nodes. If we set tags as we descend, then
+ * we can get to the leaf node and find that the index that has the iftag
+ * set is outside the range we are scanning. This results in dangling tags and
+ * can lead to problems with later tag operations (e.g. livelocks on lookups).
+ *
 * The function returns number of leaves where the tag was set and sets
 * *first_indexp to the first unscanned index.
+ * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
+ * be prepared to handle that.
 */
 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 		unsigned long *first_indexp, unsigned long last_index,
 		unsigned long nr_to_tag,
 		unsigned int iftag, unsigned int settag)
 {
-	unsigned int height = root->height, shift;
-	unsigned long tagged = 0, index = *first_indexp;
-	struct radix_tree_node *open_slots[height], *slot;
+	unsigned int height = root->height;
+	struct radix_tree_path path[height];
+	struct radix_tree_path *pathp = path;
+	struct radix_tree_node *slot;
+	unsigned int shift;
+	unsigned long tagged = 0;
+	unsigned long index = *first_indexp;
 
 	last_index = min(last_index, radix_tree_maxindex(height));
 	if (index > last_index)
@@ -653,6 +668,13 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	slot = radix_tree_indirect_to_ptr(root->rnode);
 
+	/*
+	 * we fill the path from (root->height - 2) to 0, leaving the index at
+	 * (root->height - 1) as a terminator. Zero the node in the terminator
+	 * so that we can use this to end walk loops back up the path.
+	 */
+	path[height - 1].node = NULL;
+
 	for (;;) {
 		int offset;
 
@@ -661,21 +683,35 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 			goto next;
 		if (!tag_get(slot, iftag, offset))
 			goto next;
+		if (height > 1) {
+			/* Go down one level */
+			height--;
+			shift -= RADIX_TREE_MAP_SHIFT;
+			path[height - 1].node = slot;
+			path[height - 1].offset = offset;
+			slot = slot->slots[offset];
+			continue;
+		}
+
+		/* tag the leaf */
+		tagged++;
 		tag_set(slot, settag, offset);
-		if (height == 1) {
-			tagged++;
-			goto next;
-		}
-		/* Go down one level */
-		height--;
-		shift -= RADIX_TREE_MAP_SHIFT;
-		open_slots[height] = slot;
-		slot = slot->slots[offset];
-		continue;
+
+		/* walk back up the path tagging interior nodes */
+		pathp = &path[0];
+		while (pathp->node) {
+			/* stop if we find a node with the tag already set */
+			if (tag_get(pathp->node, settag, pathp->offset))
+				break;
+			tag_set(pathp->node, settag, pathp->offset);
+			pathp++;
+		}
+
 next:
 		/* Go to next item at level determined by 'shift' */
 		index = ((index >> shift) + 1) << shift;
-		if (index > last_index)
+		/* Overflow can happen when last_index is ~0UL... */
+		if (index > last_index || !index)
 			break;
 		if (tagged >= nr_to_tag)
 			break;
@@ -685,7 +721,7 @@ next:
 		 * last_index is guaranteed to be in the tree, what
 		 * we do below cannot wander astray.
 		 */
-		slot = open_slots[height];
+		slot = path[height - 1].node;
 		height++;
 		shift += RADIX_TREE_MAP_SHIFT;
 	}
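[Review note: the new "!index" test guards the wrap the "Overflow can happen" comment describes. A hedged arithmetic illustration, with a representative shift value rather than kernel code:]

    unsigned long index = ~0UL;	/* scanning up to last_index == ULONG_MAX */
    unsigned int shift = 6;	/* RADIX_TREE_MAP_SHIFT on typical configs */

    index = ((index >> shift) + 1) << shift;	/* wraps around to 0 */
    /* "index > last_index" never fires after the wrap; "!index" does */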
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
new file mode 100644
index 000000000000..162becacf97c
--- /dev/null
+++ b/lib/raid6/.gitignore
@@ -0,0 +1,4 @@
+mktables
+altivec*.c
+int*.c
+tables.c
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a5ec42868f99..4ceb05d772ae 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 		left -= sg_size;
 
 		sg = alloc_fn(alloc_size, gfp_mask);
-		if (unlikely(!sg))
-			return -ENOMEM;
+		if (unlikely(!sg)) {
+			/*
+			 * Adjust entry count to reflect that the last
+			 * entry of the previous table won't be used for
+			 * linkage.  Without this, sg_kfree() may get
+			 * confused.
+			 */
+			if (prv)
+				table->nents = ++table->orig_nents;
+
+			return -ENOMEM;
+		}
 
 		sg_init_table(sg, alloc_size);
 		table->nents = table->orig_nents += sg_size;
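[Review note: for context on the adjustment above, when __sg_alloc_table() chains a new chunk onto a previous one, the previous chunk's last entry stops being a data entry and becomes the chain link, so the running orig_nents count deliberately excludes it. A hedged, simplified sketch of that per-chunk accounting; "left" and "max_ents" follow the function's own names:]

    alloc_size = left;
    if (alloc_size > max_ents) {
    	alloc_size = max_ents;
    	sg_size = alloc_size - 1;	/* last entry lent to the chain link */
    } else
    	sg_size = alloc_size;		/* final chunk: no link needed */

    /* if allocating the *next* chunk fails, the lent entry is handed back,
     * which is what "table->nents = ++table->orig_nents" restores */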
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34e3082632d8..7c06ee51a29a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
 */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
 * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
 			      get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
 		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 }
 
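[Review note: the PAGE_ALIGN() changes pair the allocation size with the free size. Page-granular bootmem allocations round the request up to a page boundary, and the matching free must cover the same rounded span or bootmem accounting is corrupted. A hedged sketch of the invariant, using one of the buffers from the hunks above:]

    size_t bytes = io_tlb_nslabs * sizeof(int);

    io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(bytes));
    /* ... later, when tearing the bounce buffers down ... */
    free_bootmem_late(__pa(io_tlb_list), PAGE_ALIGN(bytes));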
