diff options
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig.debug | 58 | ||||
| -rw-r--r-- | lib/bug.c | 6 | ||||
| -rw-r--r-- | lib/dynamic_debug.c | 42 | ||||
| -rw-r--r-- | lib/list_sort.c | 2 | ||||
| -rw-r--r-- | lib/radix-tree.c | 2 | ||||
| -rw-r--r-- | lib/swiotlb.c | 18 |
6 files changed, 70 insertions, 58 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1b4afd2e6ca0..7b2a8ca97ada 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -461,6 +461,15 @@ config DEBUG_MUTEXES | |||
| 461 | This feature allows mutex semantics violations to be detected and | 461 | This feature allows mutex semantics violations to be detected and |
| 462 | reported. | 462 | reported. |
| 463 | 463 | ||
| 464 | config BKL | ||
| 465 | bool "Big Kernel Lock" if (SMP || PREEMPT) | ||
| 466 | default y | ||
| 467 | help | ||
| 468 | This is the traditional lock that is used in old code instead | ||
| 469 | of proper locking. All drivers that use the BKL should depend | ||
| 470 | on this symbol. | ||
| 471 | Say Y here unless you are working on removing the BKL. | ||
| 472 | |||
| 464 | config DEBUG_LOCK_ALLOC | 473 | config DEBUG_LOCK_ALLOC |
| 465 | bool "Lock debugging: detect incorrect freeing of live locks" | 474 | bool "Lock debugging: detect incorrect freeing of live locks" |
| 466 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 475 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
| @@ -482,6 +491,7 @@ config PROVE_LOCKING | |||
| 482 | select DEBUG_SPINLOCK | 491 | select DEBUG_SPINLOCK |
| 483 | select DEBUG_MUTEXES | 492 | select DEBUG_MUTEXES |
| 484 | select DEBUG_LOCK_ALLOC | 493 | select DEBUG_LOCK_ALLOC |
| 494 | select TRACE_IRQFLAGS | ||
| 485 | default n | 495 | default n |
| 486 | help | 496 | help |
| 487 | This feature enables the kernel to prove that all locking | 497 | This feature enables the kernel to prove that all locking |
| @@ -539,6 +549,23 @@ config PROVE_RCU_REPEATEDLY | |||
| 539 | disabling, allowing multiple RCU-lockdep warnings to be printed | 549 | disabling, allowing multiple RCU-lockdep warnings to be printed |
| 540 | on a single reboot. | 550 | on a single reboot. |
| 541 | 551 | ||
| 552 | Say Y to allow multiple RCU-lockdep warnings per boot. | ||
| 553 | |||
| 554 | Say N if you are unsure. | ||
| 555 | |||
| 556 | config SPARSE_RCU_POINTER | ||
| 557 | bool "RCU debugging: sparse-based checks for pointer usage" | ||
| 558 | default n | ||
| 559 | help | ||
| 560 | This feature enables the __rcu sparse annotation for | ||
| 561 | RCU-protected pointers. This annotation will cause sparse | ||
| 562 | to flag any non-RCU uses of annotated pointers. This can be | ||
| 563 | helpful when debugging RCU usage. Please note that this feature | ||
| 564 | is not intended to enforce code cleanliness; it is instead merely | ||
| 565 | a debugging aid. | ||
| 566 | |||
| 567 | Say Y to make sparse flag questionable use of RCU-protected pointers | ||
| 568 | |||
| 542 | Say N if you are unsure. | 569 | Say N if you are unsure. |
| 543 | 570 | ||
| 544 | config LOCKDEP | 571 | config LOCKDEP |
| @@ -579,11 +606,10 @@ config DEBUG_LOCKDEP | |||
| 579 | of more runtime overhead. | 606 | of more runtime overhead. |
| 580 | 607 | ||
| 581 | config TRACE_IRQFLAGS | 608 | config TRACE_IRQFLAGS |
| 582 | depends on DEBUG_KERNEL | ||
| 583 | bool | 609 | bool |
| 584 | default y | 610 | help |
| 585 | depends on TRACE_IRQFLAGS_SUPPORT | 611 | Enables hooks to interrupt enabling and disabling for |
| 586 | depends on PROVE_LOCKING | 612 | either tracing or lock debugging. |
| 587 | 613 | ||
| 588 | config DEBUG_SPINLOCK_SLEEP | 614 | config DEBUG_SPINLOCK_SLEEP |
| 589 | bool "Spinlock debugging: sleep-inside-spinlock checking" | 615 | bool "Spinlock debugging: sleep-inside-spinlock checking" |
| @@ -832,6 +858,30 @@ config RCU_CPU_STALL_DETECTOR | |||
| 832 | 858 | ||
| 833 | Say Y if you are unsure. | 859 | Say Y if you are unsure. |
| 834 | 860 | ||
| 861 | config RCU_CPU_STALL_TIMEOUT | ||
| 862 | int "RCU CPU stall timeout in seconds" | ||
| 863 | depends on RCU_CPU_STALL_DETECTOR | ||
| 864 | range 3 300 | ||
| 865 | default 60 | ||
| 866 | help | ||
| 867 | If a given RCU grace period extends more than the specified | ||
| 868 | number of seconds, a CPU stall warning is printed. If the | ||
| 869 | RCU grace period persists, additional CPU stall warnings are | ||
| 870 | printed at more widely spaced intervals. | ||
| 871 | |||
| 872 | config RCU_CPU_STALL_DETECTOR_RUNNABLE | ||
| 873 | bool "RCU CPU stall checking starts automatically at boot" | ||
| 874 | depends on RCU_CPU_STALL_DETECTOR | ||
| 875 | default y | ||
| 876 | help | ||
| 877 | If set, start checking for RCU CPU stalls immediately on | ||
| 878 | boot. Otherwise, RCU CPU stall checking must be manually | ||
| 879 | enabled. | ||
| 880 | |||
| 881 | Say Y if you are unsure. | ||
| 882 | |||
| 883 | Say N if you wish to suppress RCU CPU stall checking during boot. | ||
| 884 | |||
| 835 | config RCU_CPU_STALL_VERBOSE | 885 | config RCU_CPU_STALL_VERBOSE |
| 836 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | 886 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" |
| 837 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU | 887 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU |
| @@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr) | |||
| 72 | return NULL; | 72 | return NULL; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | 75 | void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, |
| 76 | struct module *mod) | 76 | struct module *mod) |
| 77 | { | 77 | { |
| 78 | char *secstrings; | 78 | char *secstrings; |
| 79 | unsigned int i; | 79 | unsigned int i; |
| @@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
| 97 | * could potentially lead to deadlock and thus be counter-productive. | 97 | * could potentially lead to deadlock and thus be counter-productive. |
| 98 | */ | 98 | */ |
| 99 | list_add(&mod->bug_list, &module_bug_list); | 99 | list_add(&mod->bug_list, &module_bug_list); |
| 100 | |||
| 101 | return 0; | ||
| 102 | } | 100 | } |
| 103 | 101 | ||
| 104 | void module_bug_cleanup(struct module *mod) | 102 | void module_bug_cleanup(struct module *mod) |
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 02afc2533728..7bd6df781ce5 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
| @@ -26,19 +26,11 @@ | |||
| 26 | #include <linux/dynamic_debug.h> | 26 | #include <linux/dynamic_debug.h> |
| 27 | #include <linux/debugfs.h> | 27 | #include <linux/debugfs.h> |
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/jump_label.h> | ||
| 29 | 30 | ||
| 30 | extern struct _ddebug __start___verbose[]; | 31 | extern struct _ddebug __start___verbose[]; |
| 31 | extern struct _ddebug __stop___verbose[]; | 32 | extern struct _ddebug __stop___verbose[]; |
| 32 | 33 | ||
| 33 | /* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which | ||
| 34 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They | ||
| 35 | * use independent hash functions, to reduce the chance of false positives. | ||
| 36 | */ | ||
| 37 | long long dynamic_debug_enabled; | ||
| 38 | EXPORT_SYMBOL_GPL(dynamic_debug_enabled); | ||
| 39 | long long dynamic_debug_enabled2; | ||
| 40 | EXPORT_SYMBOL_GPL(dynamic_debug_enabled2); | ||
| 41 | |||
| 42 | struct ddebug_table { | 34 | struct ddebug_table { |
| 43 | struct list_head link; | 35 | struct list_head link; |
| 44 | char *mod_name; | 36 | char *mod_name; |
| @@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, | |||
| 88 | } | 80 | } |
| 89 | 81 | ||
| 90 | /* | 82 | /* |
| 91 | * must be called with ddebug_lock held | ||
| 92 | */ | ||
| 93 | |||
| 94 | static int disabled_hash(char hash, bool first_table) | ||
| 95 | { | ||
| 96 | struct ddebug_table *dt; | ||
| 97 | char table_hash_value; | ||
| 98 | |||
| 99 | list_for_each_entry(dt, &ddebug_tables, link) { | ||
| 100 | if (first_table) | ||
| 101 | table_hash_value = dt->ddebugs->primary_hash; | ||
| 102 | else | ||
| 103 | table_hash_value = dt->ddebugs->secondary_hash; | ||
| 104 | if (dt->num_enabled && (hash == table_hash_value)) | ||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | return 1; | ||
| 108 | } | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Search the tables for _ddebug's which match the given | 83 | * Search the tables for _ddebug's which match the given |
| 112 | * `query' and apply the `flags' and `mask' to them. Tells | 84 | * `query' and apply the `flags' and `mask' to them. Tells |
| 113 | * the user which ddebug's were changed, or whether none | 85 | * the user which ddebug's were changed, or whether none |
| @@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query, | |||
| 170 | dt->num_enabled++; | 142 | dt->num_enabled++; |
| 171 | dp->flags = newflags; | 143 | dp->flags = newflags; |
| 172 | if (newflags) { | 144 | if (newflags) { |
| 173 | dynamic_debug_enabled |= | 145 | jump_label_enable(&dp->enabled); |
| 174 | (1LL << dp->primary_hash); | ||
| 175 | dynamic_debug_enabled2 |= | ||
| 176 | (1LL << dp->secondary_hash); | ||
| 177 | } else { | 146 | } else { |
| 178 | if (disabled_hash(dp->primary_hash, true)) | 147 | jump_label_disable(&dp->enabled); |
| 179 | dynamic_debug_enabled &= | ||
| 180 | ~(1LL << dp->primary_hash); | ||
| 181 | if (disabled_hash(dp->secondary_hash, false)) | ||
| 182 | dynamic_debug_enabled2 &= | ||
| 183 | ~(1LL << dp->secondary_hash); | ||
| 184 | } | 148 | } |
| 185 | if (verbose) | 149 | if (verbose) |
| 186 | printk(KERN_INFO | 150 | printk(KERN_INFO |
diff --git a/lib/list_sort.c b/lib/list_sort.c index 4b5cb794c38b..a7616fa3162e 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c | |||
| @@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv, | |||
| 70 | * element comparison is needed, so the client's cmp() | 70 | * element comparison is needed, so the client's cmp() |
| 71 | * routine can invoke cond_resched() periodically. | 71 | * routine can invoke cond_resched() periodically. |
| 72 | */ | 72 | */ |
| 73 | (*cmp)(priv, tail, tail); | 73 | (*cmp)(priv, tail->next, tail->next); |
| 74 | 74 | ||
| 75 | tail->next->prev = tail; | 75 | tail->next->prev = tail; |
| 76 | tail = tail->next; | 76 | tail = tail->next; |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index efd16fa80b1c..6f412ab4c24f 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -49,7 +49,7 @@ struct radix_tree_node { | |||
| 49 | unsigned int height; /* Height from the bottom */ | 49 | unsigned int height; /* Height from the bottom */ |
| 50 | unsigned int count; | 50 | unsigned int count; |
| 51 | struct rcu_head rcu_head; | 51 | struct rcu_head rcu_head; |
| 52 | void *slots[RADIX_TREE_MAP_SIZE]; | 52 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; |
| 53 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; | 53 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; |
| 54 | }; | 54 | }; |
| 55 | 55 | ||
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 34e3082632d8..7c06ee51a29a 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs; | |||
| 70 | */ | 70 | */ |
| 71 | static unsigned long io_tlb_overflow = 32*1024; | 71 | static unsigned long io_tlb_overflow = 32*1024; |
| 72 | 72 | ||
| 73 | void *io_tlb_overflow_buffer; | 73 | static void *io_tlb_overflow_buffer; |
| 74 | 74 | ||
| 75 | /* | 75 | /* |
| 76 | * This is a free list describing the number of free entries available from | 76 | * This is a free list describing the number of free entries available from |
| @@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
| 147 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE | 147 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE |
| 148 | * between io_tlb_start and io_tlb_end. | 148 | * between io_tlb_start and io_tlb_end. |
| 149 | */ | 149 | */ |
| 150 | io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); | 150 | io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); |
| 151 | for (i = 0; i < io_tlb_nslabs; i++) | 151 | for (i = 0; i < io_tlb_nslabs; i++) |
| 152 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | 152 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
| 153 | io_tlb_index = 0; | 153 | io_tlb_index = 0; |
| 154 | io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t)); | 154 | io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); |
| 155 | 155 | ||
| 156 | /* | 156 | /* |
| 157 | * Get the overflow emergency buffer | 157 | * Get the overflow emergency buffer |
| 158 | */ | 158 | */ |
| 159 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); | 159 | io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow)); |
| 160 | if (!io_tlb_overflow_buffer) | 160 | if (!io_tlb_overflow_buffer) |
| 161 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); | 161 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); |
| 162 | if (verbose) | 162 | if (verbose) |
| @@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose) | |||
| 182 | /* | 182 | /* |
| 183 | * Get IO TLB memory from the low pages | 183 | * Get IO TLB memory from the low pages |
| 184 | */ | 184 | */ |
| 185 | io_tlb_start = alloc_bootmem_low_pages(bytes); | 185 | io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes)); |
| 186 | if (!io_tlb_start) | 186 | if (!io_tlb_start) |
| 187 | panic("Cannot allocate SWIOTLB buffer"); | 187 | panic("Cannot allocate SWIOTLB buffer"); |
| 188 | 188 | ||
| @@ -308,13 +308,13 @@ void __init swiotlb_free(void) | |||
| 308 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); | 308 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 309 | } else { | 309 | } else { |
| 310 | free_bootmem_late(__pa(io_tlb_overflow_buffer), | 310 | free_bootmem_late(__pa(io_tlb_overflow_buffer), |
| 311 | io_tlb_overflow); | 311 | PAGE_ALIGN(io_tlb_overflow)); |
| 312 | free_bootmem_late(__pa(io_tlb_orig_addr), | 312 | free_bootmem_late(__pa(io_tlb_orig_addr), |
| 313 | io_tlb_nslabs * sizeof(phys_addr_t)); | 313 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); |
| 314 | free_bootmem_late(__pa(io_tlb_list), | 314 | free_bootmem_late(__pa(io_tlb_list), |
| 315 | io_tlb_nslabs * sizeof(int)); | 315 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); |
| 316 | free_bootmem_late(__pa(io_tlb_start), | 316 | free_bootmem_late(__pa(io_tlb_start), |
| 317 | io_tlb_nslabs << IO_TLB_SHIFT); | 317 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 318 | } | 318 | } |
| 319 | } | 319 | } |
| 320 | 320 | ||
