Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    | 49
-rw-r--r--  lib/bug.c            |  6
-rw-r--r--  lib/dynamic_debug.c  | 42
-rw-r--r--  lib/list_sort.c      |  2
-rw-r--r--  lib/radix-tree.c     |  2
-rw-r--r--  lib/swiotlb.c        | 18
6 files changed, 61 insertions(+), 58 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 088eea1c2bef..7b2a8ca97ada 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -491,6 +491,7 @@ config PROVE_LOCKING
         select DEBUG_SPINLOCK
         select DEBUG_MUTEXES
         select DEBUG_LOCK_ALLOC
+        select TRACE_IRQFLAGS
         default n
         help
          This feature enables the kernel to prove that all locking
@@ -548,6 +549,23 @@ config PROVE_RCU_REPEATEDLY
          disabling, allowing multiple RCU-lockdep warnings to be printed
          on a single reboot.
 
+         Say Y to allow multiple RCU-lockdep warnings per boot.
+
+         Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+        bool "RCU debugging: sparse-based checks for pointer usage"
+        default n
+        help
+         This feature enables the __rcu sparse annotation for
+         RCU-protected pointers. This annotation will cause sparse
+         to flag any non-RCU use of annotated pointers. This can be
+         helpful when debugging RCU usage. Please note that this feature
+         is not intended to enforce code cleanliness; it is instead merely
+         a debugging aid.
+
+         Say Y to make sparse flag questionable use of RCU-protected pointers.
+
          Say N if you are unsure.
 
 config LOCKDEP
@@ -588,11 +606,10 @@ config DEBUG_LOCKDEP
           of more runtime overhead.
 
 config TRACE_IRQFLAGS
-        depends on DEBUG_KERNEL
         bool
-        default y
-        depends on TRACE_IRQFLAGS_SUPPORT
-        depends on PROVE_LOCKING
+        help
+          Enables hooks to interrupt enabling and disabling for
+          either tracing or lock debugging.
 
 config DEBUG_SPINLOCK_SLEEP
         bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -841,6 +858,30 @@ config RCU_CPU_STALL_DETECTOR
 
           Say Y if you are unsure.
 
+config RCU_CPU_STALL_TIMEOUT
+        int "RCU CPU stall timeout in seconds"
+        depends on RCU_CPU_STALL_DETECTOR
+        range 3 300
+        default 60
+        help
+          If a given RCU grace period extends more than the specified
+          number of seconds, a CPU stall warning is printed. If the
+          RCU grace period persists, additional CPU stall warnings are
+          printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+        bool "RCU CPU stall checking starts automatically at boot"
+        depends on RCU_CPU_STALL_DETECTOR
+        default y
+        help
+          If set, start checking for RCU CPU stalls immediately on
+          boot. Otherwise, RCU CPU stall checking must be manually
+          enabled.
+
+          Say Y if you are unsure.
+
+          Say N if you wish to suppress RCU CPU stall checking during boot.
+
 config RCU_CPU_STALL_VERBOSE
         bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
         depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
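
As background for the new SPARSE_RCU_POINTER option, here is a minimal sketch of the checking it enables. Everything below (struct foo, global_foo, read_val) is illustrative and not part of this patch; the point is that sparse accepts accesses to an __rcu pointer made through the RCU accessors and flags plain ones.

#include <linux/rcupdate.h>

struct foo {
        int val;
};

static struct foo __rcu *global_foo;    /* sparse tracks the __rcu space */

static int read_val(void)
{
        struct foo *p;
        int ret;

        rcu_read_lock();
        p = rcu_dereference(global_foo);  /* accepted: RCU read accessor */
        ret = p ? p->val : -1;
        rcu_read_unlock();
        return ret;
}

/*
 * By contrast, a plain "struct foo *q = global_foo;" bypasses the RCU
 * accessors and draws a sparse warning once SPARSE_RCU_POINTER is set.
 */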
diff --git a/lib/bug.c b/lib/bug.c
index 7cdfad88128f..19552096d16b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
         return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-                        struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+                         struct module *mod)
 {
         char *secstrings;
         unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
          * could potentially lead to deadlock and thus be counter-productive.
          */
         list_add(&mod->bug_list, &module_bug_list);
-
-        return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
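
Since module_bug_finalize() now returns void, it can no longer fail, and its caller in the module loader drops the error check. A sketch of the corresponding caller-side change, paraphrased rather than quoted from kernel/module.c (the error label is hypothetical):

-        err = module_bug_finalize(hdr, sechdrs, mod);
-        if (err < 0)
-                goto bug_cleanup;
+        module_bug_finalize(hdr, sechdrs, mod);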
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc2533728..7bd6df781ce5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
         struct list_head link;
         char *mod_name;
@@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 }
 
 /*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
-        struct ddebug_table *dt;
-        char table_hash_value;
-
-        list_for_each_entry(dt, &ddebug_tables, link) {
-                if (first_table)
-                        table_hash_value = dt->ddebugs->primary_hash;
-                else
-                        table_hash_value = dt->ddebugs->secondary_hash;
-                if (dt->num_enabled && (hash == table_hash_value))
-                        return 0;
-        }
-        return 1;
-}
-
-/*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them. Tells
  * the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
                         dt->num_enabled++;
                 dp->flags = newflags;
                 if (newflags) {
-                        dynamic_debug_enabled |=
-                                (1LL << dp->primary_hash);
-                        dynamic_debug_enabled2 |=
-                                (1LL << dp->secondary_hash);
+                        jump_label_enable(&dp->enabled);
                 } else {
-                        if (disabled_hash(dp->primary_hash, true))
-                                dynamic_debug_enabled &=
-                                        ~(1LL << dp->primary_hash);
-                        if (disabled_hash(dp->secondary_hash, false))
-                                dynamic_debug_enabled2 &=
-                                        ~(1LL << dp->secondary_hash);
+                        jump_label_disable(&dp->enabled);
                 }
                 if (verbose)
                         printk(KERN_INFO
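
The two global hash bitmasks are replaced by a per-callsite jump label: jump_label_enable(&dp->enabled) activates exactly one descriptor instead of setting a hash bucket that may alias other call sites. As a simplified model of the call-site check (the real macro in <linux/dynamic_debug.h> uses jump-label branch patching, so a disabled site costs a no-op rather than this load-and-test):

#define sketch_dynamic_pr_debug(descriptor, fmt, ...)                  \
        do {                                                           \
                if (unlikely((descriptor)->enabled))                   \
                        printk(KERN_DEBUG fmt, ##__VA_ARGS__);         \
        } while (0)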
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb794c38b..a7616fa3162e 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
                  * element comparison is needed, so the client's cmp()
                  * routine can invoke cond_resched() periodically.
                  */
-                (*cmp)(priv, tail, tail);
+                (*cmp)(priv, tail->next, tail->next);
 
                 tail->next->prev = tail;
                 tail = tail->next;
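
The one-line fix matters because tail starts out pointing at the list head sentinel, which is not embedded in a client structure; passing it to cmp() invites a bogus container_of() in the callback, while tail->next is always a real element. A hypothetical client callback showing why the dummy call must receive valid elements:

#include <linux/list.h>
#include <linux/sched.h>

struct sketch_elem {
        struct list_head list;
        int key;
};

/* May be invoked with a == b purely to provide a scheduling point. */
static int sketch_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct sketch_elem *ea = container_of(a, struct sketch_elem, list);
        struct sketch_elem *eb = container_of(b, struct sketch_elem, list);

        cond_resched();         /* the dummy invocation lands here too */
        return ea->key - eb->key;
}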
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index efd16fa80b1c..6f412ab4c24f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
         unsigned int    height;         /* Height from the bottom */
         unsigned int    count;
         struct rcu_head rcu_head;
-        void            *slots[RADIX_TREE_MAP_SIZE];
+        void __rcu      *slots[RADIX_TREE_MAP_SIZE];
         unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
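
This pairs with the SPARSE_RCU_POINTER option above: with slots[] annotated __rcu, a sparse build can verify that lockless lookups reach the slots only through the RCU accessors. Schematically (node and offset stand in for the real lookup state in lib/radix-tree.c):

static void *sketch_read_slot(struct radix_tree_node *node,
                              unsigned int offset)
{
        return rcu_dereference(node->slots[offset]);  /* checked access */
}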
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34e3082632d8..7c06ee51a29a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
          * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
          * between io_tlb_start and io_tlb_end.
          */
-        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+        io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
         for (i = 0; i < io_tlb_nslabs; i++)
                 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
         io_tlb_index = 0;
-        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+        io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
         /*
          * Get the overflow emergency buffer
          */
-        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+        io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
         if (!io_tlb_overflow_buffer)
                 panic("Cannot allocate SWIOTLB overflow buffer!\n");
         if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
         /*
          * Get IO TLB memory from the low pages
          */
-        io_tlb_start = alloc_bootmem_low_pages(bytes);
+        io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
         if (!io_tlb_start)
                 panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
                           get_order(io_tlb_nslabs << IO_TLB_SHIFT));
         } else {
                 free_bootmem_late(__pa(io_tlb_overflow_buffer),
-                                  io_tlb_overflow);
+                                  PAGE_ALIGN(io_tlb_overflow));
                 free_bootmem_late(__pa(io_tlb_orig_addr),
-                                  io_tlb_nslabs * sizeof(phys_addr_t));
+                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
                 free_bootmem_late(__pa(io_tlb_list),
-                                  io_tlb_nslabs * sizeof(int));
+                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
                 free_bootmem_late(__pa(io_tlb_start),
-                                  io_tlb_nslabs << IO_TLB_SHIFT);
+                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
         }
 }
 
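
All four swiotlb hunks apply one rule: sizes passed to the page-based bootmem allocators and later to free_bootmem_late() are rounded with PAGE_ALIGN(), so allocation and free account for the same page-granular amount. A stand-alone model of the arithmetic, with sketch-local names replacing the kernel's <linux/mm.h> macro and assuming 4 KiB pages:

#include <stdio.h>

#define SKETCH_PAGE_SIZE        4096UL
#define SKETCH_PAGE_ALIGN(x) \
        (((x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
        /* io_tlb_overflow = 32*1024 is already page-aligned: */
        printf("%lu\n", SKETCH_PAGE_ALIGN(32UL * 1024));        /* 32768 */

        /* odd byte counts round up to the next page boundary: */
        printf("%lu\n", SKETCH_PAGE_ALIGN(5000UL));             /* 8192 */
        return 0;
}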