aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug41
-rw-r--r--lib/radix-tree.c2
-rw-r--r--lib/swiotlb.c18
3 files changed, 51 insertions, 10 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e85d549b6eac..21ac83070a80 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -540,6 +540,23 @@ config PROVE_RCU_REPEATEDLY
540 disabling, allowing multiple RCU-lockdep warnings to be printed 540 disabling, allowing multiple RCU-lockdep warnings to be printed
541 on a single reboot. 541 on a single reboot.
542 542
543 Say Y to allow multiple RCU-lockdep warnings per boot.
544
545 Say N if you are unsure.
546
547config SPARSE_RCU_POINTER
548 bool "RCU debugging: sparse-based checks for pointer usage"
549 default n
550 help
551 This feature enables the __rcu sparse annotation for
552 RCU-protected pointers. This annotation will cause sparse
553 to flag any non-RCU use of annotated pointers. This can be
554 helpful when debugging RCU usage. Please note that this feature
555 is not intended to enforce code cleanliness; it is instead merely
556 a debugging aid.
557
558 Say Y to make sparse flag questionable use of RCU-protected pointers
559
543 Say N if you are unsure. 560 Say N if you are unsure.
544 561
545config LOCKDEP 562config LOCKDEP
@@ -832,6 +849,30 @@ config RCU_CPU_STALL_DETECTOR
832 849
833 Say Y if you are unsure. 850 Say Y if you are unsure.
834 851
852config RCU_CPU_STALL_TIMEOUT
853 int "RCU CPU stall timeout in seconds"
854 depends on RCU_CPU_STALL_DETECTOR
855 range 3 300
856 default 60
857 help
858 If a given RCU grace period extends more than the specified
859 number of seconds, a CPU stall warning is printed. If the
860 RCU grace period persists, additional CPU stall warnings are
861 printed at more widely spaced intervals.
862
863config RCU_CPU_STALL_DETECTOR_RUNNABLE
864 bool "RCU CPU stall checking starts automatically at boot"
865 depends on RCU_CPU_STALL_DETECTOR
866 default y
867 help
868 If set, start checking for RCU CPU stalls immediately on
869 boot. Otherwise, RCU CPU stall checking must be manually
870 enabled.
871
872 Say Y if you are unsure.
873
874 Say N if you wish to suppress RCU CPU stall checking during boot.
875
835config RCU_CPU_STALL_VERBOSE 876config RCU_CPU_STALL_VERBOSE
836 bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" 877 bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
837 depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU 878 depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index efd16fa80b1c..6f412ab4c24f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
49 unsigned int height; /* Height from the bottom */ 49 unsigned int height; /* Height from the bottom */
50 unsigned int count; 50 unsigned int count;
51 struct rcu_head rcu_head; 51 struct rcu_head rcu_head;
52 void *slots[RADIX_TREE_MAP_SIZE]; 52 void __rcu *slots[RADIX_TREE_MAP_SIZE];
53 unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; 53 unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
54}; 54};
55 55
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34e3082632d8..7c06ee51a29a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
70 */ 70 */
71static unsigned long io_tlb_overflow = 32*1024; 71static unsigned long io_tlb_overflow = 32*1024;
72 72
73void *io_tlb_overflow_buffer; 73static void *io_tlb_overflow_buffer;
74 74
75/* 75/*
76 * This is a free list describing the number of free entries available from 76 * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
147 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE 147 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
148 * between io_tlb_start and io_tlb_end. 148 * between io_tlb_start and io_tlb_end.
149 */ 149 */
150 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); 150 io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
151 for (i = 0; i < io_tlb_nslabs; i++) 151 for (i = 0; i < io_tlb_nslabs; i++)
152 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 152 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
153 io_tlb_index = 0; 153 io_tlb_index = 0;
154 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t)); 154 io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
155 155
156 /* 156 /*
157 * Get the overflow emergency buffer 157 * Get the overflow emergency buffer
158 */ 158 */
159 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 159 io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
160 if (!io_tlb_overflow_buffer) 160 if (!io_tlb_overflow_buffer)
161 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 161 panic("Cannot allocate SWIOTLB overflow buffer!\n");
162 if (verbose) 162 if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
182 /* 182 /*
183 * Get IO TLB memory from the low pages 183 * Get IO TLB memory from the low pages
184 */ 184 */
185 io_tlb_start = alloc_bootmem_low_pages(bytes); 185 io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
186 if (!io_tlb_start) 186 if (!io_tlb_start)
187 panic("Cannot allocate SWIOTLB buffer"); 187 panic("Cannot allocate SWIOTLB buffer");
188 188
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
308 get_order(io_tlb_nslabs << IO_TLB_SHIFT)); 308 get_order(io_tlb_nslabs << IO_TLB_SHIFT));
309 } else { 309 } else {
310 free_bootmem_late(__pa(io_tlb_overflow_buffer), 310 free_bootmem_late(__pa(io_tlb_overflow_buffer),
311 io_tlb_overflow); 311 PAGE_ALIGN(io_tlb_overflow));
312 free_bootmem_late(__pa(io_tlb_orig_addr), 312 free_bootmem_late(__pa(io_tlb_orig_addr),
313 io_tlb_nslabs * sizeof(phys_addr_t)); 313 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
314 free_bootmem_late(__pa(io_tlb_list), 314 free_bootmem_late(__pa(io_tlb_list),
315 io_tlb_nslabs * sizeof(int)); 315 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
316 free_bootmem_late(__pa(io_tlb_start), 316 free_bootmem_late(__pa(io_tlb_start),
317 io_tlb_nslabs << IO_TLB_SHIFT); 317 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
318 } 318 }
319} 319}
320 320