Diffstat (limited to 'lib')
 lib/Kconfig.debug |  4
 lib/dma-debug.c   |  6
 lib/kernel_lock.c | 20
 lib/radix-tree.c  |  5
 lib/ratelimit.c   | 45
 lib/string.c      | 20
 lib/swiotlb.c     | 46
 7 files changed, 101 insertions(+), 45 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 30df5865ecbe..a79c4d0407ab 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -392,7 +392,7 @@ config DEBUG_KMEMLEAK_TEST
 
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
-	depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
+	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
 	default y
 	help
 	  If you say Y here then the kernel will use a debug variant of the
@@ -750,7 +750,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 config RCU_CPU_STALL_DETECTOR
 	bool "Check for stalled CPUs delaying RCU grace periods"
 	depends on TREE_RCU || TREE_PREEMPT_RCU
-	default n
+	default y
 	help
 	  This option causes RCU to printk information on which
 	  CPUs are delaying the current grace period, but only when
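Note: with the second hunk applied, the stall detector is now on by default, so a kernel built from the default configuration on a TREE_RCU system ends up with the following fragment in its .config (a sketch, assuming nothing overrides the default):

	CONFIG_TREE_RCU=y
	CONFIG_RCU_CPU_STALL_DETECTOR=y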
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 58a9f9fc609a..ce6b7eabf674 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -819,9 +819,11 @@ static void check_unmap(struct dma_debug_entry *ref)
 		err_printk(ref->dev, entry, "DMA-API: device driver frees "
 			   "DMA memory with different CPU address "
 			   "[device address=0x%016llx] [size=%llu bytes] "
-			   "[cpu alloc address=%p] [cpu free address=%p]",
+			   "[cpu alloc address=0x%016llx] "
+			   "[cpu free address=0x%016llx]",
 			   ref->dev_addr, ref->size,
-			   (void *)entry->paddr, (void *)ref->paddr);
+			   (unsigned long long)entry->paddr,
+			   (unsigned long long)ref->paddr);
 	}
 
 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
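Note: casting to unsigned long long before printing with %llx is the usual portable way to print a phys_addr_t, which is 32 bits wide on some configurations and 64 bits on others. A minimal sketch of the same pattern outside dma-debug (buf is a hypothetical, already-mapped kernel buffer):

	phys_addr_t paddr = virt_to_phys(buf);

	/* The cast keeps the argument width in sync with %llx on every arch. */
	printk(KERN_DEBUG "buffer lives at [cpu address=0x%016llx]\n",
	       (unsigned long long)paddr);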
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..4ebfa5a164d7 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
  * relegated to obsolescence, but used by various less
  * important (or lazy) subsystems.
  */
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/semaphore.h>
+#include <linux/smp_lock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bkl.h>
 
 /*
  * The 'big kernel lock'
@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void)
  * This cannot happen asynchronously, so we only need to
  * worry about other CPU's.
  */
-void __lockfunc lock_kernel(void)
+void __lockfunc _lock_kernel(const char *func, const char *file, int line)
 {
-	int depth = current->lock_depth+1;
+	int depth = current->lock_depth + 1;
+
+	trace_lock_kernel(func, file, line);
+
 	if (likely(!depth))
 		__lock_kernel();
 	current->lock_depth = depth;
 }
 
-void __lockfunc unlock_kernel(void)
+void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
 {
 	BUG_ON(current->lock_depth < 0);
 	if (likely(--current->lock_depth < 0))
 		__unlock_kernel();
+
+	trace_unlock_kernel(func, file, line);
 }
 
-EXPORT_SYMBOL(lock_kernel);
-EXPORT_SYMBOL(unlock_kernel);
+EXPORT_SYMBOL(_lock_kernel);
+EXPORT_SYMBOL(_unlock_kernel);
 
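Note: lock_kernel()/unlock_kernel() callers are not touched here, so the renamed functions presumably get their func/file/line arguments from wrapper macros in include/linux/smp_lock.h. A sketch of what those wrappers would look like (assumed, not shown in this diff):

	#define lock_kernel()	_lock_kernel(__func__, __FILE__, __LINE__)
	#define unlock_kernel()	_unlock_kernel(__func__, __FILE__, __LINE__)

With such wrappers in place, every BKL acquisition and release fires the new trace_lock_kernel()/trace_unlock_kernel() tracepoints with the call site attached.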
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 23abbd93cae1..92cdd9936e3d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node)
  * ensure that the addition of a single element in the tree cannot fail. On
  * success, return zero, with preemption disabled. On error, return -ENOMEM
  * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_WAIT being passed to INIT_RADIX_TREE().
  */
 int radix_tree_preload(gfp_t gfp_mask)
 {
@@ -543,7 +546,6 @@ out:
 }
 EXPORT_SYMBOL(radix_tree_tag_clear);
 
-#ifndef __KERNEL__	/* Only the test harness uses this at present */
 /**
  * radix_tree_tag_get - get a tag on a radix tree node
  * @root:	radix tree root
@@ -606,7 +608,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 	}
 }
 EXPORT_SYMBOL(radix_tree_tag_get);
-#endif
 
 /**
  * radix_tree_next_hole - find the next hole (not-present entry)
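Note: the added comment documents the existing contract: radix_tree_preload() fills a per-CPU pool of nodes and returns with preemption disabled, so the subsequent insertion must not use a sleeping gfp mask. A sketch of the intended call pattern (my_tree, my_lock and insert_item are illustrative names):

	static RADIX_TREE(my_tree, GFP_ATOMIC);	/* no __GFP_WAIT in the tree's mask */
	static DEFINE_SPINLOCK(my_lock);

	static int insert_item(unsigned long index, void *item)
	{
		int err;

		/* Allocate nodes while sleeping is still allowed... */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			return err;

		/* ...so the insertion itself cannot fail with -ENOMEM. */
		spin_lock(&my_lock);
		err = radix_tree_insert(&my_tree, index, item);
		spin_unlock(&my_lock);

		radix_tree_preload_end();
		return err;
	}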
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 26187edcc7ea..09f5ce1810dc 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -7,15 +7,12 @@
  * parameter. Now every user can use their own standalone ratelimit_state.
  *
  * This file is released under the GPLv2.
- *
  */
 
-#include <linux/kernel.h>
+#include <linux/ratelimit.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
 
-static DEFINE_SPINLOCK(ratelimit_lock);
-
 /*
  * __ratelimit - rate limiting
  * @rs: ratelimit_state data
@@ -23,35 +20,43 @@ static DEFINE_SPINLOCK(ratelimit_lock);
  * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
  * in every @rs->ratelimit_jiffies
  */
-int __ratelimit(struct ratelimit_state *rs)
+int ___ratelimit(struct ratelimit_state *rs, const char *func)
 {
 	unsigned long flags;
+	int ret;
 
 	if (!rs->interval)
 		return 1;
 
-	spin_lock_irqsave(&ratelimit_lock, flags);
+	/*
+	 * If we contend on this state's lock then almost
+	 * by definition we are too busy to print a message,
+	 * in addition to the one that will be printed by
+	 * the entity that is holding the lock already:
+	 */
+	if (!spin_trylock_irqsave(&rs->lock, flags))
+		return 1;
+
 	if (!rs->begin)
 		rs->begin = jiffies;
 
 	if (time_is_before_jiffies(rs->begin + rs->interval)) {
 		if (rs->missed)
 			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
-				__func__, rs->missed);
+				func, rs->missed);
 		rs->begin = 0;
 		rs->printed = 0;
 		rs->missed = 0;
 	}
-	if (rs->burst && rs->burst > rs->printed)
-		goto print;
-
-	rs->missed++;
-	spin_unlock_irqrestore(&ratelimit_lock, flags);
-	return 0;
+	if (rs->burst && rs->burst > rs->printed) {
+		rs->printed++;
+		ret = 1;
+	} else {
+		rs->missed++;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&rs->lock, flags);
 
-print:
-	rs->printed++;
-	spin_unlock_irqrestore(&ratelimit_lock, flags);
-	return 1;
+	return ret;
 }
-EXPORT_SYMBOL(__ratelimit);
+EXPORT_SYMBOL(___ratelimit);
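Note: the extra underscore and the func argument suggest that the old __ratelimit() entry point survives as a macro in include/linux/ratelimit.h, and that ratelimit_state now carries the per-state spinlock used by the trylock above. A sketch of the assumed header-side counterpart plus a typical caller (my_rs is an illustrative name):

	/* assumed wrapper, not part of this diff */
	#define __ratelimit(state)	___ratelimit(state, __func__)

	/* caller side: at most 10 messages every 5*HZ jiffies */
	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

	if (__ratelimit(&my_rs))
		printk(KERN_WARNING "something noisy happened\n");

The per-state lock replaces the old global ratelimit_lock, so unrelated ratelimit users no longer contend with each other.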
diff --git a/lib/string.c b/lib/string.c
index b19b87af65a3..e96421ab9a9a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -246,13 +246,17 @@ EXPORT_SYMBOL(strlcat);
 #undef strcmp
 int strcmp(const char *cs, const char *ct)
 {
-	signed char __res;
+	unsigned char c1, c2;
 
 	while (1) {
-		if ((__res = *cs - *ct++) != 0 || !*cs++)
+		c1 = *cs++;
+		c2 = *ct++;
+		if (c1 != c2)
+			return c1 < c2 ? -1 : 1;
+		if (!c1)
 			break;
 	}
-	return __res;
+	return 0;
 }
 EXPORT_SYMBOL(strcmp);
 #endif
@@ -266,14 +270,18 @@ EXPORT_SYMBOL(strcmp);
  */
 int strncmp(const char *cs, const char *ct, size_t count)
 {
-	signed char __res = 0;
+	unsigned char c1, c2;
 
 	while (count) {
-		if ((__res = *cs - *ct++) != 0 || !*cs++)
+		c1 = *cs++;
+		c2 = *ct++;
+		if (c1 != c2)
+			return c1 < c2 ? -1 : 1;
+		if (!c1)
 			break;
 		count--;
 	}
-	return __res;
+	return 0;
 }
 EXPORT_SYMBOL(strncmp);
 #endif
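Note: the old implementations stored the difference of two (possibly signed) chars in a signed char, which can report the wrong ordering; C requires the comparison to be done on unsigned char values. A worked example of the failure mode (assuming char is signed, as on x86):

	/*
	 * strcmp("a", "\xe4"), i.e. byte 0x61 vs byte 0xe4:
	 *
	 * Old code:  (char)0xe4 is -28, so __res = 97 - (-28) = 125 > 0 and
	 *            "a" is wrongly reported as the greater string.
	 * New code:  c1 = 0x61 (97), c2 = 0xe4 (228), c1 < c2, so -1 is
	 *            returned, as the standard requires; the result is also
	 *            bounded to -1/0/1 and can no longer overflow.
	 */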
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ac25cd28e807..795472d8ae24 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr;
  */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
+static int late_alloc;
+
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str)
 		++str;
 	if (!strcmp(str, "force"))
 		swiotlb_force = 1;
+
 	return 1;
 }
 __setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 	return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
-static void swiotlb_print_info(unsigned long bytes)
+void swiotlb_print_info(void)
 {
+	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	phys_addr_t pstart, pend;
 
 	pstart = virt_to_phys(io_tlb_start);
@@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes)
  * structures for the software IO TLB used to implement the DMA API.
  */
 void __init
-swiotlb_init_with_default_size(size_t default_size)
+swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
 	unsigned long i, bytes;
 
@@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size)
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
-
-	swiotlb_print_info(bytes);
+	if (verbose)
+		swiotlb_print_info();
 }
 
 void __init
-swiotlb_init(void)
+swiotlb_init(int verbose)
 {
-	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
+	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
 }
 
 /*
@@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		goto cleanup4;
 
-	swiotlb_print_info(bytes);
+	swiotlb_print_info();
+
+	late_alloc = 1;
 
 	return 0;
 
@@ -281,6 +287,32 @@ cleanup1:
 	return -ENOMEM;
 }
 
+void __init swiotlb_free(void)
+{
+	if (!io_tlb_overflow_buffer)
+		return;
+
+	if (late_alloc) {
+		free_pages((unsigned long)io_tlb_overflow_buffer,
+			   get_order(io_tlb_overflow));
+		free_pages((unsigned long)io_tlb_orig_addr,
+			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
+		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+								 sizeof(int)));
+		free_pages((unsigned long)io_tlb_start,
+			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+	} else {
+		free_bootmem_late(__pa(io_tlb_overflow_buffer),
+				  io_tlb_overflow);
+		free_bootmem_late(__pa(io_tlb_orig_addr),
+				  io_tlb_nslabs * sizeof(phys_addr_t));
+		free_bootmem_late(__pa(io_tlb_list),
+				  io_tlb_nslabs * sizeof(int));
+		free_bootmem_late(__pa(io_tlb_start),
+				  io_tlb_nslabs << IO_TLB_SHIFT);
+	}
+}
+
 static int is_swiotlb_buffer(phys_addr_t paddr)
 {
 	return paddr >= virt_to_phys(io_tlb_start) &&
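Note: swiotlb_init() now takes a verbose flag, swiotlb_print_info() is callable from outside this file, and the new swiotlb_free() returns either the late page allocations or the bootmem, depending on late_alloc. A sketch of how an architecture might drive the reworked interface (arch_swiotlb_setup() and hw_iommu_found are hypothetical, not part of this diff):

	#include <linux/init.h>
	#include <linux/swiotlb.h>

	void __init arch_swiotlb_setup(int hw_iommu_found)
	{
		/* Set up the bounce buffers early, but stay quiet for now. */
		swiotlb_init(0);

		if (hw_iommu_found) {
			/* A real IOMMU took over: hand the memory back. */
			swiotlb_free();
		} else {
			/* Keep swiotlb and announce it once it is known to be used. */
			swiotlb_print_info();
		}
	}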
