Diffstat (limited to 'lib')

-rw-r--r--  lib/Makefile         |  2
-rw-r--r--  lib/bug.c            | 19
-rw-r--r--  lib/cpumask.c        | 79
-rw-r--r--  lib/dynamic_printk.c | 10
-rw-r--r--  lib/idr.c            | 22
-rw-r--r--  lib/percpu_counter.c |  7
-rw-r--r--  lib/scatterlist.c    |  2
-rw-r--r--  lib/swiotlb.c        | 16

8 files changed, 136 insertions(+), 21 deletions(-)
diff --git a/lib/Makefile b/lib/Makefile
index 16feaab057b2..7cb65d85aeb0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for some libs needed in the kernel.
 #
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 endif
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -5,6 +5,8 @@
 
 CONFIG_BUG - emit BUG traps. Nothing happens without this.
 CONFIG_GENERIC_BUG - enable this code.
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
+	the containing struct bug_entry for bug_addr and file.
 CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
 
 CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -43,6 +45,15 @@
 
 extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
 
+static inline unsigned long bug_addr(const struct bug_entry *bug)
+{
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+	return bug->bug_addr;
+#else
+	return (unsigned long)bug + bug->bug_addr_disp;
+#endif
+}
+
 #ifdef CONFIG_MODULES
 static LIST_HEAD(module_bug_list);
 
@@ -55,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 		unsigned i;
 
 		for (i = 0; i < mod->num_bugs; ++i, ++bug)
-			if (bugaddr == bug->bug_addr)
+			if (bugaddr == bug_addr(bug))
 				return bug;
 	}
 	return NULL;
@@ -108,7 +119,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 	const struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
-		if (bugaddr == bug->bug_addr)
+		if (bugaddr == bug_addr(bug))
 			return bug;
 
 	return module_find_bug(bugaddr);
@@ -133,7 +144,11 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 
 	if (bug) {
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 		file = bug->file;
+#else
+		file = (const char *)bug + bug->file_disp;
+#endif
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
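Note: the new bug_addr() accessor exists so this file no longer cares whether a bug_entry records absolute addresses or 32-bit self-relative displacements. A minimal sketch of the two layouts this diff implies follows; the field names come from the patch, but the exact ordering is assumed, and the authoritative definition lives in include/asm-generic/bug.h:

struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	unsigned long	bug_addr;	/* absolute address of the BUG site */
#else
	signed int	bug_addr_disp;	/* offset from &entry to the BUG site */
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	const char	*file;
#else
	signed int	file_disp;	/* offset from &entry to the file name */
#endif
	unsigned short	line;
#endif
	unsigned short	flags;
};

Storing 32-bit displacements instead of 64-bit pointers roughly halves the bug table on 64-bit kernels, which is the point of CONFIG_GENERIC_BUG_RELATIVE_POINTERS.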
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5f97dc25ef9c..8d03f22c6ced 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -2,6 +2,7 @@
 #include <linux/bitops.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/bootmem.h>
 
 int __first_cpu(const cpumask_t *srcp)
 {
@@ -35,3 +36,81 @@ int __any_online_cpu(const cpumask_t *mask)
 	return cpu;
 }
 EXPORT_SYMBOL(__any_online_cpu);
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in both.
+ */
+int cpumask_next_and(int n, const struct cpumask *src1p,
+		     const struct cpumask *src2p)
+{
+	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
+		if (cpumask_test_cpu(n, src2p))
+			break;
+	return n;
+}
+EXPORT_SYMBOL(cpumask_next_and);
+
+/**
+ * cpumask_any_but - return a "random" in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+	unsigned int i;
+
+	cpumask_check(cpu);
+	for_each_cpu(i, mask)
+		if (i != cpu)
+			break;
+	return i;
+}
+
+/* These are not inline because of header tangles. */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	if (likely(slab_is_available()))
+		*mask = kmalloc(cpumask_size(), flags);
+	else {
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+		printk(KERN_ERR
+			"=> alloc_cpumask_var: kmalloc not available!\n");
+		dump_stack();
+#endif
+		*mask = NULL;
+	}
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (!*mask) {
+		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
+		dump_stack();
+	}
+#endif
+	return *mask != NULL;
+}
+EXPORT_SYMBOL(alloc_cpumask_var);
+
+void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+	*mask = alloc_bootmem(cpumask_size());
+}
+
+void free_cpumask_var(cpumask_var_t mask)
+{
+	kfree(mask);
+}
+EXPORT_SYMBOL(free_cpumask_var);
+
+void __init free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+	free_bootmem((unsigned long)mask, cpumask_size());
+}
+#endif
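Note: a hypothetical caller of the new helpers, to show the intended pattern. With CONFIG_CPUMASK_OFFSTACK=y the mask is kmalloc'ed; without it, alloc_cpumask_var() is expected to reduce to an on-stack mask that always "succeeds". demo_pick_cpu() and its input mask are illustrative, not part of this patch:

/* Find any cpu in @mask other than the calling cpu. */
static int demo_pick_cpu(const struct cpumask *mask)
{
	cpumask_var_t tmp;
	int cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;		/* only possible with OFFSTACK */

	cpumask_copy(tmp, mask);
	cpu = cpumask_any_but(tmp, smp_processor_id());

	free_cpumask_var(tmp);
	return cpu < nr_cpu_ids ? cpu : -ENODEV;
}

Both cpumask_next_and() and cpumask_any_but() use nr_cpu_ids as their "nothing found" sentinel, so callers must compare against that rather than NR_CPUS.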
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index d640f87bdc9e..8e30295e8566 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -135,7 +135,7 @@ int unregister_dynamic_debug_module(char *mod_name)
 	nr_entries--;
 out:
 	up(&debug_list_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
 
@@ -289,7 +289,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 			dynamic_enabled = DYNAMIC_ENABLED_SOME;
 			err = 0;
 			printk(KERN_DEBUG
-				"debugging enabled for module %s",
+				"debugging enabled for module %s\n",
 				elem->name);
 		} else if (!value && (elem->enable == 1)) {
 			elem->enable = 0;
@@ -309,7 +309,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 				err = 0;
 				printk(KERN_DEBUG
 					"debugging disabled for module "
-					"%s", elem->name);
+					"%s\n", elem->name);
 			}
 		}
 	}
@@ -402,6 +402,8 @@ static int __init dynamic_printk_init(void)
 				iter->logical_modname,
 				iter->flag_names, iter->hash, iter->hash2);
 	}
+	if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
+		set_all(true);
 	return 0;
 }
 module_init(dynamic_printk_init);
@@ -411,7 +413,7 @@ static int __init dynamic_printk_setup(char *str)
 {
 	if (str)
 		return -ENOENT;
-	set_all(true);
+	dynamic_enabled = DYNAMIC_ENABLED_ALL;
 	return 0;
 }
 /* Use early_param(), so we can get debug output as early as possible */
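Note: the last two hunks are one logical fix. set_all(true) walks the registered debug entries, but the early_param handler runs long before dynamic_printk_init() has registered any, so calling it there enabled nothing. The boot-time ordering after this change, as a sketch rather than verbatim kernel code:

/*
 * early_param parsing:  dynamic_printk_setup()
 *     -> only records dynamic_enabled = DYNAMIC_ENABLED_ALL
 * ...
 * module_init level:    dynamic_printk_init()
 *     -> registers the built-in debug entries, then applies
 *        set_all(true) if "enable everything" was requested
 */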
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -218,8 +220,14 @@ build_up:
 	 */
 	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
-		if (!p->count)
+		if (!p->count) {
+			/* special case: if the tree is currently empty,
+			 * then we grow the tree by moving the top node
+			 * upwards.
+			 */
+			p->layer++;
 			continue;
+		}
 		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed. If we built part of
@@ -237,6 +245,7 @@ build_up:
 	}
 	new->ary[0] = p;
 	new->count = 1;
+	new->layer = layers-1;
 	if (p->bitmap == IDR_FULL)
 		__set_bit(0, &new->bitmap);
 	p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
 
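Note: idp->layers is updated out of lock-step with RCU readers, so a lookup racing with tree growth could pair a new top node with a stale height (or vice versa). Storing the depth in each idr_layer makes the (node, height) pair travel together through a single rcu_dereference(). A hypothetical read-side lookup that benefits; my_idr, id, and use_object() are illustrative:

void *obj;

rcu_read_lock();
obj = idr_find(&my_idr, id);
if (obj)
	use_object(obj);	/* obj is only guaranteed valid inside
				 * the read-side critical section */
rcu_read_unlock();

The two BUG_ON()s added to idr_find() assert exactly this invariant: at every step of the walk, the remaining shift count must match the layer recorded in the node itself.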
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88c..b255b939bc1b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		*pcount = 0;
 	}
-	fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }
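Note: __percpu_counter_sum() used to fold the per-cpu deltas back into fbc->count and zero them, silently turning a "read" into a write that raced with concurrent updates. After this hunk it is a pure reader. A sketch of the caller's trade-off, with nr_things as a hypothetical counter:

s64 approx = percpu_counter_read(&nr_things);	/* O(1); can lag by the
						 * unflushed batch slack
						 * on each cpu */
s64 total  = percpu_counter_sum(&nr_things);	/* O(nr_cpus); accurate,
						 * and now side-effect free */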
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
-	free_percpu(fbc->counters);
-	fbc->counters = NULL;
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
 	mutex_unlock(&percpu_counters_lock);
 #endif
+	free_percpu(fbc->counters);
+	fbc->counters = NULL;
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
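Note: this hunk only reorders operations, but the order is the fix. The CPU-hotplug notifier walks the global counter list and touches each counter's per-cpu data; freeing before unlinking left a window for a use-after-free. A paraphrased sketch of the notifier side, not the exact kernel code:

mutex_lock(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
	s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
	/* use-after-free here if destroy() freed fbc->counters
	 * before taking the entry off the list */
}
mutex_unlock(&percpu_counters_lock);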
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
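Note: a one-line bug with a simple contract behind it. kmap() maps and kunmap() unmaps a struct page; only the atomic variant takes the mapped address. A minimal sketch of the pairing the fixed code now respects:

void *addr = kmap(page);	/* returns a kernel virtual address */
memcpy(addr, data, len);	/* ... use the mapping ... */
kunmap(page);			/* pass the page, not addr */

The old code compiled only because miter->addr is a void * and converts silently to the struct page * parameter; for a highmem page it unbalanced the kmap accounting.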
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f8eebd489149..5f6c629a924d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,12 +497,14 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
-		panic("swiotlb_alloc_coherent: allocated memory is out of "
-		      "range for device");
+
+		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		return NULL;
 	}
 	*dma_handle = dev_addr;
 	return ret;
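Note: two behavioural changes land here. The reachability check now tests the device's coherent_dma_mask (defaulting to DMA_32BIT_MASK) instead of the streaming dma_mask, and an unreachable bounce allocation is unwound and reported as failure rather than panicking the machine. Callers of the generic API were always expected to cope, as in this hypothetical driver fragment (dev and size are assumed locals):

void *buf;
dma_addr_t handle;

buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
if (!buf)
	return -ENOMEM;	/* swiotlb now fails cleanly instead of panicking */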