author     Ingo Molnar <mingo@elte.hu>   2008-12-18 15:54:49 -0500
committer  Ingo Molnar <mingo@elte.hu>   2008-12-18 15:54:49 -0500
commit     d110ec3a1e1f522e2e9dfceb9c36d6590c26d2d4 (patch)
tree       86b2f8f1d22b74b05239525c55bd42e3db6afc03 /lib
parent     343e9099c8152daff20e10d6269edec21da44fc0 (diff)
parent     55dac3a5553b13891f0ae4bbd11920619b5436d4 (diff)
Merge branch 'linus' into core/rcu
Diffstat (limited to 'lib')
-rw-r--r--  lib/cpumask.c        | 79
-rw-r--r--  lib/dynamic_printk.c |  6
-rw-r--r--  lib/idr.c            | 22
-rw-r--r--  lib/percpu_counter.c |  7
-rw-r--r--  lib/scatterlist.c    |  2
-rw-r--r--  lib/swiotlb.c        | 10

6 files changed, 111 insertions(+), 15 deletions(-)
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5f97dc25ef9c..8d03f22c6ced 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -2,6 +2,7 @@
 #include <linux/bitops.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/bootmem.h>
 
 int __first_cpu(const cpumask_t *srcp)
 {
@@ -35,3 +36,81 @@ int __any_online_cpu(const cpumask_t *mask)
 	return cpu;
 }
 EXPORT_SYMBOL(__any_online_cpu);
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in both.
+ */
+int cpumask_next_and(int n, const struct cpumask *src1p,
+		     const struct cpumask *src2p)
+{
+	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
+		if (cpumask_test_cpu(n, src2p))
+			break;
+	return n;
+}
+EXPORT_SYMBOL(cpumask_next_and);
+
+/**
+ * cpumask_any_but - return a "random" in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+	unsigned int i;
+
+	cpumask_check(cpu);
+	for_each_cpu(i, mask)
+		if (i != cpu)
+			break;
+	return i;
+}
+
+/* These are not inline because of header tangles. */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	if (likely(slab_is_available()))
+		*mask = kmalloc(cpumask_size(), flags);
+	else {
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+		printk(KERN_ERR
+			"=> alloc_cpumask_var: kmalloc not available!\n");
+		dump_stack();
+#endif
+		*mask = NULL;
+	}
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (!*mask) {
+		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
+		dump_stack();
+	}
+#endif
+	return *mask != NULL;
+}
+EXPORT_SYMBOL(alloc_cpumask_var);
+
+void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+	*mask = alloc_bootmem(cpumask_size());
+}
+
+void free_cpumask_var(cpumask_var_t mask)
+{
+	kfree(mask);
+}
+EXPORT_SYMBOL(free_cpumask_var);
+
+void __init free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+	free_bootmem((unsigned long)mask, cpumask_size());
+}
+#endif
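For context, a minimal caller sketch (hypothetical code, not part of this commit) showing how the cpumask_next_and() iterator added above is typically used to walk the CPUs set in two masks at once; it assumes <linux/cpumask.h> and <linux/kernel.h>:

/* Hypothetical caller: visit every CPU present in both masks. Starting
 * at -1 makes the first cpumask_next_and() call return the lowest such
 * CPU; the walk ends once the return value reaches nr_cpu_ids. */
static void example_walk_intersection(const struct cpumask *a,
				      const struct cpumask *b)
{
	int cpu;

	for (cpu = cpumask_next_and(-1, a, b);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, a, b))
		pr_debug("cpu %d is set in both masks\n", cpu);
}

With CONFIG_CPUMASK_OFFSTACK enabled, any temporary cpumask such a caller needs would come from alloc_cpumask_var()/free_cpumask_var() (or the bootmem variants early in boot) rather than living on the stack.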
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index d83660fd6fdd..8e30295e8566 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -135,7 +135,7 @@ int unregister_dynamic_debug_module(char *mod_name)
 	nr_entries--;
 out:
 	up(&debug_list_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
 
@@ -289,7 +289,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 				dynamic_enabled = DYNAMIC_ENABLED_SOME;
 				err = 0;
 				printk(KERN_DEBUG
-					"debugging enabled for module %s",
+					"debugging enabled for module %s\n",
 					elem->name);
 			} else if (!value && (elem->enable == 1)) {
 				elem->enable = 0;
@@ -309,7 +309,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 				err = 0;
 				printk(KERN_DEBUG
 					"debugging disabled for module "
-					"%s", elem->name);
+					"%s\n", elem->name);
 			}
 		}
 	}
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -218,8 +220,14 @@ build_up:
 	 */
 	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
-		if (!p->count)
+		if (!p->count) {
+			/* special case: if the tree is currently empty,
+			 * then we grow the tree by moving the top node
+			 * upwards.
+			 */
+			p->layer++;
 			continue;
+		}
 		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed. If we built part of
@@ -237,6 +245,7 @@ build_up:
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
 
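The lib/idr.c hunks above store the depth of each node in the new ->layer field, so idr_find() and idr_replace() derive the number of index bits from the top node itself instead of reading idp->layers separately. A simplified, lock- and RCU-free sketch of the resulting lookup walk (illustrative only, not the actual kernel function):

/* Simplified model of idr_find() after this change: the total number of
 * index bits comes from the top node's ->layer, and each level consumes
 * IDR_BITS bits of the id. */
static void *idr_find_sketch(struct idr_layer *top, int id)
{
	struct idr_layer *p = top;
	int n;

	if (!p)
		return NULL;			/* empty tree */
	n = (p->layer + 1) * IDR_BITS;		/* bits covered by the tree */
	if (id >= (1 << n))
		return NULL;			/* id larger than the tree */
	while (n > 0 && p) {
		n -= IDR_BITS;			/* descend one level */
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return p;
}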
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88c..b255b939bc1b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		*pcount = 0;
 	}
-	fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
-	free_percpu(fbc->counters);
-	fbc->counters = NULL;
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
 	mutex_unlock(&percpu_counters_lock);
 #endif
+	free_percpu(fbc->counters);
+	fbc->counters = NULL;
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
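After the first hunk, __percpu_counter_sum() only reads the per-cpu deltas; it no longer folds them into fbc->count or zeroes them. The second hunk makes percpu_counter_destroy() unlink the counter from the hotplug list before freeing its per-cpu storage. A minimal usage sketch of the surrounding API (hypothetical counter; assumes the standard percpu_counter interface from <linux/percpu_counter.h>):

/* Hypothetical usage of the percpu_counter API touched above. */
static struct percpu_counter example_nr_items;

static int example_percpu_counter_usage(void)
{
	int err = percpu_counter_init(&example_nr_items, 0);

	if (err)
		return err;
	percpu_counter_inc(&example_nr_items);
	/* percpu_counter_sum() takes fbc->lock and, after this patch,
	 * leaves the per-cpu deltas untouched while summing them. */
	pr_info("approx=%lld exact=%lld\n",
		(long long)percpu_counter_read(&example_nr_items),
		(long long)percpu_counter_sum(&example_nr_items));
	percpu_counter_destroy(&example_nr_items);
	return 0;
}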
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
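The one-line scatterlist fix matters because kmap()/kunmap() are keyed by the struct page, whereas the atomic variant used in the branch above takes the mapped address. A tiny illustrative pairing (hypothetical caller, assumes <linux/highmem.h>):

/* Hypothetical caller: kunmap() takes the page, not the virtual address
 * returned by kmap() (contrast with kunmap_atomic() above). */
static u8 example_read_first_byte(struct page *page)
{
	u8 *vaddr = kmap(page);
	u8 val = vaddr[0];

	kunmap(page);	/* pass the page, not vaddr */
	return val;
}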
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61b..5f6c629a924d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
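With these hunks, swiotlb_alloc_coherent() checks the allocation against the device's coherent_dma_mask (falling back to DMA_32BIT_MASK) using is_buffer_dma_capable() instead of address_needs_mapping(). A rough model of that reachability test (sketch only; see the real is_buffer_dma_capable() in <linux/dma-mapping.h> for the authoritative definition):

/* Sketch of the capability test: the buffer is considered DMA-capable
 * for this device only if it ends at or below the device's mask. */
static inline int example_buffer_dma_capable(u64 mask, dma_addr_t addr,
					     size_t size)
{
	return addr + size <= mask;
}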