Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig           7
-rw-r--r--  lib/Kconfig.debug    10
-rw-r--r--  lib/idr.c            22
-rw-r--r--  lib/percpu_counter.c  7
-rw-r--r--  lib/scatterlist.c     2
-rw-r--r--  lib/swiotlb.c        10
6 files changed, 46 insertions, 12 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 85cf7ea978aa..7823f8342abf 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -157,4 +157,11 @@ config CHECK_SIGNATURE
 config HAVE_LMB
 	boolean
 
+config CPUMASK_OFFSTACK
+	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+	help
+	  Use dynamic allocation for cpumask_var_t, instead of putting
+	  them on the stack. This is a bit more expensive, but avoids
+	  stack overflow.
+
 endmenu
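
For context, a minimal sketch of the cpumask_var_t API that CPUMASK_OFFSTACK affects. The function name example_use_cpumask is illustrative only; alloc_cpumask_var() and friends are the real API from <linux/cpumask.h> added in this same series. With CPUMASK_OFFSTACK=y the mask is allocated dynamically; with it off, cpumask_var_t is an ordinary on-stack array and the alloc/free calls compile to no-ops:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_use_cpumask(void)
{
	cpumask_var_t mask;

	/* Dynamic allocation when CPUMASK_OFFSTACK=y, no-op otherwise. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	/* ... work with mask ... */

	free_cpumask_var(mask);
	return 0;
}
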
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f239e443bc..1e3fd3e3436a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -545,6 +545,16 @@ config DEBUG_SG
 
 	  If unsure, say N.
 
+config DEBUG_NOTIFIERS
+	bool "Debug notifier call chains"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on sanity checking for notifier call chains.
+	  This is most useful for kernel developers to make sure that
+	  modules properly unregister themselves from notifier chains.
+	  This is a relatively cheap check but if you care about maximum
+	  performance, say N.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
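
A hedged sketch of the pattern DEBUG_NOTIFIERS sanity-checks: a module must unregister its notifier before its code can be unloaded. The example_* names are illustrative; register_reboot_notifier() is a real chain from <linux/reboot.h>:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_reboot_event(struct notifier_block *nb,
				unsigned long action, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_reboot_event,
};

static int __init example_init(void)
{
	return register_reboot_notifier(&example_nb);
}

static void __exit example_exit(void)
{
	/* Forgetting this unregister is exactly what DEBUG_NOTIFIERS
	 * helps catch: the chain would call into unloaded code. */
	unregister_reboot_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
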
diff --git a/lib/idr.c b/lib/idr.c
index e728c7fccc4d..1c4f9281f412 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 		new = get_from_free_list(idp);
 		if (!new)
 			return -1;
+		new->layer = l-1;
 		rcu_assign_pointer(p->ary[m], new);
 		p->count++;
 	}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -218,8 +220,14 @@ build_up:
 	 */
 	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
-		if (!p->count)
+		if (!p->count) {
+			/* special case: if the tree is currently empty,
+			 * then we grow the tree by moving the top node
+			 * upwards.
+			 */
+			p->layer++;
 			continue;
+		}
 		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed. If we built part of
@@ -237,6 +245,7 @@ build_up:
 	}
 	new->ary[0] = p;
 	new->count = 1;
+	new->layer = layers-1;
 	if (p->bitmap == IDR_FULL)
 		__set_bit(0, &new->bitmap);
 	p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
 
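
The point of the new per-node layer field: under RCU, idr_find() could observe an idp->top and an idp->layers that were out of sync and walk the tree with the wrong shift. Deriving the depth from the node itself keeps the two consistent, because p->layer travels with the pointer the reader actually dereferenced. An illustrative helper (not in the patch) for the invariant a node obeys:

/* A node at p->layer indexes id bits
 * [p->layer * IDR_BITS, (p->layer + 1) * IDR_BITS), so the top node
 * alone bounds the largest id the tree can currently hold.  (Ignores
 * integer overflow at the maximum depth; this is a sketch only.)
 */
static inline int idr_max_id(struct idr_layer *top)
{
	return (1 << ((top->layer + 1) * IDR_BITS)) - 1;
}
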
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88c..b255b939bc1b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		*pcount = 0;
 	}
-	fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
-	free_percpu(fbc->counters);
-	fbc->counters = NULL;
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
 	mutex_unlock(&percpu_counters_lock);
 #endif
+	free_percpu(fbc->counters);
+	fbc->counters = NULL;
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
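
Two separate fixes above. First, __percpu_counter_sum() becomes a pure read: it used to zero each per-cpu slot and fold the total into fbc->count, but percpu_counter_add() updates its slot without taking fbc->lock, so that zeroing raced with concurrent adds and could lose counts. Second, percpu_counter_destroy() now unlinks the counter from the hotplug list before freeing, closing the window where the CPU-hotplug callback could walk the list into freed memory. A minimal usage sketch (illustrative only; percpu_counter_init() returning int matches this era's API):

#include <linux/errno.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_counter;

static int example(void)
{
	s64 total;

	if (percpu_counter_init(&example_counter, 0))
		return -ENOMEM;

	percpu_counter_add(&example_counter, 42);

	/* Exact sum across CPUs; after this patch it writes nothing. */
	total = percpu_counter_sum(&example_counter);

	percpu_counter_destroy(&example_counter);
	return total == 42 ? 0 : -EINVAL;
}
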
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
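
The one-liner above fixes a mismatched kmap pairing: kunmap() takes the struct page, while kunmap_atomic() takes the mapped virtual address. A sketch of the rule, using the standard <linux/highmem.h> API:

#include <linux/highmem.h>
#include <linux/string.h>

static void example_zero_page(struct page *page)
{
	void *addr = kmap(page);	/* may sleep */

	memset(addr, 0, PAGE_SIZE);
	kunmap(page);			/* pass the page, not addr */
}
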
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61b..5f6c629a924d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
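
The swiotlb change makes coherent allocations honor hwdev->coherent_dma_mask rather than the streaming *hwdev->dma_mask, falling back to DMA_32BIT_MASK (0xffffffffULL) when no coherent mask is set. For reference, is_buffer_dma_capable() comes from <linux/dma-mapping.h> in the same series; to the best of my reading it reduces the old per-device address_needs_mapping() test to a pure mask check, roughly:

static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr,
					size_t size)
{
	/* Capable iff the whole buffer fits under the device's mask. */
	return addr + size <= mask;
}
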