Diffstat (limited to 'lib')

 lib/Kconfig.debug | 10 ++++++++++
 lib/scatterlist.c |  2 +-
 lib/swiotlb.c     | 10 +++++++---
 3 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f239e443bc..1e3fd3e3436a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -545,6 +545,16 @@ config DEBUG_SG
 
 	  If unsure, say N.
 
+config DEBUG_NOTIFIERS
+	bool "Debug notifier call chains"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on sanity checking for notifier call chains.
+	  This is most useful for kernel developers to make sure that
+	  modules properly unregister themselves from notifier chains.
+	  This is a relatively cheap check but if you care about maximum
+	  performance, say N.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
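Note on the DEBUG_NOTIFIERS hunk above: the bug class it targets is a module unloading without unregistering its notifier_block, leaving the chain pointing into freed module text. A minimal sketch of the register/unregister pairing the help text describes; the chain, callback, and module names here are hypothetical, not part of this patch:

	#include <linux/module.h>
	#include <linux/notifier.h>

	static BLOCKING_NOTIFIER_HEAD(my_chain);	/* hypothetical chain */

	static int my_event(struct notifier_block *nb, unsigned long action,
			    void *data)
	{
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_event,
	};

	static int __init my_init(void)
	{
		return blocking_notifier_chain_register(&my_chain, &my_nb);
	}

	static void __exit my_exit(void)
	{
		/*
		 * Without this unregister, a later notifier call would jump
		 * into unloaded module text; DEBUG_NOTIFIERS sanity-checks
		 * callbacks to catch exactly that.
		 */
		blocking_notifier_chain_unregister(&my_chain, &my_nb);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");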
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 	miter->page = NULL;
 	miter->addr = NULL;
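Note on the scatterlist fix above: kunmap() takes the struct page that was passed to kmap(), whereas kunmap_atomic() takes the mapped virtual address, so passing miter->addr in the non-atomic branch was wrong. A minimal sketch of the correct pairing, with a hypothetical helper:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy 'len' bytes out of a (possibly
	 * highmem) page, e.g. one taken from a scatterlist entry. */
	static void copy_from_page(struct page *page, void *dst, size_t len)
	{
		void *addr = kmap(page);	/* map page, get kernel VA */

		memcpy(dst, addr, len);
		kunmap(page);			/* unmap takes the page, not addr */
	}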
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61b..5f6c629a924d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
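Note on the swiotlb change: address_needs_mapping() tested against the device's streaming dma_mask, but coherent allocations must honor coherent_dma_mask, so the patch computes dma_mask up front (defaulting to DMA_32BIT_MASK when no device is given) and feeds it to is_buffer_dma_capable(). From memory, that helper is a pure range check along these lines; treat this as a sketch, not the authoritative definition:

	#include <linux/types.h>

	/* Sketch: a buffer is DMA-capable if its last byte lies at or
	 * below the highest address the mask can express. */
	static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr,
						size_t size)
	{
		return addr + size <= mask + 1;
	}

With the default DMA_32BIT_MASK (0xffffffff), a buffer ending exactly at the 4 GiB boundary still passes, and anything beyond it takes the swiotlb bounce-buffer fallback shown above.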