diff options
| author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2008-11-17 02:24:34 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-11-17 03:12:02 -0500 |
| commit | 1e74f3000b86969de421ca0da08f42e7d21cbd99 (patch) | |
| tree | 45dd090997526dbe31f0a2ac781195580772067a /lib | |
| parent | e47411b1f4456480d6c60ebdc7a733e81ccd5d66 (diff) | |
swiotlb: use coherent_dma_mask in alloc_coherent
Impact: fix DMA buffer allocation coherency bug in certain configs
This patch fixes swiotlb to use dev->coherent_dma_mask in
swiotlb_alloc_coherent().
coherent_dma_mask is a subset of dma_mask (equal to it most of
the time), enumerating the address range that a given device
is able to DMA to/from in a cache-coherent way.
Currently, however, swiotlb uses dev->dma_mask in alloc_coherent()
implicitly via address_needs_mapping(), even though alloc_coherent is
really supposed to use coherent_dma_mask.
This bug could break drivers that use a smaller coherent_dma_mask than
dma_mask (though the current code works for the majority, which use the
same mask for coherent_dma_mask and dma_mask).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: tony.luck@intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/swiotlb.c | 10 |
1 files changed, 7 insertions, 3 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 78330c37a61b..5f6c629a924d 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 467 | dma_addr_t dev_addr; | 467 | dma_addr_t dev_addr; |
| 468 | void *ret; | 468 | void *ret; |
| 469 | int order = get_order(size); | 469 | int order = get_order(size); |
| 470 | u64 dma_mask = DMA_32BIT_MASK; | ||
| 471 | |||
| 472 | if (hwdev && hwdev->coherent_dma_mask) | ||
| 473 | dma_mask = hwdev->coherent_dma_mask; | ||
| 470 | 474 | ||
| 471 | ret = (void *)__get_free_pages(flags, order); | 475 | ret = (void *)__get_free_pages(flags, order); |
| 472 | if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) { | 476 | if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) { |
| 473 | /* | 477 | /* |
| 474 | * The allocated memory isn't reachable by the device. | 478 | * The allocated memory isn't reachable by the device. |
| 475 | * Fall back on swiotlb_map_single(). | 479 | * Fall back on swiotlb_map_single(). |
| @@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 493 | dev_addr = virt_to_bus(ret); | 497 | dev_addr = virt_to_bus(ret); |
| 494 | 498 | ||
| 495 | /* Confirm address can be DMA'd by device */ | 499 | /* Confirm address can be DMA'd by device */ |
| 496 | if (address_needs_mapping(hwdev, dev_addr, size)) { | 500 | if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { |
| 497 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", | 501 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", |
| 498 | (unsigned long long)*hwdev->dma_mask, | 502 | (unsigned long long)dma_mask, |
| 499 | (unsigned long long)dev_addr); | 503 | (unsigned long long)dev_addr); |
| 500 | 504 | ||
| 501 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 505 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
