author     Christoph Hellwig <hch@lst.de>    2018-09-20 08:04:08 -0400
committer  Christoph Hellwig <hch@lst.de>    2018-10-01 10:28:03 -0400
commit     b4ebe6063204da58e48600b810a97c29ae9e5d12
tree       5bf1cf53eeaa87f1bf0d89e8f55cf27d720599b9 /kernel
parent     7d21ee4c719f00896767ce19c4c01a56374c2ced
dma-direct: implement complete bus_dma_mask handling
Instead of rejecting devices with a too small bus_dma_mask, we can handle
them by taking the bus dma_mask into account for allocation and bounce
buffering decisions.
Signed-off-by: Christoph Hellwig <hch@lst.de>
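
The pattern applied throughout the patch is to treat a bus_dma_mask of zero as "no bus limit" and otherwise clamp whatever device mask is in play; the dma_coherent_ok() hunk below expresses this with the kernel's min_not_zero() helper. A minimal user-space sketch of that combining rule, assuming nothing beyond standard C (effective_dma_mask() is an illustrative name, not a kernel API):

    /*
     * Sketch of the mask-combining rule this patch applies: bus_mask == 0
     * means "no bus limit"; otherwise the effective limit is the smaller
     * of the device and bus masks, as min_not_zero() computes in-kernel.
     */
    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as the kernel's macro; the ternary avoids a 64-bit shift. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    static uint64_t effective_dma_mask(uint64_t dev_mask, uint64_t bus_mask)
    {
            if (bus_mask && bus_mask < dev_mask)
                    return bus_mask;
            return dev_mask;
    }

    int main(void)
    {
            /* 64-bit capable device behind a 32-bit limited interconnect. */
            printf("%llx\n", (unsigned long long)
                   effective_dma_mask(DMA_BIT_MASK(64), DMA_BIT_MASK(32)));
            /* No bus limit configured: the device mask stands. */
            printf("%llx\n", (unsigned long long)
                   effective_dma_mask(DMA_BIT_MASK(64), 0));
            return 0;
    }

This prints ffffffff and then ffffffffffffffff; the diff below wires the same decision into the allocation, bounce-buffering, and required-mask paths.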
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/dma/direct.c  21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index e78548397a92..60c433b880e0 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		return false;
 	}
 
-	if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+	if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
 		dev_err(dev,
-			"%s: overflow %pad+%zu of device mask %llx\n",
-			caller, &dma_addr, size, *dev->dma_mask);
+			"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+			caller, &dma_addr, size,
+			*dev->dma_mask, dev->bus_dma_mask);
 	}
 	return false;
 }
@@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
+	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+		max_dma = dev->bus_dma_mask;
+
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_mask)
 {
+	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+		dma_mask = dev->bus_dma_mask;
+
 	if (force_dma_unencrypted())
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
@@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
-			dev->coherent_dma_mask;
+			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 }
 
 void *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
-	/*
-	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
-	 * as many DMA address bits as the device itself supports.
-	 */
-	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
-		return 0;
 	return 1;
 }
 
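
For context on the dma_direct_get_required_mask() hunk: the final expression rounds max_dma up to an all-ones mask with the same most-significant bit, so clamping max_dma to dev->bus_dma_mask first keeps the reported requirement within what the bus can carry. A user-space sketch of that computation, where fls64_sketch() stands in for the kernel's fls64() and the 30-bit bus limit is an assumed example value:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the kernel's fls64(): 1-based index of the highest set bit. */
    static int fls64_sketch(uint64_t x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            /* Assume the bus clamp left max_dma at a 30-bit limit. */
            uint64_t max_dma = 0x3fffffffULL;
            uint64_t required = (1ULL << (fls64_sketch(max_dma) - 1)) * 2 - 1;

            printf("%llx\n", (unsigned long long)required); /* prints 3fffffff */
            return 0;
    }

And with the explicit bus_dma_mask rejection removed from dma_direct_supported(), setting a device mask wider than the bus no longer fails outright; instead the clamps added above, together with the min_not_zero() check in dma_coherent_ok(), keep the actual addressing decisions within the bus limit, which is the behavior the commit message describes.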