author	Russell King <rmk+kernel@arm.linux.org.uk>	2013-07-09 07:14:49 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-10-31 10:49:21 -0400
commit	4dcfa60071b3d23f0181f27d8519f12e37cefbb9 (patch)
tree	a3c568b34226152a8f825c7ce1cff8a8bf1a20c2 /arch/arm/mm/dma-mapping.c
parent	42536b9f9ce38b07ac213c5f4550301f9a0d251d (diff)
ARM: DMA-API: better handling of DMA masks for coherent allocations
We need to start treating DMA masks as something which is specific to
the bus that the device resides on, otherwise we're going to hit all
sorts of nasty issues with LPAE and 32-bit DMA controllers in >32-bit
systems, where memory is offset from PFN 0.

In order to start doing this, we convert the DMA mask to a PFN using
the device specific dma_to_pfn() macro.  This is the reverse of the
pfn_to_dma() macro which is used to get the DMA address for the device.
This gives us a PFN mask, which we can then check against the PFN
limit of the DMA zone.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
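[Editor's illustration] The check described above amounts to translating the device's coherent DMA mask into the highest PFN it can reach and comparing that against the PFN limit of the DMA zone. The standalone C sketch below models only that arithmetic; model_dma_to_pfn(), dev_offset and the 1 GiB zone limit are invented for this example, and the kernel's real dma_to_pfn() is a per-machine macro, so this is a sketch of the idea, not the actual implementation.

	/*
	 * Illustration only: models the mask -> PFN comparison introduced by
	 * this patch.  PAGE_SHIFT, dev_offset and the zone limit are
	 * stand-ins for the kernel's real definitions.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12

	/* Hypothetical model of dma_to_pfn(): DMA address -> physical PFN. */
	static unsigned long model_dma_to_pfn(uint64_t dev_offset, uint64_t dma_addr)
	{
		return (unsigned long)((dma_addr + dev_offset) >> PAGE_SHIFT);
	}

	/* Does the coherent mask reach every page of the DMA zone? */
	static bool mask_covers_dma_zone(uint64_t mask, uint64_t dev_offset,
					 unsigned long dma_zone_pfn_limit)
	{
		return model_dma_to_pfn(dev_offset, mask) >= dma_zone_pfn_limit;
	}

	int main(void)
	{
		/* 32-bit mask, no bus offset, DMA zone limited to the first 1 GiB. */
		uint64_t mask = 0xffffffffULL;
		unsigned long dma_zone_pfn_limit = 0x40000000UL >> PAGE_SHIFT;

		printf("mask %#llx %s the DMA zone\n",
		       (unsigned long long)mask,
		       mask_covers_dma_zone(mask, 0, dma_zone_pfn_limit) ?
		       "covers" : "does not cover");
		return 0;
	}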
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c | 51
1 file changed, 45 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..7b3f2426a1fb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = (u64)arm_dma_limit;
+	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		if ((~mask) & (u64)arm_dma_limit) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (u64)arm_dma_limit);
+		/*
+		 * If the mask allows for more memory than we can address,
+		 * and we actually have that much memory, then fail the
+		 * allocation.
+		 */
+		if (sizeof(mask) != sizeof(dma_addr_t) &&
+		    mask > (dma_addr_t)~0 &&
+		    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+			return 0;
+		}
+
+		/*
+		 * Now check that the mask, when translated to a PFN,
+		 * fits within the allowable addresses which we can
+		 * allocate.
+		 */
+		if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 arm_dma_pfn_limit + 1);
 			return 0;
 		}
 	}
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	if (mask < (u64)arm_dma_limit)
+	unsigned long limit;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
+		return 0;
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	limit = dma_to_pfn(dev, mask);
+
+	if (limit < arm_dma_pfn_limit)
 		return 0;
+
 	return 1;
 }
 EXPORT_SYMBOL(dma_supported);
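[Editor's illustration] One of the new warnings asks whether the driver checked the return value of dma_set_coherent_mask(). The snippet below sketches the driver-side pattern that warning refers to: request a coherent mask at probe time and honour the result. foo_probe() and its error message are hypothetical; dma_set_coherent_mask() and DMA_BIT_MASK() are the existing DMA API.

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* Ask for 32-bit coherent DMA; bail out if the bus cannot provide it. */
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "no suitable coherent DMA mask\n");
			return ret;
		}

		/* ... allocate coherent buffers with dma_alloc_coherent() ... */
		return 0;
	}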