path: root/arch/arm/mm/dma-mapping.c
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	51
1 file changed, 45 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..7b3f2426a1fb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = (u64)arm_dma_limit;
+	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		if ((~mask) & (u64)arm_dma_limit) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (u64)arm_dma_limit);
+		/*
+		 * If the mask allows for more memory than we can address,
+		 * and we actually have that much memory, then fail the
+		 * allocation.
+		 */
+		if (sizeof(mask) != sizeof(dma_addr_t) &&
+		    mask > (dma_addr_t)~0 &&
+		    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+			return 0;
+		}
+
+		/*
+		 * Now check that the mask, when translated to a PFN,
+		 * fits within the allowable addresses which we can
+		 * allocate.
+		 */
+		if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 arm_dma_pfn_limit + 1);
 			return 0;
 		}
 	}
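With the checks above, get_coherent_dma_mask() can now legitimately reject a mask that is wider than dma_addr_t or narrower than the DMA zone, so callers must look at the return value of dma_set_coherent_mask() (the second dev_warn() above exists precisely because many drivers do not). A minimal driver-side sketch follows; the foo_probe() function, the platform device and the choice of a 32-bit mask are illustrative assumptions, not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Illustrative probe: request a 32-bit coherent mask and honour a refusal. */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "no usable coherent DMA mask: %d\n", ret);
		return ret;
	}

	/* ... remainder of device setup ... */
	return 0;
}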
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	if (mask < (u64)arm_dma_limit)
+	unsigned long limit;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
+		return 0;
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	limit = dma_to_pfn(dev, mask);
+
+	if (limit < arm_dma_pfn_limit)
 		return 0;
+
 	return 1;
 }
 EXPORT_SYMBOL(dma_supported);
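The streaming API is affected in the same way, since dma_set_mask() ends up checking dma_supported() before accepting a mask. Another hedged sketch, with a hypothetical helper name and placeholder mask widths (a real driver would pick widths matching its hardware):

#include <linux/dma-mapping.h>

/* Illustrative streaming-mask setup: try a wide mask, fall back to 32-bit. */
static int foo_setup_streaming_dma(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;

	return dma_set_mask(dev, DMA_BIT_MASK(32));
}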