aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2013-07-09 07:14:49 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2013-10-31 10:49:21 -0400
commit4dcfa60071b3d23f0181f27d8519f12e37cefbb9 (patch)
treea3c568b34226152a8f825c7ce1cff8a8bf1a20c2
parent42536b9f9ce38b07ac213c5f4550301f9a0d251d (diff)
ARM: DMA-API: better handling of DMA masks for coherent allocations
We need to start treating DMA masks as something which is specific to the bus that the device resides on, otherwise we're going to hit all sorts of nasty issues with LPAE and 32-bit DMA controllers in >32-bit systems, where memory is offset from PFN 0. In order to start doing this, we convert the DMA mask to a PFN using the device specific dma_to_pfn() macro. This is the reverse of the pfn_to_dma() macro which is used to get the DMA address for the device. This gives us a PFN mask, which we can then check against the PFN limit of the DMA zone. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--arch/arm/mm/dma-mapping.c51
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/mm.h2
3 files changed, 49 insertions, 6 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..7b3f2426a1fb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
159 159
160static u64 get_coherent_dma_mask(struct device *dev) 160static u64 get_coherent_dma_mask(struct device *dev)
161{ 161{
162 u64 mask = (u64)arm_dma_limit; 162 u64 mask = (u64)DMA_BIT_MASK(32);
163 163
164 if (dev) { 164 if (dev) {
165 mask = dev->coherent_dma_mask; 165 mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
173 return 0; 173 return 0;
174 } 174 }
175 175
176 if ((~mask) & (u64)arm_dma_limit) { 176 /*
177 dev_warn(dev, "coherent DMA mask %#llx is smaller " 177 * If the mask allows for more memory than we can address,
178 "than system GFP_DMA mask %#llx\n", 178 * and we actually have that much memory, then fail the
179 mask, (u64)arm_dma_limit); 179 * allocation.
180 */
181 if (sizeof(mask) != sizeof(dma_addr_t) &&
182 mask > (dma_addr_t)~0 &&
183 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
184 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
185 mask);
186 dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
187 return 0;
188 }
189
190 /*
191 * Now check that the mask, when translated to a PFN,
192 * fits within the allowable addresses which we can
193 * allocate.
194 */
195 if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
196 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
197 mask,
198 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
199 arm_dma_pfn_limit + 1);
180 return 0; 200 return 0;
181 } 201 }
182 } 202 }
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1007 */ 1027 */
1008int dma_supported(struct device *dev, u64 mask) 1028int dma_supported(struct device *dev, u64 mask)
1009{ 1029{
1010 if (mask < (u64)arm_dma_limit) 1030 unsigned long limit;
1031
1032 /*
1033 * If the mask allows for more memory than we can address,
1034 * and we actually have that much memory, then we must
1035 * indicate that DMA to this device is not supported.
1036 */
1037 if (sizeof(mask) != sizeof(dma_addr_t) &&
1038 mask > (dma_addr_t)~0 &&
1039 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
1040 return 0;
1041
1042 /*
1043 * Translate the device's DMA mask to a PFN limit. This
1044 * PFN number includes the page which we can DMA to.
1045 */
1046 limit = dma_to_pfn(dev, mask);
1047
1048 if (limit < arm_dma_pfn_limit)
1011 return 0; 1049 return 0;
1050
1012 return 1; 1051 return 1;
1013} 1052}
1014EXPORT_SYMBOL(dma_supported); 1053EXPORT_SYMBOL(dma_supported);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index febaee7ca57b..8aab24f35a5e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -218,6 +218,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
218 * so a successful GFP_DMA allocation will always satisfy this. 218 * so a successful GFP_DMA allocation will always satisfy this.
219 */ 219 */
220phys_addr_t arm_dma_limit; 220phys_addr_t arm_dma_limit;
221unsigned long arm_dma_pfn_limit;
221 222
222static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, 223static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
223 unsigned long dma_size) 224 unsigned long dma_size)
@@ -240,6 +241,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
240 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; 241 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
241 } else 242 } else
242 arm_dma_limit = 0xffffffff; 243 arm_dma_limit = 0xffffffff;
244 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
243#endif 245#endif
244} 246}
245 247
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0f..d5a982d15a88 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
81 81
82#ifdef CONFIG_ZONE_DMA 82#ifdef CONFIG_ZONE_DMA
83extern phys_addr_t arm_dma_limit; 83extern phys_addr_t arm_dma_limit;
84extern unsigned long arm_dma_pfn_limit;
84#else 85#else
85#define arm_dma_limit ((phys_addr_t)~0) 86#define arm_dma_limit ((phys_addr_t)~0)
87#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
86#endif 88#endif
87 89
88extern phys_addr_t arm_lowmem_limit; 90extern phys_addr_t arm_lowmem_limit;