author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 17:55:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 17:55:21 -0500
commit		8ceafbfa91ffbdbb2afaea5c24ccb519ffb8b587 (patch)
tree		98c9ea93362536f1ddd73175b13b7847583350df /arch/arm/mm
parent		42a2d923cc349583ebf6fdd52a7d35e1c2f7e6bd (diff)
parent		26ba47b18318abe7dadbe9294a611c0e932651d8 (diff)
Merge branch 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm
Pull DMA mask updates from Russell King:
 "This series cleans up the handling of DMA masks in a lot of drivers,
  fixing some bugs as we go.  Some of the more serious errors include:

   - drivers which only set their coherent DMA mask if the attempt to
     set the streaming mask fails.

   - drivers which test for a NULL dma mask pointer, and then set the
     dma mask pointer to a location in their module .data section -
     which will cause problems if the module is reloaded.

  To counter these, I have introduced two helper functions:

   - dma_set_mask_and_coherent() takes care of setting both the
     streaming and coherent masks at the same time, with the correct
     error handling as specified by the API.

   - dma_coerce_mask_and_coherent() which resolves the problem of
     drivers forcefully setting DMA masks.  This is more a marker for
     future work to further clean these locations up - the code which
     creates the devices really should be initialising these, but to
     fix that in one go along with this change could potentially be
     very disruptive.

  The last thing this series does is prise away some of Linux's
  addiction to "DMA addresses are physical addresses and RAM always
  starts at zero".  We have ARM LPAE systems where all system memory is
  above 4GB physical, hence having DMA masks interpreted by (eg) the
  block layers as describing physical addresses in the range
  0..DMAMASK fails on these platforms.  Santosh Shilimkar addresses
  this in this series; the patches were copied to the appropriate
  people multiple times but were ignored.

  Fixing this also gets rid of some ARM weirdness in the setup of the
  max*pfn variables, and brings ARM into line with every other Linux
  architecture as far as those go"

* 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm: (52 commits)
  ARM: 7805/1: mm: change max*pfn to include the physical offset of memory
  ARM: 7797/1: mmc: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7796/1: scsi: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7795/1: mm: dma-mapping: Add dma_max_pfn(dev) helper function
  ARM: 7794/1: block: Rename parameter dma_mask to max_addr for blk_queue_bounce_limit()
  ARM: DMA-API: better handing of DMA masks for coherent allocations
  ARM: 7857/1: dma: imx-sdma: setup dma mask
  DMA-API: firmware/google/gsmi.c: avoid direct access to DMA masks
  DMA-API: dcdbas: update DMA mask handing
  DMA-API: dma: edma.c: no need to explicitly initialize DMA masks
  DMA-API: usb: musb: use platform_device_register_full() to avoid directly messing with dma masks
  DMA-API: crypto: remove last references to 'static struct device *dev'
  DMA-API: crypto: fix ixp4xx crypto platform device support
  DMA-API: others: use dma_set_coherent_mask()
  DMA-API: staging: use dma_set_coherent_mask()
  DMA-API: usb: use new dma_coerce_mask_and_coherent()
  DMA-API: usb: use dma_set_coherent_mask()
  DMA-API: parport: parport_pc.c: use dma_coerce_mask_and_coherent()
  DMA-API: net: octeon: use dma_coerce_mask_and_coherent()
  DMA-API: net: nxp/lpc_eth: use dma_coerce_mask_and_coherent()
  ...
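As a rough illustration of how the two new helpers named in the pull message are meant to be used from a driver, here is a minimal sketch; the probe functions and the 32-bit mask width are illustrative and not taken from any commit in this merge.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Typical case: the bus code already provided dev->dma_mask. */
static int example_probe(struct device *dev)
{
	int ret;

	/* Set the streaming and coherent masks together, honouring the error. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;	/* 32-bit DMA is not usable on this platform */

	/* ... rest of device setup ... */
	return 0;
}

/*
 * Legacy case: nothing initialised dev->dma_mask, so drivers used to point
 * it at static storage.  The coerce helper instead points it at
 * dev->coherent_dma_mask and then sets both masks.
 */
static int example_legacy_probe(struct device *dev)
{
	return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
}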
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/dma-mapping.c	51
-rw-r--r--	arch/arm/mm/init.c	12
-rw-r--r--	arch/arm/mm/mm.h	2
3 files changed, 53 insertions, 12 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1272ed202dde..644d91f73b00 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = (u64)arm_dma_limit;
+	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		if ((~mask) & (u64)arm_dma_limit) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (u64)arm_dma_limit);
+		/*
+		 * If the mask allows for more memory than we can address,
+		 * and we actually have that much memory, then fail the
+		 * allocation.
+		 */
+		if (sizeof(mask) != sizeof(dma_addr_t) &&
+		    mask > (dma_addr_t)~0 &&
+		    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+			return 0;
+		}
+
+		/*
+		 * Now check that the mask, when translated to a PFN,
+		 * fits within the allowable addresses which we can
+		 * allocate.
+		 */
+		if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 arm_dma_pfn_limit + 1);
 			return 0;
 		}
 	}
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	if (mask < (u64)arm_dma_limit)
+	unsigned long limit;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
+		return 0;
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	limit = dma_to_pfn(dev, mask);
+
+	if (limit < arm_dma_pfn_limit)
 		return 0;
+
 	return 1;
 }
 EXPORT_SYMBOL(dma_supported);
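Both get_coherent_dma_mask() and dma_supported() now translate the mask into a PFN and compare it against arm_dma_pfn_limit instead of comparing the raw mask with arm_dma_limit. The motivation is the LPAE case from the pull message: when all RAM sits above 4GB physical, a 32-bit mask can never exceed a physical-address limit, so the old comparison rejected devices whose bus addresses fit perfectly well in 32 bits. The following is a hypothetical worked example, compilable as plain C outside the kernel; RAM_BASE, the simplified dma_to_pfn() stand-in and the 4GB DMA zone are assumptions chosen to mimic such a platform, not the kernel's actual helpers.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define RAM_BASE	0x800000000ULL	/* assumed PHYS_OFFSET: all RAM above 4GB */

/* Simplified stand-in for dma_to_pfn(): bus address 0 maps to the start of RAM. */
static unsigned long dma_to_pfn(uint64_t dma_addr)
{
	return (unsigned long)((RAM_BASE + dma_addr) >> PAGE_SHIFT);
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;				/* device with a 32-bit DMA mask */
	uint64_t arm_dma_limit = RAM_BASE + 0xffffffffULL;	/* assumed 4GB DMA zone */
	unsigned long arm_dma_pfn_limit = (unsigned long)(arm_dma_limit >> PAGE_SHIFT);

	/* Old test compared the mask against a physical address: always refuses here. */
	printf("old check refuses DMA: %s\n", mask < arm_dma_limit ? "yes" : "no");

	/* New test translates the mask to the highest reachable PFN first: DMA is allowed. */
	printf("new check refuses DMA: %s\n",
	       dma_to_pfn(mask) < arm_dma_pfn_limit ? "yes" : "no");
	return 0;
}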
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ca907f805c57..3e8f106ee5fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -209,6 +209,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * so a successful GFP_DMA allocation will always satisfy this.
  */
 phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
@@ -231,6 +232,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
+	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
 #endif
 }
 
@@ -418,12 +420,10 @@ void __init bootmem_init(void)
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block.  If we can get rid of it, we
 	 * also get rid of some of the stuff above as well.
-	 *
-	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-	 * the system, not the maximum PFN.
 	 */
-	max_low_pfn = max_low - PHYS_PFN_OFFSET;
-	max_pfn = max_high - PHYS_PFN_OFFSET;
+	min_low_pfn = min;
+	max_low_pfn = max_low;
+	max_pfn = max_high;
 }
 
 /*
@@ -529,7 +529,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
 static void __init free_highpages(void)
 {
 #ifdef CONFIG_HIGHMEM
-	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+	unsigned long max_low = max_low_pfn;
 	struct memblock_region *mem, *res;
 
 	/* set highmem page free */
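The bootmem_init() hunk above turns max_low_pfn and max_pfn from page counts into real PFN limits that include PHYS_PFN_OFFSET, which is what lets generic code compare them directly against the PFN a device can reach (as the dma_max_pfn() bounce-limit commits in the shortlog do). A small worked example with hypothetical numbers (1GB of RAM starting at physical 0x80000000, 4K pages); the values are illustrative only.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PHYS_OFFSET	0x80000000UL			/* assumed start of RAM */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)	/* first valid PFN: 0x80000 */
#define RAM_SIZE	0x40000000UL			/* assumed 1GB of lowmem */

int main(void)
{
	/* PFN just past the end of lowmem, as bootmem_init() sees it. */
	unsigned long max_low = (PHYS_OFFSET + RAM_SIZE) >> PAGE_SHIFT;

	/* Before this merge: a page count, with the physical offset subtracted. */
	printf("old max_low_pfn = %#lx (number of pages)\n", max_low - PHYS_PFN_OFFSET);

	/* After this merge: the real PFN limit, directly comparable with PFNs
	 * derived from a device's DMA mask. */
	printf("new max_low_pfn = %#lx (actual PFN limit)\n", max_low);
	return 0;
}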
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0f..d5a982d15a88 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
 
 #ifdef CONFIG_ZONE_DMA
 extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
 #else
 #define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;