-rw-r--r--  Documentation/DMA-API-HOWTO.txt | 37
-rw-r--r--  Documentation/DMA-API.txt | 8
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 8
-rw-r--r--  arch/arm/mm/dma-mapping.c | 51
-rw-r--r--  arch/arm/mm/init.c | 12
-rw-r--r--  arch/arm/mm/mm.h | 2
-rw-r--r--  arch/powerpc/kernel/vio.c | 3
-rw-r--r--  block/blk-settings.c | 8
-rw-r--r--  drivers/amba/bus.c | 6
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c | 5
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 5
-rw-r--r--  drivers/block/nvme-core.c | 10
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 48
-rw-r--r--  drivers/dma/amba-pl08x.c | 5
-rw-r--r--  drivers/dma/dw/platform.c | 8
-rw-r--r--  drivers/dma/edma.c | 10
-rw-r--r--  drivers/dma/imx-sdma.c | 4
-rw-r--r--  drivers/dma/pl330.c | 4
-rw-r--r--  drivers/firmware/dcdbas.c | 32
-rw-r--r--  drivers/firmware/google/gsmi.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 5
-rw-r--r--  drivers/media/platform/omap3isp/isp.c | 6
-rw-r--r--  drivers/media/platform/omap3isp/isp.h | 3
-rw-r--r--  drivers/mmc/card/queue.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 13
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 15
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 6
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 12
-rw-r--r--  drivers/net/wireless/b43/dma.c | 9
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 9
-rw-r--r--  drivers/of/platform.c | 3
-rw-r--r--  drivers/parport/parport_pc.c | 8
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/staging/dwc2/platform.c | 5
-rw-r--r--  drivers/staging/et131x/et131x.c | 17
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 8
-rw-r--r--  drivers/staging/imx-drm/ipuv3-crtc.c | 4
-rw-r--r--  drivers/staging/media/dt3155v4l/dt3155v4l.c | 5
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 7
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c | 7
-rw-r--r--  drivers/usb/gadget/lpc32xx_udc.c | 4
-rw-r--r--  drivers/usb/host/bcma-hcd.c | 3
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 7
-rw-r--r--  drivers/usb/host/ehci-exynos.c | 7
-rw-r--r--  drivers/usb/host/ehci-octeon.c | 4
-rw-r--r--  drivers/usb/host/ehci-omap.c | 10
-rw-r--r--  drivers/usb/host/ehci-orion.c | 7
-rw-r--r--  drivers/usb/host/ehci-platform.c | 10
-rw-r--r--  drivers/usb/host/ehci-spear.c | 7
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 7
-rw-r--r--  drivers/usb/host/ohci-at91.c | 9
-rw-r--r--  drivers/usb/host/ohci-exynos.c | 7
-rw-r--r--  drivers/usb/host/ohci-nxp.c | 5
-rw-r--r--  drivers/usb/host/ohci-octeon.c | 5
-rw-r--r--  drivers/usb/host/ohci-omap3.c | 10
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 8
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 6
-rw-r--r--  drivers/usb/host/ohci-spear.c | 7
-rw-r--r--  drivers/usb/host/ssb-hcd.c | 3
-rw-r--r--  drivers/usb/host/uhci-platform.c | 7
-rw-r--r--  drivers/video/amba-clcd.c | 5
-rw-r--r--  include/linux/amba/bus.h | 2
-rw-r--r--  include/linux/dma-mapping.h | 31
-rw-r--r--  sound/arm/pxa2xx-pcm.c | 10
-rw-r--r--  sound/soc/atmel/atmel-pcm.c | 11
-rw-r--r--  sound/soc/blackfin/bf5xx-ac97-pcm.c | 11
-rw-r--r--  sound/soc/blackfin/bf5xx-i2s-pcm.c | 10
-rw-r--r--  sound/soc/davinci/davinci-pcm.c | 9
-rw-r--r--  sound/soc/fsl/fsl_dma.c | 9
-rw-r--r--  sound/soc/fsl/imx-pcm-fiq.c | 12
-rw-r--r--  sound/soc/fsl/mpc5200_dma.c | 10
-rw-r--r--  sound/soc/jz4740/jz4740-pcm.c | 12
-rw-r--r--  sound/soc/kirkwood/kirkwood-dma.c | 9
-rw-r--r--  sound/soc/nuc900/nuc900-pcm.c | 9
-rw-r--r--  sound/soc/omap/omap-pcm.c | 11
-rw-r--r--  sound/soc/pxa/pxa2xx-pcm.c | 11
-rw-r--r--  sound/soc/s6000/s6000-pcm.c | 9
-rw-r--r--  sound/soc/samsung/dma.c | 11
-rw-r--r--  sound/soc/samsung/idma.c | 11
91 files changed, 447 insertions, 448 deletions
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 14129f149a75..5e983031cc11 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -101,14 +101,23 @@ style to do this even if your device holds the default setting,
 because this shows that you did think about these issues wrt. your
 device.
 
-The query is performed via a call to dma_set_mask():
+The query is performed via a call to dma_set_mask_and_coherent():
 
-        int dma_set_mask(struct device *dev, u64 mask);
+        int dma_set_mask_and_coherent(struct device *dev, u64 mask);
 
-The query for consistent allocations is performed via a call to
-dma_set_coherent_mask():
+which will query the mask for both streaming and coherent APIs together.
+If you have some special requirements, then the following two separate
+queries can be used instead:
 
-        int dma_set_coherent_mask(struct device *dev, u64 mask);
+        The query for streaming mappings is performed via a call to
+        dma_set_mask():
+
+                int dma_set_mask(struct device *dev, u64 mask);
+
+        The query for consistent allocations is performed via a call
+        to dma_set_coherent_mask():
+
+                int dma_set_coherent_mask(struct device *dev, u64 mask);
 
 Here, dev is a pointer to the device struct of your device, and mask
 is a bit mask describing which bits of an address your device
@@ -137,7 +146,7 @@ exactly why.
 
 The standard 32-bit addressing device would do something like this:
 
-        if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                 printk(KERN_WARNING
                        "mydev: No suitable DMA available.\n");
                 goto ignore_this_device;
@@ -171,22 +180,20 @@ the case would look like this:
 
         int using_dac, consistent_using_dac;
 
-        if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
                 using_dac = 1;
                 consistent_using_dac = 1;
-                dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
-        } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
+        } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                 using_dac = 0;
                 consistent_using_dac = 0;
-                dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
         } else {
                 printk(KERN_WARNING
                        "mydev: No suitable DMA available.\n");
                 goto ignore_this_device;
         }
 
-dma_set_coherent_mask() will always be able to set the same or a
-smaller mask as dma_set_mask(). However for the rare case that a
+The coherent mask will always be able to set the same or a
+smaller mask than the streaming mask. However for the rare case that a
 device driver only uses consistent allocations, one would have to
 check the return value from dma_set_coherent_mask().
 
@@ -199,9 +206,9 @@ address you might do something like:
                 goto ignore_this_device;
         }
 
-When dma_set_mask() is successful, and returns zero, the kernel saves
-away this mask you have provided. The kernel will use this
-information later when you make DMA mappings.
+When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
+returns zero, the kernel saves away this mask you have provided. The
+kernel will use this information later when you make DMA mappings.
 
 There is a case which we are aware of at this time, which is worth
 mentioning in this documentation. If your device supports multiple
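
The pattern the updated HOWTO text describes is the same one the driver conversions below switch to. A minimal sketch of the probe-time negotiation, assuming a hypothetical PCI driver (mydev_probe() and the using_dac flag are illustrative, not taken from the patch):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int using_dac;

            /* Try the 64-bit streaming and coherent masks in one call... */
            if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                    using_dac = 1;
            /* ...and fall back to 32-bit before giving up. */
            } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                    using_dac = 0;
            } else {
                    dev_warn(&pdev->dev, "mydev: No suitable DMA available\n");
                    return -EIO;
            }

            /* The rest of probe can key features such as HIGHDMA off using_dac. */
            return 0;
    }
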
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 78a6c569d204..e865279cec58 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -142,6 +142,14 @@ internal API for use by the platform than an external API for use by
 driver writers.
 
 int
+dma_set_mask_and_coherent(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+streaming and coherent DMA mask parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
+int
 dma_set_mask(struct device *dev, u64 mask)
 
 Checks to see if the mask is possible and updates the device
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b951503..863cd84eb1a2 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -64,6 +64,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
64{ 64{
65 return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); 65 return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
66} 66}
67
67#else 68#else
68static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) 69static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
69{ 70{
@@ -86,6 +87,13 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
86} 87}
87#endif 88#endif
88 89
90/* The ARM override for dma_max_pfn() */
91static inline unsigned long dma_max_pfn(struct device *dev)
92{
93 return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
94}
95#define dma_max_pfn(dev) dma_max_pfn(dev)
96
89/* 97/*
90 * DMA errors are defined by all-bits-set in the DMA address. 98 * DMA errors are defined by all-bits-set in the DMA address.
91 */ 99 */
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1272ed202dde..644d91f73b00 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
159 159
160static u64 get_coherent_dma_mask(struct device *dev) 160static u64 get_coherent_dma_mask(struct device *dev)
161{ 161{
162 u64 mask = (u64)arm_dma_limit; 162 u64 mask = (u64)DMA_BIT_MASK(32);
163 163
164 if (dev) { 164 if (dev) {
165 mask = dev->coherent_dma_mask; 165 mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
173 return 0; 173 return 0;
174 } 174 }
175 175
176 if ((~mask) & (u64)arm_dma_limit) { 176 /*
177 dev_warn(dev, "coherent DMA mask %#llx is smaller " 177 * If the mask allows for more memory than we can address,
178 "than system GFP_DMA mask %#llx\n", 178 * and we actually have that much memory, then fail the
179 mask, (u64)arm_dma_limit); 179 * allocation.
180 */
181 if (sizeof(mask) != sizeof(dma_addr_t) &&
182 mask > (dma_addr_t)~0 &&
183 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
184 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
185 mask);
186 dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
187 return 0;
188 }
189
190 /*
191 * Now check that the mask, when translated to a PFN,
192 * fits within the allowable addresses which we can
193 * allocate.
194 */
195 if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
196 dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
197 mask,
198 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
199 arm_dma_pfn_limit + 1);
180 return 0; 200 return 0;
181 } 201 }
182 } 202 }
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1007 */ 1027 */
1008int dma_supported(struct device *dev, u64 mask) 1028int dma_supported(struct device *dev, u64 mask)
1009{ 1029{
1010 if (mask < (u64)arm_dma_limit) 1030 unsigned long limit;
1031
1032 /*
1033 * If the mask allows for more memory than we can address,
1034 * and we actually have that much memory, then we must
1035 * indicate that DMA to this device is not supported.
1036 */
1037 if (sizeof(mask) != sizeof(dma_addr_t) &&
1038 mask > (dma_addr_t)~0 &&
1039 dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
1040 return 0;
1041
1042 /*
1043 * Translate the device's DMA mask to a PFN limit. This
1044 * PFN number includes the page which we can DMA to.
1045 */
1046 limit = dma_to_pfn(dev, mask);
1047
1048 if (limit < arm_dma_pfn_limit)
1011 return 0; 1049 return 0;
1050
1012 return 1; 1051 return 1;
1013} 1052}
1014EXPORT_SYMBOL(dma_supported); 1053EXPORT_SYMBOL(dma_supported);
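
The overflow test added to get_coherent_dma_mask() and dma_supported() is easier to follow with concrete types. A standalone sketch, not kernel code, with dma_addr_t shrunk to 32 bits to mirror ARM without LPAE:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t dma_addr_t;    /* 32-bit bus addresses, as on ARM without LPAE */

    int main(void)
    {
            uint64_t mask = 0xffffffffffffffffULL;  /* driver asked for DMA_BIT_MASK(64) */

            /*
             * The mask lives in a u64, so it can describe addresses that a
             * 32-bit dma_addr_t cannot represent.  (dma_addr_t)~0 is the
             * largest bus address the type can hold; a bigger mask would be
             * silently truncated later, which is what the new check refuses
             * when the machine really has memory above that point.
             */
            if (sizeof(mask) != sizeof(dma_addr_t) && mask > (dma_addr_t)~0)
                    printf("mask %#llx does not fit in dma_addr_t\n",
                           (unsigned long long)mask);
            return 0;
    }
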
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ca907f805c57..3e8f106ee5fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -209,6 +209,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
209 * so a successful GFP_DMA allocation will always satisfy this. 209 * so a successful GFP_DMA allocation will always satisfy this.
210 */ 210 */
211phys_addr_t arm_dma_limit; 211phys_addr_t arm_dma_limit;
212unsigned long arm_dma_pfn_limit;
212 213
213static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, 214static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
214 unsigned long dma_size) 215 unsigned long dma_size)
@@ -231,6 +232,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
231 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; 232 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
232 } else 233 } else
233 arm_dma_limit = 0xffffffff; 234 arm_dma_limit = 0xffffffff;
235 arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
234#endif 236#endif
235} 237}
236 238
@@ -418,12 +420,10 @@ void __init bootmem_init(void)
418 * This doesn't seem to be used by the Linux memory manager any 420 * This doesn't seem to be used by the Linux memory manager any
419 * more, but is used by ll_rw_block. If we can get rid of it, we 421 * more, but is used by ll_rw_block. If we can get rid of it, we
420 * also get rid of some of the stuff above as well. 422 * also get rid of some of the stuff above as well.
421 *
422 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
423 * the system, not the maximum PFN.
424 */ 423 */
425 max_low_pfn = max_low - PHYS_PFN_OFFSET; 424 min_low_pfn = min;
426 max_pfn = max_high - PHYS_PFN_OFFSET; 425 max_low_pfn = max_low;
426 max_pfn = max_high;
427} 427}
428 428
429/* 429/*
@@ -529,7 +529,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
529static void __init free_highpages(void) 529static void __init free_highpages(void)
530{ 530{
531#ifdef CONFIG_HIGHMEM 531#ifdef CONFIG_HIGHMEM
532 unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET; 532 unsigned long max_low = max_low_pfn;
533 struct memblock_region *mem, *res; 533 struct memblock_region *mem, *res;
534 534
535 /* set highmem page free */ 535 /* set highmem page free */
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0f..d5a982d15a88 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
81 81
82#ifdef CONFIG_ZONE_DMA 82#ifdef CONFIG_ZONE_DMA
83extern phys_addr_t arm_dma_limit; 83extern phys_addr_t arm_dma_limit;
84extern unsigned long arm_dma_pfn_limit;
84#else 85#else
85#define arm_dma_limit ((phys_addr_t)~0) 86#define arm_dma_limit ((phys_addr_t)~0)
87#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
86#endif 88#endif
87 89
88extern phys_addr_t arm_lowmem_limit; 90extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f99cefbd84e3..e7d0c88f621a 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1419,8 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1419 1419
1420 /* needed to ensure proper operation of coherent allocations 1420 /* needed to ensure proper operation of coherent allocations
1421 * later, in case driver doesn't set it explicitly */ 1421 * later, in case driver doesn't set it explicitly */
1422 dma_set_mask(&viodev->dev, DMA_BIT_MASK(64)); 1422 dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
1423 dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
1424 } 1423 }
1425 1424
1426 /* register with generic device framework */ 1425 /* register with generic device framework */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c50ecf0ea3b1..026c1517505f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -195,17 +195,17 @@ EXPORT_SYMBOL(blk_queue_make_request);
195/** 195/**
196 * blk_queue_bounce_limit - set bounce buffer limit for queue 196 * blk_queue_bounce_limit - set bounce buffer limit for queue
197 * @q: the request queue for the device 197 * @q: the request queue for the device
198 * @dma_mask: the maximum address the device can handle 198 * @max_addr: the maximum address the device can handle
199 * 199 *
200 * Description: 200 * Description:
201 * Different hardware can have different requirements as to what pages 201 * Different hardware can have different requirements as to what pages
202 * it can do I/O directly to. A low level driver can call 202 * it can do I/O directly to. A low level driver can call
203 * blk_queue_bounce_limit to have lower memory pages allocated as bounce 203 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
204 * buffers for doing I/O to pages residing above @dma_mask. 204 * buffers for doing I/O to pages residing above @max_addr.
205 **/ 205 **/
206void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) 206void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
207{ 207{
208 unsigned long b_pfn = dma_mask >> PAGE_SHIFT; 208 unsigned long b_pfn = max_addr >> PAGE_SHIFT;
209 int dma = 0; 209 int dma = 0;
210 210
211 q->bounce_gfp = GFP_NOIO; 211 q->bounce_gfp = GFP_NOIO;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index c6707278a6bb..c4876ac9151a 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -552,7 +552,6 @@ amba_aphb_device_add(struct device *parent, const char *name,
552 if (!dev) 552 if (!dev)
553 return ERR_PTR(-ENOMEM); 553 return ERR_PTR(-ENOMEM);
554 554
555 dev->dma_mask = dma_mask;
556 dev->dev.coherent_dma_mask = dma_mask; 555 dev->dev.coherent_dma_mask = dma_mask;
557 dev->irq[0] = irq1; 556 dev->irq[0] = irq1;
558 dev->irq[1] = irq2; 557 dev->irq[1] = irq2;
@@ -619,7 +618,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
619 dev_set_name(&dev->dev, "%s", name); 618 dev_set_name(&dev->dev, "%s", name);
620 dev->dev.release = amba_device_release; 619 dev->dev.release = amba_device_release;
621 dev->dev.bus = &amba_bustype; 620 dev->dev.bus = &amba_bustype;
622 dev->dev.dma_mask = &dev->dma_mask; 621 dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
623 dev->res.name = dev_name(&dev->dev); 622 dev->res.name = dev_name(&dev->dev);
624} 623}
625 624
@@ -663,9 +662,6 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
663 amba_device_initialize(dev, dev->dev.init_name); 662 amba_device_initialize(dev, dev->dev.init_name);
664 dev->dev.init_name = NULL; 663 dev->dev.init_name = NULL;
665 664
666 if (!dev->dev.coherent_dma_mask && dev->dma_mask)
667 dev_warn(&dev->dev, "coherent dma mask is unset\n");
668
669 return amba_device_add(dev, parent); 665 return amba_device_add(dev, parent);
670} 666}
671 667
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 1ec53f8ca96f..ddf470c2341d 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -144,6 +144,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
144 struct ata_host *host; 144 struct ata_host *host;
145 struct ata_port *ap; 145 struct ata_port *ap;
146 struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev); 146 struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
147 int ret;
147 148
148 cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); 149 cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
149 cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); 150 cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -157,7 +158,9 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
157 return -ENOMEM; 158 return -ENOMEM;
158 159
159 /* acquire resources and fill host */ 160 /* acquire resources and fill host */
160 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 161 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
162 if (ret)
163 return ret;
161 164
162 data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000); 165 data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
163 data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000); 166 data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index c51bbb9ea8e8..83c4ddb1bc7f 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1014,8 +1014,9 @@ static int octeon_cf_probe(struct platform_device *pdev)
1014 } 1014 }
1015 cf_port->c0 = ap->ioaddr.ctl_addr; 1015 cf_port->c0 = ap->ioaddr.ctl_addr;
1016 1016
1017 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 1017 rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1018 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 1018 if (rv)
1019 return rv;
1019 1020
1020 ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr); 1021 ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
1021 1022
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da52092980e2..26d03fa0bf26 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1949,12 +1949,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
1949 if (pci_request_selected_regions(pdev, bars, "nvme")) 1949 if (pci_request_selected_regions(pdev, bars, "nvme"))
1950 goto disable_pci; 1950 goto disable_pci;
1951 1951
1952 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) 1952 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
1953 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1953 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
1954 else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) 1954 goto disable;
1955 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1956 else
1957 goto disable_pci;
1958 1955
1959 pci_set_drvdata(pdev, dev); 1956 pci_set_drvdata(pdev, dev);
1960 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 1957 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2168,6 +2165,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2168 2165
2169 INIT_LIST_HEAD(&dev->namespaces); 2166 INIT_LIST_HEAD(&dev->namespaces);
2170 dev->pci_dev = pdev; 2167 dev->pci_dev = pdev;
2168
2171 result = nvme_set_instance(dev); 2169 result = nvme_set_instance(dev);
2172 if (result) 2170 if (result)
2173 goto free; 2171 goto free;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6cad6e..214357e12dc0 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
218 218
219static int support_aes = 1; 219static int support_aes = 1;
220 220
221static void dev_release(struct device *dev)
222{
223 return;
224}
225
226#define DRIVER_NAME "ixp4xx_crypto" 221#define DRIVER_NAME "ixp4xx_crypto"
227static struct platform_device pseudo_dev = {
228 .name = DRIVER_NAME,
229 .id = 0,
230 .num_resources = 0,
231 .dev = {
232 .coherent_dma_mask = DMA_BIT_MASK(32),
233 .release = dev_release,
234 }
235};
236 222
237static struct device *dev = &pseudo_dev.dev; 223static struct platform_device *pdev;
238 224
239static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt) 225static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
240{ 226{
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
263 249
264static int setup_crypt_desc(void) 250static int setup_crypt_desc(void)
265{ 251{
252 struct device *dev = &pdev->dev;
266 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 253 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
267 crypt_virt = dma_alloc_coherent(dev, 254 crypt_virt = dma_alloc_coherent(dev,
268 NPE_QLEN * sizeof(struct crypt_ctl), 255 NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
363 350
364static void one_packet(dma_addr_t phys) 351static void one_packet(dma_addr_t phys)
365{ 352{
353 struct device *dev = &pdev->dev;
366 struct crypt_ctl *crypt; 354 struct crypt_ctl *crypt;
367 struct ixp_ctx *ctx; 355 struct ixp_ctx *ctx;
368 int failed; 356 int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
432 tasklet_schedule(&crypto_done_tasklet); 420 tasklet_schedule(&crypto_done_tasklet);
433} 421}
434 422
435static int init_ixp_crypto(void) 423static int init_ixp_crypto(struct device *dev)
436{ 424{
437 int ret = -ENODEV; 425 int ret = -ENODEV;
438 u32 msg[2] = { 0, 0 }; 426 u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
519 return ret; 507 return ret;
520} 508}
521 509
522static void release_ixp_crypto(void) 510static void release_ixp_crypto(struct device *dev)
523{ 511{
524 qmgr_disable_irq(RECV_QID); 512 qmgr_disable_irq(RECV_QID);
525 tasklet_kill(&crypto_done_tasklet); 513 tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
886 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; 874 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
887 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); 875 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
888 struct buffer_desc src_hook; 876 struct buffer_desc src_hook;
877 struct device *dev = &pdev->dev;
889 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 878 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
890 GFP_KERNEL : GFP_ATOMIC; 879 GFP_KERNEL : GFP_ATOMIC;
891 880
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
1010 unsigned int cryptlen; 999 unsigned int cryptlen;
1011 struct buffer_desc *buf, src_hook; 1000 struct buffer_desc *buf, src_hook;
1012 struct aead_ctx *req_ctx = aead_request_ctx(req); 1001 struct aead_ctx *req_ctx = aead_request_ctx(req);
1002 struct device *dev = &pdev->dev;
1013 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 1003 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1014 GFP_KERNEL : GFP_ATOMIC; 1004 GFP_KERNEL : GFP_ATOMIC;
1015 1005
@@ -1418,20 +1408,30 @@ static struct ixp_alg ixp4xx_algos[] = {
1418} }; 1408} };
1419 1409
1420#define IXP_POSTFIX "-ixp4xx" 1410#define IXP_POSTFIX "-ixp4xx"
1411
1412static const struct platform_device_info ixp_dev_info __initdata = {
1413 .name = DRIVER_NAME,
1414 .id = 0,
1415 .dma_mask = DMA_BIT_MASK(32),
1416};
1417
1421static int __init ixp_module_init(void) 1418static int __init ixp_module_init(void)
1422{ 1419{
1423 int num = ARRAY_SIZE(ixp4xx_algos); 1420 int num = ARRAY_SIZE(ixp4xx_algos);
1424 int i,err ; 1421 int i, err ;
1425 1422
1426 if (platform_device_register(&pseudo_dev)) 1423 pdev = platform_device_register_full(&ixp_dev_info);
1427 return -ENODEV; 1424 if (IS_ERR(pdev))
1425 return PTR_ERR(pdev);
1426
1427 dev = &pdev->dev;
1428 1428
1429 spin_lock_init(&desc_lock); 1429 spin_lock_init(&desc_lock);
1430 spin_lock_init(&emerg_lock); 1430 spin_lock_init(&emerg_lock);
1431 1431
1432 err = init_ixp_crypto(); 1432 err = init_ixp_crypto(&pdev->dev);
1433 if (err) { 1433 if (err) {
1434 platform_device_unregister(&pseudo_dev); 1434 platform_device_unregister(pdev);
1435 return err; 1435 return err;
1436 } 1436 }
1437 for (i=0; i< num; i++) { 1437 for (i=0; i< num; i++) {
@@ -1495,8 +1495,8 @@ static void __exit ixp_module_exit(void)
1495 if (ixp4xx_algos[i].registered) 1495 if (ixp4xx_algos[i].registered)
1496 crypto_unregister_alg(&ixp4xx_algos[i].crypto); 1496 crypto_unregister_alg(&ixp4xx_algos[i].crypto);
1497 } 1497 }
1498 release_ixp_crypto(); 1498 release_ixp_crypto(&pdev->dev);
1499 platform_device_unregister(&pseudo_dev); 1499 platform_device_unregister(pdev);
1500} 1500}
1501 1501
1502module_init(ixp_module_init); 1502module_init(ixp_module_init);
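
The ixp4xx_crypto change replaces a hand-built static platform_device with platform_device_register_full(), so the platform core allocates the device and wires up its DMA mask from platform_device_info. A stripped-down sketch of the same registration pattern (the "mydrv" names are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static const struct platform_device_info mydrv_dev_info = {
            .name           = "mydrv",
            .id             = -1,
            /* The core allocates dev.dma_mask and fills it from this value. */
            .dma_mask       = DMA_BIT_MASK(32),
    };

    static struct platform_device *mydrv_pdev;

    static int __init mydrv_init(void)
    {
            mydrv_pdev = platform_device_register_full(&mydrv_dev_info);
            return IS_ERR(mydrv_pdev) ? PTR_ERR(mydrv_pdev) : 0;
    }
    module_init(mydrv_init);

    static void __exit mydrv_exit(void)
    {
            platform_device_unregister(mydrv_pdev);
    }
    module_exit(mydrv_exit);
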
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fce46c5bf1c7..e51a9832ef0d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2055,6 +2055,11 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2055 if (ret) 2055 if (ret)
2056 return ret; 2056 return ret;
2057 2057
2058 /* Ensure that we can do DMA */
2059 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2060 if (ret)
2061 goto out_no_pl08x;
2062
2058 /* Create the driver state holder */ 2063 /* Create the driver state holder */
2059 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 2064 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
2060 if (!pl08x) { 2065 if (!pl08x) {
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index e35d97590311..453822cc4f9d 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -191,11 +191,9 @@ static int dw_probe(struct platform_device *pdev)
191 if (IS_ERR(chip->regs)) 191 if (IS_ERR(chip->regs))
192 return PTR_ERR(chip->regs); 192 return PTR_ERR(chip->regs);
193 193
194 /* Apply default dma_mask if needed */ 194 err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
195 if (!dev->dma_mask) { 195 if (err)
196 dev->dma_mask = &dev->coherent_dma_mask; 196 return err;
197 dev->coherent_dma_mask = DMA_BIT_MASK(32);
198 }
199 197
200 pdata = dev_get_platdata(dev); 198 pdata = dev_get_platdata(dev);
201 if (!pdata) 199 if (!pdata)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 10b577fcf48d..bef8a368c8dd 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -634,6 +634,10 @@ static int edma_probe(struct platform_device *pdev)
634 struct edma_cc *ecc; 634 struct edma_cc *ecc;
635 int ret; 635 int ret;
636 636
637 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
638 if (ret)
639 return ret;
640
637 ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); 641 ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
638 if (!ecc) { 642 if (!ecc) {
639 dev_err(&pdev->dev, "Can't allocate controller\n"); 643 dev_err(&pdev->dev, "Can't allocate controller\n");
@@ -705,11 +709,13 @@ static struct platform_device *pdev0, *pdev1;
705static const struct platform_device_info edma_dev_info0 = { 709static const struct platform_device_info edma_dev_info0 = {
706 .name = "edma-dma-engine", 710 .name = "edma-dma-engine",
707 .id = 0, 711 .id = 0,
712 .dma_mask = DMA_BIT_MASK(32),
708}; 713};
709 714
710static const struct platform_device_info edma_dev_info1 = { 715static const struct platform_device_info edma_dev_info1 = {
711 .name = "edma-dma-engine", 716 .name = "edma-dma-engine",
712 .id = 1, 717 .id = 1,
718 .dma_mask = DMA_BIT_MASK(32),
713}; 719};
714 720
715static int edma_init(void) 721static int edma_init(void)
@@ -723,8 +729,6 @@ static int edma_init(void)
723 ret = PTR_ERR(pdev0); 729 ret = PTR_ERR(pdev0);
724 goto out; 730 goto out;
725 } 731 }
726 pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
727 pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
728 } 732 }
729 733
730 if (EDMA_CTLRS == 2) { 734 if (EDMA_CTLRS == 2) {
@@ -734,8 +738,6 @@ static int edma_init(void)
734 platform_device_unregister(pdev0); 738 platform_device_unregister(pdev0);
735 ret = PTR_ERR(pdev1); 739 ret = PTR_ERR(pdev1);
736 } 740 }
737 pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
738 pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
739 } 741 }
740 742
741out: 743out:
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fc43603cf0bb..c1fd504cae28 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1432,6 +1432,10 @@ static int __init sdma_probe(struct platform_device *pdev)
1432 return -EINVAL; 1432 return -EINVAL;
1433 } 1433 }
1434 1434
1435 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1436 if (ret)
1437 return ret;
1438
1435 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1439 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1436 if (!sdma) 1440 if (!sdma)
1437 return -ENOMEM; 1441 return -ENOMEM;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a562d24d20bf..df8b10fd1726 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2903,6 +2903,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2903 2903
2904 pdat = dev_get_platdata(&adev->dev); 2904 pdat = dev_get_platdata(&adev->dev);
2905 2905
2906 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2907 if (ret)
2908 return ret;
2909
2906 /* Allocate a new DMAC and its Channels */ 2910 /* Allocate a new DMAC and its Channels */
2907 pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); 2911 pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
2908 if (!pdmac) { 2912 if (!pdmac) {
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index ff080ee20197..1b5e8e46226d 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -545,12 +545,15 @@ static int dcdbas_probe(struct platform_device *dev)
545 host_control_action = HC_ACTION_NONE; 545 host_control_action = HC_ACTION_NONE;
546 host_control_smi_type = HC_SMITYPE_NONE; 546 host_control_smi_type = HC_SMITYPE_NONE;
547 547
548 dcdbas_pdev = dev;
549
548 /* 550 /*
549 * BIOS SMI calls require buffer addresses be in 32-bit address space. 551 * BIOS SMI calls require buffer addresses be in 32-bit address space.
550 * This is done by setting the DMA mask below. 552 * This is done by setting the DMA mask below.
551 */ 553 */
552 dcdbas_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 554 error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
553 dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask; 555 if (error)
556 return error;
554 557
555 error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group); 558 error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
556 if (error) 559 if (error)
@@ -581,6 +584,14 @@ static struct platform_driver dcdbas_driver = {
581 .remove = dcdbas_remove, 584 .remove = dcdbas_remove,
582}; 585};
583 586
587static const struct platform_device_info dcdbas_dev_info __initdata = {
588 .name = DRIVER_NAME,
589 .id = -1,
590 .dma_mask = DMA_BIT_MASK(32),
591};
592
593static struct platform_device *dcdbas_pdev_reg;
594
584/** 595/**
585 * dcdbas_init: initialize driver 596 * dcdbas_init: initialize driver
586 */ 597 */
@@ -592,20 +603,14 @@ static int __init dcdbas_init(void)
592 if (error) 603 if (error)
593 return error; 604 return error;
594 605
595 dcdbas_pdev = platform_device_alloc(DRIVER_NAME, -1); 606 dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
596 if (!dcdbas_pdev) { 607 if (IS_ERR(dcdbas_pdev_reg)) {
597 error = -ENOMEM; 608 error = PTR_ERR(dcdbas_pdev_reg);
598 goto err_unregister_driver; 609 goto err_unregister_driver;
599 } 610 }
600 611
601 error = platform_device_add(dcdbas_pdev);
602 if (error)
603 goto err_free_device;
604
605 return 0; 612 return 0;
606 613
607 err_free_device:
608 platform_device_put(dcdbas_pdev);
609 err_unregister_driver: 614 err_unregister_driver:
610 platform_driver_unregister(&dcdbas_driver); 615 platform_driver_unregister(&dcdbas_driver);
611 return error; 616 return error;
@@ -628,8 +633,9 @@ static void __exit dcdbas_exit(void)
628 * all sysfs attributes belonging to this module have been 633 * all sysfs attributes belonging to this module have been
629 * released. 634 * released.
630 */ 635 */
631 smi_data_buf_free(); 636 if (dcdbas_pdev)
632 platform_device_unregister(dcdbas_pdev); 637 smi_data_buf_free();
638 platform_device_unregister(dcdbas_pdev_reg);
633 platform_driver_unregister(&dcdbas_driver); 639 platform_driver_unregister(&dcdbas_driver);
634} 640}
635 641
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 6eb535ffeddc..e5a67b24587a 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -764,6 +764,13 @@ static __init int gsmi_system_valid(void)
764static struct kobject *gsmi_kobj; 764static struct kobject *gsmi_kobj;
765static struct efivars efivars; 765static struct efivars efivars;
766 766
767static const struct platform_device_info gsmi_dev_info = {
768 .name = "gsmi",
769 .id = -1,
770 /* SMI callbacks require 32bit addresses */
771 .dma_mask = DMA_BIT_MASK(32),
772};
773
767static __init int gsmi_init(void) 774static __init int gsmi_init(void)
768{ 775{
769 unsigned long flags; 776 unsigned long flags;
@@ -776,7 +783,7 @@ static __init int gsmi_init(void)
776 gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command; 783 gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
777 784
778 /* register device */ 785 /* register device */
779 gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0); 786 gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
780 if (IS_ERR(gsmi_dev.pdev)) { 787 if (IS_ERR(gsmi_dev.pdev)) {
781 printk(KERN_ERR "gsmi: unable to register platform device\n"); 788 printk(KERN_ERR "gsmi: unable to register platform device\n");
782 return PTR_ERR(gsmi_dev.pdev); 789 return PTR_ERR(gsmi_dev.pdev);
@@ -785,10 +792,6 @@ static __init int gsmi_init(void)
785 /* SMI access needs to be serialized */ 792 /* SMI access needs to be serialized */
786 spin_lock_init(&gsmi_dev.lock); 793 spin_lock_init(&gsmi_dev.lock);
787 794
788 /* SMI callbacks require 32bit addresses */
789 gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
790 gsmi_dev.pdev->dev.dma_mask =
791 &gsmi_dev.pdev->dev.coherent_dma_mask;
792 ret = -ENOMEM; 795 ret = -ENOMEM;
793 gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev, 796 gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
794 GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0); 797 GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
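
gsmi follows the same platform_device_register_full() route, and the reason the 32-bit mask has to be declared up front is visible just below: the DMA pool is created against &gsmi_dev.pdev->dev, and coherent allocations made through a pool respect that device's coherent mask. A hedged sketch of that dependency (names and sizes are illustrative):

    #include <linux/dmapool.h>
    #include <linux/platform_device.h>

    /*
     * Coherent memory handed out by a dma_pool comes from coherent
     * allocations on the pool's device, so the 32-bit coherent mask must
     * already be in place when the pool is created.
     */
    static struct dma_pool *mydrv_create_pool(struct platform_device *pdev)
    {
            return dma_pool_create("mydrv", &pdev->dev,
                                   1024 /* block size */, 16 /* align */,
                                   0 /* no boundary restriction */);
    }
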
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef78ca85..81192d00b39e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -286,7 +286,11 @@ static struct drm_driver exynos_drm_driver = {
286 286
287static int exynos_drm_platform_probe(struct platform_device *pdev) 287static int exynos_drm_platform_probe(struct platform_device *pdev)
288{ 288{
289 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 289 int ret;
290
291 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
292 if (ret)
293 return ret;
290 294
291 return drm_platform_init(&exynos_drm_driver, pdev); 295 return drm_platform_init(&exynos_drm_driver, pdev);
292} 296}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index acf667859cb6..701c4c10e08b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -664,8 +664,9 @@ static int omap_dmm_probe(struct platform_device *dev)
664 } 664 }
665 665
666 /* set dma mask for device */ 666 /* set dma mask for device */
667 /* NOTE: this is a workaround for the hwmod not initializing properly */ 667 ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
668 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 668 if (ret)
669 goto fail;
669 670
670 omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page); 671 omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
671 672
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index df3a0ec7fd2c..1c3608039663 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2182,9 +2182,9 @@ static int isp_probe(struct platform_device *pdev)
2182 isp->pdata = pdata; 2182 isp->pdata = pdata;
2183 isp->ref_count = 0; 2183 isp->ref_count = 0;
2184 2184
2185 isp->raw_dmamask = DMA_BIT_MASK(32); 2185 ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
2186 isp->dev->dma_mask = &isp->raw_dmamask; 2186 if (ret)
2187 isp->dev->coherent_dma_mask = DMA_BIT_MASK(32); 2187 return ret;
2188 2188
2189 platform_set_drvdata(pdev, isp); 2189 platform_set_drvdata(pdev, isp);
2190 2190
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index cd3eff45ae7d..ce65d3ae1aa7 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -152,7 +152,6 @@ struct isp_xclk {
152 * @mmio_base_phys: Array with physical L4 bus addresses for ISP register 152 * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
153 * regions. 153 * regions.
154 * @mmio_size: Array with ISP register regions size in bytes. 154 * @mmio_size: Array with ISP register regions size in bytes.
155 * @raw_dmamask: Raw DMA mask
156 * @stat_lock: Spinlock for handling statistics 155 * @stat_lock: Spinlock for handling statistics
157 * @isp_mutex: Mutex for serializing requests to ISP. 156 * @isp_mutex: Mutex for serializing requests to ISP.
158 * @crashed: Bitmask of crashed entities (indexed by entity ID) 157 * @crashed: Bitmask of crashed entities (indexed by entity ID)
@@ -190,8 +189,6 @@ struct isp_device {
190 unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST]; 189 unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
191 resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST]; 190 resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
192 191
193 u64 raw_dmamask;
194
195 /* ISP Obj */ 192 /* ISP Obj */
196 spinlock_t stat_lock; /* common lock for statistic drivers */ 193 spinlock_t stat_lock; /* common lock for statistic drivers */
197 struct mutex isp_mutex; /* For handling ref_count field */ 194 struct mutex isp_mutex; /* For handling ref_count field */
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fa9632eb63f1..357bbc54fe4b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -15,6 +15,7 @@
15#include <linux/freezer.h> 15#include <linux/freezer.h>
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/dma-mapping.h>
18 19
19#include <linux/mmc/card.h> 20#include <linux/mmc/card.h>
20#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
@@ -196,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
196 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; 197 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
197 198
198 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 199 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
199 limit = *mmc_dev(host)->dma_mask; 200 limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
200 201
201 mq->card = card; 202 mq->card = card;
202 mq->queue = blk_init_queue(mmc_request_fn, lock); 203 mq->queue = blk_init_queue(mmc_request_fn, lock);
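
The mmc queue now derives its bounce limit from dma_max_pfn() rather than from the raw mask, so the ARM override added earlier in this diff (which folds in PHYS_PFN_OFFSET) is honoured. A sketch of that calculation feeding blk_queue_bounce_limit(), whose parameter was renamed to max_addr above; mydrv_set_bounce_limit() is illustrative:

    #include <linux/blkdev.h>
    #include <linux/dma-mapping.h>

    /* Sketch of the bounce-limit calculation the mmc queue code switches to. */
    static void mydrv_set_bounce_limit(struct request_queue *q, struct device *dev)
    {
            u64 limit = BLK_BOUNCE_HIGH;

            if (dev->dma_mask && *dev->dma_mask)
                    /* Highest reachable PFN, converted back to a byte address. */
                    limit = (u64)dma_max_pfn(dev) << PAGE_SHIFT;

            blk_queue_bounce_limit(q, limit);
    }
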
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index cdd4ce0d7c90..ef19874fcd1f 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -310,8 +310,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
310 dma_mask = DMA_BIT_MASK(32); 310 dma_mask = DMA_BIT_MASK(32);
311 } 311 }
312 312
313 dev->dma_mask = &dev->coherent_dma_mask; 313 err = dma_coerce_mask_and_coherent(dev, dma_mask);
314 dev->coherent_dma_mask = dma_mask; 314 if (err)
315 goto err_free;
315 } 316 }
316 317
317 if (c->slot) { 318 if (c->slot) {
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 079a597fa20c..90e54d5488dc 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2193,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev,
2193 goto err_out_free_dev; 2193 goto err_out_free_dev;
2194 } 2194 }
2195 2195
2196 if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || 2196 if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
2197 dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
2198 dev_err(sdev->dev, 2197 dev_err(sdev->dev,
2199 "Required 30BIT DMA mask unsupported by the system\n"); 2198 "Required 30BIT DMA mask unsupported by the system\n");
2200 goto err_out_powerdown; 2199 goto err_out_powerdown;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bb2f20291509..e622cc1f96ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12140,12 +12140,8 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12140{ 12140{
12141 struct device *dev = &bp->pdev->dev; 12141 struct device *dev = &bp->pdev->dev;
12142 12142
12143 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 12143 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
12144 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 12144 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
12145 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
12146 return -EIO;
12147 }
12148 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
12149 dev_err(dev, "System does not support DMA, aborting\n"); 12145 dev_err(dev, "System does not support DMA, aborting\n");
12150 return -EIO; 12146 return -EIO;
12151 } 12147 }
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f276433d37ce..248bc37cb41b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3299,17 +3299,12 @@ bnad_pci_init(struct bnad *bnad,
3299 err = pci_request_regions(pdev, BNAD_NAME); 3299 err = pci_request_regions(pdev, BNAD_NAME);
3300 if (err) 3300 if (err)
3301 goto disable_device; 3301 goto disable_device;
3302 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3302 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3303 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3304 *using_dac = true; 3303 *using_dac = true;
3305 } else { 3304 } else {
3306 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3305 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3307 if (err) { 3306 if (err)
3308 err = dma_set_coherent_mask(&pdev->dev, 3307 goto release_regions;
3309 DMA_BIT_MASK(32));
3310 if (err)
3311 goto release_regions;
3312 }
3313 *using_dac = false; 3308 *using_dac = false;
3314 } 3309 }
3315 pci_set_master(pdev); 3310 pci_set_master(pdev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 741d3bff5ae7..cb2bb6fccbc8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4487,19 +4487,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4487 adapter->netdev = netdev; 4487 adapter->netdev = netdev;
4488 SET_NETDEV_DEV(netdev, &pdev->dev); 4488 SET_NETDEV_DEV(netdev, &pdev->dev);
4489 4489
4490 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 4490 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4491 if (!status) { 4491 if (!status) {
4492 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4493 if (status < 0) {
4494 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4495 goto free_netdev;
4496 }
4497 netdev->features |= NETIF_F_HIGHDMA; 4492 netdev->features |= NETIF_F_HIGHDMA;
4498 } else { 4493 } else {
4499 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4494 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4500 if (!status)
4501 status = dma_set_coherent_mask(&pdev->dev,
4502 DMA_BIT_MASK(32));
4503 if (status) { 4495 if (status) {
4504 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 4496 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4505 goto free_netdev; 4497 goto free_netdev;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ad6800ad1bfc..e38622825fa7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1018,19 +1018,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1018 */ 1018 */
1019 pci_using_dac = 0; 1019 pci_using_dac = 0;
1020 if ((hw->bus_type == e1000_bus_type_pcix) && 1020 if ((hw->bus_type == e1000_bus_type_pcix) &&
1021 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 1021 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1022 /* according to DMA-API-HOWTO, coherent calls will always
1023 * succeed if the set call did
1024 */
1025 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1026 pci_using_dac = 1; 1022 pci_using_dac = 1;
1027 } else { 1023 } else {
1028 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 1024 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1029 if (err) { 1025 if (err) {
1030 pr_err("No usable DMA config, aborting\n"); 1026 pr_err("No usable DMA config, aborting\n");
1031 goto err_dma; 1027 goto err_dma;
1032 } 1028 }
1033 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1034 } 1029 }
1035 1030
1036 netdev->netdev_ops = &e1000_netdev_ops; 1031 netdev->netdev_ops = &e1000_netdev_ops;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 4ef786775acb..aedd5736a87d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6553,21 +6553,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6553 return err; 6553 return err;
6554 6554
6555 pci_using_dac = 0; 6555 pci_using_dac = 0;
6556 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 6556 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6557 if (!err) { 6557 if (!err) {
6558 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 6558 pci_using_dac = 1;
6559 if (!err)
6560 pci_using_dac = 1;
6561 } else { 6559 } else {
6562 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 6560 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6563 if (err) { 6561 if (err) {
6564 err = dma_set_coherent_mask(&pdev->dev, 6562 dev_err(&pdev->dev,
6565 DMA_BIT_MASK(32)); 6563 "No usable DMA configuration, aborting\n");
6566 if (err) { 6564 goto err_dma;
6567 dev_err(&pdev->dev,
6568 "No usable DMA configuration, aborting\n");
6569 goto err_dma;
6570 }
6571 } 6565 }
6572 } 6566 }
6573 6567
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ebe6370c4b18..2ac14bdd5fbb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2035,21 +2035,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2035 return err; 2035 return err;
2036 2036
2037 pci_using_dac = 0; 2037 pci_using_dac = 0;
2038 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 2038 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2039 if (!err) { 2039 if (!err) {
2040 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 2040 pci_using_dac = 1;
2041 if (!err)
2042 pci_using_dac = 1;
2043 } else { 2041 } else {
2044 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2042 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2045 if (err) { 2043 if (err) {
2046 err = dma_set_coherent_mask(&pdev->dev, 2044 dev_err(&pdev->dev,
2047 DMA_BIT_MASK(32)); 2045 "No usable DMA configuration, aborting\n");
2048 if (err) { 2046 goto err_dma;
2049 dev_err(&pdev->dev,
2050 "No usable DMA configuration, aborting\n");
2051 goto err_dma;
2052 }
2053 } 2047 }
2054 } 2048 }
2055 2049
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 9fadbb28cf08..04bf22e5ee31 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2637,21 +2637,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2637 return err; 2637 return err;
2638 2638
2639 pci_using_dac = 0; 2639 pci_using_dac = 0;
2640 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 2640 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2641 if (!err) { 2641 if (!err) {
2642 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 2642 pci_using_dac = 1;
2643 if (!err)
2644 pci_using_dac = 1;
2645 } else { 2643 } else {
2646 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2644 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2647 if (err) { 2645 if (err) {
2648 err = dma_set_coherent_mask(&pdev->dev, 2646 dev_err(&pdev->dev, "No usable DMA "
2649 DMA_BIT_MASK(32)); 2647 "configuration, aborting\n");
2650 if (err) { 2648 goto err_dma;
2651 dev_err(&pdev->dev, "No usable DMA "
2652 "configuration, aborting\n");
2653 goto err_dma;
2654 }
2655 } 2649 }
2656 } 2650 }
2657 2651
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9f6b236828e6..57e390cbe6d0 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
408 return err; 408 return err;
409 409
410 pci_using_dac = 0; 410 pci_using_dac = 0;
411 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 411 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
412 if (!err) { 412 if (!err) {
413 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 413 pci_using_dac = 1;
414 if (!err)
415 pci_using_dac = 1;
416 } else { 414 } else {
417 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 415 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
418 if (err) { 416 if (err) {
419 err = dma_set_coherent_mask(&pdev->dev, 417 pr_err("No usable DMA configuration, aborting\n");
420 DMA_BIT_MASK(32)); 418 goto err_dma_mask;
421 if (err) {
422 pr_err("No usable DMA configuration, aborting\n");
423 goto err_dma_mask;
424 }
425 } 419 }
426 } 420 }
427 421
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bd8f5239dfe6..0066f0aefbfa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7824,19 +7824,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7824 if (err) 7824 if (err)
7825 return err; 7825 return err;
7826 7826
7827 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 7827 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
7828 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7829 pci_using_dac = 1; 7828 pci_using_dac = 1;
7830 } else { 7829 } else {
7831 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 7830 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7832 if (err) { 7831 if (err) {
7833 err = dma_set_coherent_mask(&pdev->dev, 7832 dev_err(&pdev->dev,
7834 DMA_BIT_MASK(32)); 7833 "No usable DMA configuration, aborting\n");
7835 if (err) { 7834 goto err_dma;
7836 dev_err(&pdev->dev,
7837 "No usable DMA configuration, aborting\n");
7838 goto err_dma;
7839 }
7840 } 7835 }
7841 pci_using_dac = 0; 7836 pci_using_dac = 0;
7842 } 7837 }
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 038bfc8b7616..92ef4cb5a8e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3421,19 +3421,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3421 if (err) 3421 if (err)
3422 return err; 3422 return err;
3423 3423
3424 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3424 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3425 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3426 pci_using_dac = 1; 3425 pci_using_dac = 1;
3427 } else { 3426 } else {
3428 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3427 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3429 if (err) { 3428 if (err) {
3430 err = dma_set_coherent_mask(&pdev->dev, 3429 dev_err(&pdev->dev, "No usable DMA "
3431 DMA_BIT_MASK(32)); 3430 "configuration, aborting\n");
3432 if (err) { 3431 goto err_dma;
3433 dev_err(&pdev->dev, "No usable DMA "
3434 "configuration, aborting\n");
3435 goto err_dma;
3436 }
3437 } 3432 }
3438 pci_using_dac = 0; 3433 pci_using_dac = 0;
3439 } 3434 }
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index a061b93efe66..ba3ca18611f7 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1399,8 +1399,10 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1399 } 1399 }
1400 1400
1401 if (pldat->dma_buff_base_v == 0) { 1401 if (pldat->dma_buff_base_v == 0) {
1402 pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF; 1402 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1403 pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask; 1403 if (ret)
1404 goto err_out_free_irq;
1405
1404 pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); 1406 pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
1405 1407
1406 /* Allocate a chunk of memory for the DMA ethernet buffers 1408 /* Allocate a chunk of memory for the DMA ethernet buffers
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 1b326cbcd34b..7dc3e9b06d75 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1552,8 +1552,9 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
1552 1552
1553 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1553 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1554 1554
1555 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 1555 result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1556 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 1556 if (result)
1557 goto err;
1557 1558
1558 netif_carrier_off(netdev); 1559 netif_carrier_off(netdev);
1559 result = register_netdev(netdev); 1560 result = register_netdev(netdev);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 07c9bc4c61bc..2e27837ce6a2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1121,7 +1121,7 @@ static int efx_init_io(struct efx_nic *efx)
1121 */ 1121 */
1122 while (dma_mask > 0x7fffffffUL) { 1122 while (dma_mask > 0x7fffffffUL) {
1123 if (dma_supported(&pci_dev->dev, dma_mask)) { 1123 if (dma_supported(&pci_dev->dev, dma_mask)) {
1124 rc = dma_set_mask(&pci_dev->dev, dma_mask); 1124 rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1125 if (rc == 0) 1125 if (rc == 0)
1126 break; 1126 break;
1127 } 1127 }
@@ -1134,16 +1134,6 @@ static int efx_init_io(struct efx_nic *efx)
1134 } 1134 }
1135 netif_dbg(efx, probe, efx->net_dev, 1135 netif_dbg(efx, probe, efx->net_dev,
1136 "using DMA mask %llx\n", (unsigned long long) dma_mask); 1136 "using DMA mask %llx\n", (unsigned long long) dma_mask);
1137 rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
1138 if (rc) {
1139 /* dma_set_coherent_mask() is not *allowed* to
1140 * fail with a mask that dma_set_mask() accepted,
1141 * but just in case...
1142 */
1143 netif_err(efx, probe, efx->net_dev,
1144 "failed to set consistent DMA mask\n");
1145 goto fail2;
1146 }
1147 1137
1148 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); 1138 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
1149 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); 1139 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index c51d2dc489e4..1d7982afc0ad 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1065,12 +1065,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
1065 /* Try to set the DMA mask. If it fails, try falling back to a 1065 /* Try to set the DMA mask. If it fails, try falling back to a
1066 * lower mask, as we can always also support a lower one. */ 1066 * lower mask, as we can always also support a lower one. */
1067 while (1) { 1067 while (1) {
1068 err = dma_set_mask(dev->dev->dma_dev, mask); 1068 err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
1069 if (!err) { 1069 if (!err)
1070 err = dma_set_coherent_mask(dev->dev->dma_dev, mask); 1070 break;
1071 if (!err)
1072 break;
1073 }
1074 if (mask == DMA_BIT_MASK(64)) { 1071 if (mask == DMA_BIT_MASK(64)) {
1075 mask = DMA_BIT_MASK(32); 1072 mask = DMA_BIT_MASK(32);
1076 fallback = true; 1073 fallback = true;
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 42eb26c99e11..b2ed1795130b 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -806,12 +806,9 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
806 /* Try to set the DMA mask. If it fails, try falling back to a 806 /* Try to set the DMA mask. If it fails, try falling back to a
807 * lower mask, as we can always also support a lower one. */ 807 * lower mask, as we can always also support a lower one. */
808 while (1) { 808 while (1) {
809 err = dma_set_mask(dev->dev->dma_dev, mask); 809 err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
810 if (!err) { 810 if (!err)
811 err = dma_set_coherent_mask(dev->dev->dma_dev, mask); 811 break;
812 if (!err)
813 break;
814 }
815 if (mask == DMA_BIT_MASK(64)) { 812 if (mask == DMA_BIT_MASK(64)) {
816 mask = DMA_BIT_MASK(32); 813 mask = DMA_BIT_MASK(32);
817 fallback = true; 814 fallback = true;
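
b43 and b43legacy keep their mask-lowering loop; only the two-call sequence inside it shrinks to a single dma_set_mask_and_coherent(). A sketch of the loop shape under the same 64-, 32-, then 30-bit fallback order those drivers use, with a hypothetical helper name:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Keep lowering the mask until the platform accepts one. */
static int foo_lower_dma_mask(struct device *dma_dev, u64 mask)
{
	while (dma_set_mask_and_coherent(dma_dev, mask)) {
		if (mask == DMA_BIT_MASK(64))
			mask = DMA_BIT_MASK(32);	/* try 32-bit next */
		else if (mask == DMA_BIT_MASK(32))
			mask = DMA_BIT_MASK(30);	/* last resort: 30-bit */
		else
			return -EOPNOTSUPP;		/* nothing usable */
	}
	return 0;					/* mask accepted */
}
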
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index fce088e6f54e..404d1daebefa 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -282,9 +282,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
282 else 282 else
283 of_device_make_bus_id(&dev->dev); 283 of_device_make_bus_id(&dev->dev);
284 284
285 /* setup amba-specific device info */
286 dev->dma_mask = ~0;
287
288 /* Allow the HW Peripheral ID to be overridden */ 285 /* Allow the HW Peripheral ID to be overridden */
289 prop = of_get_property(node, "arm,primecell-periphid", NULL); 286 prop = of_get_property(node, "arm,primecell-periphid", NULL);
290 if (prop) 287 if (prop)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 903e1285fda0..963761526229 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2004,6 +2004,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2004 struct resource *ECR_res = NULL; 2004 struct resource *ECR_res = NULL;
2005 struct resource *EPP_res = NULL; 2005 struct resource *EPP_res = NULL;
2006 struct platform_device *pdev = NULL; 2006 struct platform_device *pdev = NULL;
2007 int ret;
2007 2008
2008 if (!dev) { 2009 if (!dev) {
2009 /* We need a physical device to attach to, but none was 2010 /* We need a physical device to attach to, but none was
@@ -2014,8 +2015,11 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2014 return NULL; 2015 return NULL;
2015 dev = &pdev->dev; 2016 dev = &pdev->dev;
2016 2017
2017 dev->coherent_dma_mask = DMA_BIT_MASK(24); 2018 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
2018 dev->dma_mask = &dev->coherent_dma_mask; 2019 if (ret) {
2020 dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
2021 dma = PARPORT_DMA_NONE;
2022 }
2019 } 2023 }
2020 2024
2021 ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL); 2025 ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
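
parport_pc is the one conversion above that degrades instead of failing: if even a 24-bit mask cannot be set on the freshly created platform device, the port is still registered, just with DMA disabled. A sketch of that idiom, with hypothetical names standing in for the parport specifics:

#include <linux/dma-mapping.h>
#include <linux/device.h>

#define FOO_DMA_NONE	(-1)	/* hypothetical marker, in the spirit of PARPORT_DMA_NONE */

static void foo_setup_isa_dma(struct device *dev, int *dma)
{
	/* ISA-style DMA can only reach the low 16MB of memory. */
	if (dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24))) {
		dev_warn(dev, "cannot set 24-bit DMA mask, falling back to PIO\n");
		*dma = FOO_DMA_NONE;	/* keep going, just without DMA */
	}
}
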
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d1549b74e2d1..7bd7f0d5f050 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1684 1684
1685 host_dev = scsi_get_device(shost); 1685 host_dev = scsi_get_device(shost);
1686 if (host_dev && host_dev->dma_mask) 1686 if (host_dev && host_dev->dma_mask)
1687 bounce_limit = *host_dev->dma_mask; 1687 bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
1688 1688
1689 return bounce_limit; 1689 return bounce_limit;
1690} 1690}
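
The scsi_lib change swaps a direct read of *dma_mask for the new dma_max_pfn() helper, so an architecture (ARM, in this series) can override it and clamp the bounce limit below what the raw mask implies. With the generic fallback added later in this patch, the result is the old mask rounded down to a page boundary; a sketch of the computation, assuming a hypothetical host device with a valid dma_mask:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Highest byte address the block layer should hand this device without
 * bouncing: dma_max_pfn() gives the last addressable page frame, shifted
 * back up here to a byte limit.
 */
static u64 foo_bounce_limit(struct device *host_dev)
{
	return (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
}
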
diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c
index 76ae6e210f55..83ca1053bb1d 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/staging/dwc2/platform.c
@@ -100,8 +100,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
100 */ 100 */
101 if (!dev->dev.dma_mask) 101 if (!dev->dev.dma_mask)
102 dev->dev.dma_mask = &dev->dev.coherent_dma_mask; 102 dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
103 if (!dev->dev.coherent_dma_mask) 103 retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
104 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 104 if (retval)
105 return retval;
105 106
106 irq = platform_get_irq(dev, 0); 107 irq = platform_get_irq(dev, 0);
107 if (irq < 0) { 108 if (irq < 0) {
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index d9446c47bf2e..820a332f3188 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4791,21 +4791,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
4791 pci_set_master(pdev); 4791 pci_set_master(pdev);
4792 4792
4793 /* Check the DMA addressing support of this device */ 4793 /* Check the DMA addressing support of this device */
4794 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 4794 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
4795 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 4795 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
4796 if (rc < 0) {
4797 dev_err(&pdev->dev,
4798 "Unable to obtain 64 bit DMA for consistent allocations\n");
4799 goto err_release_res;
4800 }
4801 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
4802 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4803 if (rc < 0) {
4804 dev_err(&pdev->dev,
4805 "Unable to obtain 32 bit DMA for consistent allocations\n");
4806 goto err_release_res;
4807 }
4808 } else {
4809 dev_err(&pdev->dev, "No usable DMA addressing method\n"); 4796 dev_err(&pdev->dev, "No usable DMA addressing method\n");
4810 rc = -EIO; 4797 rc = -EIO;
4811 goto err_release_res; 4798 goto err_release_res;
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 4483d47f7395..3d3a824f6de7 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -815,6 +815,12 @@ static struct drm_driver imx_drm_driver = {
815 815
816static int imx_drm_platform_probe(struct platform_device *pdev) 816static int imx_drm_platform_probe(struct platform_device *pdev)
817{ 817{
818 int ret;
819
820 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
821 if (ret)
822 return ret;
823
818 imx_drm_device->dev = &pdev->dev; 824 imx_drm_device->dev = &pdev->dev;
819 825
820 return drm_platform_init(&imx_drm_driver, pdev); 826 return drm_platform_init(&imx_drm_driver, pdev);
@@ -857,8 +863,6 @@ static int __init imx_drm_init(void)
857 goto err_pdev; 863 goto err_pdev;
858 } 864 }
859 865
860 imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
861
862 ret = platform_driver_register(&imx_drm_pdrv); 866 ret = platform_driver_register(&imx_drm_pdrv);
863 if (ret) 867 if (ret)
864 goto err_pdrv; 868 goto err_pdrv;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 670a56a834f1..ce6ba987ec91 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -407,7 +407,9 @@ static int ipu_drm_probe(struct platform_device *pdev)
407 if (!pdata) 407 if (!pdata)
408 return -EINVAL; 408 return -EINVAL;
409 409
410 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 410 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
411 if (ret)
412 return ret;
411 413
412 ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL); 414 ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
413 if (!ipu_crtc) 415 if (!ipu_crtc)
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 90d6ac469355..081407be33ab 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -901,10 +901,7 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
901 int err; 901 int err;
902 struct dt3155_priv *pd; 902 struct dt3155_priv *pd;
903 903
904 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 904 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
905 if (err)
906 return -ENODEV;
907 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
908 if (err) 905 if (err)
909 return -ENODEV; 906 return -ENODEV;
910 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 907 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 023d3cb6aa0a..bb5d976e5b81 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -115,10 +115,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
115 115
116 pdata.phy = data->phy; 116 pdata.phy = data->phy;
117 117
118 if (!pdev->dev.dma_mask) 118 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
119 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 119 if (ret)
120 if (!pdev->dev.coherent_dma_mask) 120 goto err_clk;
121 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
122 121
123 if (data->usbmisc_data) { 122 if (data->usbmisc_data) {
124 ret = imx_usbmisc_init(data->usbmisc_data); 123 ret = imx_usbmisc_init(data->usbmisc_data);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f2e88a3a11a..8b20c70d91e7 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -119,10 +119,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
119 * Since shared usb code relies on it, set it here for now. 119 * Since shared usb code relies on it, set it here for now.
120 * Once we move to full device tree support this will vanish off. 120 * Once we move to full device tree support this will vanish off.
121 */ 121 */
122 if (!dev->dma_mask) 122 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
123 dev->dma_mask = &dev->coherent_dma_mask; 123 if (ret)
124 if (!dev->coherent_dma_mask) 124 goto err1;
125 dev->coherent_dma_mask = DMA_BIT_MASK(32);
126 125
127 platform_set_drvdata(pdev, exynos); 126 platform_set_drvdata(pdev, exynos);
128 127
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 67128be1e1b7..6a2a65aa0057 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -3078,7 +3078,9 @@ static int __init lpc32xx_udc_probe(struct platform_device *pdev)
3078 udc->isp1301_i2c_client->addr); 3078 udc->isp1301_i2c_client->addr);
3079 3079
3080 pdev->dev.dma_mask = &lpc32xx_usbd_dmamask; 3080 pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
3081 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 3081 retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
3082 if (retval)
3083 goto resource_fail;
3082 3084
3083 udc->board = &lpc32xx_usbddata; 3085 udc->board = &lpc32xx_usbddata;
3084 3086
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index df13d425e9c5..205f4a336583 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -227,8 +227,7 @@ static int bcma_hcd_probe(struct bcma_device *dev)
227 227
228 /* TODO: Probably need checks here; is the core connected? */ 228 /* TODO: Probably need checks here; is the core connected? */
229 229
230 if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) || 230 if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
231 dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
232 return -EOPNOTSUPP; 231 return -EOPNOTSUPP;
233 232
234 usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL); 233 usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index f417526fb1f4..284f8417eae5 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -96,10 +96,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
96 * Since shared usb code relies on it, set it here for now. 96 * Since shared usb code relies on it, set it here for now.
97 * Once we have dma capability bindings this can go away. 97 * Once we have dma capability bindings this can go away.
98 */ 98 */
99 if (!pdev->dev.dma_mask) 99 retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
100 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 100 if (retval)
101 if (!pdev->dev.coherent_dma_mask) 101 goto fail_create_hcd;
102 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
103 102
104 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); 103 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
105 if (!hcd) { 104 if (!hcd) {
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 016352e0f5a7..e97c198e052f 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -84,10 +84,9 @@ static int exynos_ehci_probe(struct platform_device *pdev)
84 * Since shared usb code relies on it, set it here for now. 84 * Since shared usb code relies on it, set it here for now.
85 * Once we move to full device tree support this will vanish off. 85 * Once we move to full device tree support this will vanish off.
86 */ 86 */
87 if (!pdev->dev.dma_mask) 87 err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
88 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 88 if (err)
89 if (!pdev->dev.coherent_dma_mask) 89 return err;
90 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
91 90
92 exynos_setup_vbus_gpio(pdev); 91 exynos_setup_vbus_gpio(pdev);
93 92
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ab0397e4d8f3..4c528b2c033a 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -116,8 +116,10 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
116 * We can DMA from anywhere. But the descriptors must be in 116 * We can DMA from anywhere. But the descriptors must be in
117 * the lower 4GB. 117 * the lower 4GB.
118 */ 118 */
119 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
120 pdev->dev.dma_mask = &ehci_octeon_dma_mask; 119 pdev->dev.dma_mask = &ehci_octeon_dma_mask;
120 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
121 if (ret)
122 return ret;
121 123
122 hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon"); 124 hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
123 if (!hcd) 125 if (!hcd)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 78b01fa475bb..6fa82d6b7661 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -104,7 +104,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
104 struct resource *res; 104 struct resource *res;
105 struct usb_hcd *hcd; 105 struct usb_hcd *hcd;
106 void __iomem *regs; 106 void __iomem *regs;
107 int ret = -ENODEV; 107 int ret;
108 int irq; 108 int irq;
109 int i; 109 int i;
110 struct omap_hcd *omap; 110 struct omap_hcd *omap;
@@ -144,11 +144,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
144 * Since shared usb code relies on it, set it here for now. 144 * Since shared usb code relies on it, set it here for now.
145 * Once we have dma capability bindings this can go away. 145 * Once we have dma capability bindings this can go away.
146 */ 146 */
147 if (!dev->dma_mask) 147 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
148 dev->dma_mask = &dev->coherent_dma_mask; 148 if (ret)
149 if (!dev->coherent_dma_mask) 149 return ret;
150 dev->coherent_dma_mask = DMA_BIT_MASK(32);
151 150
151 ret = -ENODEV;
152 hcd = usb_create_hcd(&ehci_omap_hc_driver, dev, 152 hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
153 dev_name(dev)); 153 dev_name(dev));
154 if (!hcd) { 154 if (!hcd) {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index d1dfb9db5b42..2ba76730e650 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -180,10 +180,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
180 * set. Since shared usb code relies on it, set it here for 180 * set. Since shared usb code relies on it, set it here for
181 * now. Once we have dma capability bindings this can go away. 181 * now. Once we have dma capability bindings this can go away.
182 */ 182 */
183 if (!pdev->dev.dma_mask) 183 err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
184 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 184 if (err)
185 if (!pdev->dev.coherent_dma_mask) 185 goto err1;
186 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
187 186
188 if (!request_mem_region(res->start, resource_size(res), 187 if (!request_mem_region(res->start, resource_size(res),
189 ehci_orion_hc_driver.description)) { 188 ehci_orion_hc_driver.description)) {
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f6b790ca8cf2..7f30b7168d5a 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -78,7 +78,7 @@ static int ehci_platform_probe(struct platform_device *dev)
78 struct resource *res_mem; 78 struct resource *res_mem;
79 struct usb_ehci_pdata *pdata; 79 struct usb_ehci_pdata *pdata;
80 int irq; 80 int irq;
81 int err = -ENOMEM; 81 int err;
82 82
83 if (usb_disabled()) 83 if (usb_disabled())
84 return -ENODEV; 84 return -ENODEV;
@@ -89,10 +89,10 @@ static int ehci_platform_probe(struct platform_device *dev)
89 */ 89 */
90 if (!dev_get_platdata(&dev->dev)) 90 if (!dev_get_platdata(&dev->dev))
91 dev->dev.platform_data = &ehci_platform_defaults; 91 dev->dev.platform_data = &ehci_platform_defaults;
92 if (!dev->dev.dma_mask) 92
93 dev->dev.dma_mask = &dev->dev.coherent_dma_mask; 93 err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
94 if (!dev->dev.coherent_dma_mask) 94 if (err)
95 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 95 return err;
96 96
97 pdata = dev_get_platdata(&dev->dev); 97 pdata = dev_get_platdata(&dev->dev);
98 98
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1cf0adba3fc8..ee6f9ffaa0e7 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -81,10 +81,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
81 * Since shared usb code relies on it, set it here for now. 81 * Since shared usb code relies on it, set it here for now.
82 * Once we have dma capability bindings this can go away. 82 * Once we have dma capability bindings this can go away.
83 */ 83 */
84 if (!pdev->dev.dma_mask) 84 retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
85 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 85 if (retval)
86 if (!pdev->dev.coherent_dma_mask) 86 goto fail;
87 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
88 87
89 usbh_clk = devm_clk_get(&pdev->dev, NULL); 88 usbh_clk = devm_clk_get(&pdev->dev, NULL);
90 if (IS_ERR(usbh_clk)) { 89 if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index e6d8e26e48cc..b9fd0396011e 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -362,10 +362,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
362 * Since shared usb code relies on it, set it here for now. 362 * Since shared usb code relies on it, set it here for now.
363 * Once we have dma capability bindings this can go away. 363 * Once we have dma capability bindings this can go away.
364 */ 364 */
365 if (!pdev->dev.dma_mask) 365 err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
366 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 366 if (err)
367 if (!pdev->dev.coherent_dma_mask) 367 return err;
368 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
369 368
370 hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev, 369 hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
371 dev_name(&pdev->dev)); 370 dev_name(&pdev->dev));
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 476b5a5baf25..418444ebb1b8 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -469,7 +469,7 @@ MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
469static int ohci_at91_of_init(struct platform_device *pdev) 469static int ohci_at91_of_init(struct platform_device *pdev)
470{ 470{
471 struct device_node *np = pdev->dev.of_node; 471 struct device_node *np = pdev->dev.of_node;
472 int i, gpio; 472 int i, gpio, ret;
473 enum of_gpio_flags flags; 473 enum of_gpio_flags flags;
474 struct at91_usbh_data *pdata; 474 struct at91_usbh_data *pdata;
475 u32 ports; 475 u32 ports;
@@ -481,10 +481,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
481 * Since shared usb code relies on it, set it here for now. 481 * Since shared usb code relies on it, set it here for now.
482 * Once we have dma capability bindings this can go away. 482 * Once we have dma capability bindings this can go away.
483 */ 483 */
484 if (!pdev->dev.dma_mask) 484 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
485 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 485 if (ret)
486 if (!pdev->dev.coherent_dma_mask) 486 return ret;
487 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
488 487
489 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 488 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
490 if (!pdata) 489 if (!pdata)
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index a87baedc0aa7..91ec9b2cd378 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -71,10 +71,9 @@ static int exynos_ohci_probe(struct platform_device *pdev)
71 * Since shared usb code relies on it, set it here for now. 71 * Since shared usb code relies on it, set it here for now.
72 * Once we move to full device tree support this will vanish off. 72 * Once we move to full device tree support this will vanish off.
73 */ 73 */
74 if (!pdev->dev.dma_mask) 74 err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
75 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 75 if (err)
76 if (!pdev->dev.coherent_dma_mask) 76 return err;
77 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
78 77
79 hcd = usb_create_hcd(&exynos_ohci_hc_driver, 78 hcd = usb_create_hcd(&exynos_ohci_hc_driver,
80 &pdev->dev, dev_name(&pdev->dev)); 79 &pdev->dev, dev_name(&pdev->dev));
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 9ab7e24ba65d..e99db8a6d55f 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -181,8 +181,9 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
181 return -EPROBE_DEFER; 181 return -EPROBE_DEFER;
182 } 182 }
183 183
184 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 184 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
185 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 185 if (ret)
186 goto fail_disable;
186 187
187 dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name); 188 dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
188 if (usb_disabled()) { 189 if (usb_disabled()) {
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index 342dc7e543b8..6c16dcef15c6 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -127,8 +127,9 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
127 } 127 }
128 128
129 /* Ohci is a 32-bit device. */ 129 /* Ohci is a 32-bit device. */
130 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 130 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
131 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 131 if (ret)
132 return ret;
132 133
133 hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon"); 134 hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
134 if (!hcd) 135 if (!hcd)
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 408d06a68571..21457417a856 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -65,7 +65,7 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
65 struct usb_hcd *hcd = NULL; 65 struct usb_hcd *hcd = NULL;
66 void __iomem *regs = NULL; 66 void __iomem *regs = NULL;
67 struct resource *res; 67 struct resource *res;
68 int ret = -ENODEV; 68 int ret;
69 int irq; 69 int irq;
70 70
71 if (usb_disabled()) 71 if (usb_disabled())
@@ -99,11 +99,11 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
99 * Since shared usb code relies on it, set it here for now. 99 * Since shared usb code relies on it, set it here for now.
100 * Once we have dma capability bindings this can go away. 100 * Once we have dma capability bindings this can go away.
101 */ 101 */
102 if (!dev->dma_mask) 102 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
103 dev->dma_mask = &dev->coherent_dma_mask; 103 if (ret)
104 if (!dev->coherent_dma_mask) 104 goto err_io;
105 dev->coherent_dma_mask = DMA_BIT_MASK(32);
106 105
106 ret = -ENODEV;
107 hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, 107 hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
108 dev_name(dev)); 108 dev_name(dev));
109 if (!hcd) { 109 if (!hcd) {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index deea5d1d6394..e89ac4d4b87e 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -298,6 +298,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
298 struct device_node *np = pdev->dev.of_node; 298 struct device_node *np = pdev->dev.of_node;
299 struct pxaohci_platform_data *pdata; 299 struct pxaohci_platform_data *pdata;
300 u32 tmp; 300 u32 tmp;
301 int ret;
301 302
302 if (!np) 303 if (!np)
303 return 0; 304 return 0;
@@ -306,10 +307,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
306 * Since shared usb code relies on it, set it here for now. 307 * Since shared usb code relies on it, set it here for now.
307 * Once we have dma capability bindings this can go away. 308 * Once we have dma capability bindings this can go away.
308 */ 309 */
309 if (!pdev->dev.dma_mask) 310 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
310 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 311 if (ret)
311 if (!pdev->dev.coherent_dma_mask) 312 return ret;
312 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
313 313
314 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 314 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
315 if (!pdata) 315 if (!pdata)
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 17b2a7dad77b..aa9e127bbe71 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -185,6 +185,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
185 if (usb_disabled()) 185 if (usb_disabled())
186 return -ENODEV; 186 return -ENODEV;
187 187
188 /*
189 * We don't call dma_set_mask_and_coherent() here because the
190 * DMA mask has already been appropraitely setup by the core
191 * SA-1111 bus code (which includes bug workarounds.)
192 */
193
188 hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111"); 194 hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
189 if (!hcd) 195 if (!hcd)
190 return -ENOMEM; 196 return -ENOMEM;
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 31ff3fc4e26f..6b02107d281d 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -56,10 +56,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
56 * Since shared usb code relies on it, set it here for now. 56 * Since shared usb code relies on it, set it here for now.
57 * Once we have dma capability bindings this can go away. 57 * Once we have dma capability bindings this can go away.
58 */ 58 */
59 if (!pdev->dev.dma_mask) 59 retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
60 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 60 if (retval)
61 if (!pdev->dev.coherent_dma_mask) 61 goto fail;
62 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
63 62
64 usbh_clk = devm_clk_get(&pdev->dev, NULL); 63 usbh_clk = devm_clk_get(&pdev->dev, NULL);
65 if (IS_ERR(usbh_clk)) { 64 if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 74af2c6287d2..0196f766df73 100644
--- a/drivers/usb/host/ssb-hcd.c
+++ b/drivers/usb/host/ssb-hcd.c
@@ -163,8 +163,7 @@ static int ssb_hcd_probe(struct ssb_device *dev,
163 163
164 /* TODO: Probably need checks here; is the core connected? */ 164 /* TODO: Probably need checks here; is the core connected? */
165 165
166 if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) || 166 if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
167 dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
168 return -EOPNOTSUPP; 167 return -EOPNOTSUPP;
169 168
170 usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL); 169 usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index ded842bc6578..3003fefaa964 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -75,10 +75,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
75 * Since shared usb code relies on it, set it here for now. 75 * Since shared usb code relies on it, set it here for now.
76 * Once we have dma capability bindings this can go away. 76 * Once we have dma capability bindings this can go away.
77 */ 77 */
78 if (!pdev->dev.dma_mask) 78 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
79 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 79 if (ret)
80 if (!pdev->dev.coherent_dma_mask) 80 return ret;
81 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
82 81
83 hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, 82 hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
84 pdev->name); 83 pdev->name);
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 0a2cce7285be..afe4702a5528 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -10,6 +10,7 @@
10 * 10 *
11 * ARM PrimeCell PL110 Color LCD Controller 11 * ARM PrimeCell PL110 Color LCD Controller
12 */ 12 */
13#include <linux/dma-mapping.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
@@ -551,6 +552,10 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
551 if (!board) 552 if (!board)
552 return -EINVAL; 553 return -EINVAL;
553 554
555 ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
556 if (ret)
557 goto out;
558
554 ret = amba_request_regions(dev, NULL); 559 ret = amba_request_regions(dev, NULL);
555 if (ret) { 560 if (ret) {
556 printk(KERN_ERR "CLCD: unable to reserve regs region\n"); 561 printk(KERN_ERR "CLCD: unable to reserve regs region\n");
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 43ec7e247a80..682df0e1954a 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -30,7 +30,6 @@ struct amba_device {
30 struct device dev; 30 struct device dev;
31 struct resource res; 31 struct resource res;
32 struct clk *pclk; 32 struct clk *pclk;
33 u64 dma_mask;
34 unsigned int periphid; 33 unsigned int periphid;
35 unsigned int irq[AMBA_NR_IRQS]; 34 unsigned int irq[AMBA_NR_IRQS];
36}; 35};
@@ -131,7 +130,6 @@ struct amba_device name##_device = { \
131struct amba_device name##_device = { \ 130struct amba_device name##_device = { \
132 .dev = __AMBA_DEV(busid, data, ~0ULL), \ 131 .dev = __AMBA_DEV(busid, data, ~0ULL), \
133 .res = DEFINE_RES_MEM(base, SZ_4K), \ 132 .res = DEFINE_RES_MEM(base, SZ_4K), \
134 .dma_mask = ~0ULL, \
135 .irq = irqs, \ 133 .irq = irqs, \
136 .periphid = id, \ 134 .periphid = id, \
137} 135}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3a8d0a2af607..fd4aee29ad10 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,30 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
97} 97}
98#endif 98#endif
99 99
100/*
101 * Set both the DMA mask and the coherent DMA mask to the same thing.
102 * Note that we don't check the return value from dma_set_coherent_mask()
 103 * as the DMA API guarantees that the coherent DMA mask can be set to
 104 * the same value as, or a smaller value than, the streaming DMA mask.
105 */
106static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
107{
108 int rc = dma_set_mask(dev, mask);
109 if (rc == 0)
110 dma_set_coherent_mask(dev, mask);
111 return rc;
112}
113
114/*
115 * Similar to the above, except it deals with the case where the device
 116 * does not have dev->dma_mask appropriately set up.
117 */
118static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
119{
120 dev->dma_mask = &dev->coherent_dma_mask;
121 return dma_set_mask_and_coherent(dev, mask);
122}
123
100extern u64 dma_get_required_mask(struct device *dev); 124extern u64 dma_get_required_mask(struct device *dev);
101 125
102static inline unsigned int dma_get_max_seg_size(struct device *dev) 126static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -129,6 +153,13 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
129 return -EIO; 153 return -EIO;
130} 154}
131 155
156#ifndef dma_max_pfn
157static inline unsigned long dma_max_pfn(struct device *dev)
158{
159 return *dev->dma_mask >> PAGE_SHIFT;
160}
161#endif
162
132static inline void *dma_zalloc_coherent(struct device *dev, size_t size, 163static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
133 dma_addr_t *dma_handle, gfp_t flag) 164 dma_addr_t *dma_handle, gfp_t flag)
134{ 165{
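
The dma-mapping.h hunk above is the core of the series: dma_set_mask_and_coherent() for devices whose dma_mask pointer is already valid (PCI, AMBA, bcma/ssb), dma_coerce_mask_and_coherent() for platform devices that may still lack one, and an architecture-overridable dma_max_pfn(). A sketch of a platform probe using the coerce variant, a hypothetical driver following the same idiom as the EHCI/OHCI hunks above:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int foo_plat_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Legacy board files and early DT code may leave dev.dma_mask NULL;
	 * the coerce variant points it at coherent_dma_mask first, then
	 * validates a 32-bit mask against the platform.
	 */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... resource, clock and irq setup would follow ... */
	return 0;
}
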
diff --git a/sound/arm/pxa2xx-pcm.c b/sound/arm/pxa2xx-pcm.c
index 69a2455b4472..e6c727b317fb 100644
--- a/sound/arm/pxa2xx-pcm.c
+++ b/sound/arm/pxa2xx-pcm.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15 16
16#include <sound/core.h> 17#include <sound/core.h>
@@ -83,8 +84,6 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
83 .mmap = pxa2xx_pcm_mmap, 84 .mmap = pxa2xx_pcm_mmap,
84}; 85};
85 86
86static u64 pxa2xx_pcm_dmamask = 0xffffffff;
87
88int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client, 87int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
89 struct snd_pcm **rpcm) 88 struct snd_pcm **rpcm)
90{ 89{
@@ -100,10 +99,9 @@ int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
100 pcm->private_data = client; 99 pcm->private_data = client;
101 pcm->private_free = pxa2xx_pcm_free_dma_buffers; 100 pcm->private_free = pxa2xx_pcm_free_dma_buffers;
102 101
103 if (!card->dev->dma_mask) 102 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
104 card->dev->dma_mask = &pxa2xx_pcm_dmamask; 103 if (ret)
105 if (!card->dev->coherent_dma_mask) 104 goto out;
106 card->dev->coherent_dma_mask = 0xffffffff;
107 105
108 if (play) { 106 if (play) {
109 int stream = SNDRV_PCM_STREAM_PLAYBACK; 107 int stream = SNDRV_PCM_STREAM_PLAYBACK;
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
index 612e5801003f..8ae3fa5ac60a 100644
--- a/sound/soc/atmel/atmel-pcm.c
+++ b/sound/soc/atmel/atmel-pcm.c
@@ -68,18 +68,15 @@ int atmel_pcm_mmap(struct snd_pcm_substream *substream,
68} 68}
69EXPORT_SYMBOL_GPL(atmel_pcm_mmap); 69EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
70 70
71static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
72
73int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd) 71int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
74{ 72{
75 struct snd_card *card = rtd->card->snd_card; 73 struct snd_card *card = rtd->card->snd_card;
76 struct snd_pcm *pcm = rtd->pcm; 74 struct snd_pcm *pcm = rtd->pcm;
77 int ret = 0; 75 int ret;
78 76
79 if (!card->dev->dma_mask) 77 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
80 card->dev->dma_mask = &atmel_pcm_dmamask; 78 if (ret)
81 if (!card->dev->coherent_dma_mask) 79 return ret;
82 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
83 80
84 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 81 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
85 pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n"); 82 pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
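
The atmel-pcm hunk above, and every sound/soc conversion that follows, applies one template to the platform's pcm_new() callback: drop the file-local dmamask variable and let dma_coerce_mask_and_coherent() set the card device's masks, returning any error. A sketch of the resulting callback, with the per-stream buffer preallocation elided as a hypothetical step:

#include <linux/dma-mapping.h>
#include <sound/soc.h>

static int foo_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	int ret;

	/* Replaces the old "static u64 dmamask" plus manual pointer assignment. */
	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... per-stream DMA buffer preallocation would follow ... */
	return 0;
}
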
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 53f84085bf1f..1d4c676eb6cc 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -415,19 +415,16 @@ static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
415 } 415 }
416} 416}
417 417
418static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
419
420static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd) 418static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
421{ 419{
422 struct snd_card *card = rtd->card->snd_card; 420 struct snd_card *card = rtd->card->snd_card;
423 struct snd_pcm *pcm = rtd->pcm; 421 struct snd_pcm *pcm = rtd->pcm;
424 int ret = 0; 422 int ret;
425 423
426 pr_debug("%s enter\n", __func__); 424 pr_debug("%s enter\n", __func__);
427 if (!card->dev->dma_mask) 425 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
428 card->dev->dma_mask = &bf5xx_pcm_dmamask; 426 if (ret)
429 if (!card->dev->coherent_dma_mask) 427 return ret;
430 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
431 428
432 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 429 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
433 ret = bf5xx_pcm_preallocate_dma_buffer(pcm, 430 ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index 9cb4a80df98e..2a5b43417fd5 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -323,18 +323,16 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
323 .silence = bf5xx_pcm_silence, 323 .silence = bf5xx_pcm_silence,
324}; 324};
325 325
326static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
327
328static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd) 326static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
329{ 327{
330 struct snd_card *card = rtd->card->snd_card; 328 struct snd_card *card = rtd->card->snd_card;
331 size_t size = bf5xx_pcm_hardware.buffer_bytes_max; 329 size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
330 int ret;
332 331
333 pr_debug("%s enter\n", __func__); 332 pr_debug("%s enter\n", __func__);
334 if (!card->dev->dma_mask) 333 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
335 card->dev->dma_mask = &bf5xx_pcm_dmamask; 334 if (ret)
336 if (!card->dev->coherent_dma_mask) 335 return ret;
337 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
338 336
339 return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, 337 return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
340 SNDRV_DMA_TYPE_DEV, card->dev, size, size); 338 SNDRV_DMA_TYPE_DEV, card->dev, size, size);
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 443e9e599a75..fa64cd85204f 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -843,18 +843,15 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
843 } 843 }
844} 844}
845 845
846static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
847
848static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd) 846static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
849{ 847{
850 struct snd_card *card = rtd->card->snd_card; 848 struct snd_card *card = rtd->card->snd_card;
851 struct snd_pcm *pcm = rtd->pcm; 849 struct snd_pcm *pcm = rtd->pcm;
852 int ret; 850 int ret;
853 851
854 if (!card->dev->dma_mask) 852 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
855 card->dev->dma_mask = &davinci_pcm_dmamask; 853 if (ret)
856 if (!card->dev->coherent_dma_mask) 854 return ret;
857 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
858 855
859 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 856 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
860 ret = davinci_pcm_preallocate_dma_buffer(pcm, 857 ret = davinci_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index d1b111e7fc07..fb9bb9eb5ca3 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -300,14 +300,11 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
300{ 300{
301 struct snd_card *card = rtd->card->snd_card; 301 struct snd_card *card = rtd->card->snd_card;
302 struct snd_pcm *pcm = rtd->pcm; 302 struct snd_pcm *pcm = rtd->pcm;
303 static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
304 int ret; 303 int ret;
305 304
306 if (!card->dev->dma_mask) 305 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
307 card->dev->dma_mask = &fsl_dma_dmamask; 306 if (ret)
308 307 return ret;
309 if (!card->dev->coherent_dma_mask)
310 card->dev->coherent_dma_mask = fsl_dma_dmamask;
311 308
312 /* Some codecs have separate DAIs for playback and capture, so we 309 /* Some codecs have separate DAIs for playback and capture, so we
313 * should allocate a DMA buffer only for the streams that are valid. 310 * should allocate a DMA buffer only for the streams that are valid.
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 10e330514ed8..41740e488820 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -254,18 +254,16 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
254 return 0; 254 return 0;
255} 255}
256 256
257static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
258
259static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd) 257static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
260{ 258{
261 struct snd_card *card = rtd->card->snd_card; 259 struct snd_card *card = rtd->card->snd_card;
262 struct snd_pcm *pcm = rtd->pcm; 260 struct snd_pcm *pcm = rtd->pcm;
263 int ret = 0; 261 int ret;
262
263 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
264 if (ret)
265 return ret;
264 266
265 if (!card->dev->dma_mask)
266 card->dev->dma_mask = &imx_pcm_dmamask;
267 if (!card->dev->coherent_dma_mask)
268 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
269 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 267 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
270 ret = imx_pcm_preallocate_dma_buffer(pcm, 268 ret = imx_pcm_preallocate_dma_buffer(pcm,
271 SNDRV_PCM_STREAM_PLAYBACK); 269 SNDRV_PCM_STREAM_PLAYBACK);
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index 161e5055ce94..71bf2f248cd4 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -301,7 +301,6 @@ static struct snd_pcm_ops psc_dma_ops = {
301 .hw_params = psc_dma_hw_params, 301 .hw_params = psc_dma_hw_params,
302}; 302};
303 303
304static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
305static int psc_dma_new(struct snd_soc_pcm_runtime *rtd) 304static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
306{ 305{
307 struct snd_card *card = rtd->card->snd_card; 306 struct snd_card *card = rtd->card->snd_card;
@@ -309,15 +308,14 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
309 struct snd_pcm *pcm = rtd->pcm; 308 struct snd_pcm *pcm = rtd->pcm;
310 struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); 309 struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
311 size_t size = psc_dma_hardware.buffer_bytes_max; 310 size_t size = psc_dma_hardware.buffer_bytes_max;
312 int rc = 0; 311 int rc;
313 312
314 dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n", 313 dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
315 card, dai, pcm); 314 card, dai, pcm);
316 315
317 if (!card->dev->dma_mask) 316 rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
318 card->dev->dma_mask = &psc_dma_dmamask; 317 if (rc)
319 if (!card->dev->coherent_dma_mask) 318 return rc;
320 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
321 319
322 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 320 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
323 rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev, 321 rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
diff --git a/sound/soc/jz4740/jz4740-pcm.c b/sound/soc/jz4740/jz4740-pcm.c
index 710059292318..1d7ef28585e1 100644
--- a/sound/soc/jz4740/jz4740-pcm.c
+++ b/sound/soc/jz4740/jz4740-pcm.c
@@ -297,19 +297,15 @@ static void jz4740_pcm_free(struct snd_pcm *pcm)
297 } 297 }
298} 298}
299 299
300static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
301
302static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd) 300static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
303{ 301{
304 struct snd_card *card = rtd->card->snd_card; 302 struct snd_card *card = rtd->card->snd_card;
305 struct snd_pcm *pcm = rtd->pcm; 303 struct snd_pcm *pcm = rtd->pcm;
306 int ret = 0; 304 int ret;
307
308 if (!card->dev->dma_mask)
309 card->dev->dma_mask = &jz4740_pcm_dmamask;
310 305
311 if (!card->dev->coherent_dma_mask) 306 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
312 card->dev->coherent_dma_mask = DMA_BIT_MASK(32); 307 if (ret)
308 return ret;
313 309
314 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 310 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
315 ret = jz4740_pcm_preallocate_dma_buffer(pcm, 311 ret = jz4740_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 55d0d9d3a9fd..4af1936cf0f4 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -57,8 +57,6 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
57 .fifo_size = 0, 57 .fifo_size = 0,
58}; 58};
59 59
60static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
61
62static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id) 60static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
63{ 61{
64 struct kirkwood_dma_data *priv = dev_id; 62 struct kirkwood_dma_data *priv = dev_id;
@@ -290,10 +288,9 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
290 struct snd_pcm *pcm = rtd->pcm; 288 struct snd_pcm *pcm = rtd->pcm;
291 int ret; 289 int ret;
292 290
293 if (!card->dev->dma_mask) 291 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
294 card->dev->dma_mask = &kirkwood_dma_dmamask; 292 if (ret)
295 if (!card->dev->coherent_dma_mask) 293 return ret;
296 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
297 294
298 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 295 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
299 ret = kirkwood_dma_preallocate_dma_buffer(pcm, 296 ret = kirkwood_dma_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index c894ff0f2580..f588ee45b4fd 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -314,16 +314,15 @@ static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
314 snd_pcm_lib_preallocate_free_for_all(pcm); 314 snd_pcm_lib_preallocate_free_for_all(pcm);
315} 315}
316 316
317static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
318static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd) 317static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
319{ 318{
320 struct snd_card *card = rtd->card->snd_card; 319 struct snd_card *card = rtd->card->snd_card;
321 struct snd_pcm *pcm = rtd->pcm; 320 struct snd_pcm *pcm = rtd->pcm;
321 int ret;
322 322
323 if (!card->dev->dma_mask) 323 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
324 card->dev->dma_mask = &nuc900_pcm_dmamask; 324 if (ret)
325 if (!card->dev->coherent_dma_mask) 325 return ret;
326 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
327 326
328 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, 327 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
329 card->dev, 4 * 1024, (4 * 1024) - 1); 328 card->dev, 4 * 1024, (4 * 1024) - 1);
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index a11405de86e8..b8fa9862e54c 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -156,8 +156,6 @@ static struct snd_pcm_ops omap_pcm_ops = {
156 .mmap = omap_pcm_mmap, 156 .mmap = omap_pcm_mmap,
157}; 157};
158 158
159static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
160
161static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, 159static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
162 int stream) 160 int stream)
163{ 161{
@@ -202,12 +200,11 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
202{ 200{
203 struct snd_card *card = rtd->card->snd_card; 201 struct snd_card *card = rtd->card->snd_card;
204 struct snd_pcm *pcm = rtd->pcm; 202 struct snd_pcm *pcm = rtd->pcm;
205 int ret = 0; 203 int ret;
206 204
207 if (!card->dev->dma_mask) 205 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
208 card->dev->dma_mask = &omap_pcm_dmamask; 206 if (ret)
209 if (!card->dev->coherent_dma_mask) 207 return ret;
210 card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
211 208
212 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 209 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
213 ret = omap_pcm_preallocate_dma_buffer(pcm, 210 ret = omap_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 806da27b8b67..d58b09f4f7a4 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -87,18 +87,15 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
87 .mmap = pxa2xx_pcm_mmap, 87 .mmap = pxa2xx_pcm_mmap,
88}; 88};
89 89
90static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
91
92static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd) 90static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
93{ 91{
94 struct snd_card *card = rtd->card->snd_card; 92 struct snd_card *card = rtd->card->snd_card;
95 struct snd_pcm *pcm = rtd->pcm; 93 struct snd_pcm *pcm = rtd->pcm;
96 int ret = 0; 94 int ret;
97 95
98 if (!card->dev->dma_mask) 96 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
99 card->dev->dma_mask = &pxa2xx_pcm_dmamask; 97 if (ret)
100 if (!card->dev->coherent_dma_mask) 98 return ret;
101 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
102 99
103 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 100 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
104 ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, 101 ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index 5cfaa5464eba..d219880815c0 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -445,8 +445,6 @@ static void s6000_pcm_free(struct snd_pcm *pcm)
445 snd_pcm_lib_preallocate_free_for_all(pcm); 445 snd_pcm_lib_preallocate_free_for_all(pcm);
446} 446}
447 447
448static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
449
450static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime) 448static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
451{ 449{
452 struct snd_card *card = runtime->card->snd_card; 450 struct snd_card *card = runtime->card->snd_card;
@@ -457,10 +455,9 @@ static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
457 params = snd_soc_dai_get_dma_data(runtime->cpu_dai, 455 params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
458 pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); 456 pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
459 457
460 if (!card->dev->dma_mask) 458 res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
461 card->dev->dma_mask = &s6000_pcm_dmamask; 459 if (res)
462 if (!card->dev->coherent_dma_mask) 460 return res;
463 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
464 461
465 if (params->dma_in) { 462 if (params->dma_in) {
466 s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in), 463 s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 9338d11e9216..fe2748b494d4 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -406,20 +406,17 @@ static void dma_free_dma_buffers(struct snd_pcm *pcm)
406 } 406 }
407} 407}
408 408
409static u64 dma_mask = DMA_BIT_MASK(32);
410
411static int dma_new(struct snd_soc_pcm_runtime *rtd) 409static int dma_new(struct snd_soc_pcm_runtime *rtd)
412{ 410{
413 struct snd_card *card = rtd->card->snd_card; 411 struct snd_card *card = rtd->card->snd_card;
414 struct snd_pcm *pcm = rtd->pcm; 412 struct snd_pcm *pcm = rtd->pcm;
415 int ret = 0; 413 int ret;
416 414
417 pr_debug("Entered %s\n", __func__); 415 pr_debug("Entered %s\n", __func__);
418 416
419 if (!card->dev->dma_mask) 417 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
420 card->dev->dma_mask = &dma_mask; 418 if (ret)
421 if (!card->dev->coherent_dma_mask) 419 return ret;
422 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
423 420
424 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 421 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
425 ret = preallocate_dma_buffer(pcm, 422 ret = preallocate_dma_buffer(pcm,
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index ce1e1e16f250..e4f318fc2f82 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -383,18 +383,15 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
383 return 0; 383 return 0;
384} 384}
385 385
386static u64 idma_mask = DMA_BIT_MASK(32);
387
388static int idma_new(struct snd_soc_pcm_runtime *rtd) 386static int idma_new(struct snd_soc_pcm_runtime *rtd)
389{ 387{
390 struct snd_card *card = rtd->card->snd_card; 388 struct snd_card *card = rtd->card->snd_card;
391 struct snd_pcm *pcm = rtd->pcm; 389 struct snd_pcm *pcm = rtd->pcm;
392 int ret = 0; 390 int ret;
393 391
394 if (!card->dev->dma_mask) 392 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
395 card->dev->dma_mask = &idma_mask; 393 if (ret)
396 if (!card->dev->coherent_dma_mask) 394 return ret;
397 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
398 395
399 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { 396 if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
400 ret = preallocate_idma_buffer(pcm, 397 ret = preallocate_idma_buffer(pcm,