Diffstat:
 arch/ia64/include/asm/dma-mapping.h      |  19
 arch/powerpc/include/asm/dma-mapping.h   |  23
 arch/powerpc/kernel/dma-swiotlb.c        |  48
 arch/sparc/Kconfig                       |   2
 arch/sparc/include/asm/dma-mapping.h     | 145
 arch/sparc/include/asm/pci.h             |   3
 arch/sparc/include/asm/pci_32.h          | 105
 arch/sparc/include/asm/pci_64.h          |  88
 arch/sparc/kernel/Makefile               |   2
 arch/sparc/kernel/dma.c                  | 175
 arch/sparc/kernel/dma.h                  |  14
 arch/sparc/kernel/iommu.c                |  20
 arch/sparc/kernel/ioport.c               | 190
 arch/sparc/kernel/pci.c                  |   2
 arch/sparc/kernel/pci_sun4v.c            |  30
 arch/x86/include/asm/dma-mapping.h       |  18
 arch/x86/kernel/pci-dma.c                |   2
 arch/x86/kernel/pci-gart_64.c            |   5
 arch/x86/kernel/pci-nommu.c              |  29
 arch/x86/kernel/pci-swiotlb.c            |  25
 include/asm-generic/dma-mapping-common.h |   6
 include/linux/dma-mapping.h              |   5
 include/linux/swiotlb.h                  |  11
 lib/swiotlb.c                            | 124
 24 files changed, 291 insertions(+), 800 deletions(-)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5a61b5c2e18f..8d3c79cd81e7 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
 #define get_dma_ops(dev) platform_dma_get_ops(dev)
-#define flush_write_buffers()
 
 #include <asm-generic/dma-mapping-common.h>
 
@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask)
 	return 0;
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
 extern int dma_get_cache_alignment(void);
 
 static inline void
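The new dma_capable()/phys_to_dma()/dma_to_phys() helpers give generic code (most immediately the swiotlb rework later in this series) one place to ask "can this device reach this bus address?" and to convert between physical and bus addresses. On ia64 the bus address equals the physical address, so the conversions are identity. A minimal sketch of how a caller might combine them; swiotlb_needs_bounce() is illustrative only, not a function added by this patch:

static bool swiotlb_needs_bounce(struct device *dev, phys_addr_t paddr,
				 size_t size)
{
	/* Identity on ia64/x86, physical address plus an offset on powerpc. */
	dma_addr_t dma_addr = phys_to_dma(dev, paddr);

	/* dma_capable() is false when the device has no dma_mask or when the
	 * end of the buffer lies above the mask, i.e. the buffer must be
	 * bounced through swiotlb. */
	return !dma_capable(dev, dma_addr, size);
}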
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index b44aaabdd1a6..0c34371ec49c 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #endif
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+	if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
+		return 0;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr + get_dma_direct_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr - get_dma_direct_offset(dev);
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
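On powerpc the bus address a device sees may be offset from the CPU physical address, so phys_to_dma()/dma_to_phys() apply get_dma_direct_offset(), and dma_capable() additionally honours the per-ops addr_needs_map() hook. A hedged sketch of the properties generic callers rely on (check_dma_offset() is illustrative only, not part of the patch):

static bool check_dma_offset(struct device *dev, phys_addr_t paddr, size_t size)
{
	dma_addr_t bus = phys_to_dma(dev, paddr);	/* paddr + offset */

	/* The two conversions must invert each other. */
	BUG_ON(dma_to_phys(dev, bus) != paddr);		/* bus - offset */

	/* dma_capable() folds together the addr_needs_map() hook and the
	 * dma_mask test that callers used to open-code via
	 * is_buffer_dma_capable(). */
	return dma_capable(dev, bus, size);
}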
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 68ccf11e4f19..e8a57de85bcf 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -24,50 +24,12 @@
 int swiotlb __read_mostly;
 unsigned int ppc_swiotlb_enable;
 
-void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
-{
-	unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
-	void *pageaddr = page_address(pfn_to_page(pfn));
-
-	if (pageaddr != NULL)
-		return pageaddr + (addr % PAGE_SIZE);
-	return NULL;
-}
-
-dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-	return paddr + get_dma_direct_offset(hwdev);
-}
-
-phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-
-{
-	return baddr - get_dma_direct_offset(hwdev);
-}
-
-/*
- * Determine if an address needs bounce buffering via swiotlb.
- * Going forward I expect the swiotlb code to generalize on using
- * a dma_ops->addr_needs_map, and this function will move from here to the
- * generic swiotlb code.
- */
-int
-swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
-				   size_t size)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->addr_needs_map(hwdev, addr, size);
-}
-
 /*
  * Determine if an address is reachable by a pci device, or if we must bounce.
  */
 static int
 swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	u64 mask = dma_get_mask(hwdev);
 	dma_addr_t max;
 	struct pci_controller *hose;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
 	if ((addr + size > max) | (addr < hose->dma_window_base_cur))
 		return 1;
 
-	return !is_buffer_dma_capable(mask, addr, size);
-}
-
-static int
-swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
-{
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+	return 0;
 }
 
-
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM.  Since
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.addr_needs_map = swiotlb_addr_needs_map,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3f8b6a92eabd..233cff53a623 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,6 +25,8 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_API_DEBUG
 
 config SPARC32
 	def_bool !64BIT
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 204e4bf64438..5a8c308e2b5c 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/mm.h>
+#include <linux/dma-debug.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h)	(1)
 
-struct dma_ops {
-	void *(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_page)(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction direction);
-	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
-			   size_t size,
-			   enum dma_data_direction direction);
-	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
-	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-			 int nhwentries,
-			 enum dma_data_direction direction);
-	void (*sync_single_for_cpu)(struct device *dev,
-				    dma_addr_t dma_handle, size_t size,
-				    enum dma_data_direction direction);
-	void (*sync_single_for_device)(struct device *dev,
-				       dma_addr_t dma_handle,
-				       size_t size,
-				       enum dma_data_direction direction);
-	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
-				int nelems,
-				enum dma_data_direction direction);
-	void (*sync_sg_for_device)(struct device *dev,
-				   struct scatterlist *sg, int nents,
-				   enum dma_data_direction dir);
-};
-extern const struct dma_ops *dma_ops;
+extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct bus_type pci_bus_type;
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					 size_t size,
-					 enum dma_data_direction direction)
-{
-	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
-				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
-				 direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_ops->unmap_page(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_ops->map_page(dev, page, offset, size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_ops->unmap_page(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction direction)
-{
-	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+	if (dev->bus == &pci_bus_type)
+		return &pci32_dma_ops;
+#endif
+	return dma_ops;
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction direction)
-{
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(dev, dma_handle, size,
-						direction);
-}
+#include <asm-generic/dma-mapping-common.h>
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction direction)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flag)
 {
-	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction direction)
-{
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction dir)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
 {
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t dma_handle,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return (dma_addr == DMA_ERROR_CODE);
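With dma_ops converted to struct dma_map_ops and get_dma_ops() choosing pci32_dma_ops for PCI devices on sparc32 (and the global dma_ops, i.e. the SBUS ops on sparc32 or the sun4u/sun4v ops on sparc64, for everything else), the map/unmap/sync wrappers now come from asm-generic/dma-mapping-common.h instead of being open-coded per architecture. A simplified sketch of what that common header provides on top of get_dma_ops(), with the dma-debug and dma_attrs plumbing trimmed (not the literal header text):

static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* map_page is the one mandatory mapping hook; the old per-arch
	 * map_single wrappers are expressed in terms of it. */
	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size, dir, NULL);
}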
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 6e14fd179335..d9c031f9910f 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -5,4 +5,7 @@
 #else
 #include <asm/pci_32.h>
 #endif
+
+#include <asm-generic/pci-dma-compat.h>
+
 #endif
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c198159..ac0e8369fd97 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-#include <asm/scatterlist.h>
-
 struct pci_dev;
 
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings assosciated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
- */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-/*
- * Same as above, only with pages instead of mapped addresses.
- */
-extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction);
-extern void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction);
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the device
- * again owns the buffer.
- */
-extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
-	return 1;
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
-#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return (dma_addr == PCI_DMA_ERROR_CODE);
-}
-
 struct device_node;
 extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
 
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59c..5cc9f6aa5494 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-					 dma_addr_t *dma_handle)
-{
-	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
-				       void *vaddr, dma_addr_t dma_handle)
-{
-	return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
-					size_t size, int direction)
-{
-	return dma_map_single(&pdev->dev, ptr, size,
-			      (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
-				    size_t size, int direction)
-{
-	dma_unmap_single(&pdev->dev, dma_addr, size,
-			 (enum dma_data_direction) direction);
-}
-
-#define pci_map_page(dev, page, off, size, dir) \
-	pci_map_single(dev, (page_address(page) + (off)), size, dir)
-#define pci_unmap_page(dev,addr,sz,dir) \
-	pci_unmap_single(dev,addr,sz,dir)
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
-			     int nents, int direction)
-{
-	return dma_map_sg(&pdev->dev, sg, nents,
-			  (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
-				int nents, int direction)
-{
-	dma_unmap_sg(&pdev->dev, sg, nents,
-		     (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
-					       dma_addr_t dma_handle,
-					       size_t size, int direction)
-{
-	dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
-				(enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
-						  dma_addr_t dma_handle,
-						  size_t size, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
-					   struct scatterlist *sg,
-					   int nents, int direction)
-{
-	dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
-			    (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
-					      struct scatterlist *sg,
-					      int nelems, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
-
 /* PCI IOMMU mapping bypass support. */
 
 /* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
 #define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE		0xfffc000000000000UL
 
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 475ce4696acd..29b88a580661 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -61,7 +61,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
 obj-$(CONFIG_SPARC32)     += devres.o
 devres-y                  := ../../../kernel/irq/devres.o
 
-obj-$(CONFIG_SPARC32)     += dma.o
+obj-y                     += dma.o
 
 obj-$(CONFIG_SPARC32_PCI) += pcic.o
 
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index 524c32f97c55..e1ba8ee21b9a 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -1,178 +1,13 @@
-/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc.
- *
- * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
- */
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#endif
+#include <linux/dma-debug.h>
 
-#include "dma.h"
+#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 15)
 
-int dma_supported(struct device *dev, u64 mask)
+static int __init dma_init(void)
 {
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), mask);
-#endif
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
-EXPORT_SYMBOL(dma_supported);
+fs_initcall(dma_init);
-
-int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-	return -EOPNOTSUPP;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
-static void *dma32_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flag)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
-#endif
-	return sbus_alloc_consistent(dev, size, dma_handle);
-}
-
-static void dma32_free_coherent(struct device *dev, size_t size,
-				void *cpu_addr, dma_addr_t dma_handle)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_free_consistent(to_pci_dev(dev), size,
-				    cpu_addr, dma_handle);
-		return;
-	}
-#endif
-	sbus_free_consistent(dev, size, cpu_addr, dma_handle);
-}
-
-static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_map_page(to_pci_dev(dev), page, offset,
-				    size, (int)direction);
-#endif
-	return sbus_map_single(dev, page_address(page) + offset,
-			       size, (int)direction);
-}
-
-static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
-			     size_t size, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_unmap_page(to_pci_dev(dev), dma_address,
-			       size, (int)direction);
-		return;
-	}
-#endif
-	sbus_unmap_single(dev, dma_address, size, (int)direction);
-}
-
-static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
-			int nents, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-#endif
-	return sbus_map_sg(dev, sg, nents, direction);
-}
-
-void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
-		    int nents, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
-		return;
-	}
-#endif
-	sbus_unmap_sg(dev, sg, nents, (int)direction);
-}
-
-static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				      size_t size,
-				      enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
-					    size, (int)direction);
-		return;
-	}
-#endif
-	sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_single_for_device(struct device *dev,
-					 dma_addr_t dma_handle, size_t size,
-					 enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
-					       size, (int)direction);
-		return;
-	}
-#endif
-	sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				  int nelems, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
-					nelems, (int)direction);
-		return;
-	}
-#endif
-	BUG();
-}
-
-static void dma32_sync_sg_for_device(struct device *dev,
-				     struct scatterlist *sg, int nelems,
-				     enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type) {
-		pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
-					   nelems, (int)direction);
-		return;
-	}
-#endif
-	BUG();
-}
-
-static const struct dma_ops dma32_dma_ops = {
-	.alloc_coherent		= dma32_alloc_coherent,
-	.free_coherent		= dma32_free_coherent,
-	.map_page		= dma32_map_page,
-	.unmap_page		= dma32_unmap_page,
-	.map_sg			= dma32_map_sg,
-	.unmap_sg		= dma32_unmap_sg,
-	.sync_single_for_cpu	= dma32_sync_single_for_cpu,
-	.sync_single_for_device	= dma32_sync_single_for_device,
-	.sync_sg_for_cpu	= dma32_sync_sg_for_cpu,
-	.sync_sg_for_device	= dma32_sync_sg_for_device,
-};
-
-const struct dma_ops *dma_ops = &dma32_dma_ops;
-EXPORT_SYMBOL(dma_ops);
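dma.c shrinks to a dma-debug registration because the PCI-vs-SBUS dispatch that every dma32_* helper used to repeat now happens once, in get_dma_ops(). With CONFIG_DMA_API_DEBUG (newly selectable via HAVE_DMA_API_DEBUG above), the entries reserved by dma_debug_init() let the core track every mapping. An illustrative misuse it can then flag (example only, not taken from the patch):

static void dma_debug_example(struct device *dev, void *buf, size_t len)
{
	dma_addr_t ba = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, ba))
		return;

	/* Unmapping with a mismatched size or direction would now trigger a
	 * dma-debug warning; the unmap must mirror the map exactly. */
	dma_unmap_single(dev, ba, len, DMA_TO_DEVICE);
}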
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h
deleted file mode 100644
index f8d8951adb53..000000000000
--- a/arch/sparc/kernel/dma.h
+++ /dev/null
@@ -1,14 +0,0 @@
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
-dma_addr_t sbus_map_single(struct device *dev, void *va,
-			   size_t len, int direction);
-void sbus_unmap_single(struct device *dev, dma_addr_t ba,
-		       size_t n, int direction);
-int sbus_map_sg(struct device *dev, struct scatterlist *sg,
-		int n, int direction);
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int n, int direction);
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
-				  size_t size, int direction);
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
-				     size_t size, int direction);
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0aeaefe696b9..7690cc219ecc 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 
 static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction)
+				  enum dma_data_direction direction,
+				  struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -474,7 +475,8 @@ do_flush_sync:
 }
 
 static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
-			      size_t sz, enum dma_data_direction direction)
+			      size_t sz, enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 }
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
-			 int nelems, enum dma_data_direction direction)
+			 int nelems, enum dma_data_direction direction,
+			 struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *outs, *segstart;
 	unsigned long flags, handle, prot, ctx;
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
 }
 
 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction)
+			    int nelems, enum dma_data_direction direction,
+			    struct dma_attrs *attrs)
 {
 	unsigned long flags, ctx;
 	struct scatterlist *sg;
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static const struct dma_ops sun4u_dma_ops = {
+static struct dma_map_ops sun4u_dma_ops = {
 	.alloc_coherent		= dma_4u_alloc_coherent,
 	.free_coherent		= dma_4u_free_coherent,
 	.map_page		= dma_4u_map_page,
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = {
 	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
 };
 
-const struct dma_ops *dma_ops = &sun4u_dma_ops;
+struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
+extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+
 int dma_supported(struct device *dev, u64 device_mask)
 {
 	struct iommu *iommu = dev->archdata.iommu;
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)
 
 #ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
-		return pci_dma_supported(to_pci_dev(dev), device_mask);
+		return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif
 
 	return 0;
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 87ea0d03d975..edbea232c617 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -48,8 +48,6 @@
 #include <asm/iommu.h>
 #include <asm/io-unit.h>
 
-#include "dma.h"
-
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +244,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -299,7 +298,8 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+			       dma_addr_t ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -317,7 +317,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 
 	n = (n + PAGE_SIZE-1) & PAGE_MASK;
 	if ((res->end-res->start)+1 != n) {
-		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 			(long)((res->end-res->start)+1), n);
 		return;
 	}
@@ -337,8 +337,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
-dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+				unsigned long offset, size_t len,
+				enum dma_data_direction dir,
+				struct dma_attrs *attrs)
 {
+	void *va = page_address(page) + offset;
+
 	/* XXX why are some lengths signed, others unsigned? */
 	if (len <= 0) {
 		return 0;
@@ -350,12 +355,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
 	return mmu_get_scsi_one(dev, va, len);
 }
 
-void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_one(dev, ba, n);
 }
 
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_get_scsi_sgl(dev, sg, n);
 
@@ -366,19 +373,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
 	return n;
 }
 
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_release_scsi_sgl(dev, sg, n);
 }
 
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				 int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				    int n, enum dma_data_direction dir)
 {
+	BUG();
 }
 
+struct dma_map_ops sbus_dma_ops = {
+	.alloc_coherent		= sbus_alloc_coherent,
+	.free_coherent		= sbus_free_coherent,
+	.map_page		= sbus_map_page,
+	.unmap_page		= sbus_unmap_page,
+	.map_sg			= sbus_map_sg,
+	.unmap_sg		= sbus_unmap_sg,
+	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
+	.sync_sg_for_device	= sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -395,7 +421,8 @@ arch_initcall(sparc_register_ioport);
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+				  dma_addr_t *pba, gfp_t gfp)
 {
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
@@ -439,7 +466,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
 }
-EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
@@ -449,7 +475,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+				dma_addr_t ba)
 {
 	struct resource *res;
 	unsigned long pgp;
@@ -481,60 +508,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
 
 	free_pages(pgp, get_order(n));
 }
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-			  int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* IIep is write-through, not flushing. */
-	return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
-		      int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
-		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-				   (size + PAGE_SIZE-1) & PAGE_MASK);
-	}
-}
-EXPORT_SYMBOL(pci_unmap_single);
 
 /*
 * Same as pci_map_single, but with pages.
 */
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-	/* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
 
 /* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scather-gather version of the
@@ -551,13 +536,13 @@ EXPORT_SYMBOL(pci_unmap_page);
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-	       int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -566,20 +551,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	}
 	return nents;
 }
-EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
-		  int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -588,7 +572,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_unmap_sg);
 
 /* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
@@ -600,25 +583,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+				      size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
				   (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+					 size_t size, enum dma_data_direction dir)
 {
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
				   (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 
 /* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
@@ -626,13 +607,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+				  int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -641,15 +622,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+				     int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int n;
 
-	BUG_ON(direction == PCI_DMA_NONE);
-	if (direction != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
@@ -658,9 +638,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 		}
 	}
 }
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+	.alloc_coherent		= pci32_alloc_coherent,
+	.free_coherent		= pci32_free_coherent,
+	.map_page		= pci32_map_page,
+	.map_sg			= pci32_map_sg,
+	.unmap_sg		= pci32_unmap_sg,
+	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
+	.sync_single_for_device	= pci32_sync_single_for_device,
+	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
+	.sync_sg_for_device	= pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
+
 #endif /* CONFIG_PCI */
 
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly.  For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return 1;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+
 #ifdef CONFIG_PROC_FS
 
 static int
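Note that pci32_dma_ops deliberately leaves some hooks NULL: there is no .unmap_page, and since IIep is write-through there is nothing to flush on unmap. The generic wrappers tolerate this by treating a NULL hook as a no-op. A hedged, simplified sketch of the pattern used by asm-generic/dma-mapping-common.h (dma-debug and attrs details omitted):

static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->unmap_page)		/* NULL hook means nothing to do */
		ops->unmap_page(dev, addr, size, dir, NULL);
}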
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 57859ad23547..c68648662802 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
 	pci_dev_put(ali_isa_bridge);
 }
 
-int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
 {
 	u64 dma_addr_mask;
 
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 2485eaa23101..23c33ff9c31e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
232 232
233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, 233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
234 unsigned long offset, size_t sz, 234 unsigned long offset, size_t sz,
235 enum dma_data_direction direction) 235 enum dma_data_direction direction,
236 struct dma_attrs *attrs)
236{ 237{
237 struct iommu *iommu; 238 struct iommu *iommu;
238 unsigned long flags, npages, oaddr; 239 unsigned long flags, npages, oaddr;
@@ -296,7 +297,8 @@ iommu_map_fail:
296} 297}
297 298
298static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, 299static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
299 size_t sz, enum dma_data_direction direction) 300 size_t sz, enum dma_data_direction direction,
301 struct dma_attrs *attrs)
300{ 302{
301 struct pci_pbm_info *pbm; 303 struct pci_pbm_info *pbm;
302 struct iommu *iommu; 304 struct iommu *iommu;
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
336} 338}
337 339
338static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, 340static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
339 int nelems, enum dma_data_direction direction) 341 int nelems, enum dma_data_direction direction,
342 struct dma_attrs *attrs)
340{ 343{
341 struct scatterlist *s, *outs, *segstart; 344 struct scatterlist *s, *outs, *segstart;
342 unsigned long flags, handle, prot; 345 unsigned long flags, handle, prot;
@@ -478,7 +481,8 @@ iommu_map_failed:
478} 481}
479 482
480static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, 483static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
481 int nelems, enum dma_data_direction direction) 484 int nelems, enum dma_data_direction direction,
485 struct dma_attrs *attrs)
482{ 486{
483 struct pci_pbm_info *pbm; 487 struct pci_pbm_info *pbm;
484 struct scatterlist *sg; 488 struct scatterlist *sg;
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
521 spin_unlock_irqrestore(&iommu->lock, flags); 525 spin_unlock_irqrestore(&iommu->lock, flags);
522} 526}
523 527
524static void dma_4v_sync_single_for_cpu(struct device *dev, 528static struct dma_map_ops sun4v_dma_ops = {
525 dma_addr_t bus_addr, size_t sz,
526 enum dma_data_direction direction)
527{
528 /* Nothing to do... */
529}
530
531static void dma_4v_sync_sg_for_cpu(struct device *dev,
532 struct scatterlist *sglist, int nelems,
533 enum dma_data_direction direction)
534{
535 /* Nothing to do... */
536}
537
538static const struct dma_ops sun4v_dma_ops = {
539 .alloc_coherent = dma_4v_alloc_coherent, 529 .alloc_coherent = dma_4v_alloc_coherent,
540 .free_coherent = dma_4v_free_coherent, 530 .free_coherent = dma_4v_free_coherent,
541 .map_page = dma_4v_map_page, 531 .map_page = dma_4v_map_page,
542 .unmap_page = dma_4v_unmap_page, 532 .unmap_page = dma_4v_unmap_page,
543 .map_sg = dma_4v_map_sg, 533 .map_sg = dma_4v_map_sg,
544 .unmap_sg = dma_4v_unmap_sg, 534 .unmap_sg = dma_4v_unmap_sg,
545 .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
546 .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
547}; 535};
548 536
549static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, 537static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
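
The sun4v hunks above retype the per-bus operation table from the sparc-private struct dma_ops to the generic struct dma_map_ops, thread the new struct dma_attrs * argument through map_page/unmap_page/map_sg/unmap_sg, and drop the empty sync_*_for_cpu stubs: the generic dispatch code only invokes a sync hook when the ops table actually supplies one. Below is a minimal userspace model of that optional-hook dispatch; every name in it is invented for illustration and none of it is kernel code.

#include <stdio.h>
#include <stddef.h>

/* Invented stand-ins for the kernel types; not the kernel's definitions. */
struct model_dev {
	unsigned long long dma_mask;
};

struct model_dma_ops {
	unsigned long long (*map_page)(struct model_dev *dev, void *buf,
				       size_t len, int dir, void *attrs);
	void (*sync_single_for_cpu)(struct model_dev *dev,
				    unsigned long long addr, size_t len, int dir);
};

static unsigned long long model_map_page(struct model_dev *dev, void *buf,
					 size_t len, int dir, void *attrs)
{
	(void)dev; (void)len; (void)dir; (void)attrs;
	return (unsigned long long)(size_t)buf;	/* identity "IOMMU" for the model */
}

/* No sync_single_for_cpu hook: the table simply leaves it NULL. */
static const struct model_dma_ops model_ops = {
	.map_page = model_map_page,
};

/* Generic wrapper: call the hook only when the ops table provides one. */
static void model_sync_single_for_cpu(struct model_dev *dev,
				      const struct model_dma_ops *ops,
				      unsigned long long addr, size_t len, int dir)
{
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, len, dir);
	/* otherwise nothing to do, like the removed dma_4v_sync_* stubs */
}

int main(void)
{
	struct model_dev dev = { .dma_mask = ~0ULL };
	char buf[64];
	unsigned long long handle;

	handle = model_ops.map_page(&dev, buf, sizeof(buf), 0, NULL);
	model_sync_single_for_cpu(&dev, &model_ops, handle, sizeof(buf), 0);
	printf("mapped at %#llx; sync_for_cpu was a no-op\n", handle);
	return 0;
}
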
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 1c3f9435f1c9..0ee770d23d0e 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask);
55extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 55extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
56 dma_addr_t *dma_addr, gfp_t flag); 56 dma_addr_t *dma_addr, gfp_t flag);
57 57
58static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
59{
60 if (!dev->dma_mask)
61 return 0;
62
63 return addr + size <= *dev->dma_mask;
64}
65
66static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
67{
68 return paddr;
69}
70
71static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
72{
73 return daddr;
74}
75
58static inline void 76static inline void
59dma_cache_sync(struct device *dev, void *vaddr, size_t size, 77dma_cache_sync(struct device *dev, void *vaddr, size_t size,
60 enum dma_data_direction dir) 78 enum dma_data_direction dir)
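
The three inline helpers added here give x86 the generic primitives the rest of the series builds on: phys_to_dma()/dma_to_phys() are the identity on x86, and dma_capable() is the "does addr + size fit under the device's DMA mask" test that replaces is_buffer_dma_capable(). A small userspace model of the mask check, assuming a 32-bit-only device, looks roughly like this (the types and device struct are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef uint64_t model_dma_addr_t;

struct model_dev {
	uint64_t *dma_mask;	/* NULL means "no DMA mask set" */
};

static bool model_dma_capable(const struct model_dev *dev,
			      model_dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;
	return addr + size <= *dev->dma_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* device can address 32 bits */
	struct model_dev dev = { .dma_mask = &mask32 };

	/* A buffer below 4 GiB is reachable directly... */
	printf("low  buffer: %s\n",
	       model_dma_capable(&dev, 0x10000000ULL, 4096) ? "direct" : "bounce");
	/* ...one above 4 GiB would need an IOMMU or a bounce buffer. */
	printf("high buffer: %s\n",
	       model_dma_capable(&dev, 0x123456000ULL, 4096) ? "direct" : "bounce");
	return 0;
}
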
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 873aa079d166..8fb4ce35bea2 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -154,7 +154,7 @@ again:
154 return NULL; 154 return NULL;
155 155
156 addr = page_to_phys(page); 156 addr = page_to_phys(page);
157 if (!is_buffer_dma_capable(dma_mask, addr, size)) { 157 if (addr + size > dma_mask) {
158 __free_pages(page, get_order(size)); 158 __free_pages(page, get_order(size));
159 159
160 if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { 160 if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index d2e56b8f48e7..98a827ee9ed7 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -190,14 +190,13 @@ static void iommu_full(struct device *dev, size_t size, int dir)
190static inline int 190static inline int
191need_iommu(struct device *dev, unsigned long addr, size_t size) 191need_iommu(struct device *dev, unsigned long addr, size_t size)
192{ 192{
193 return force_iommu || 193 return force_iommu || !dma_capable(dev, addr, size);
194 !is_buffer_dma_capable(*dev->dma_mask, addr, size);
195} 194}
196 195
197static inline int 196static inline int
198nonforced_iommu(struct device *dev, unsigned long addr, size_t size) 197nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
199{ 198{
200 return !is_buffer_dma_capable(*dev->dma_mask, addr, size); 199 return !dma_capable(dev, addr, size);
201} 200}
202 201
203/* Map a single continuous physical area into the IOMMU. 202/* Map a single continuous physical area into the IOMMU.
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 71d412a09f30..a3933d4330cd 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -14,7 +14,7 @@
14static int 14static int
15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
16{ 16{
17 if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { 17 if (hwdev && !dma_capable(hwdev, bus, size)) {
18 if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) 18 if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
19 printk(KERN_ERR 19 printk(KERN_ERR
20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", 20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ -79,12 +79,29 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
79 free_pages((unsigned long)vaddr, get_order(size)); 79 free_pages((unsigned long)vaddr, get_order(size));
80} 80}
81 81
82static void nommu_sync_single_for_device(struct device *dev,
83 dma_addr_t addr, size_t size,
84 enum dma_data_direction dir)
85{
86 flush_write_buffers();
87}
88
89
90static void nommu_sync_sg_for_device(struct device *dev,
91 struct scatterlist *sg, int nelems,
92 enum dma_data_direction dir)
93{
94 flush_write_buffers();
95}
96
82struct dma_map_ops nommu_dma_ops = { 97struct dma_map_ops nommu_dma_ops = {
83 .alloc_coherent = dma_generic_alloc_coherent, 98 .alloc_coherent = dma_generic_alloc_coherent,
84 .free_coherent = nommu_free_coherent, 99 .free_coherent = nommu_free_coherent,
85 .map_sg = nommu_map_sg, 100 .map_sg = nommu_map_sg,
86 .map_page = nommu_map_page, 101 .map_page = nommu_map_page,
87 .is_phys = 1, 102 .sync_single_for_device = nommu_sync_single_for_device,
103 .sync_sg_for_device = nommu_sync_sg_for_device,
104 .is_phys = 1,
88}; 105};
89 106
90void __init no_iommu_init(void) 107void __init no_iommu_init(void)
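
With flush_write_buffers() about to disappear from the generic sync wrappers (see the asm-generic/dma-mapping-common.h hunks further down), the nommu ops take over that flushing themselves, and only in the for_device direction, where pending CPU stores must become visible before the device reads. A userspace sketch of the before/after shape, using a plain memory barrier as a stand-in for flush_write_buffers() (all names invented):

#include <stdio.h>

/* Stand-in for x86 flush_write_buffers(): just a full barrier here. */
static void model_flush_write_buffers(void)
{
	__sync_synchronize();
}

/* Before the patch: the generic wrapper flushed unconditionally. */
static void sync_for_device_old(void (*arch_hook)(void))
{
	if (arch_hook)
		arch_hook();
	model_flush_write_buffers();	/* every arch paid for this */
}

/* After the patch: only the arch hook flushes, and only where it matters. */
static void nommu_hook(void)
{
	model_flush_write_buffers();	/* x86 nommu: make CPU stores visible */
}

static void sync_for_device_new(void (*arch_hook)(void))
{
	if (arch_hook)
		arch_hook();		/* no unconditional flush any more */
}

int main(void)
{
	sync_for_device_old(NULL);	/* old: flush even with no hook */
	sync_for_device_new(nommu_hook);	/* new: flush lives in the x86 nommu ops */
	printf("write-buffer flush now happens inside the arch hook only\n");
	return 0;
}
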
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 6af96ee44200..e8a35016115f 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -13,31 +13,6 @@
13 13
14int swiotlb __read_mostly; 14int swiotlb __read_mostly;
15 15
16void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
17{
18 return alloc_bootmem_low_pages(size);
19}
20
21void *swiotlb_alloc(unsigned order, unsigned long nslabs)
22{
23 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
24}
25
26dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
27{
28 return paddr;
29}
30
31phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
32{
33 return baddr;
34}
35
36int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
37{
38 return 0;
39}
40
41static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 16static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
42 dma_addr_t *dma_handle, gfp_t flags) 17 dma_addr_t *dma_handle, gfp_t flags)
43{ 18{
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 5406a601185c..e694263445f7 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -103,7 +103,6 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
103 if (ops->sync_single_for_cpu) 103 if (ops->sync_single_for_cpu)
104 ops->sync_single_for_cpu(dev, addr, size, dir); 104 ops->sync_single_for_cpu(dev, addr, size, dir);
105 debug_dma_sync_single_for_cpu(dev, addr, size, dir); 105 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
106 flush_write_buffers();
107} 106}
108 107
109static inline void dma_sync_single_for_device(struct device *dev, 108static inline void dma_sync_single_for_device(struct device *dev,
@@ -116,7 +115,6 @@ static inline void dma_sync_single_for_device(struct device *dev,
116 if (ops->sync_single_for_device) 115 if (ops->sync_single_for_device)
117 ops->sync_single_for_device(dev, addr, size, dir); 116 ops->sync_single_for_device(dev, addr, size, dir);
118 debug_dma_sync_single_for_device(dev, addr, size, dir); 117 debug_dma_sync_single_for_device(dev, addr, size, dir);
119 flush_write_buffers();
120} 118}
121 119
122static inline void dma_sync_single_range_for_cpu(struct device *dev, 120static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -132,7 +130,6 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
132 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir); 130 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
133 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); 131 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
134 132
135 flush_write_buffers();
136 } else 133 } else
137 dma_sync_single_for_cpu(dev, addr, size, dir); 134 dma_sync_single_for_cpu(dev, addr, size, dir);
138} 135}
@@ -150,7 +147,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
150 ops->sync_single_range_for_device(dev, addr, offset, size, dir); 147 ops->sync_single_range_for_device(dev, addr, offset, size, dir);
151 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); 148 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
152 149
153 flush_write_buffers();
154 } else 150 } else
155 dma_sync_single_for_device(dev, addr, size, dir); 151 dma_sync_single_for_device(dev, addr, size, dir);
156} 152}
@@ -165,7 +161,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
165 if (ops->sync_sg_for_cpu) 161 if (ops->sync_sg_for_cpu)
166 ops->sync_sg_for_cpu(dev, sg, nelems, dir); 162 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
167 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); 163 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
168 flush_write_buffers();
169} 164}
170 165
171static inline void 166static inline void
@@ -179,7 +174,6 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
179 ops->sync_sg_for_device(dev, sg, nelems, dir); 174 ops->sync_sg_for_device(dev, sg, nelems, dir);
180 debug_dma_sync_sg_for_device(dev, sg, nelems, dir); 175 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
181 176
182 flush_write_buffers();
183} 177}
184 178
185#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) 179#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
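
The hunks above strip flush_write_buffers() from all six generic sync wrappers; what remains is the dispatch skeleton, including the fallback where a missing sync_single_range_* hook degrades to the plain sync_single_* call. A compact model of that fallback, with invented names and no kernel code:

#include <stdio.h>
#include <stddef.h>

struct model_ops {
	void (*sync_single)(unsigned long long addr, size_t size);
	void (*sync_single_range)(unsigned long long addr, unsigned long offset,
				  size_t size);
};

static void model_sync_single(unsigned long long addr, size_t size)
{
	printf("sync_single: %#llx + %zu\n", addr, size);
}

/* This ops table provides no range hook, like most architectures. */
static const struct model_ops ops = {
	.sync_single = model_sync_single,
};

/* Mirrors the structure of the range wrapper after the patch. */
static void model_sync_single_range(unsigned long long addr,
				    unsigned long offset, size_t size)
{
	if (ops.sync_single_range)
		ops.sync_single_range(addr, offset, size);
	else
		model_sync_single(addr, size);	/* fall back to the plain sync, as above */
}

int main(void)
{
	model_sync_single_range(0x4000, 128, 512);
	return 0;
}
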
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 07dfd460d286..c0f6c3cd788c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -98,11 +98,6 @@ static inline int is_device_dma_capable(struct device *dev)
98 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; 98 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
99} 99}
100 100
101static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
102{
103 return addr + size <= mask;
104}
105
106#ifdef CONFIG_HAS_DMA 101#ifdef CONFIG_HAS_DMA
107#include <asm/dma-mapping.h> 102#include <asm/dma-mapping.h>
108#else 103#else
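
is_buffer_dma_capable() took a raw mask, so every caller had to dereference dev->dma_mask (or pass a coherent mask) itself; with the helper gone, callers either use the per-device dma_capable() shown earlier or, in the coherent-allocation paths above, open-code the comparison against the mask they already hold. The two spellings are equivalent, as this small userspace check illustrates (all names are stand-ins):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct model_dev { uint64_t *dma_mask; };

/* The removed helper: works on a bare mask. */
static bool old_is_buffer_dma_capable(uint64_t mask, uint64_t addr, size_t size)
{
	return addr + size <= mask;
}

/* Its per-device replacement. */
static bool new_dma_capable(const struct model_dev *dev, uint64_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;
	return addr + size <= *dev->dma_mask;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;
	struct model_dev dev = { .dma_mask = &mask };
	uint64_t addr;

	for (addr = 0xfffff000ULL; addr < 0x100002000ULL; addr += 0x1000)
		assert(old_is_buffer_dma_capable(mask, addr, 0x1000) ==
		       new_dma_capable(&dev, addr, 0x1000));
	printf("old and new checks agree for a 32-bit mask\n");
	return 0;
}
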
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cb1a6631b8f4..73b1f1cec423 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -14,7 +14,6 @@ struct scatterlist;
14 */ 14 */
15#define IO_TLB_SEGSIZE 128 15#define IO_TLB_SEGSIZE 128
16 16
17
18/* 17/*
19 * log of the size of each IO TLB slab. The number of slabs is command line 18 * log of the size of each IO TLB slab. The number of slabs is command line
20 * controllable. 19 * controllable.
@@ -24,16 +23,6 @@ struct scatterlist;
24extern void 23extern void
25swiotlb_init(void); 24swiotlb_init(void);
26 25
27extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
28extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
29
30extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
31 phys_addr_t address);
32extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
33 dma_addr_t address);
34
35extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
36
37extern void 26extern void
38*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 27*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
39 dma_addr_t *dma_handle, gfp_t flags); 28 dma_addr_t *dma_handle, gfp_t flags);
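
With the per-arch override points gone from this header, the lib/swiotlb.c changes that follow express everything through phys_to_dma()/dma_to_phys() and dma_capable(): a page goes straight to the device when it is reachable and swiotlb_force is off, and is bounced through the aperture otherwise, while is_swiotlb_buffer() now tests a physical address against the aperture bounds. A compressed userspace model of that decision follows; the aperture bounds and every identifier are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static int model_swiotlb_force;

/* Pretend bounce-buffer aperture: 1 MiB starting at 16 MiB physical. */
static const uint64_t io_tlb_start = 0x01000000ULL;
static const uint64_t io_tlb_end   = 0x01100000ULL;

struct model_dev { uint64_t dma_mask; };

static bool model_dma_capable(const struct model_dev *dev, uint64_t addr, size_t size)
{
	return addr + size <= dev->dma_mask;
}

static bool model_is_swiotlb_buffer(uint64_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/* Shape of the map-page decision after the patch. */
static uint64_t model_map_page(const struct model_dev *dev, uint64_t phys, size_t size)
{
	uint64_t dev_addr = phys;	/* phys_to_dma() is the identity in this model */

	if (model_dma_capable(dev, dev_addr, size) && !model_swiotlb_force)
		return dev_addr;		/* no bouncing needed */
	return io_tlb_start;			/* bounce into the aperture */
}

int main(void)
{
	struct model_dev dev = { .dma_mask = 0xffffffffULL };
	uint64_t handle = model_map_page(&dev, 0x123456000ULL, 4096);

	printf("high page bounced: %s\n",
	       model_is_swiotlb_buffer(handle) ? "yes" : "no");
	return 0;
}
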
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
114__setup("swiotlb=", setup_io_tlb_npages); 114__setup("swiotlb=", setup_io_tlb_npages);
115/* make io_tlb_overflow tunable too? */ 115/* make io_tlb_overflow tunable too? */
116 116
117void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) 117/* Note that this doesn't work with highmem pages */
118{
119 return alloc_bootmem_low_pages(size);
120}
121
122void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
123{
124 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
125}
126
127dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
128{
129 return paddr;
130}
131
132phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
133{
134 return baddr;
135}
136
137static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 118static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
138 volatile void *address) 119 volatile void *address)
139{ 120{
140 return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); 121 return phys_to_dma(hwdev, virt_to_phys(address));
141}
142
143void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
144{
145 return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
146}
147
148int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
149 dma_addr_t addr, size_t size)
150{
151 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
152}
153
154int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
155{
156 return 0;
157} 122}
158 123
159static void swiotlb_print_info(unsigned long bytes) 124static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
189 /* 154 /*
190 * Get IO TLB memory from the low pages 155 * Get IO TLB memory from the low pages
191 */ 156 */
192 io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs); 157 io_tlb_start = alloc_bootmem_low_pages(bytes);
193 if (!io_tlb_start) 158 if (!io_tlb_start)
194 panic("Cannot allocate SWIOTLB buffer"); 159 panic("Cannot allocate SWIOTLB buffer");
195 io_tlb_end = io_tlb_start + bytes; 160 io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
245 bytes = io_tlb_nslabs << IO_TLB_SHIFT; 210 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
246 211
247 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 212 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
248 io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs); 213 io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
214 order);
249 if (io_tlb_start) 215 if (io_tlb_start)
250 break; 216 break;
251 order--; 217 order--;
@@ -315,20 +281,10 @@ cleanup1:
315 return -ENOMEM; 281 return -ENOMEM;
316} 282}
317 283
318static inline int 284static int is_swiotlb_buffer(phys_addr_t paddr)
319address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
320{ 285{
321 return swiotlb_arch_address_needs_mapping(hwdev, addr, size); 286 return paddr >= virt_to_phys(io_tlb_start) &&
322} 287 paddr < virt_to_phys(io_tlb_end);
323
324static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
325{
326 return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
327}
328
329static int is_swiotlb_buffer(char *addr)
330{
331 return addr >= io_tlb_start && addr < io_tlb_end;
332} 288}
333 289
334/* 290/*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
561 dma_mask = hwdev->coherent_dma_mask; 517 dma_mask = hwdev->coherent_dma_mask;
562 518
563 ret = (void *)__get_free_pages(flags, order); 519 ret = (void *)__get_free_pages(flags, order);
564 if (ret && 520 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
565 !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
566 size)) {
567 /* 521 /*
568 * The allocated memory isn't reachable by the device. 522 * The allocated memory isn't reachable by the device.
569 */ 523 */
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
585 dev_addr = swiotlb_virt_to_bus(hwdev, ret); 539 dev_addr = swiotlb_virt_to_bus(hwdev, ret);
586 540
587 /* Confirm address can be DMA'd by device */ 541 /* Confirm address can be DMA'd by device */
588 if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { 542 if (dev_addr + size > dma_mask) {
589 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 543 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
590 (unsigned long long)dma_mask, 544 (unsigned long long)dma_mask,
591 (unsigned long long)dev_addr); 545 (unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
601 555
602void 556void
603swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 557swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
604 dma_addr_t dma_handle) 558 dma_addr_t dev_addr)
605{ 559{
560 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
561
606 WARN_ON(irqs_disabled()); 562 WARN_ON(irqs_disabled());
607 if (!is_swiotlb_buffer(vaddr)) 563 if (!is_swiotlb_buffer(paddr))
608 free_pages((unsigned long) vaddr, get_order(size)); 564 free_pages((unsigned long)vaddr, get_order(size));
609 else 565 else
610 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 566 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
611 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); 567 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
625 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at " 581 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
626 "device %s\n", size, dev ? dev_name(dev) : "?"); 582 "device %s\n", size, dev ? dev_name(dev) : "?");
627 583
628 if (size > io_tlb_overflow && do_panic) { 584 if (size <= io_tlb_overflow || !do_panic)
629 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 585 return;
630 panic("DMA: Memory would be corrupted\n"); 586
631 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 587 if (dir == DMA_BIDIRECTIONAL)
632 panic("DMA: Random memory would be DMAed\n"); 588 panic("DMA: Random memory could be DMA accessed\n");
633 } 589 if (dir == DMA_FROM_DEVICE)
590 panic("DMA: Random memory could be DMA written\n");
591 if (dir == DMA_TO_DEVICE)
592 panic("DMA: Random memory could be DMA read\n");
634} 593}
635 594
636/* 595/*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
646 struct dma_attrs *attrs) 605 struct dma_attrs *attrs)
647{ 606{
648 phys_addr_t phys = page_to_phys(page) + offset; 607 phys_addr_t phys = page_to_phys(page) + offset;
649 dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); 608 dma_addr_t dev_addr = phys_to_dma(dev, phys);
650 void *map; 609 void *map;
651 610
652 BUG_ON(dir == DMA_NONE); 611 BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
655 * we can safely return the device addr and not worry about bounce 614 * we can safely return the device addr and not worry about bounce
656 * buffering it. 615 * buffering it.
657 */ 616 */
658 if (!address_needs_mapping(dev, dev_addr, size) && 617 if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
659 !range_needs_mapping(phys, size))
660 return dev_addr; 618 return dev_addr;
661 619
662 /* 620 /*
@@ -673,7 +631,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
673 /* 631 /*
674 * Ensure that the address returned is DMA'ble 632 * Ensure that the address returned is DMA'ble
675 */ 633 */
676 if (address_needs_mapping(dev, dev_addr, size)) 634 if (!dma_capable(dev, dev_addr, size))
677 panic("map_single: bounce buffer is not DMA'ble"); 635 panic("map_single: bounce buffer is not DMA'ble");
678 636
679 return dev_addr; 637 return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
691static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, 649static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
692 size_t size, int dir) 650 size_t size, int dir)
693{ 651{
694 char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); 652 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
695 653
696 BUG_ON(dir == DMA_NONE); 654 BUG_ON(dir == DMA_NONE);
697 655
698 if (is_swiotlb_buffer(dma_addr)) { 656 if (is_swiotlb_buffer(paddr)) {
699 do_unmap_single(hwdev, dma_addr, size, dir); 657 do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
700 return; 658 return;
701 } 659 }
702 660
703 if (dir != DMA_FROM_DEVICE) 661 if (dir != DMA_FROM_DEVICE)
704 return; 662 return;
705 663
706 dma_mark_clean(dma_addr, size); 664 /*
 665 * phys_to_virt doesn't work with highmem pages but we could
 666 * call dma_mark_clean() with highmem pages here. However, we
667 * are fine since dma_mark_clean() is null on POWERPC. We can
668 * make dma_mark_clean() take a physical address if necessary.
669 */
670 dma_mark_clean(phys_to_virt(paddr), size);
707} 671}
708 672
709void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 673void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -728,19 +692,19 @@ static void
728swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 692swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
729 size_t size, int dir, int target) 693 size_t size, int dir, int target)
730{ 694{
731 char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); 695 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
732 696
733 BUG_ON(dir == DMA_NONE); 697 BUG_ON(dir == DMA_NONE);
734 698
735 if (is_swiotlb_buffer(dma_addr)) { 699 if (is_swiotlb_buffer(paddr)) {
736 sync_single(hwdev, dma_addr, size, dir, target); 700 sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
737 return; 701 return;
738 } 702 }
739 703
740 if (dir != DMA_FROM_DEVICE) 704 if (dir != DMA_FROM_DEVICE)
741 return; 705 return;
742 706
743 dma_mark_clean(dma_addr, size); 707 dma_mark_clean(phys_to_virt(paddr), size);
744} 708}
745 709
746void 710void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
817 781
818 for_each_sg(sgl, sg, nelems, i) { 782 for_each_sg(sgl, sg, nelems, i) {
819 phys_addr_t paddr = sg_phys(sg); 783 phys_addr_t paddr = sg_phys(sg);
820 dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); 784 dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
821 785
822 if (range_needs_mapping(paddr, sg->length) || 786 if (swiotlb_force ||
823 address_needs_mapping(hwdev, dev_addr, sg->length)) { 787 !dma_capable(hwdev, dev_addr, sg->length)) {
824 void *map = map_single(hwdev, sg_phys(sg), 788 void *map = map_single(hwdev, sg_phys(sg),
825 sg->length, dir); 789 sg->length, dir);
826 if (!map) { 790 if (!map) {