author    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2009-01-05 09:59:02 -0500
committer Ingo Molnar <mingo@elte.hu>                      2009-01-06 08:06:57 -0500
commit    160c1d8e40866edfeae7d68816b7005d70acf391
tree      37dd78b2ea28a3953a46d401bd9657005eb444d7
parent    f0402a262e1a4c03fc66b83659823bdcaac3c41a
x86, ia64: convert to use generic dma_map_ops struct
This converts X86 and IA64 to use include/linux/dma-mapping.h. It's a bit
large but pretty boring. The major change for X86 is converting 'int dir'
to 'enum dma_data_direction dir' in DMA mapping operations. The major
change for IA64 is using map_page and unmap_page instead of map_single
and unmap_single.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
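For reference, the shape of the shared table and the wrapping pattern the
patch converges on looks roughly like this; a minimal sketch in C, abridged
from the generic header (the helper name my_map_single is illustrative and
not part of the patch):

	/* Abridged: the generic ops table types directions as an enum
	 * and keys streaming mappings off map_page/unmap_page. */
	struct dma_map_ops {
		dma_addr_t (*map_page)(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       struct dma_attrs *attrs);
		void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir,
				   struct dma_attrs *attrs);
		/* ... alloc/free_coherent, map/unmap_sg, sync hooks,
		 * dma_supported, mapping_error, is_phys ... */
	};

	/* map_single-style calls become wrappers: split the kernel virtual
	 * address into its page and intra-page offset. */
	static inline dma_addr_t my_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		return ops->map_page(dev, virt_to_page(ptr),
				     (unsigned long)ptr & ~PAGE_MASK, size,
				     dir, NULL);
	}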
 arch/ia64/dig/Makefile              |   4
 arch/ia64/dig/dig_vtd_iommu.c       |  77
 arch/ia64/hp/common/hwsw_iommu.c    |   6
 arch/ia64/hp/common/sba_iommu.c     |  46
 arch/ia64/include/asm/dma-mapping.h | 107
 arch/ia64/include/asm/machvec.h     |  12
 arch/ia64/kernel/dma-mapping.c      |   4
 arch/ia64/kernel/machvec.c          |   8
 arch/ia64/kernel/pci-dma.c          |  49
 arch/ia64/kernel/pci-swiotlb.c      |  32
 arch/ia64/sn/pci/pci_dma.c          |  58
 arch/x86/include/asm/device.h       |   2
 arch/x86/include/asm/dma-mapping.h  | 146
 arch/x86/include/asm/iommu.h        |   2
 arch/x86/kernel/amd_iommu.c         |   8
 arch/x86/kernel/pci-calgary_64.c    |  15
 arch/x86/kernel/pci-dma.c           |   4
 arch/x86/kernel/pci-gart_64.c       |  14
 arch/x86/kernel/pci-nommu.c         |   5
 arch/x86/kernel/pci-swiotlb_64.c    |   6
 drivers/pci/intel-iommu.c           |   9
 include/linux/intel-iommu.h         |   6
 include/linux/swiotlb.h             |  18
 lib/swiotlb.c                       |  18
 24 files changed, 278 insertions(+), 378 deletions(-)
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd6..2f7caddf093e 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
 
 obj-y := setup.o
 ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
 else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index fdb8ba9f4992..000000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,77 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flags)
-{
-	return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-	intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
-		     int dir, struct dma_attrs *attrs)
-{
-	return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-		       int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		 int dir, struct dma_attrs *attrs)
-{
-	return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-		   int nents, int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
-
-extern int iommu_dma_supported(struct device *dev, u64 mask);
-
-struct dma_mapping_ops vtd_dma_ops = {
-	.alloc_coherent		= vtd_alloc_coherent,
-	.free_coherent		= vtd_free_coherent,
-	.map_single_attrs	= vtd_map_single_attrs,
-	.unmap_single_attrs	= vtd_unmap_single_attrs,
-	.map_sg_attrs		= vtd_map_sg_attrs,
-	.unmap_sg_attrs		= vtd_unmap_sg_attrs,
-	.sync_single_for_cpu	= machvec_dma_sync_single,
-	.sync_sg_for_cpu	= machvec_dma_sync_sg,
-	.sync_single_for_device	= machvec_dma_sync_single,
-	.sync_sg_for_device	= machvec_dma_sync_sg,
-	.dma_supported_op	= iommu_dma_supported,
-	.mapping_error		= vtd_dma_mapping_error,
-};
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index e5bbeba77810..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -17,7 +17,7 @@
 #include <linux/swiotlb.h>
 #include <asm/machvec.h>
 
-extern struct dma_mapping_ops sba_dma_ops, swiotlb_dma_ops;
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
 
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
@@ -30,10 +30,10 @@ extern int swiotlb_late_init_with_default_size (size_t size);
 static inline int use_swiotlb(struct device *dev)
 {
 	return dev && dev->dma_mask &&
-		!sba_dma_ops.dma_supported_op(dev, *dev->dma_mask);
+		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
-struct dma_mapping_ops *hwsw_dma_get_ops(struct device *dev)
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
 {
 	if (use_swiotlb(dev))
 		return &swiotlb_dma_ops;
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 29e7206f3dc6..129b62eb39e5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -909,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/DMA-mapping.txt
  */
-static dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+			       unsigned long poff, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
+	void *addr = page_address(page) + poff;
 	dma_addr_t iovp;
 	dma_addr_t offset;
 	u64 *pdir_start;
@@ -992,6 +994,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 	return SBA_IOVA(ioc, iovp, offset);
 }
 
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+				       size_t size, enum dma_data_direction dir,
+				       struct dma_attrs *attrs)
+{
+	return sba_map_page(dev, virt_to_page(addr),
+			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
+
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
  * See Documentation/DMA-mapping.txt
  */
-static void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-				   int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+			   enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1095,6 +1105,12 @@ static void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t s
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
 
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	sba_unmap_page(dev, iova, size, dir, attrs);
+}
+
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1423,7 +1439,8 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * See Documentation/DMA-mapping.txt
  */
 static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			    int nents, int dir, struct dma_attrs *attrs)
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1514,7 +1531,8 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
  * See Documentation/DMA-mapping.txt
  */
 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			       int nents, int dir, struct dma_attrs *attrs)
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -2062,7 +2080,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 	},
 };
 
-extern struct dma_mapping_ops swiotlb_dma_ops;
+extern struct dma_map_ops swiotlb_dma_ops;
 
 static int __init
 sba_init(void)
@@ -2176,18 +2194,18 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-struct dma_mapping_ops sba_dma_ops = {
+struct dma_map_ops sba_dma_ops = {
 	.alloc_coherent		= sba_alloc_coherent,
 	.free_coherent		= sba_free_coherent,
-	.map_single_attrs	= sba_map_single_attrs,
-	.unmap_single_attrs	= sba_unmap_single_attrs,
-	.map_sg_attrs		= sba_map_sg_attrs,
-	.unmap_sg_attrs		= sba_unmap_sg_attrs,
+	.map_page		= sba_map_page,
+	.unmap_page		= sba_unmap_page,
+	.map_sg			= sba_map_sg_attrs,
+	.unmap_sg		= sba_unmap_sg_attrs,
 	.sync_single_for_cpu	= machvec_dma_sync_single,
 	.sync_sg_for_cpu	= machvec_dma_sync_sg,
 	.sync_single_for_device	= machvec_dma_sync_single,
 	.sync_sg_for_device	= machvec_dma_sync_sg,
-	.dma_supported_op	= sba_dma_supported,
+	.dma_supported		= sba_dma_supported,
 	.mapping_error		= sba_dma_mapping_error,
 };
 
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index bac3159379f7..d6230f514536 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -9,73 +9,21 @@
 #include <linux/scatterlist.h>
 #include <asm/swiotlb.h>
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
-				size_t size, int direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	dma_addr_t	(*map_single_attrs)(struct device *dev, void *cpu_addr,
-					    size_t size, int direction,
-					    struct dma_attrs *attrs);
-	void		(*unmap_single_attrs)(struct device *dev,
-					      dma_addr_t dma_addr,
-					      size_t size, int direction,
-					      struct dma_attrs *attrs);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int		(*map_sg_attrs)(struct device *dev,
-				struct scatterlist *sg, int nents,
-				int direction, struct dma_attrs *attrs);
-	void		(*unmap_sg_attrs)(struct device *dev,
-				struct scatterlist *sg, int nents,
-				int direction,
-				struct dma_attrs *attrs);
-	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *daddr, gfp_t gfp)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
 				     void *caddr, dma_addr_t daddr)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	ops->free_coherent(dev, size, caddr, daddr);
 }
 
@@ -87,8 +35,10 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      enum dma_data_direction dir,
 					      struct dma_attrs *attrs)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
-	return ops->map_single_attrs(dev, caddr, size, dir, attrs);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, virt_to_page(caddr),
+			     (unsigned long)caddr & ~PAGE_MASK, size,
+			     dir, attrs);
 }
 
 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
@@ -96,8 +46,8 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
 					  enum dma_data_direction dir,
 					  struct dma_attrs *attrs)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
-	ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_page(dev, daddr, size, dir, attrs);
 }
 
 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
@@ -107,8 +57,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 				   int nents, enum dma_data_direction dir,
 				   struct dma_attrs *attrs)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
-	return ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_sg(dev, sgl, nents, dir, attrs);
 }
 
 static inline void dma_unmap_sg_attrs(struct device *dev,
@@ -116,8 +66,8 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
 				      enum dma_data_direction dir,
 				      struct dma_attrs *attrs)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
-	ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_sg(dev, sgl, nents, dir, attrs);
 }
 
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
@@ -127,7 +77,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
 					    size_t size,
 					    enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
 
@@ -135,7 +85,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
 				       struct scatterlist *sgl,
 				       int nents, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
 
@@ -144,7 +94,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
 					      size_t size,
 					      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	ops->sync_single_for_device(dev, daddr, size, dir);
 }
 
@@ -153,20 +103,29 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 					   int nents,
 					   enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	return ops->mapping_error(dev, daddr);
 }
 
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, page, offset, size, dir, NULL);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API". Use at your own risk.
@@ -180,8 +139,8 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
 
 static inline int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
-	return ops->dma_supported_op(dev, mask);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->dma_supported(dev, mask);
 }
 
 static inline int
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 95e1708fa4e3..e8442c7e4cc8 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -24,6 +23,7 @@ struct task_struct;
 struct pci_dev;
 struct msi_desc;
 struct dma_attrs;
+enum dma_data_direction;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef struct dma_mapping_ops *ia64_mv_dma_get_ops(struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_. Platforms are
@@ -97,8 +97,10 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -250,7 +252,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # endif /* CONFIG_IA64_GENERIC */
 
 extern void swiotlb_dma_init(void);
-extern struct dma_mapping_ops *dma_get_ops(struct device *);
+extern struct dma_map_ops *dma_get_ops(struct device *);
 
 /*
  * Define default versions so we can extend machvec for new platforms without having
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 427f69617226..7060e13fa421 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,9 +1,9 @@
 #include <linux/dma-mapping.h>
 
-struct dma_mapping_ops *dma_ops;
+struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-struct dma_mapping_ops *dma_get_ops(struct device *dev)
+struct dma_map_ops *dma_get_ops(struct device *dev)
 {
 	return dma_ops;
 }
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 7ccb228ceedc..d41a40ef80c0 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -1,5 +1,5 @@
 #include <linux/module.h>
-
+#include <linux/dma-mapping.h>
 #include <asm/machvec.h>
 #include <asm/system.h>
 
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
 EXPORT_SYMBOL(machvec_timer_interrupt);
 
 void
-machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction dir)
 {
 	mb();
 }
 EXPORT_SYMBOL(machvec_dma_sync_single);
 
 void
-machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
+		    enum dma_data_direction dir)
 {
 	mb();
 }
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 640669eba5d4..b30209ec8c6e 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -41,21 +41,7 @@ struct device fallback_dev = {
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-extern struct dma_mapping_ops vtd_dma_ops;
-
-void __init pci_iommu_alloc(void)
-{
-	dma_ops = &vtd_dma_ops;
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
-	detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -81,10 +67,10 @@ iommu_dma_init(void)
 
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 
-	if (ops->dma_supported_op)
-		return ops->dma_supported_op(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -113,4 +99,31 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(iommu_dma_supported);
 
+static int vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+void __init pci_iommu_alloc(void)
+{
+	dma_ops = &intel_dma_ops;
+
+	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+	dma_ops->sync_single_for_device = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+	dma_ops->dma_supported = iommu_dma_supported;
+	dma_ops->mapping_error = vtd_dma_mapping_error;
+
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
 #endif
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 9f172c864377..6bf8f66786bd 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -16,24 +16,36 @@ EXPORT_SYMBOL(swiotlb);
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly;
 
-struct dma_mapping_ops swiotlb_dma_ops = {
+static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	return swiotlb_map_single_attrs(dev, page_address(page) + offset, size,
+					dir, attrs);
+}
+
+static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
+			       size_t size, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
+{
+	swiotlb_unmap_single_attrs(dev, dma_handle, size, dir, attrs);
+}
+
+struct dma_map_ops swiotlb_dma_ops = {
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
-	.map_single = swiotlb_map_single,
-	.unmap_single = swiotlb_unmap_single,
-	.map_single_attrs = swiotlb_map_single_attrs,
-	.unmap_single_attrs = swiotlb_unmap_single_attrs,
-	.map_sg_attrs = swiotlb_map_sg_attrs,
-	.unmap_sg_attrs = swiotlb_unmap_sg_attrs,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.map_sg = swiotlb_map_sg,
-	.unmap_sg = swiotlb_unmap_sg,
-	.dma_supported_op = swiotlb_dma_supported,
+	.dma_supported = swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index efdd69490009..9c788f9cedfd 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
@@ -171,10 +170,12 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-static dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
-					  size_t size, int direction,
-					  struct dma_attrs *attrs)
+static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir,
+				  struct dma_attrs *attrs)
 {
+	void *cpu_addr = page_address(page) + offset;
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -212,20 +213,20 @@ static dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
  * by @dma_handle into the coherence domain. On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-static void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
-				      size_t size, int direction,
-				      struct dma_attrs *attrs)
+static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+			      size_t size, enum dma_data_direction dir,
+			      struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
-	provider->dma_unmap(pdev, dma_addr, direction);
+	provider->dma_unmap(pdev, dma_addr, dir);
 }
 
 /**
- * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
+ * sn_dma_unmap_sg - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
@@ -234,9 +235,9 @@ static void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
  *
  * Unmap a set of streaming mode DMA translations.
  */
-static void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
-				  int nhwentries, int direction,
-				  struct dma_attrs *attrs)
+static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			    int nhwentries, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -246,14 +247,14 @@ static void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	for_each_sg(sgl, sg, nhwentries, i) {
-		provider->dma_unmap(pdev, sg->dma_address, direction);
+		provider->dma_unmap(pdev, sg->dma_address, dir);
 		sg->dma_address = (dma_addr_t) NULL;
 		sg->dma_length = 0;
 	}
 }
 
 /**
- * sn_dma_map_sg_attrs - map a scatterlist for DMA
+ * sn_dma_map_sg - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
@@ -267,8 +268,9 @@ static void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
  *
  * Maps each entry of @sg for DMA.
  */
-static int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
-	int nhwentries, int direction, struct dma_attrs *attrs)
+static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+			 int nhwentries, enum dma_data_direction dir,
+			 struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
@@ -305,8 +307,7 @@ static int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 		 * Free any successfully allocated entries.
 		 */
 		if (i > 0)
-			sn_dma_unmap_sg_attrs(dev, saved_sg, i,
-					      direction, attrs);
+			sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
 		return 0;
 	}
 
@@ -317,25 +318,26 @@ static int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 }
 
 static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				       size_t size, int direction)
+				       size_t size, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
 
 static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-					  size_t size, int direction)
+					  size_t size,
+					  enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
 
 static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				   int nelems, int direction)
+				   int nelems, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
 
 static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				      int nelems, int direction)
+				      int nelems, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
@@ -455,19 +457,19 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	return ret;
 }
 
-static struct dma_mapping_ops sn_dma_ops = {
+static struct dma_map_ops sn_dma_ops = {
 	.alloc_coherent		= sn_dma_alloc_coherent,
 	.free_coherent		= sn_dma_free_coherent,
-	.map_single_attrs	= sn_dma_map_single_attrs,
-	.unmap_single_attrs	= sn_dma_unmap_single_attrs,
-	.map_sg_attrs		= sn_dma_map_sg_attrs,
-	.unmap_sg_attrs		= sn_dma_unmap_sg_attrs,
+	.map_page		= sn_dma_map_page,
+	.unmap_page		= sn_dma_unmap_page,
+	.map_sg			= sn_dma_map_sg,
+	.unmap_sg		= sn_dma_unmap_sg,
 	.sync_single_for_cpu	= sn_dma_sync_single_for_cpu,
 	.sync_sg_for_cpu	= sn_dma_sync_sg_for_cpu,
 	.sync_single_for_device	= sn_dma_sync_single_for_device,
 	.sync_sg_for_device	= sn_dma_sync_sg_for_device,
 	.mapping_error		= sn_dma_mapping_error,
-	.dma_supported_op	= sn_dma_supported,
+	.dma_supported		= sn_dma_supported,
 };
 
 void sn_dma_init(void)
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 3c034f48fdb0..4994a20acbcb 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,7 +6,7 @@ struct dev_archdata {
 	void	*acpi_handle;
 #endif
 #ifdef CONFIG_X86_64
-	struct dma_mapping_ops *dma_ops;
+	struct dma_map_ops *dma_ops;
 #endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index b81f82268a16..5a347805a6c7 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -17,50 +17,9 @@ extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void		*(*alloc_coherent)(struct device *dev, size_t size,
-					   dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-					 void *vaddr, dma_addr_t dma_handle);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-					       dma_addr_t dma_handle, size_t size,
-					       int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-						  dma_addr_t dma_handle, size_t size,
-						  int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-						     dma_addr_t dma_handle, unsigned long offset,
-						     size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-							dma_addr_t dma_handle, unsigned long offset,
-							size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-					   struct scatterlist *sg, int nelems,
-					   int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-					      struct scatterlist *sg, int nelems,
-					      int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				  int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				    struct scatterlist *sg, int nents,
-				    int direction);
-	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
-				    unsigned long offset, size_t size,
-				    enum dma_data_direction dir,
-				    struct dma_attrs *attrs);
-	void		(*unmap_page)(struct device *dev, dma_addr_t dma_handle,
-				      size_t size, enum dma_data_direction dir,
-				      struct dma_attrs *attrs);
-	int		(*dma_supported)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_X86_32
 	return dma_ops;
@@ -75,7 +34,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
@@ -94,138 +53,139 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
+	       enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	return ops->map_page(hwdev, virt_to_page(ptr),
 			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     direction, NULL);
+			     dir, NULL);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 int direction)
+		 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, direction, NULL);
+		ops->unmap_page(dev, addr, size, dir, NULL);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, int direction)
+	   int nents, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_sg(hwdev, sg, nents, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
+	     enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, direction);
+		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
+			size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
+			   size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_cpu)
 		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, direction);
+					       size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
-				 int direction)
+				 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_device)
 		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, direction);
+						  offset, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
+		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
+		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
 
 	flush_write_buffers();
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction)
+				      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_page(dev, page, offset, size, direction, NULL);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, int direction)
+				  size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, addr, size, direction);
+	dma_unmap_single(dev, addr, size, dir);
 }
 
 static inline void
@@ -271,7 +231,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -297,7 +257,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 static inline void dma_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t bus)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	WARN_ON(irqs_disabled());	/* for portability */
 
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index a6ee9e6f530f..af326a2975b5 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -3,7 +3,7 @@
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
-extern struct dma_mapping_ops nommu_dma_ops;
+extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a5dedb690a9a..008e522b9536 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1394,7 +1394,8 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
  * lists).
  */
 static int map_sg(struct device *dev, struct scatterlist *sglist,
-		  int nelems, int dir)
+		  int nelems, enum dma_data_direction dir,
+		  struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
@@ -1461,7 +1462,8 @@ unmap:
  * lists).
  */
 static void unmap_sg(struct device *dev, struct scatterlist *sglist,
-		     int nelems, int dir)
+		     int nelems, enum dma_data_direction dir,
+		     struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
@@ -1648,7 +1650,7 @@ static void prealloc_protection_domains(void)
 	}
 }
 
-static struct dma_mapping_ops amd_iommu_dma_ops = {
+static struct dma_map_ops amd_iommu_dma_ops = {
 	.alloc_coherent = alloc_coherent,
 	.free_coherent = free_coherent,
 	.map_page = map_page,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 756138b604e1..755c21e906f3 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -380,8 +380,9 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
 	return tbl;
 }
 
-static void calgary_unmap_sg(struct device *dev,
-	struct scatterlist *sglist, int nelems, int direction)
+static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
+			     int nelems, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
 	struct scatterlist *s;
@@ -404,7 +405,8 @@ static void calgary_unmap_sg(struct device *dev,
 }
 
 static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
-	int nelems, int direction)
+			  int nelems, enum dma_data_direction dir,
+			  struct dma_attrs *attrs)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
 	struct scatterlist *s;
@@ -429,15 +431,14 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		s->dma_address = (entry << PAGE_SHIFT) | s->offset;
 
 		/* insert into HW table */
-		tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
-			  direction);
+		tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
 
 		s->dma_length = s->length;
 	}
 
 	return nelems;
 error:
-	calgary_unmap_sg(dev, sg, nelems, direction);
+	calgary_unmap_sg(dev, sg, nelems, dir, NULL);
 	for_each_sg(sg, s, nelems, i) {
 		sg->dma_address = bad_dma_address;
 		sg->dma_length = 0;
@@ -518,7 +519,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
-static struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_map_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.free_coherent = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 19a1044a0cd9..0d75c129b18a 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -12,7 +12,7 @@
 
 static int forbid_dac __read_mostly;
 
-struct dma_mapping_ops *dma_ops;
+struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -224,7 +224,7 @@ early_param("iommu", iommu_setup);
 
 int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
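The dma_ops global retyped here is the fallback that per-device dispatch falls through to. A sketch of the x86 lookup helper as it stands after this conversion (the CONFIG_X86_32 short-circuit is from memory; check arch/x86/include/asm/dma-mapping.h for the exact form):

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;			/* 32-bit x86: one global table */
#else
	/* 64-bit: prefer a per-device table, e.g. one set by calgary */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	return dev->archdata.dma_ops;
#endif
}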
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 9c557c0c928c..8cb3e45439cf 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -302,8 +302,8 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
  */
-static void
-gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -333,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		addr = dma_map_area(dev, addr, s->length, dir, 0);
 		if (addr == bad_dma_address) {
 			if (i > 0)
-				gart_unmap_sg(dev, sg, i, dir);
+				gart_unmap_sg(dev, sg, i, dir, NULL);
 			nents = 0;
 			sg[0].dma_length = 0;
 			break;
@@ -404,8 +404,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
  * DMA map all entries in a scatterlist.
  * Merge chunks that have page aligned sizes into a continuous mapping.
  */
-static int
-gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *ps, *start_sg, *sgmap;
 	int need = 0, nextneed, i, out, start;
@@ -472,7 +472,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
 error:
 	flush_gart();
-	gart_unmap_sg(dev, sg, out, dir);
+	gart_unmap_sg(dev, sg, out, dir, NULL);
 
 	/* When it was forced or merged try again in a dumb way */
 	if (force_iommu || iommu_merge) {
@@ -711,7 +711,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	return -1;
 }
 
-static struct dma_mapping_ops gart_dma_ops = {
+static struct dma_map_ops gart_dma_ops = {
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
 	.map_page = gart_map_page,
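Note the pattern shared by calgary and gart: when map_sg fails partway, the entries mapped so far are unwound by calling the driver's own unmap_sg with NULL attrs. A minimal sketch of that idiom for a hypothetical driver (my_map_area and my_unmap_sg are illustrative names, not from the patch):

static int my_map_sg(struct device *dev, struct scatterlist *sglist,
		     int nents, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nents, i) {
		s->dma_address = my_map_area(dev, sg_phys(s), s->length, dir);
		if (s->dma_address == bad_dma_address)
			goto error;
		s->dma_length = s->length;
	}
	return nents;

error:
	/* unwind only the i entries that were successfully mapped */
	my_unmap_sg(dev, sglist, i, dir, NULL);
	return 0;
}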
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index d42b69c90b40..fe50214db876 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -54,7 +54,8 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
  * the same here.
  */
 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
-			int nents, int direction)
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -78,7 +79,7 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
-struct dma_mapping_ops nommu_dma_ops = {
+struct dma_map_ops nommu_dma_ops = {
 	.alloc_coherent = dma_generic_alloc_coherent,
 	.free_coherent = nommu_free_coherent,
 	.map_sg = nommu_map_sg,
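nommu is the degenerate case: with no IOMMU the bus address is simply the physical address, so its map_sg just fills in dma_address/dma_length. Roughly, condensed from the function body (which this patch leaves untouched apart from the signature):

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s);	/* identity: bus == phys */
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;		/* exceeds the device's DMA mask */
		s->dma_length = s->length;
	}
	return nents;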
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 3ae354c0fdef..3f0d9924dd1c 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -67,7 +67,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
@@ -77,8 +77,8 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.map_sg = swiotlb_map_sg,
-	.unmap_sg = swiotlb_unmap_sg,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
 	.dma_supported = NULL,
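Pointing .map_sg at swiotlb_map_sg_attrs works because the non-attrs entry points are thin wrappers and the attrs-taking variants are the real implementations. The wrapper in lib/swiotlb.c is, approximately:

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}

Wiring the ops table directly to the _attrs form skips one call level and threads the caller's attrs through unchanged.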
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index da273e4ef66c..b9a562933903 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2441,7 +2441,8 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 #define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
 
 void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-		    int nelems, int dir)
+		    int nelems, enum dma_data_direction dir,
+		    struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2499,7 +2500,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 }
 
 int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
-		 int dir)
+		 enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	void *addr;
 	int i;
@@ -2579,15 +2580,13 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	return nelems;
 }
 
-static struct dma_mapping_ops intel_dma_ops = {
+struct dma_map_ops intel_dma_ops = {
 	.alloc_coherent = intel_alloc_coherent,
 	.free_coherent = intel_free_coherent,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
-#ifdef CONFIG_X86_64
 	.map_page = intel_map_page,
 	.unmap_page = intel_unmap_page,
-#endif
 };
 
 static inline int iommu_domain_cache_init(void)
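Dropping the static qualifier (and the CONFIG_X86_64 guard around the page ops) is what lets IA64 share this table instead of wrapping it: the dig_vtd_iommu.c shims deleted earlier in this patch existed only to re-export these functions one by one. A hypothetical sketch of what the IA64 VT-d setup can now reduce to (illustrative, not quoted from the patch):

	/* somewhere in the IA64 DMAR init path */
	extern struct dma_map_ops intel_dma_ops;

	dma_ops = &intel_dma_ops;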
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c4f6c101dbcd..a254db1decd0 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -334,7 +334,9 @@ extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
 extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
 extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
-extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
-extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int,
+			enum dma_data_direction, struct dma_attrs *);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int,
+			   enum dma_data_direction, struct dma_attrs *);
 
 #endif
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index dedd3c0cfe30..0567c3d8633b 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -66,36 +66,38 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		     int dir, struct dma_attrs *attrs);
+		     enum dma_data_direction dir, struct dma_attrs *attrs);
 
 extern void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-		       int nelems, int dir, struct dma_attrs *attrs);
+		       int nelems, enum dma_data_direction dir,
+		       struct dma_attrs *attrs);
 
 extern void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-			    size_t size, int dir);
+			    size_t size, enum dma_data_direction dir);
 
 extern void
 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-			int nelems, int dir);
+			int nelems, enum dma_data_direction dir);
 
 extern void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-			       size_t size, int dir);
+			       size_t size, enum dma_data_direction dir);
 
 extern void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-			   int nelems, int dir);
+			   int nelems, enum dma_data_direction dir);
 
 extern void
 swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				  unsigned long offset, size_t size, int dir);
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir);
 
 extern void
 swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 				     unsigned long offset, size_t size,
-				     int dir);
+				     enum dma_data_direction dir);
 
 extern int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
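The sync_single_range variants exist so a driver can hand ownership of just a slice of a long-lived mapping back to the CPU. A usage sketch through the generic dma_sync_* API (the ring-buffer names are invented for illustration):

	/* one long-lived RX buffer, mapped once at setup */
	dma_addr_t ring = dma_map_single(dev, ring_buf, RING_BYTES,
					 DMA_FROM_DEVICE);

	/* the device signalled one descriptor's slice is ready: sync
	 * only that range back to the CPU before reading it */
	dma_sync_single_range_for_cpu(dev, ring, desc_off, desc_len,
				      DMA_FROM_DEVICE);
	process_packet(ring_buf + desc_off, desc_len);

	/* hand the slice back to the device for reuse */
	dma_sync_single_range_for_device(dev, ring, desc_off, desc_len,
					 DMA_FROM_DEVICE);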
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 48deef7e1976..d047de990a3f 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -736,7 +736,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-			    size_t size, int dir)
+			    size_t size, enum dma_data_direction dir)
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
@@ -744,7 +744,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-			       size_t size, int dir)
+			       size_t size, enum dma_data_direction dir)
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
@@ -769,7 +769,8 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 
 void
 swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				  unsigned long offset, size_t size, int dir)
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir)
 {
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_CPU);
@@ -778,7 +779,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
 
 void
 swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 				     unsigned long offset, size_t size,
-				     int dir)
+				     enum dma_data_direction dir)
 {
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_DEVICE);
@@ -803,7 +805,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
  */
 int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		     int dir, struct dma_attrs *attrs)
+		     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -850,7 +852,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
  */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-		       int nelems, int dir, struct dma_attrs *attrs)
+		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -902,7 +904,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 
 void
 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-			int nelems, int dir)
+			int nelems, enum dma_data_direction dir)
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
@@ -910,7 +912,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 
 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-			   int nelems, int dir)
+			   int nelems, enum dma_data_direction dir)
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
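With the signatures aligned across x86, IA64, swiotlb and the IOMMU drivers, the generic header can dispatch every sync call straight into whichever table get_dma_ops() returns. Something like the following inline conveys the shape of include/linux/dma-mapping.h after this series (a sketch, not a verbatim quote):

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
			enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
}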