Diffstat (limited to 'arch')

 arch/ia64/dig/Makefile                         |    4
 arch/ia64/dig/dig_vtd_iommu.c                  |   59
 arch/ia64/hp/common/hwsw_iommu.c               |  165
 arch/ia64/hp/common/sba_iommu.c                |   79
 arch/ia64/include/asm/dma-mapping.h            |  194
 arch/ia64/include/asm/machvec.h                |  102
 arch/ia64/include/asm/machvec_dig_vtd.h        |   20
 arch/ia64/include/asm/machvec_hpzx1.h          |   23
 arch/ia64/include/asm/machvec_hpzx1_swiotlb.h  |   27
 arch/ia64/include/asm/machvec_sn2.h            |   27
 arch/ia64/kernel/Makefile                      |    4
 arch/ia64/kernel/dma-mapping.c                 |   13
 arch/ia64/kernel/machvec.c                     |    8
 arch/ia64/kernel/pci-dma.c                     |   46
 arch/ia64/kernel/pci-swiotlb.c                 |   30
 arch/ia64/sn/pci/pci_dma.c                     |   99
 arch/x86/include/asm/device.h                  |    2
 arch/x86/include/asm/dma-mapping.h             |  149
 arch/x86/include/asm/iommu.h                   |    2
 arch/x86/kernel/Makefile                       |    2
 arch/x86/kernel/amd_iommu.c                    |   26
 arch/x86/kernel/pci-calgary_64.c               |   38
 arch/x86/kernel/pci-dma.c                      |    4
 arch/x86/kernel/pci-gart_64.c                  |   34
 arch/x86/kernel/pci-nommu.c                    |   21
 arch/x86/kernel/pci-swiotlb.c (renamed from arch/x86/kernel/pci-swiotlb_64.c) | 19
 26 files changed, 453 insertions(+), 744 deletions(-)
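The change, in one line: the ia64 machvec per-operation DMA hooks (and the remaining per-IOMMU x86 tables) are converted to the common struct dma_map_ops vtable, so each platform registers one ops table plus an init/get_ops hook instead of a dozen individual function pointers. For orientation, a sketch of the table's shape as the initializers in this diff use it; the field list is abridged to what the hunks below actually reference, and the canonical definition in the generic headers carries a few more members:

/* Abridged sketch, not the canonical definition. */
struct dma_map_ops {
        void *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               struct dma_attrs *attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t handle, size_t size,
                           enum dma_data_direction dir, struct dma_attrs *attrs);
        int (*map_sg)(struct device *dev, struct scatterlist *sgl, int nents,
                      enum dma_data_direction dir, struct dma_attrs *attrs);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sgl, int nents,
                         enum dma_data_direction dir, struct dma_attrs *attrs);
        void (*sync_single_for_cpu)(struct device *dev, dma_addr_t handle,
                                    size_t size, enum dma_data_direction dir);
        /* ...sync_single_for_device, sync_sg_for_cpu/device, range variants... */
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
};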
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd6..2f7caddf093e 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
 
 obj-y := setup.o
 ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
 else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index 1c8a079017a3..000000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                   gfp_t flags)
-{
-        return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
-                  dma_addr_t dma_handle)
-{
-        intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
-                     int dir, struct dma_attrs *attrs)
-{
-        return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                       int dir, struct dma_attrs *attrs)
-{
-        intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                 int dir, struct dma_attrs *attrs)
-{
-        return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-                   int nents, int dir, struct dma_attrs *attrs)
-{
-        intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2769dbfd03bf..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,49 +13,34 @@
  */
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
-
 #include <asm/machvec.h>
 
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
 
-/* hwiommu declarations & definitions: */
-
-extern ia64_mv_dma_alloc_coherent       sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent        sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs     sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs   sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs         sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs       sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported            sba_dma_supported;
-extern ia64_mv_dma_mapping_error        sba_dma_mapping_error;
-
-#define hwiommu_alloc_coherent          sba_alloc_coherent
-#define hwiommu_free_coherent           sba_free_coherent
-#define hwiommu_map_single_attrs        sba_map_single_attrs
-#define hwiommu_unmap_single_attrs      sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs            sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs          sba_unmap_sg_attrs
-#define hwiommu_dma_supported           sba_dma_supported
-#define hwiommu_dma_mapping_error       sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu     machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu         machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device  machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device      machvec_dma_sync_sg
-
-
 /*
  * Note: we need to make the determination of whether or not to use
  * the sw I/O TLB based purely on the device structure.  Anything else
  * would be unreliable or would be too intrusive.
  */
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
 {
-        return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+        return dev && dev->dma_mask &&
+                !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+{
+        if (use_swiotlb(dev))
+                return &swiotlb_dma_ops;
+        return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
+
 void __init
 hwsw_init (void)
 {
@@ -71,125 +56,3 @@ hwsw_init (void)
 #endif
         }
 }
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
-        if (use_swiotlb(dev))
-                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-        else
-                return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-        if (use_swiotlb(dev))
-                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-        else
-                hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-                      struct dma_attrs *attrs)
-{
-        if (use_swiotlb(dev))
-                return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
-        else
-                return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                        int dir, struct dma_attrs *attrs)
-{
-        if (use_swiotlb(dev))
-                return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
-        else
-                return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                  int dir, struct dma_attrs *attrs)
-{
-        if (use_swiotlb(dev))
-                return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
-        else
-                return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                    int dir, struct dma_attrs *attrs)
-{
-        if (use_swiotlb(dev))
-                return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-        else
-                return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-        if (use_swiotlb(dev))
-                swiotlb_sync_single_for_cpu(dev, addr, size, dir);
-        else
-                hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-        if (use_swiotlb(dev))
-                swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
-        else
-                hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-        if (use_swiotlb(dev))
-                swiotlb_sync_single_for_device(dev, addr, size, dir);
-        else
-                hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-        if (use_swiotlb(dev))
-                swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
-        else
-                hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
-        if (hwiommu_dma_supported(dev, mask))
-                return 1;
-        return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return hwiommu_dma_mapping_error(dev, dma_addr) ||
-                swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
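The hwsw (hpzx1_swiotlb) machvec used to wrap every DMA entry point in an if (use_swiotlb(dev)) branch; after this change the branch runs once, when the ops table is fetched. A hypothetical caller-side sketch (example_map is illustrative, not part of the patch):

/* Illustrative only: the per-device decision now happens once, at
 * hwsw_dma_get_ops(), instead of inside twelve separate wrappers. */
static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
{
        struct dma_map_ops *ops = hwsw_dma_get_ops(dev); /* &sba_dma_ops or &swiotlb_dma_ops */

        return ops->map_page(dev, virt_to_page(buf),
                             (unsigned long)buf & ~PAGE_MASK, len,
                             DMA_TO_DEVICE, NULL);
}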
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 6d5e6c5630e3..56ceb68eb99d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -36,6 +36,7 @@
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
 #include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/delay.h>          /* ia64_get_itc() */
 #include <asm/io.h>
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-                     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+                               unsigned long poff, size_t size,
+                               enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
 {
         struct ioc *ioc;
+        void *addr = page_address(page) + poff;
         dma_addr_t iovp;
         dma_addr_t offset;
         u64 *pdir_start;
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 #endif
         return SBA_IOVA(ioc, iovp, offset);
 }
-EXPORT_SYMBOL(sba_map_single_attrs);
+
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+                                       size_t size, enum dma_data_direction dir,
+                                       struct dma_attrs *attrs)
+{
+        return sba_map_page(dev, virt_to_page(addr),
+                            (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                            int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+                           enum dma_data_direction dir, struct dma_attrs *attrs)
 {
         struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
         spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-EXPORT_SYMBOL(sba_unmap_single_attrs);
+
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+                            enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+        sba_unmap_page(dev, iova, size, dir, attrs);
+}
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void *
+static void *
 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
         struct ioc *ioc;
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+                               dma_addr_t dma_handle)
 {
         sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
         free_pages((unsigned long) vaddr, get_order(size));
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                     int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+                            int nents, enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
 {
         struct ioc *ioc;
         int coalesced, filled = 0;
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
 
         return filled;
 }
-EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
  * sba_unmap_sg_attrs - unmap Scatter/Gather list
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-                        int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+                               int nents, enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
         struct ioc *ioc;
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 #endif
 
 }
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
         },
 };
 
+extern struct dma_map_ops swiotlb_dma_ops;
+
 static int __init
 sba_init(void)
 {
@@ -2077,6 +2095,7 @@ sba_init(void)
          * a successful kdump kernel boot is to use the swiotlb.
          */
         if (is_kdump_kernel()) {
+                dma_ops = &swiotlb_dma_ops;
                 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                         panic("Unable to initialize software I/O TLB:"
                               " Try machvec=dig boot option");
@@ -2092,6 +2111,7 @@ sba_init(void)
          * If we didn't find something sba_iommu can claim, we
          * need to setup the swiotlb and switch to the dig machvec.
          */
+        dma_ops = &swiotlb_dma_ops;
         if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                 panic("Unable to find SBA IOMMU or initialize "
                       "software I/O TLB: Try machvec=dig boot option");
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
         return 1;
 }
 
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
 {
         /* make sure it's at least 32bit capable */
         return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
 }
 
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
         return 0;
 }
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
+struct dma_map_ops sba_dma_ops = {
+        .alloc_coherent         = sba_alloc_coherent,
+        .free_coherent          = sba_free_coherent,
+        .map_page               = sba_map_page,
+        .unmap_page             = sba_unmap_page,
+        .map_sg                 = sba_map_sg_attrs,
+        .unmap_sg               = sba_unmap_sg_attrs,
+        .sync_single_for_cpu    = machvec_dma_sync_single,
+        .sync_sg_for_cpu        = machvec_dma_sync_sg,
+        .sync_single_for_device = machvec_dma_sync_single,
+        .sync_sg_for_device     = machvec_dma_sync_sg,
+        .dma_supported          = sba_dma_supported,
+        .mapping_error          = sba_dma_mapping_error,
+};
+
+void sba_dma_init(void)
+{
+        dma_ops = &sba_dma_ops;
+}
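sba_map_single_attrs/sba_unmap_single_attrs survive only as thin wrappers (sba_free_coherent still calls the former); the ops table points straight at the page-based entries. A minimal sketch of the address split those wrappers perform, written as if inside sba_iommu.c since sba_map_page is static there:

/* Hypothetical helper: a kernel virtual address becomes (page, offset)
 * for the map_page hook; the offset is always below PAGE_SIZE. */
static dma_addr_t example_map_vaddr(struct device *dev, void *addr,
                                    size_t size, enum dma_data_direction dir)
{
        struct page *page = virt_to_page(addr);
        unsigned long off = (unsigned long)addr & ~PAGE_MASK;

        return sba_map_page(dev, page, off, size, dir, NULL);
}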
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d927585..36c0009dbece 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-struct dma_mapping_ops {
-        int (*mapping_error)(struct device *dev,
-                             dma_addr_t dma_addr);
-        void* (*alloc_coherent)(struct device *dev, size_t size,
-                                dma_addr_t *dma_handle, gfp_t gfp);
-        void (*free_coherent)(struct device *dev, size_t size,
-                              void *vaddr, dma_addr_t dma_handle);
-        dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr,
-                                 size_t size, int direction);
-        void (*unmap_single)(struct device *dev, dma_addr_t addr,
-                             size_t size, int direction);
-        void (*sync_single_for_cpu)(struct device *hwdev,
-                                    dma_addr_t dma_handle, size_t size,
-                                    int direction);
-        void (*sync_single_for_device)(struct device *hwdev,
-                                       dma_addr_t dma_handle, size_t size,
-                                       int direction);
-        void (*sync_single_range_for_cpu)(struct device *hwdev,
-                                          dma_addr_t dma_handle, unsigned long offset,
-                                          size_t size, int direction);
-        void (*sync_single_range_for_device)(struct device *hwdev,
-                                             dma_addr_t dma_handle, unsigned long offset,
-                                             size_t size, int direction);
-        void (*sync_sg_for_cpu)(struct device *hwdev,
-                                struct scatterlist *sg, int nelems,
-                                int direction);
-        void (*sync_sg_for_device)(struct device *hwdev,
-                                   struct scatterlist *sg, int nelems,
-                                   int direction);
-        int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-                      int nents, int direction);
-        void (*unmap_sg)(struct device *hwdev,
-                         struct scatterlist *sg, int nents,
-                         int direction);
-        int (*dma_supported_op)(struct device *hwdev, u64 mask);
-        int is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)      \
-        platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+                                    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+                                enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+                                       dma_addr_t *daddr, gfp_t gfp)
 {
-        return dma_alloc_coherent(dev, size, dma_handle, flag);
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        return ops->alloc_coherent(dev, size, daddr, gfp);
 }
-#define dma_free_coherent       platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-                     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                                     void *caddr, dma_addr_t daddr)
+{
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+                                              void *caddr, size_t size,
+                                              enum dma_data_direction dir,
+                                              struct dma_attrs *attrs)
+{
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        return ops->map_page(dev, virt_to_page(caddr),
+                             (unsigned long)caddr & ~PAGE_MASK, size,
+                             dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+                                          size_t size,
+                                          enum dma_data_direction dir,
+                                          struct dma_attrs *attrs)
+{
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                                   int nents, enum dma_data_direction dir,
+                                   struct dma_attrs *attrs)
+{
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+                                      struct scatterlist *sgl, int nents,
+                                      enum dma_data_direction dir,
+                                      struct dma_attrs *attrs)
+{
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+                                           size_t size,
+                                           enum dma_data_direction dir)
 {
-        dma_free_coherent(dev, size, cpu_addr, dma_handle);
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs    platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-                                        size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+                                       struct scatterlist *sgl,
+                                       int nents, enum dma_data_direction dir)
 {
-        return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs        platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-                             int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+                                              dma_addr_t daddr,
+                                              size_t size,
+                                              enum dma_data_direction dir)
 {
-        return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs  platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-                                    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+                                          struct scatterlist *sgl,
+                                          int nents,
+                                          enum dma_data_direction dir)
 {
-        return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs      platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-                                int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                      size_t offset, size_t size,
+                                      enum dma_data_direction dir)
 {
-        return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu     platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device  platform_dma_sync_sg_for_device
-#define dma_mapping_error       platform_dma_mapping_error
 
-#define dma_map_page(dev, pg, off, size, dir)                           \
-        dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)                        \
-        dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+                                  size_t size, enum dma_data_direction dir)
+{
+        dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)   \
         dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported           platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
+        return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h) (1)     /* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-        return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
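Driver-visible behavior is meant to be unchanged: the dma_* names above keep their old signatures, they just dispatch through the table now. A hypothetical driver fragment exercising the inlines (example_tx is illustrative, not part of the patch):

/* Each call resolves via platform_dma_get_ops(dev) to the platform's
 * dma_map_ops table defined elsewhere in this series. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, daddr))      /* ops->mapping_error() */
                return -EIO;
        /* ...start the transfer, wait for completion... */
        dma_unmap_single(dev, daddr, len, DMA_TO_DEVICE);
        return 0;
}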
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b2121707..367d299d9938 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_global_tlb_purge      ia64_mv.global_tlb_purge
 # define platform_tlb_migrate_finish    ia64_mv.tlb_migrate_finish
 # define platform_dma_init              ia64_mv.dma_init
-# define platform_dma_alloc_coherent    ia64_mv.dma_alloc_coherent
-# define platform_dma_free_coherent     ia64_mv.dma_free_coherent
-# define platform_dma_map_single_attrs  ia64_mv.dma_map_single_attrs
-# define platform_dma_unmap_single_attrs        ia64_mv.dma_unmap_single_attrs
-# define platform_dma_map_sg_attrs      ia64_mv.dma_map_sg_attrs
-# define platform_dma_unmap_sg_attrs    ia64_mv.dma_unmap_sg_attrs
-# define platform_dma_sync_single_for_cpu       ia64_mv.dma_sync_single_for_cpu
-# define platform_dma_sync_sg_for_cpu   ia64_mv.dma_sync_sg_for_cpu
-# define platform_dma_sync_single_for_device    ia64_mv.dma_sync_single_for_device
-# define platform_dma_sync_sg_for_device        ia64_mv.dma_sync_sg_for_device
-# define platform_dma_mapping_error     ia64_mv.dma_mapping_error
-# define platform_dma_supported         ia64_mv.dma_supported
 # define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+# define platform_dma_get_ops           ia64_mv.dma_get_ops
 # define platform_irq_to_vector         ia64_mv.irq_to_vector
 # define platform_local_vector_to_irq   ia64_mv.local_vector_to_irq
 # define platform_pci_get_legacy_mem    ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
         ia64_mv_global_tlb_purge_t *global_tlb_purge;
         ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
         ia64_mv_dma_init *dma_init;
-        ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-        ia64_mv_dma_free_coherent *dma_free_coherent;
-        ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-        ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-        ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-        ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-        ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-        ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-        ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-        ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-        ia64_mv_dma_mapping_error *dma_mapping_error;
-        ia64_mv_dma_supported *dma_supported;
         ia64_mv_dma_get_required_mask *dma_get_required_mask;
+        ia64_mv_dma_get_ops *dma_get_ops;
         ia64_mv_irq_to_vector *irq_to_vector;
         ia64_mv_local_vector_to_irq *local_vector_to_irq;
         ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
         platform_global_tlb_purge,              \
         platform_tlb_migrate_finish,            \
         platform_dma_init,                      \
-        platform_dma_alloc_coherent,            \
-        platform_dma_free_coherent,             \
-        platform_dma_map_single_attrs,          \
-        platform_dma_unmap_single_attrs,        \
-        platform_dma_map_sg_attrs,              \
-        platform_dma_unmap_sg_attrs,            \
-        platform_dma_sync_single_for_cpu,       \
-        platform_dma_sync_sg_for_cpu,           \
-        platform_dma_sync_single_for_device,    \
-        platform_dma_sync_sg_for_device,        \
-        platform_dma_mapping_error,             \
-        platform_dma_supported,                 \
         platform_dma_get_required_mask,         \
+        platform_dma_get_ops,                   \
         platform_irq_to_vector,                 \
         platform_local_vector_to_irq,           \
         platform_pci_get_legacy_mem,            \
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event   machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init              swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent    swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent     swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs  swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs        swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs      swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs    swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu       swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu   swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device    swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device        swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error             swiotlb_dma_mapping_error
-#endif
-#ifndef platform_dma_supported
-# define platform_dma_supported         swiotlb_dma_supported
+# define platform_dma_init              swiotlb_dma_init
 #endif
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops           dma_get_ops
+#endif
 #ifndef platform_dma_get_required_mask
 # define platform_dma_get_required_mask ia64_dma_get_required_mask
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t                  dig_setup;
-extern ia64_mv_dma_alloc_coherent       vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent        vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs     vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs   vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs         vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs       vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported            iommu_dma_supported;
-extern ia64_mv_dma_mapping_error        vtd_dma_mapping_error;
 extern ia64_mv_dma_init                 pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init pci_iommu_alloc;
 #define platform_name                           "dig_vtd"
 #define platform_setup                          dig_setup
 #define platform_dma_init                       pci_iommu_alloc
-#define platform_dma_alloc_coherent             vtd_alloc_coherent
-#define platform_dma_free_coherent              vtd_free_coherent
-#define platform_dma_map_single_attrs           vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs         vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs               vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs             vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu        machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu            machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device     machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device         machvec_dma_sync_sg
-#define platform_dma_supported                  iommu_dma_supported
-#define platform_dma_mapping_error              vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t                  dig_setup;
-extern ia64_mv_dma_alloc_coherent       sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent        sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs     sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs   sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs         sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs       sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported            sba_dma_supported;
-extern ia64_mv_dma_mapping_error        sba_dma_mapping_error;
+extern ia64_mv_dma_init                 sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
  */
 #define platform_name                           "hpzx1"
 #define platform_setup                          dig_setup
-#define platform_dma_init                       machvec_noop
-#define platform_dma_alloc_coherent             sba_alloc_coherent
-#define platform_dma_free_coherent              sba_free_coherent
-#define platform_dma_map_single_attrs           sba_map_single_attrs
-#define platform_dma_unmap_single_attrs         sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs               sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs             sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu        machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu            machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device     machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device         machvec_dma_sync_sg
-#define platform_dma_supported                  sba_dma_supported
-#define platform_dma_mapping_error              sba_dma_mapping_error
+#define platform_dma_init                       sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827b..1091ac39740c 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t                          dig_setup;
-extern ia64_mv_dma_alloc_coherent               hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent                hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs             hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs           hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs                 hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs               hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported                    hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error                hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu          hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu              hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device       hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device           hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops                      hwsw_dma_get_ops;
 
 /*
  * This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
  * the macros are used directly.
  */
 #define platform_name                           "hpzx1_swiotlb"
-
 #define platform_setup                          dig_setup
 #define platform_dma_init                       machvec_noop
-#define platform_dma_alloc_coherent             hwsw_alloc_coherent
-#define platform_dma_free_coherent              hwsw_free_coherent
-#define platform_dma_map_single_attrs           hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs         hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs               hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs             hwsw_unmap_sg_attrs
-#define platform_dma_supported                  hwsw_dma_supported
-#define platform_dma_mapping_error              hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu        hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu            hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device     hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device         hwsw_sync_sg_for_device
+#define platform_dma_get_ops                    hwsw_dma_get_ops
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index f1a6e0d6dfa5..f061a30aac42 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent       sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent        sn_dma_free_coherent;
-extern ia64_mv_dma_map_single_attrs     sn_dma_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs   sn_dma_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs         sn_dma_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs       sn_dma_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu  sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu      sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device   sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error        sn_dma_mapping_error;
-extern ia64_mv_dma_supported            sn_dma_supported;
 extern ia64_mv_dma_get_required_mask    sn_dma_get_required_mask;
+extern ia64_mv_dma_init                 sn_dma_init;
 extern ia64_mv_migrate_t                sn_migrate;
 extern ia64_mv_kernel_launch_event_t    sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t          sn_setup_msi_irq;
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem     sn_pci_get_legacy_mem
 #define platform_pci_legacy_read        sn_pci_legacy_read
 #define platform_pci_legacy_write       sn_pci_legacy_write
-#define platform_dma_init               machvec_noop
-#define platform_dma_alloc_coherent     sn_dma_alloc_coherent
-#define platform_dma_free_coherent      sn_dma_free_coherent
-#define platform_dma_map_single_attrs   sn_dma_map_single_attrs
-#define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs
-#define platform_dma_map_sg_attrs       sn_dma_map_sg_attrs
-#define platform_dma_unmap_sg_attrs     sn_dma_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu    sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error      sn_dma_mapping_error
-#define platform_dma_supported          sn_dma_supported
 #define platform_dma_get_required_mask  sn_dma_get_required_mask
+#define platform_dma_init               sn_dma_init
 #define platform_migrate                sn_migrate
 #define platform_kernel_launch_event    sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index c381ea954892..f2778f2c4fd9 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
          irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
          salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
-         unwind.o mca.o mca_asm.o topology.o
+         unwind.o mca.o mca_asm.o topology.o dma-mapping.o
 
 obj-$(CONFIG_IA64_BRL_EMU)      += brl_emu.o
 obj-$(CONFIG_IA64_GENERIC)      += acpi-ext.o
@@ -43,9 +43,7 @@ ifneq ($(CONFIG_IA64_ESI),)
 obj-y                           += esi_stub.o   # must be in kernel proper
 endif
 obj-$(CONFIG_DMAR)              += pci-dma.o
-ifeq ($(CONFIG_DMAR), y)
 obj-$(CONFIG_SWIOTLB)           += pci-swiotlb.o
-endif
 
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
new file mode 100644
index 000000000000..086a2aeb0404
--- /dev/null
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -0,0 +1,13 @@
+#include <linux/dma-mapping.h>
+
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly;
+
+struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+struct dma_map_ops *dma_get_ops(struct device *dev)
+{
+        return dma_ops;
+}
+EXPORT_SYMBOL(dma_get_ops);
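dma_get_ops() deliberately ignores its device argument: generic ia64 keeps one global table and swaps it at boot, and only a machvec that overrides platform_dma_get_ops (hwsw_dma_get_ops above) picks per device. A condensed sketch of the boot-time selection implied by this series (not a literal function in the patch):

static void example_pick_dma_ops(void)
{
        if (iommu_detected)
                dma_ops = &intel_dma_ops;       /* pci_iommu_alloc(), DMAR path */
        else
                dma_ops = &swiotlb_dma_ops;     /* swiotlb_dma_init() fallback */
        /* hpzx1 instead calls sba_dma_init(), i.e. dma_ops = &sba_dma_ops */
}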
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 7ccb228ceedc..d41a40ef80c0 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -1,5 +1,5 @@
 #include <linux/module.h>
-
+#include <linux/dma-mapping.h>
 #include <asm/machvec.h>
 #include <asm/system.h>
 
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
 EXPORT_SYMBOL(machvec_timer_interrupt);
 
 void
-machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
+                        enum dma_data_direction dir)
 {
         mb();
 }
 EXPORT_SYMBOL(machvec_dma_sync_single);
 
 void
-machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
+                    enum dma_data_direction dir)
 {
         mb();
 }
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index d0ada067a4af..e4cb443bb988 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1;
 int force_iommu __read_mostly;
 #endif
 
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly;
-
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
@@ -44,18 +41,7 @@ struct device fallback_dev = {
         .dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-void __init pci_iommu_alloc(void)
-{
-        /*
-         * The order of these functions is important for
-         * fall-back/fail-over reasons
-         */
-        detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-        pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -79,15 +65,12 @@ iommu_dma_init(void)
         return;
 }
 
-struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-        struct dma_mapping_ops *ops = get_dma_ops(dev);
+        struct dma_map_ops *ops = platform_dma_get_ops(dev);
 
-        if (ops->dma_supported_op)
-                return ops->dma_supported_op(dev, mask);
+        if (ops->dma_supported)
+                return ops->dma_supported(dev, mask);
 
         /* Copied from i386. Doesn't make much sense, because it will
            only work for pci_alloc_coherent.
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(iommu_dma_supported);
 
+void __init pci_iommu_alloc(void)
+{
+        dma_ops = &intel_dma_ops;
+
+        dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+        dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+        dma_ops->sync_single_for_device = machvec_dma_sync_single;
+        dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+        dma_ops->dma_supported = iommu_dma_supported;
+
+        /*
+         * The order of these functions is important for
+         * fall-back/fail-over reasons
+         */
+        detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+        pci_swiotlb_init();
+#endif
+}
+
 #endif
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 16c50516dbc1..573f02c39a00 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -13,23 +13,37 @@
13int swiotlb __read_mostly; 13int swiotlb __read_mostly;
14EXPORT_SYMBOL(swiotlb); 14EXPORT_SYMBOL(swiotlb);
15 15
16struct dma_mapping_ops swiotlb_dma_ops = { 16static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
17 .mapping_error = swiotlb_dma_mapping_error, 17 dma_addr_t *dma_handle, gfp_t gfp)
18 .alloc_coherent = swiotlb_alloc_coherent, 18{
19 if (dev->coherent_dma_mask != DMA_64BIT_MASK)
20 gfp |= GFP_DMA;
21 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
22}
23
24struct dma_map_ops swiotlb_dma_ops = {
25 .alloc_coherent = ia64_swiotlb_alloc_coherent,
19 .free_coherent = swiotlb_free_coherent, 26 .free_coherent = swiotlb_free_coherent,
20 .map_single = swiotlb_map_single, 27 .map_page = swiotlb_map_page,
21 .unmap_single = swiotlb_unmap_single, 28 .unmap_page = swiotlb_unmap_page,
29 .map_sg = swiotlb_map_sg_attrs,
30 .unmap_sg = swiotlb_unmap_sg_attrs,
22 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 31 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
23 .sync_single_for_device = swiotlb_sync_single_for_device, 32 .sync_single_for_device = swiotlb_sync_single_for_device,
24 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 33 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
25 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 34 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
26 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 35 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
27 .sync_sg_for_device = swiotlb_sync_sg_for_device, 36 .sync_sg_for_device = swiotlb_sync_sg_for_device,
28 .map_sg = swiotlb_map_sg, 37 .dma_supported = swiotlb_dma_supported,
29 .unmap_sg = swiotlb_unmap_sg, 38 .mapping_error = swiotlb_dma_mapping_error,
30 .dma_supported_op = swiotlb_dma_supported,
31}; 39};
32 40
41void __init swiotlb_dma_init(void)
42{
43 dma_ops = &swiotlb_dma_ops;
44 swiotlb_init();
45}
46
33void __init pci_swiotlb_init(void) 47void __init pci_swiotlb_init(void)
34{ 48{
35 if (!iommu_detected) { 49 if (!iommu_detected) {
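
The new ia64_swiotlb_alloc_coherent() steers devices with a narrow coherent mask into the GFP_DMA zone, which on ia64 covers the low 4 GB. A sketch of the effect from the driver side, with the device pointer and size as placeholders:

    dma_addr_t handle;
    void *buf;

    /* coherent_dma_mask != DMA_64BIT_MASK, so the wrapper ORs in
     * GFP_DMA and the backing pages land below 4 GB. */
    dev->coherent_dma_mask = DMA_32BIT_MASK;
    buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
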
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 863f5017baae..8c130e8f00e1 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/dma-attrs.h> 13#include <linux/dma-mapping.h>
14#include <asm/dma.h> 14#include <asm/dma.h>
15#include <asm/sn/intr.h> 15#include <asm/sn/intr.h>
16#include <asm/sn/pcibus_provider_defs.h> 16#include <asm/sn/pcibus_provider_defs.h>
@@ -31,7 +31,7 @@
31 * this function. Of course, SN only supports devices that have 32 or more 31 * this function. Of course, SN only supports devices that have 32 or more
32 * address bits when using the PMU. 32 * address bits when using the PMU.
33 */ 33 */
34int sn_dma_supported(struct device *dev, u64 mask) 34static int sn_dma_supported(struct device *dev, u64 mask)
35{ 35{
36 BUG_ON(dev->bus != &pci_bus_type); 36 BUG_ON(dev->bus != &pci_bus_type);
37 37
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask)
39 return 0; 39 return 0;
40 return 1; 40 return 1;
41} 41}
42EXPORT_SYMBOL(sn_dma_supported);
43 42
44/** 43/**
45 * sn_dma_set_mask - set the DMA mask 44 * sn_dma_set_mask - set the DMA mask
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
75 * queue for a SCSI controller). See Documentation/DMA-API.txt for 74 * queue for a SCSI controller). See Documentation/DMA-API.txt for
76 * more information. 75 * more information.
77 */ 76 */
78void *sn_dma_alloc_coherent(struct device *dev, size_t size, 77static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
79 dma_addr_t * dma_handle, gfp_t flags) 78 dma_addr_t * dma_handle, gfp_t flags)
80{ 79{
81 void *cpuaddr; 80 void *cpuaddr;
82 unsigned long phys_addr; 81 unsigned long phys_addr;
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
124 123
125 return cpuaddr; 124 return cpuaddr;
126} 125}
127EXPORT_SYMBOL(sn_dma_alloc_coherent);
128 126
129/** 127/**
130 * sn_pci_free_coherent - free memory associated with coherent DMAable region 128 * sn_pci_free_coherent - free memory associated with coherent DMAable region
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
136 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping 134 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
137 * any associated IOMMU mappings. 135 * any associated IOMMU mappings.
138 */ 136 */
139void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 137static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
140 dma_addr_t dma_handle) 138 dma_addr_t dma_handle)
141{ 139{
142 struct pci_dev *pdev = to_pci_dev(dev); 140 struct pci_dev *pdev = to_pci_dev(dev);
143 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 141 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
147 provider->dma_unmap(pdev, dma_handle, 0); 145 provider->dma_unmap(pdev, dma_handle, 0);
148 free_pages((unsigned long)cpu_addr, get_order(size)); 146 free_pages((unsigned long)cpu_addr, get_order(size));
149} 147}
150EXPORT_SYMBOL(sn_dma_free_coherent);
151 148
152/** 149/**
153 * sn_dma_map_single_attrs - map a single page for DMA 150 * sn_dma_map_single_attrs - map a single page for DMA
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
173 * TODO: simplify our interface; 170 * TODO: simplify our interface;
174 * figure out how to save dmamap handle so can use two step. 171 * figure out how to save dmamap handle so can use two step.
175 */ 172 */
176dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, 173static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
177 size_t size, int direction, 174 unsigned long offset, size_t size,
178 struct dma_attrs *attrs) 175 enum dma_data_direction dir,
176 struct dma_attrs *attrs)
179{ 177{
178 void *cpu_addr = page_address(page) + offset;
180 dma_addr_t dma_addr; 179 dma_addr_t dma_addr;
181 unsigned long phys_addr; 180 unsigned long phys_addr;
182 struct pci_dev *pdev = to_pci_dev(dev); 181 struct pci_dev *pdev = to_pci_dev(dev);
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
201 } 200 }
202 return dma_addr; 201 return dma_addr;
203} 202}
204EXPORT_SYMBOL(sn_dma_map_single_attrs);
205 203
206/** 204/**
207 * sn_dma_unmap_single_attrs - unmap a DMA mapped page 205
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs);
215 * by @dma_handle into the coherence domain. On SN, we're always cache 213 * by @dma_handle into the coherence domain. On SN, we're always cache
216 * coherent, so we just need to free any ATEs associated with this mapping. 214 * coherent, so we just need to free any ATEs associated with this mapping.
217 */ 215 */
218void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, 216static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
219 size_t size, int direction, 217 size_t size, enum dma_data_direction dir,
220 struct dma_attrs *attrs) 218 struct dma_attrs *attrs)
221{ 219{
222 struct pci_dev *pdev = to_pci_dev(dev); 220 struct pci_dev *pdev = to_pci_dev(dev);
223 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 221 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
224 222
225 BUG_ON(dev->bus != &pci_bus_type); 223 BUG_ON(dev->bus != &pci_bus_type);
226 224
227 provider->dma_unmap(pdev, dma_addr, direction); 225 provider->dma_unmap(pdev, dma_addr, dir);
228} 226}
229EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
230 227
231/** 228/**
232 * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist 229 * sn_dma_unmap_sg - unmap a DMA scatterlist
233 * @dev: device to unmap 230 * @dev: device to unmap
234 * @sg: scatterlist to unmap 231 * @sg: scatterlist to unmap
235 * @nhwentries: number of scatterlist entries 232 * @nhwentries: number of scatterlist entries
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
238 * 235 *
239 * Unmap a set of streaming mode DMA translations. 236 * Unmap a set of streaming mode DMA translations.
240 */ 237 */
241void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 238static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
242 int nhwentries, int direction, 239 int nhwentries, enum dma_data_direction dir,
243 struct dma_attrs *attrs) 240 struct dma_attrs *attrs)
244{ 241{
245 int i; 242 int i;
246 struct pci_dev *pdev = to_pci_dev(dev); 243 struct pci_dev *pdev = to_pci_dev(dev);
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
250 BUG_ON(dev->bus != &pci_bus_type); 247 BUG_ON(dev->bus != &pci_bus_type);
251 248
252 for_each_sg(sgl, sg, nhwentries, i) { 249 for_each_sg(sgl, sg, nhwentries, i) {
253 provider->dma_unmap(pdev, sg->dma_address, direction); 250 provider->dma_unmap(pdev, sg->dma_address, dir);
254 sg->dma_address = (dma_addr_t) NULL; 251 sg->dma_address = (dma_addr_t) NULL;
255 sg->dma_length = 0; 252 sg->dma_length = 0;
256 } 253 }
257} 254}
258EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
259 255
260/** 256/**
261 * sn_dma_map_sg_attrs - map a scatterlist for DMA 257 * sn_dma_map_sg - map a scatterlist for DMA
262 * @dev: device to map for 258 * @dev: device to map for
263 * @sg: scatterlist to map 259 * @sg: scatterlist to map
264 * @nhwentries: number of entries 260 * @nhwentries: number of entries
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
272 * 268 *
273 * Maps each entry of @sg for DMA. 269 * Maps each entry of @sg for DMA.
274 */ 270 */
275int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, 271static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
276 int nhwentries, int direction, struct dma_attrs *attrs) 272 int nhwentries, enum dma_data_direction dir,
273 struct dma_attrs *attrs)
277{ 274{
278 unsigned long phys_addr; 275 unsigned long phys_addr;
279 struct scatterlist *saved_sg = sgl, *sg; 276 struct scatterlist *saved_sg = sgl, *sg;
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
310 * Free any successfully allocated entries. 307 * Free any successfully allocated entries.
311 */ 308 */
312 if (i > 0) 309 if (i > 0)
313 sn_dma_unmap_sg_attrs(dev, saved_sg, i, 310 sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
314 direction, attrs);
315 return 0; 311 return 0;
316 } 312 }
317 313
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
320 316
321 return nhwentries; 317 return nhwentries;
322} 318}
323EXPORT_SYMBOL(sn_dma_map_sg_attrs);
324 319
325void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 320static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
326 size_t size, int direction) 321 size_t size, enum dma_data_direction dir)
327{ 322{
328 BUG_ON(dev->bus != &pci_bus_type); 323 BUG_ON(dev->bus != &pci_bus_type);
329} 324}
330EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
331 325
332void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 326static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
333 size_t size, int direction) 327 size_t size,
328 enum dma_data_direction dir)
334{ 329{
335 BUG_ON(dev->bus != &pci_bus_type); 330 BUG_ON(dev->bus != &pci_bus_type);
336} 331}
337EXPORT_SYMBOL(sn_dma_sync_single_for_device);
338 332
339void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 333static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
340 int nelems, int direction) 334 int nelems, enum dma_data_direction dir)
341{ 335{
342 BUG_ON(dev->bus != &pci_bus_type); 336 BUG_ON(dev->bus != &pci_bus_type);
343} 337}
344EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
345 338
346void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 339static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
347 int nelems, int direction) 340 int nelems, enum dma_data_direction dir)
348{ 341{
349 BUG_ON(dev->bus != &pci_bus_type); 342 BUG_ON(dev->bus != &pci_bus_type);
350} 343}
351EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
352 344
353int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 345static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
354{ 346{
355 return 0; 347 return 0;
356} 348}
357EXPORT_SYMBOL(sn_dma_mapping_error);
358 349
359u64 sn_dma_get_required_mask(struct device *dev) 350u64 sn_dma_get_required_mask(struct device *dev)
360{ 351{
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
471 out: 462 out:
472 return ret; 463 return ret;
473} 464}
465
466static struct dma_map_ops sn_dma_ops = {
467 .alloc_coherent = sn_dma_alloc_coherent,
468 .free_coherent = sn_dma_free_coherent,
469 .map_page = sn_dma_map_page,
470 .unmap_page = sn_dma_unmap_page,
471 .map_sg = sn_dma_map_sg,
472 .unmap_sg = sn_dma_unmap_sg,
473 .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
474 .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
475 .sync_single_for_device = sn_dma_sync_single_for_device,
476 .sync_sg_for_device = sn_dma_sync_sg_for_device,
477 .mapping_error = sn_dma_mapping_error,
478 .dma_supported = sn_dma_supported,
479};
480
481void sn_dma_init(void)
482{
483 dma_ops = &sn_dma_ops;
484}
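
Every sn_* entry point above becomes static; the only surface left is the sn_dma_ops table that sn_dma_init() installs as the global dma_ops. The map_single-to-map_page conversion is mechanical because a (page, offset) pair names the same kernel virtual address the old callers passed in, SN being ia64 with no highmem. The identity it relies on, as a sketch:

    /* As in sn_dma_map_page(): rebuild the old cpu_addr argument,
     * then the physical address the pcibus provider actually maps. */
    void *cpu_addr = page_address(page) + offset;
    unsigned long phys_addr = __pa(cpu_addr);
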
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 3c034f48fdb0..4994a20acbcb 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,7 +6,7 @@ struct dev_archdata {
6 void *acpi_handle; 6 void *acpi_handle;
7#endif 7#endif
8#ifdef CONFIG_X86_64 8#ifdef CONFIG_X86_64
9struct dma_mapping_ops *dma_ops; 9struct dma_map_ops *dma_ops;
10#endif 10#endif
11#ifdef CONFIG_DMAR 11#ifdef CONFIG_DMAR
12 void *iommu; /* hook for IOMMU specific extension */ 12 void *iommu; /* hook for IOMMU specific extension */
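
Only the type of the per-device hook changes here; the override mechanism is untouched. A sketch of how an IOMMU driver would use it on x86-64; my_iommu_dma_ops and my_iommu_attach are hypothetical names, not part of this patch:

    static struct dma_map_ops my_iommu_dma_ops;     /* populated elsewhere */

    static void my_iommu_attach(struct device *dev)
    {
            /* get_dma_ops(dev) prefers this over the global dma_ops */
            dev->archdata.dma_ops = &my_iommu_dma_ops;
    }
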
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 132a134d12f2..9c78bd40ebec 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/scatterlist.h> 9#include <linux/scatterlist.h>
10#include <linux/dma-attrs.h>
10#include <asm/io.h> 11#include <asm/io.h>
11#include <asm/swiotlb.h> 12#include <asm/swiotlb.h>
12#include <asm-generic/dma-coherent.h> 13#include <asm-generic/dma-coherent.h>
@@ -16,47 +17,9 @@ extern int iommu_merge;
16extern struct device x86_dma_fallback_dev; 17extern struct device x86_dma_fallback_dev;
17extern int panic_on_overflow; 18extern int panic_on_overflow;
18 19
19struct dma_mapping_ops { 20extern struct dma_map_ops *dma_ops;
20 int (*mapping_error)(struct device *dev, 21
21 dma_addr_t dma_addr); 22static inline struct dma_map_ops *get_dma_ops(struct device *dev)
22 void* (*alloc_coherent)(struct device *dev, size_t size,
23 dma_addr_t *dma_handle, gfp_t gfp);
24 void (*free_coherent)(struct device *dev, size_t size,
25 void *vaddr, dma_addr_t dma_handle);
26 dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
27 size_t size, int direction);
28 void (*unmap_single)(struct device *dev, dma_addr_t addr,
29 size_t size, int direction);
30 void (*sync_single_for_cpu)(struct device *hwdev,
31 dma_addr_t dma_handle, size_t size,
32 int direction);
33 void (*sync_single_for_device)(struct device *hwdev,
34 dma_addr_t dma_handle, size_t size,
35 int direction);
36 void (*sync_single_range_for_cpu)(struct device *hwdev,
37 dma_addr_t dma_handle, unsigned long offset,
38 size_t size, int direction);
39 void (*sync_single_range_for_device)(struct device *hwdev,
40 dma_addr_t dma_handle, unsigned long offset,
41 size_t size, int direction);
42 void (*sync_sg_for_cpu)(struct device *hwdev,
43 struct scatterlist *sg, int nelems,
44 int direction);
45 void (*sync_sg_for_device)(struct device *hwdev,
46 struct scatterlist *sg, int nelems,
47 int direction);
48 int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
49 int nents, int direction);
50 void (*unmap_sg)(struct device *hwdev,
51 struct scatterlist *sg, int nents,
52 int direction);
53 int (*dma_supported)(struct device *hwdev, u64 mask);
54 int is_phys;
55};
56
57extern struct dma_mapping_ops *dma_ops;
58
59static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
60{ 23{
61#ifdef CONFIG_X86_32 24#ifdef CONFIG_X86_32
62 return dma_ops; 25 return dma_ops;
@@ -71,7 +34,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
71/* Make sure we keep the same behaviour */ 34/* Make sure we keep the same behaviour */
72static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 35static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
73{ 36{
74 struct dma_mapping_ops *ops = get_dma_ops(dev); 37 struct dma_map_ops *ops = get_dma_ops(dev);
75 if (ops->mapping_error) 38 if (ops->mapping_error)
76 return ops->mapping_error(dev, dma_addr); 39 return ops->mapping_error(dev, dma_addr);
77 40
@@ -90,137 +53,139 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
90 53
91static inline dma_addr_t 54static inline dma_addr_t
92dma_map_single(struct device *hwdev, void *ptr, size_t size, 55dma_map_single(struct device *hwdev, void *ptr, size_t size,
93 int direction) 56 enum dma_data_direction dir)
94{ 57{
95 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 58 struct dma_map_ops *ops = get_dma_ops(hwdev);
96 59
97 BUG_ON(!valid_dma_direction(direction)); 60 BUG_ON(!valid_dma_direction(dir));
98 return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); 61 return ops->map_page(hwdev, virt_to_page(ptr),
62 (unsigned long)ptr & ~PAGE_MASK, size,
63 dir, NULL);
99} 64}
100 65
101static inline void 66static inline void
102dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 67dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
103 int direction) 68 enum dma_data_direction dir)
104{ 69{
105 struct dma_mapping_ops *ops = get_dma_ops(dev); 70 struct dma_map_ops *ops = get_dma_ops(dev);
106 71
107 BUG_ON(!valid_dma_direction(direction)); 72 BUG_ON(!valid_dma_direction(dir));
108 if (ops->unmap_single) 73 if (ops->unmap_page)
109 ops->unmap_single(dev, addr, size, direction); 74 ops->unmap_page(dev, addr, size, dir, NULL);
110} 75}
111 76
112static inline int 77static inline int
113dma_map_sg(struct device *hwdev, struct scatterlist *sg, 78dma_map_sg(struct device *hwdev, struct scatterlist *sg,
114 int nents, int direction) 79 int nents, enum dma_data_direction dir)
115{ 80{
116 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 81 struct dma_map_ops *ops = get_dma_ops(hwdev);
117 82
118 BUG_ON(!valid_dma_direction(direction)); 83 BUG_ON(!valid_dma_direction(dir));
119 return ops->map_sg(hwdev, sg, nents, direction); 84 return ops->map_sg(hwdev, sg, nents, dir, NULL);
120} 85}
121 86
122static inline void 87static inline void
123dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, 88dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
124 int direction) 89 enum dma_data_direction dir)
125{ 90{
126 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 91 struct dma_map_ops *ops = get_dma_ops(hwdev);
127 92
128 BUG_ON(!valid_dma_direction(direction)); 93 BUG_ON(!valid_dma_direction(dir));
129 if (ops->unmap_sg) 94 if (ops->unmap_sg)
130 ops->unmap_sg(hwdev, sg, nents, direction); 95 ops->unmap_sg(hwdev, sg, nents, dir, NULL);
131} 96}
132 97
133static inline void 98static inline void
134dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 99dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
135 size_t size, int direction) 100 size_t size, enum dma_data_direction dir)
136{ 101{
137 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 102 struct dma_map_ops *ops = get_dma_ops(hwdev);
138 103
139 BUG_ON(!valid_dma_direction(direction)); 104 BUG_ON(!valid_dma_direction(dir));
140 if (ops->sync_single_for_cpu) 105 if (ops->sync_single_for_cpu)
141 ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); 106 ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
142 flush_write_buffers(); 107 flush_write_buffers();
143} 108}
144 109
145static inline void 110static inline void
146dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, 111dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
147 size_t size, int direction) 112 size_t size, enum dma_data_direction dir)
148{ 113{
149 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 114 struct dma_map_ops *ops = get_dma_ops(hwdev);
150 115
151 BUG_ON(!valid_dma_direction(direction)); 116 BUG_ON(!valid_dma_direction(dir));
152 if (ops->sync_single_for_device) 117 if (ops->sync_single_for_device)
153 ops->sync_single_for_device(hwdev, dma_handle, size, direction); 118 ops->sync_single_for_device(hwdev, dma_handle, size, dir);
154 flush_write_buffers(); 119 flush_write_buffers();
155} 120}
156 121
157static inline void 122static inline void
158dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 123dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
159 unsigned long offset, size_t size, int direction) 124 unsigned long offset, size_t size,
125 enum dma_data_direction dir)
160{ 126{
161 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 127 struct dma_map_ops *ops = get_dma_ops(hwdev);
162 128
163 BUG_ON(!valid_dma_direction(direction)); 129 BUG_ON(!valid_dma_direction(dir));
164 if (ops->sync_single_range_for_cpu) 130 if (ops->sync_single_range_for_cpu)
165 ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, 131 ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
166 size, direction); 132 size, dir);
167 flush_write_buffers(); 133 flush_write_buffers();
168} 134}
169 135
170static inline void 136static inline void
171dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, 137dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
172 unsigned long offset, size_t size, 138 unsigned long offset, size_t size,
173 int direction) 139 enum dma_data_direction dir)
174{ 140{
175 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 141 struct dma_map_ops *ops = get_dma_ops(hwdev);
176 142
177 BUG_ON(!valid_dma_direction(direction)); 143 BUG_ON(!valid_dma_direction(dir));
178 if (ops->sync_single_range_for_device) 144 if (ops->sync_single_range_for_device)
179 ops->sync_single_range_for_device(hwdev, dma_handle, 145 ops->sync_single_range_for_device(hwdev, dma_handle,
180 offset, size, direction); 146 offset, size, dir);
181 flush_write_buffers(); 147 flush_write_buffers();
182} 148}
183 149
184static inline void 150static inline void
185dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 151dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
186 int nelems, int direction) 152 int nelems, enum dma_data_direction dir)
187{ 153{
188 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 154 struct dma_map_ops *ops = get_dma_ops(hwdev);
189 155
190 BUG_ON(!valid_dma_direction(direction)); 156 BUG_ON(!valid_dma_direction(dir));
191 if (ops->sync_sg_for_cpu) 157 if (ops->sync_sg_for_cpu)
192 ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); 158 ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
193 flush_write_buffers(); 159 flush_write_buffers();
194} 160}
195 161
196static inline void 162static inline void
197dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 163dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
198 int nelems, int direction) 164 int nelems, enum dma_data_direction dir)
199{ 165{
200 struct dma_mapping_ops *ops = get_dma_ops(hwdev); 166 struct dma_map_ops *ops = get_dma_ops(hwdev);
201 167
202 BUG_ON(!valid_dma_direction(direction)); 168 BUG_ON(!valid_dma_direction(dir));
203 if (ops->sync_sg_for_device) 169 if (ops->sync_sg_for_device)
204 ops->sync_sg_for_device(hwdev, sg, nelems, direction); 170 ops->sync_sg_for_device(hwdev, sg, nelems, dir);
205 171
206 flush_write_buffers(); 172 flush_write_buffers();
207} 173}
208 174
209static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 175static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
210 size_t offset, size_t size, 176 size_t offset, size_t size,
211 int direction) 177 enum dma_data_direction dir)
212{ 178{
213 struct dma_mapping_ops *ops = get_dma_ops(dev); 179 struct dma_map_ops *ops = get_dma_ops(dev);
214 180
215 BUG_ON(!valid_dma_direction(direction)); 181 BUG_ON(!valid_dma_direction(dir));
216 return ops->map_single(dev, page_to_phys(page) + offset, 182 return ops->map_page(dev, page, offset, size, dir, NULL);
217 size, direction);
218} 183}
219 184
220static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, 185static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
221 size_t size, int direction) 186 size_t size, enum dma_data_direction dir)
222{ 187{
223 dma_unmap_single(dev, addr, size, direction); 188 dma_unmap_single(dev, addr, size, dir);
224} 189}
225 190
226static inline void 191static inline void
@@ -266,7 +231,7 @@ static inline void *
266dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 231dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
267 gfp_t gfp) 232 gfp_t gfp)
268{ 233{
269 struct dma_mapping_ops *ops = get_dma_ops(dev); 234 struct dma_map_ops *ops = get_dma_ops(dev);
270 void *memory; 235 void *memory;
271 236
272 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 237 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -292,7 +257,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
292static inline void dma_free_coherent(struct device *dev, size_t size, 257static inline void dma_free_coherent(struct device *dev, size_t size,
293 void *vaddr, dma_addr_t bus) 258 void *vaddr, dma_addr_t bus)
294{ 259{
295 struct dma_mapping_ops *ops = get_dma_ops(dev); 260 struct dma_map_ops *ops = get_dma_ops(dev);
296 261
297 WARN_ON(irqs_disabled()); /* for portability */ 262 WARN_ON(irqs_disabled()); /* for portability */
298 263
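
With struct dma_mapping_ops gone, every single-buffer wrapper above lowers onto ->map_page. For direct-mapped pointers the (page, offset) split is exact, which is why backends that only want a physical address can rebuild it; decomposition_is_exact below is an illustrative helper, not kernel API:

    #include <asm/page.h>
    #include <asm/io.h>

    static inline int decomposition_is_exact(void *ptr)
    {
            struct page *page = virt_to_page(ptr);
            unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;

            /* holds for any pointer in the kernel direct mapping */
            return page_to_phys(page) + offset == virt_to_phys(ptr);
    }
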
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index a6ee9e6f530f..af326a2975b5 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -3,7 +3,7 @@
3 3
4extern void pci_iommu_shutdown(void); 4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void); 5extern void no_iommu_init(void);
6extern struct dma_mapping_ops nommu_dma_ops; 6extern struct dma_map_ops nommu_dma_ops;
7extern int force_iommu, no_iommu; 7extern int force_iommu, no_iommu;
8extern int iommu_detected; 8extern int iommu_detected;
9 9
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d364df03c1d6..bb1eef62d8f0 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -109,7 +109,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
109 109
110obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o 110obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
111 111
112obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64 112obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
113 113
114### 114###
115# 64 bit specific files 115# 64 bit specific files
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 5113c080f0c4..c5962fe3796f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -22,10 +22,9 @@
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/debugfs.h> 23#include <linux/debugfs.h>
24#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <linux/dma-mapping.h>
25#include <linux/iommu-helper.h> 26#include <linux/iommu-helper.h>
26#ifdef CONFIG_IOMMU_API
27#include <linux/iommu.h> 27#include <linux/iommu.h>
28#endif
29#include <asm/proto.h> 28#include <asm/proto.h>
30#include <asm/iommu.h> 29#include <asm/iommu.h>
31#include <asm/gart.h> 30#include <asm/gart.h>
@@ -1297,8 +1296,10 @@ static void __unmap_single(struct amd_iommu *iommu,
1297/* 1296/*
1298 * The exported map_single function for dma_ops. 1297 * The exported map_single function for dma_ops.
1299 */ 1298 */
1300static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, 1299static dma_addr_t map_page(struct device *dev, struct page *page,
1301 size_t size, int dir) 1300 unsigned long offset, size_t size,
1301 enum dma_data_direction dir,
1302 struct dma_attrs *attrs)
1302{ 1303{
1303 unsigned long flags; 1304 unsigned long flags;
1304 struct amd_iommu *iommu; 1305 struct amd_iommu *iommu;
@@ -1306,6 +1307,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
1306 u16 devid; 1307 u16 devid;
1307 dma_addr_t addr; 1308 dma_addr_t addr;
1308 u64 dma_mask; 1309 u64 dma_mask;
1310 phys_addr_t paddr = page_to_phys(page) + offset;
1309 1311
1310 INC_STATS_COUNTER(cnt_map_single); 1312 INC_STATS_COUNTER(cnt_map_single);
1311 1313
@@ -1340,8 +1342,8 @@ out:
1340/* 1342/*
1341 * The exported unmap_single function for dma_ops. 1343 * The exported unmap_single function for dma_ops.
1342 */ 1344 */
1343static void unmap_single(struct device *dev, dma_addr_t dma_addr, 1345static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
1344 size_t size, int dir) 1346 enum dma_data_direction dir, struct dma_attrs *attrs)
1345{ 1347{
1346 unsigned long flags; 1348 unsigned long flags;
1347 struct amd_iommu *iommu; 1349 struct amd_iommu *iommu;
@@ -1390,7 +1392,8 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
1390 * lists). 1392 * lists).
1391 */ 1393 */
1392static int map_sg(struct device *dev, struct scatterlist *sglist, 1394static int map_sg(struct device *dev, struct scatterlist *sglist,
1393 int nelems, int dir) 1395 int nelems, enum dma_data_direction dir,
1396 struct dma_attrs *attrs)
1394{ 1397{
1395 unsigned long flags; 1398 unsigned long flags;
1396 struct amd_iommu *iommu; 1399 struct amd_iommu *iommu;
@@ -1457,7 +1460,8 @@ unmap:
1457 * lists). 1460 * lists).
1458 */ 1461 */
1459static void unmap_sg(struct device *dev, struct scatterlist *sglist, 1462static void unmap_sg(struct device *dev, struct scatterlist *sglist,
1460 int nelems, int dir) 1463 int nelems, enum dma_data_direction dir,
1464 struct dma_attrs *attrs)
1461{ 1465{
1462 unsigned long flags; 1466 unsigned long flags;
1463 struct amd_iommu *iommu; 1467 struct amd_iommu *iommu;
@@ -1644,11 +1648,11 @@ static void prealloc_protection_domains(void)
1644 } 1648 }
1645} 1649}
1646 1650
1647static struct dma_mapping_ops amd_iommu_dma_ops = { 1651static struct dma_map_ops amd_iommu_dma_ops = {
1648 .alloc_coherent = alloc_coherent, 1652 .alloc_coherent = alloc_coherent,
1649 .free_coherent = free_coherent, 1653 .free_coherent = free_coherent,
1650 .map_single = map_single, 1654 .map_page = map_page,
1651 .unmap_single = unmap_single, 1655 .unmap_page = unmap_page,
1652 .map_sg = map_sg, 1656 .map_sg = map_sg,
1653 .unmap_sg = unmap_sg, 1657 .unmap_sg = unmap_sg,
1654 .dma_supported = amd_iommu_dma_supported, 1658 .dma_supported = amd_iommu_dma_supported,
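
The AMD IOMMU conversion leaves __map_single() untouched; map_page() merely recomputes the physical address the old map_single() used to receive. Roughly the call path after this hunk, sketched with offset_in_page() standing in for the open-coded mask in dma-mapping.h:

    /* dma_map_single(dev, ptr, size, dir)
     *   -> ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
     *                    size, dir, NULL)
     *   -> map_page(): paddr = page_to_phys(page) + offset
     *   -> __map_single(..., paddr, size, dir, ...)
     */
    dma_addr_t handle = dma_map_single(dev, ptr, size, DMA_TO_DEVICE);
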
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index d28bbdc35e4e..755c21e906f3 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -380,8 +380,9 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
380 return tbl; 380 return tbl;
381} 381}
382 382
383static void calgary_unmap_sg(struct device *dev, 383static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
384 struct scatterlist *sglist, int nelems, int direction) 384 int nelems, enum dma_data_direction dir,
385 struct dma_attrs *attrs)
385{ 386{
386 struct iommu_table *tbl = find_iommu_table(dev); 387 struct iommu_table *tbl = find_iommu_table(dev);
387 struct scatterlist *s; 388 struct scatterlist *s;
@@ -404,7 +405,8 @@ static void calgary_unmap_sg(struct device *dev,
404} 405}
405 406
406static int calgary_map_sg(struct device *dev, struct scatterlist *sg, 407static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
407 int nelems, int direction) 408 int nelems, enum dma_data_direction dir,
409 struct dma_attrs *attrs)
408{ 410{
409 struct iommu_table *tbl = find_iommu_table(dev); 411 struct iommu_table *tbl = find_iommu_table(dev);
410 struct scatterlist *s; 412 struct scatterlist *s;
@@ -429,15 +431,14 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
429 s->dma_address = (entry << PAGE_SHIFT) | s->offset; 431 s->dma_address = (entry << PAGE_SHIFT) | s->offset;
430 432
431 /* insert into HW table */ 433 /* insert into HW table */
432 tce_build(tbl, entry, npages, vaddr & PAGE_MASK, 434 tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
433 direction);
434 435
435 s->dma_length = s->length; 436 s->dma_length = s->length;
436 } 437 }
437 438
438 return nelems; 439 return nelems;
439error: 440error:
440 calgary_unmap_sg(dev, sg, nelems, direction); 441 calgary_unmap_sg(dev, sg, nelems, dir, NULL);
441 for_each_sg(sg, s, nelems, i) { 442 for_each_sg(sg, s, nelems, i) {
442 sg->dma_address = bad_dma_address; 443 sg->dma_address = bad_dma_address;
443 sg->dma_length = 0; 444 sg->dma_length = 0;
@@ -445,10 +446,12 @@ error:
445 return 0; 446 return 0;
446} 447}
447 448
448static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, 449static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
449 size_t size, int direction) 450 unsigned long offset, size_t size,
451 enum dma_data_direction dir,
452 struct dma_attrs *attrs)
450{ 453{
451 void *vaddr = phys_to_virt(paddr); 454 void *vaddr = page_address(page) + offset;
452 unsigned long uaddr; 455 unsigned long uaddr;
453 unsigned int npages; 456 unsigned int npages;
454 struct iommu_table *tbl = find_iommu_table(dev); 457 struct iommu_table *tbl = find_iommu_table(dev);
@@ -456,17 +459,18 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
456 uaddr = (unsigned long)vaddr; 459 uaddr = (unsigned long)vaddr;
457 npages = iommu_num_pages(uaddr, size, PAGE_SIZE); 460 npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
458 461
459 return iommu_alloc(dev, tbl, vaddr, npages, direction); 462 return iommu_alloc(dev, tbl, vaddr, npages, dir);
460} 463}
461 464
462static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, 465static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
463 size_t size, int direction) 466 size_t size, enum dma_data_direction dir,
467 struct dma_attrs *attrs)
464{ 468{
465 struct iommu_table *tbl = find_iommu_table(dev); 469 struct iommu_table *tbl = find_iommu_table(dev);
466 unsigned int npages; 470 unsigned int npages;
467 471
468 npages = iommu_num_pages(dma_handle, size, PAGE_SIZE); 472 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
469 iommu_free(tbl, dma_handle, npages); 473 iommu_free(tbl, dma_addr, npages);
470} 474}
471 475
472static void* calgary_alloc_coherent(struct device *dev, size_t size, 476static void* calgary_alloc_coherent(struct device *dev, size_t size,
@@ -515,13 +519,13 @@ static void calgary_free_coherent(struct device *dev, size_t size,
515 free_pages((unsigned long)vaddr, get_order(size)); 519 free_pages((unsigned long)vaddr, get_order(size));
516} 520}
517 521
518static struct dma_mapping_ops calgary_dma_ops = { 522static struct dma_map_ops calgary_dma_ops = {
519 .alloc_coherent = calgary_alloc_coherent, 523 .alloc_coherent = calgary_alloc_coherent,
520 .free_coherent = calgary_free_coherent, 524 .free_coherent = calgary_free_coherent,
521 .map_single = calgary_map_single,
522 .unmap_single = calgary_unmap_single,
523 .map_sg = calgary_map_sg, 525 .map_sg = calgary_map_sg,
524 .unmap_sg = calgary_unmap_sg, 526 .unmap_sg = calgary_unmap_sg,
527 .map_page = calgary_map_page,
528 .unmap_page = calgary_unmap_page,
525}; 529};
526 530
527static inline void __iomem * busno_to_bbar(unsigned char num) 531static inline void __iomem * busno_to_bbar(unsigned char num)
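
calgary_map_page() rebuilds the old vaddr argument with page_address(), which is always legal here: x86-64 has no highmem, so every page sits in the direct map. The core of the new entry point under that assumption, as a sketch:

    void *vaddr = page_address(page) + offset;      /* never NULL on x86-64 */
    unsigned int npages = iommu_num_pages((unsigned long)vaddr, size,
                                          PAGE_SIZE);

    return iommu_alloc(dev, tbl, vaddr, npages, dir);
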
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index b25428533141..f293a8df6828 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -12,7 +12,7 @@
12 12
13static int forbid_dac __read_mostly; 13static int forbid_dac __read_mostly;
14 14
15struct dma_mapping_ops *dma_ops; 15struct dma_map_ops *dma_ops;
16EXPORT_SYMBOL(dma_ops); 16EXPORT_SYMBOL(dma_ops);
17 17
18static int iommu_sac_force __read_mostly; 18static int iommu_sac_force __read_mostly;
@@ -224,7 +224,7 @@ early_param("iommu", iommu_setup);
224 224
225int dma_supported(struct device *dev, u64 mask) 225int dma_supported(struct device *dev, u64 mask)
226{ 226{
227 struct dma_mapping_ops *ops = get_dma_ops(dev); 227 struct dma_map_ops *ops = get_dma_ops(dev);
228 228
229#ifdef CONFIG_PCI 229#ifdef CONFIG_PCI
230 if (mask > 0xffffffff && forbid_dac > 0) { 230 if (mask > 0xffffffff && forbid_dac > 0) {
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index d5768b1af080..b284b58c035c 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -255,10 +255,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
255} 255}
256 256
257/* Map a single area into the IOMMU */ 257/* Map a single area into the IOMMU */
258static dma_addr_t 258static dma_addr_t gart_map_page(struct device *dev, struct page *page,
259gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir) 259 unsigned long offset, size_t size,
260 enum dma_data_direction dir,
261 struct dma_attrs *attrs)
260{ 262{
261 unsigned long bus; 263 unsigned long bus;
264 phys_addr_t paddr = page_to_phys(page) + offset;
262 265
263 if (!dev) 266 if (!dev)
264 dev = &x86_dma_fallback_dev; 267 dev = &x86_dma_fallback_dev;
@@ -275,8 +278,9 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
275/* 278/*
276 * Free a DMA mapping. 279 * Free a DMA mapping.
277 */ 280 */
278static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, 281static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
279 size_t size, int direction) 282 size_t size, enum dma_data_direction dir,
283 struct dma_attrs *attrs)
280{ 284{
281 unsigned long iommu_page; 285 unsigned long iommu_page;
282 int npages; 286 int npages;
@@ -298,8 +302,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
298/* 302/*
299 * Wrapper for pci_unmap_single working with scatterlists. 303 * Wrapper for pci_unmap_single working with scatterlists.
300 */ 304 */
301static void 305static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
302gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 306 enum dma_data_direction dir, struct dma_attrs *attrs)
303{ 307{
304 struct scatterlist *s; 308 struct scatterlist *s;
305 int i; 309 int i;
@@ -307,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
307 for_each_sg(sg, s, nents, i) { 311 for_each_sg(sg, s, nents, i) {
308 if (!s->dma_length || !s->length) 312 if (!s->dma_length || !s->length)
309 break; 313 break;
310 gart_unmap_single(dev, s->dma_address, s->dma_length, dir); 314 gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
311 } 315 }
312} 316}
313 317
@@ -329,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
329 addr = dma_map_area(dev, addr, s->length, dir, 0); 333 addr = dma_map_area(dev, addr, s->length, dir, 0);
330 if (addr == bad_dma_address) { 334 if (addr == bad_dma_address) {
331 if (i > 0) 335 if (i > 0)
332 gart_unmap_sg(dev, sg, i, dir); 336 gart_unmap_sg(dev, sg, i, dir, NULL);
333 nents = 0; 337 nents = 0;
334 sg[0].dma_length = 0; 338 sg[0].dma_length = 0;
335 break; 339 break;
@@ -400,8 +404,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
400 * DMA map all entries in a scatterlist. 404 * DMA map all entries in a scatterlist.
401 * Merge chunks that have page aligned sizes into a continuous mapping. 405 * Merge chunks that have page aligned sizes into a continuous mapping.
402 */ 406 */
403static int 407static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
404gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 408 enum dma_data_direction dir, struct dma_attrs *attrs)
405{ 409{
406 struct scatterlist *s, *ps, *start_sg, *sgmap; 410 struct scatterlist *s, *ps, *start_sg, *sgmap;
407 int need = 0, nextneed, i, out, start; 411 int need = 0, nextneed, i, out, start;
@@ -468,7 +472,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
468 472
469error: 473error:
470 flush_gart(); 474 flush_gart();
471 gart_unmap_sg(dev, sg, out, dir); 475 gart_unmap_sg(dev, sg, out, dir, NULL);
472 476
473 /* When it was forced or merged try again in a dumb way */ 477 /* When it was forced or merged try again in a dumb way */
474 if (force_iommu || iommu_merge) { 478 if (force_iommu || iommu_merge) {
@@ -521,7 +525,7 @@ static void
521gart_free_coherent(struct device *dev, size_t size, void *vaddr, 525gart_free_coherent(struct device *dev, size_t size, void *vaddr,
522 dma_addr_t dma_addr) 526 dma_addr_t dma_addr)
523{ 527{
524 gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL); 528 gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
525 free_pages((unsigned long)vaddr, get_order(size)); 529 free_pages((unsigned long)vaddr, get_order(size));
526} 530}
527 531
@@ -707,11 +711,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
707 return -1; 711 return -1;
708} 712}
709 713
710static struct dma_mapping_ops gart_dma_ops = { 714static struct dma_map_ops gart_dma_ops = {
711 .map_single = gart_map_single,
712 .unmap_single = gart_unmap_single,
713 .map_sg = gart_map_sg, 715 .map_sg = gart_map_sg,
714 .unmap_sg = gart_unmap_sg, 716 .unmap_sg = gart_unmap_sg,
717 .map_page = gart_map_page,
718 .unmap_page = gart_unmap_page,
715 .alloc_coherent = gart_alloc_coherent, 719 .alloc_coherent = gart_alloc_coherent,
716 .free_coherent = gart_free_coherent, 720 .free_coherent = gart_free_coherent,
717}; 721};
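
gart_free_coherent() now tears its mapping down through the same gart_unmap_page() used for streaming DMA, passing DMA_BIDIRECTIONAL and NULL attrs since a coherent allocation carries neither. From a driver's perspective, sketched with placeholder names:

    dma_addr_t handle;
    void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
    /* ... device DMA ... */
    /* lands in gart_free_coherent(), i.e. gart_unmap_page(dev, handle,
     * size, DMA_BIDIRECTIONAL, NULL) plus free_pages() */
    dma_free_coherent(dev, size, buf, handle);
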
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index c70ab5a5d4c8..fe50214db876 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -25,19 +25,19 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
25 return 1; 25 return 1;
26} 26}
27 27
28static dma_addr_t 28static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
29nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, 29 unsigned long offset, size_t size,
30 int direction) 30 enum dma_data_direction dir,
31 struct dma_attrs *attrs)
31{ 32{
32 dma_addr_t bus = paddr; 33 dma_addr_t bus = page_to_phys(page) + offset;
33 WARN_ON(size == 0); 34 WARN_ON(size == 0);
34 if (!check_addr("map_single", hwdev, bus, size)) 35 if (!check_addr("map_single", dev, bus, size))
35 return bad_dma_address; 36 return bad_dma_address;
36 flush_write_buffers(); 37 flush_write_buffers();
37 return bus; 38 return bus;
38} 39}
39 40
40
41/* Map a set of buffers described by scatterlist in streaming 41/* Map a set of buffers described by scatterlist in streaming
42 * mode for DMA. This is the scatter-gather version of the 42 * mode for DMA. This is the scatter-gather version of the
43 * above pci_map_single interface. Here the scatter gather list 43 * above pci_map_single interface. Here the scatter gather list
@@ -54,7 +54,8 @@ nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
54 * the same here. 54 * the same here.
55 */ 55 */
56static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, 56static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
57 int nents, int direction) 57 int nents, enum dma_data_direction dir,
58 struct dma_attrs *attrs)
58{ 59{
59 struct scatterlist *s; 60 struct scatterlist *s;
60 int i; 61 int i;
@@ -78,11 +79,11 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
78 free_pages((unsigned long)vaddr, get_order(size)); 79 free_pages((unsigned long)vaddr, get_order(size));
79} 80}
80 81
81struct dma_mapping_ops nommu_dma_ops = { 82struct dma_map_ops nommu_dma_ops = {
82 .alloc_coherent = dma_generic_alloc_coherent, 83 .alloc_coherent = dma_generic_alloc_coherent,
83 .free_coherent = nommu_free_coherent, 84 .free_coherent = nommu_free_coherent,
84 .map_single = nommu_map_single,
85 .map_sg = nommu_map_sg, 85 .map_sg = nommu_map_sg,
86 .map_page = nommu_map_page,
86 .is_phys = 1, 87 .is_phys = 1,
87}; 88};
88 89
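
nommu_map_page() is the identity mapping plus a range check: the bus address is the physical address. Condensed, roughly what it computes (the mask test approximates what check_addr() verifies above):

    dma_addr_t bus = page_to_phys(page) + offset;

    /* reject buffers the device's dma_mask cannot reach */
    if (dev && dev->dma_mask && bus + size > *dev->dma_mask)
            return bad_dma_address;
    return bus;
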
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb.c
index d59c91747665..34f12e9996ed 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -33,18 +33,11 @@ phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
33 return baddr; 33 return baddr;
34} 34}
35 35
36int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) 36int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
37{ 37{
38 return 0; 38 return 0;
39} 39}
40 40
41static dma_addr_t
42swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
43 int direction)
44{
45 return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
46}
47
48static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 41static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
49 dma_addr_t *dma_handle, gfp_t flags) 42 dma_addr_t *dma_handle, gfp_t flags)
50{ 43{
@@ -57,20 +50,20 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
57 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); 50 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
58} 51}
59 52
60struct dma_mapping_ops swiotlb_dma_ops = { 53struct dma_map_ops swiotlb_dma_ops = {
61 .mapping_error = swiotlb_dma_mapping_error, 54 .mapping_error = swiotlb_dma_mapping_error,
62 .alloc_coherent = x86_swiotlb_alloc_coherent, 55 .alloc_coherent = x86_swiotlb_alloc_coherent,
63 .free_coherent = swiotlb_free_coherent, 56 .free_coherent = swiotlb_free_coherent,
64 .map_single = swiotlb_map_single_phys,
65 .unmap_single = swiotlb_unmap_single,
66 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 57 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
67 .sync_single_for_device = swiotlb_sync_single_for_device, 58 .sync_single_for_device = swiotlb_sync_single_for_device,
68 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 59 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
69 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 60 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
70 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 61 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
71 .sync_sg_for_device = swiotlb_sync_sg_for_device, 62 .sync_sg_for_device = swiotlb_sync_sg_for_device,
72 .map_sg = swiotlb_map_sg, 63 .map_sg = swiotlb_map_sg_attrs,
73 .unmap_sg = swiotlb_unmap_sg, 64 .unmap_sg = swiotlb_unmap_sg_attrs,
65 .map_page = swiotlb_map_page,
66 .unmap_page = swiotlb_unmap_page,
74 .dma_supported = NULL, 67 .dma_supported = NULL,
75}; 68};
76 69
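
With the rename, the x86 and ia64 tables both bind the swiotlb library's page-based entry points directly, and the swiotlb_map_single_phys() shim goes away. The library signature this depends on, as introduced by the swiotlb side of this series:

    dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs);
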