Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/dig/Makefile                         |    4
-rw-r--r--  arch/ia64/dig/dig_vtd_iommu.c                  |   59
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c               |  165
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c                |   79
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h            |  194
-rw-r--r--  arch/ia64/include/asm/machvec.h                |  102
-rw-r--r--  arch/ia64/include/asm/machvec_dig_vtd.h        |   20
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1.h          |   23
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1_swiotlb.h  |   27
-rw-r--r--  arch/ia64/include/asm/machvec_sn2.h            |   27
-rw-r--r--  arch/ia64/kernel/Makefile                      |    4
-rw-r--r--  arch/ia64/kernel/dma-mapping.c                 |   10
-rw-r--r--  arch/ia64/kernel/machvec.c                     |    8
-rw-r--r--  arch/ia64/kernel/pci-dma.c                     |   52
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c                 |   23
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                     |   99
16 files changed, 316 insertions, 580 deletions
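
The patch collapses the dozen per-operation DMA hooks in the ia64 machine
vector into a single dma_get_ops hook that returns a struct dma_map_ops
table. As a rough sketch of the resulting call path (illustrative only, not
part of the patch; example_map is a hypothetical helper, but the ops lookup
and map_page call mirror dma_map_single_attrs() in the dma-mapping.h hunk
below):

	/* Hypothetical helper: one per-device ops lookup replaces the
	 * twelve machvec indirections removed by this patch. */
	static dma_addr_t example_map(struct device *dev, void *addr, size_t size)
	{
		struct dma_map_ops *ops = platform_dma_get_ops(dev);

		return ops->map_page(dev, virt_to_page(addr),
				     (unsigned long)addr & ~PAGE_MASK, size,
				     DMA_BIDIRECTIONAL, NULL);
	}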
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd6..2f7caddf093e 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
 
 obj-y := setup.o
 ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
 else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index 1c8a079017a3..000000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flags)
-{
-	return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-	intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
-		     int dir, struct dma_attrs *attrs)
-{
-	return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-		       int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		 int dir, struct dma_attrs *attrs)
-{
-	return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-		   int nents, int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2769dbfd03bf..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,49 +13,34 @@
  */
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
-
 #include <asm/machvec.h>
 
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
 
-/* hwiommu declarations & definitions: */
-
-extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported sba_dma_supported;
-extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
-
-#define hwiommu_alloc_coherent		sba_alloc_coherent
-#define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single_attrs	sba_map_single_attrs
-#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs		sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
-#define hwiommu_dma_supported		sba_dma_supported
-#define hwiommu_dma_mapping_error	sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu		machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device	machvec_dma_sync_sg
-
-
 /*
  * Note: we need to make the determination of whether or not to use
  * the sw I/O TLB based purely on the device structure.  Anything else
  * would be unreliable or would be too intrusive.
  */
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
 {
-	return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+	return dev && dev->dma_mask &&
+		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+{
+	if (use_swiotlb(dev))
+		return &swiotlb_dma_ops;
+	return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
+
 void __init
 hwsw_init (void)
 {
@@ -71,125 +56,3 @@ hwsw_init (void)
 #endif
 	}
 }
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	else
-		return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-	if (use_swiotlb(dev))
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	else
-		hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		      struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
-	else
-		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
-	else
-		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		  int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		    int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_cpu(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_device(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
-	if (hwiommu_dma_supported(dev, mask))
-		return 1;
-	return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return hwiommu_dma_mapping_error(dev, dma_addr) ||
-	       swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
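
With hwsw_dma_get_ops() in place, the hpzx1_swiotlb machine vector no longer
needs one wrapper per operation: the swiotlb-versus-SBA decision is made once,
when the ops table is fetched. A minimal sketch of the caller side
(hwsw_example_alloc is hypothetical):

	/* On hpzx1_swiotlb kernels platform_dma_get_ops() resolves to
	 * hwsw_dma_get_ops(), which picks &swiotlb_dma_ops or &sba_dma_ops
	 * per device; every operation then comes from that one table. */
	static void *hwsw_example_alloc(struct device *dev, size_t size,
					dma_addr_t *handle)
	{
		struct dma_map_ops *ops = platform_dma_get_ops(dev);

		return ops->alloc_coherent(dev, size, handle,
					   GFP_KERNEL | GFP_DMA);
	}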
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index d98f0f4ff83f..129b62eb39e5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -36,6 +36,7 @@
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
 #include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/DMA-mapping.txt
  */
-dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+			       unsigned long poff, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
+	void *addr = page_address(page) + poff;
 	dma_addr_t iovp;
 	dma_addr_t offset;
 	u64 *pdir_start;
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
-EXPORT_SYMBOL(sba_map_single_attrs);
+
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+				       size_t size, enum dma_data_direction dir,
+				       struct dma_attrs *attrs)
+{
+	return sba_map_page(dev, virt_to_page(addr),
+			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			    int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+			   enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-EXPORT_SYMBOL(sba_unmap_single_attrs);
+
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	sba_unmap_page(dev, iova, size, dir, attrs);
+}
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
  *
  * See Documentation/DMA-mapping.txt
  */
-void *
+static void *
 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
 	struct ioc *ioc;
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_handle)
 {
 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		     int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
 
 	return filled;
 }
-EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
  * sba_unmap_sg_attrs - unmap Scatter/Gather list
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 #endif
 
 }
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 	},
 };
 
+extern struct dma_map_ops swiotlb_dma_ops;
+
 static int __init
 sba_init(void)
 {
@@ -2077,6 +2095,7 @@ sba_init(void)
 	 * a successful kdump kernel boot is to use the swiotlb.
 	 */
 	if (is_kdump_kernel()) {
+		dma_ops = &swiotlb_dma_ops;
 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 			panic("Unable to initialize software I/O TLB:"
 			      " Try machvec=dig boot option");
@@ -2092,6 +2111,7 @@ sba_init(void)
 	 * If we didn't find something sba_iommu can claim, we
 	 * need to setup the swiotlb and switch to the dig machvec.
 	 */
+	dma_ops = &swiotlb_dma_ops;
 	if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 		panic("Unable to find SBA IOMMU or initialize "
 		      "software I/O TLB: Try machvec=dig boot option");
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
 	return 1;
 }
 
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
 {
 	/* make sure it's at least 32bit capable */
 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
 }
 
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
+struct dma_map_ops sba_dma_ops = {
+	.alloc_coherent		= sba_alloc_coherent,
+	.free_coherent		= sba_free_coherent,
+	.map_page		= sba_map_page,
+	.unmap_page		= sba_unmap_page,
+	.map_sg			= sba_map_sg_attrs,
+	.unmap_sg		= sba_unmap_sg_attrs,
+	.sync_single_for_cpu	= machvec_dma_sync_single,
+	.sync_sg_for_cpu	= machvec_dma_sync_sg,
+	.sync_single_for_device	= machvec_dma_sync_single,
+	.sync_sg_for_device	= machvec_dma_sync_sg,
+	.dma_supported		= sba_dma_supported,
+	.mapping_error		= sba_dma_mapping_error,
+};
+
+void sba_dma_init(void)
+{
+	dma_ops = &sba_dma_ops;
+}
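
sba_map_single_attrs() survives only as a shim over sba_map_page(); the
conversion relies on a kernel direct-mapped address splitting losslessly into
a (page, offset) pair. A minimal illustration (recombine is a hypothetical
helper, valid only for direct-mapped addresses):

	/* page_address(virt_to_page(addr)) gives the page's kernel virtual
	 * base; adding the sub-page offset recovers addr itself. */
	static void *recombine(void *addr)
	{
		return page_address(virt_to_page(addr)) +
		       ((unsigned long)addr & ~PAGE_MASK);
	}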
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index bbab7e2b0fc9..f4d4b1850a7e 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -9,99 +9,128 @@
 #include <linux/scatterlist.h>
 #include <asm/swiotlb.h>
 
-struct dma_mapping_ops {
-	int             (*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*           (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void            (*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t      (*map_single)(struct device *hwdev, unsigned long ptr,
-				size_t size, int direction);
-	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	void            (*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void            (*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void            (*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void            (*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void            (*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void            (*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void            (*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int             (*dma_supported_op)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)	\
-	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *daddr, gfp_t gfp)
 {
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
 }
-#define dma_free_coherent	platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *caddr, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+					      void *caddr, size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, virt_to_page(caddr),
+			     (unsigned long)caddr & ~PAGE_MASK, size,
+			     dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+					   size_t size,
+					   enum dma_data_direction dir)
 {
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs	platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sgl,
+				       int nents, enum dma_data_direction dir)
 {
-	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs	platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t daddr,
+					      size_t size,
+					      enum dma_data_direction dir)
 {
-	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-				    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sgl,
+					  int nents,
+					  enum dma_data_direction dir)
 {
-	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
 {
-	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
-#define dma_mapping_error	platform_dma_mapping_error
 
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -113,7 +142,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported		platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -139,11 +172,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
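
From a driver's point of view the API shape is unchanged: the inlines above
keep the standard DMA interface while routing every call through the
per-device ops table. A typical (hypothetical) call sequence:

	/* Illustrative driver fragment; dev, buf and len are assumed. */
	dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, daddr))
		return -EIO;
	/* ... run the transfer ... */
	dma_unmap_single(dev, daddr, len, DMA_TO_DEVICE);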
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 59c17e446683..22a75fb55adb 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,23 +44,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -113,8 +96,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -147,18 +128,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
 #  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
 #  define platform_dma_init		ia64_mv.dma_init
-#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
-#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-#  define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
-#  define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
-#  define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
-#  define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
-#  define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
-#  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
-#  define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
-#  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
-#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
-#  define platform_dma_supported		ia64_mv.dma_supported
+#  define platform_dma_get_ops		ia64_mv.dma_get_ops
 #  define platform_irq_to_vector	ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
@@ -201,18 +171,7 @@ struct ia64_machine_vector {
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;
 	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 	ia64_mv_dma_init *dma_init;
-	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-	ia64_mv_dma_mapping_error *dma_mapping_error;
-	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_dma_get_ops *dma_get_ops;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -251,18 +210,7 @@ struct ia64_machine_vector {
 	platform_global_tlb_purge,		\
 	platform_tlb_migrate_finish,		\
 	platform_dma_init,			\
-	platform_dma_alloc_coherent,		\
-	platform_dma_free_coherent,		\
-	platform_dma_map_single_attrs,		\
-	platform_dma_unmap_single_attrs,	\
-	platform_dma_map_sg_attrs,		\
-	platform_dma_unmap_sg_attrs,		\
-	platform_dma_sync_single_for_cpu,	\
-	platform_dma_sync_sg_for_cpu,		\
-	platform_dma_sync_single_for_device,	\
-	platform_dma_sync_sg_for_device,	\
-	platform_dma_mapping_error,		\
-	platform_dma_supported,			\
+	platform_dma_get_ops,			\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -298,6 +246,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -328,43 +279,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event	machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init		swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent	swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+# define platform_dma_init		swiotlb_dma_init
 #endif
-#ifndef platform_dma_supported
-# define platform_dma_supported		swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops		dma_get_ops
 #endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector	__ia64_irq_to_vector
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported iommu_dma_supported;
-extern ia64_mv_dma_mapping_error vtd_dma_mapping_error;
 extern ia64_mv_dma_init pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init pci_iommu_alloc;
 #define platform_name				"dig_vtd"
 #define platform_setup				dig_setup
 #define platform_dma_init			pci_iommu_alloc
-#define platform_dma_alloc_coherent		vtd_alloc_coherent
-#define platform_dma_free_coherent		vtd_free_coherent
-#define platform_dma_map_single_attrs		vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs		vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs		vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			iommu_dma_supported
-#define platform_dma_mapping_error		vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported sba_dma_supported;
-extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
+extern ia64_mv_dma_init sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
  */
 #define platform_name				"hpzx1"
 #define platform_setup				dig_setup
-#define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		sba_alloc_coherent
-#define platform_dma_free_coherent		sba_free_coherent
-#define platform_dma_map_single_attrs		sba_map_single_attrs
-#define platform_dma_unmap_single_attrs		sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs		sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			sba_dma_supported
-#define platform_dma_mapping_error		sba_dma_mapping_error
+#define platform_dma_init			sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827b..1091ac39740c 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops hwsw_dma_get_ops;
 
 /*
  * This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
  * the macros are used directly.
  */
 #define platform_name				"hpzx1_swiotlb"
-
 #define platform_setup				dig_setup
 #define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		hwsw_alloc_coherent
-#define platform_dma_free_coherent		hwsw_free_coherent
-#define platform_dma_map_single_attrs		hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs		hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs		hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs
-#define platform_dma_supported			hwsw_dma_supported
-#define platform_dma_mapping_error		hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu		hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device	hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device		hwsw_sync_sg_for_device
+#define platform_dma_get_ops			hwsw_dma_get_ops
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index 781308ea7b88..afd029b4797e 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,18 +55,7 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sn_dma_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sn_dma_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sn_dma_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
-extern ia64_mv_dma_supported		sn_dma_supported;
+extern ia64_mv_dma_init			sn_dma_init;
 extern ia64_mv_migrate_t		sn_migrate;
 extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
@@ -110,19 +99,7 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
 #define platform_pci_legacy_read	sn_pci_legacy_read
 #define platform_pci_legacy_write	sn_pci_legacy_write
-#define platform_dma_init		machvec_noop
-#define platform_dma_alloc_coherent	sn_dma_alloc_coherent
-#define platform_dma_free_coherent	sn_dma_free_coherent
-#define platform_dma_map_single_attrs	sn_dma_map_single_attrs
-#define platform_dma_unmap_single_attrs	sn_dma_unmap_single_attrs
-#define platform_dma_map_sg_attrs	sn_dma_map_sg_attrs
-#define platform_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error	sn_dma_mapping_error
-#define platform_dma_supported		sn_dma_supported
+#define platform_dma_init		sn_dma_init
 #define platform_migrate		sn_migrate
 #define platform_kernel_launch_event	sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index c381ea954892..f2778f2c4fd9 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
 	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o	\
 	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
-	 unwind.o mca.o mca_asm.o topology.o
+	 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
 obj-$(CONFIG_IA64_GENERIC)	+= acpi-ext.o
@@ -43,9 +43,7 @@ ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
 endif
 obj-$(CONFIG_DMAR)		+= pci-dma.o
-ifeq ($(CONFIG_DMAR), y)
 obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb.o
-endif
 
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
new file mode 100644
index 000000000000..7060e13fa421
--- /dev/null
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -0,0 +1,10 @@
+#include <linux/dma-mapping.h>
+
+struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+struct dma_map_ops *dma_get_ops(struct device *dev)
+{
+	return dma_ops;
+}
+EXPORT_SYMBOL(dma_get_ops);
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 7ccb228ceedc..d41a40ef80c0 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -1,5 +1,5 @@
 #include <linux/module.h>
-
+#include <linux/dma-mapping.h>
 #include <asm/machvec.h>
 #include <asm/system.h>
 
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
 EXPORT_SYMBOL(machvec_timer_interrupt);
 
 void
-machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction dir)
 {
 	mb();
 }
 EXPORT_SYMBOL(machvec_dma_sync_single);
 
 void
-machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
+		    enum dma_data_direction dir)
 {
 	mb();
 }
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index d0ada067a4af..a647f116a155 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1;
 int force_iommu __read_mostly;
 #endif
 
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly;
-
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
@@ -44,18 +41,7 @@ struct device fallback_dev = {
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-void __init pci_iommu_alloc(void)
-{
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
-	detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -79,15 +65,12 @@ iommu_dma_init(void)
 	return;
 }
 
-struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 
-	if (ops->dma_supported_op)
-		return ops->dma_supported_op(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -116,4 +99,31 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(iommu_dma_supported);
 
+static int vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+void __init pci_iommu_alloc(void)
+{
+	dma_ops = &intel_dma_ops;
+
+	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+	dma_ops->sync_single_for_device = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+	dma_ops->dma_supported = iommu_dma_supported;
+	dma_ops->mapping_error = vtd_dma_mapping_error;
+
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
 #endif
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 16c50516dbc1..d21dea44e76f 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -13,23 +13,32 @@
 int swiotlb __read_mostly;
 EXPORT_SYMBOL(swiotlb);
 
-struct dma_mapping_ops swiotlb_dma_ops = {
-	.mapping_error = swiotlb_dma_mapping_error,
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly;
+
+struct dma_map_ops swiotlb_dma_ops = {
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
-	.map_single = swiotlb_map_single,
-	.unmap_single = swiotlb_unmap_single,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.map_sg = swiotlb_map_sg,
-	.unmap_sg = swiotlb_unmap_sg,
-	.dma_supported_op = swiotlb_dma_supported,
+	.dma_supported = swiotlb_dma_supported,
+	.mapping_error = swiotlb_dma_mapping_error,
 };
 
+void __init swiotlb_dma_init(void)
+{
+	dma_ops = &swiotlb_dma_ops;
+	swiotlb_init();
+}
+
 void __init pci_swiotlb_init(void)
 {
 	if (!iommu_detected) {
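
Boot-time selection now works by installing a table into the global dma_ops
pointer: swiotlb_dma_init() is the default platform_dma_init, while
pci_iommu_alloc() and sba_init() overwrite dma_ops when they claim the
hardware. One way to picture the outcome (uses_swiotlb_ops is a hypothetical
probe):

	/* dma_get_ops() returns whichever table was installed last during
	 * boot, so this check reflects the fallback decision. */
	static int uses_swiotlb_ops(struct device *dev)
	{
		extern struct dma_map_ops swiotlb_dma_ops;

		return platform_dma_get_ops(dev) == &swiotlb_dma_ops;
	}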
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 53ebb6484495..9c788f9cedfd 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,7 +10,7 @@
  */
 
 #include <linux/module.h>
-#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -31,7 +31,7 @@
  * this function.  Of course, SN only supports devices that have 32 or more
  * address bits when using the PMU.
  */
-int sn_dma_supported(struct device *dev, u64 mask)
+static int sn_dma_supported(struct device *dev, u64 mask)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask)
39 return 0; 39 return 0;
40 return 1; 40 return 1;
41} 41}
42EXPORT_SYMBOL(sn_dma_supported);
43 42
44/** 43/**
45 * sn_dma_set_mask - set the DMA mask 44 * sn_dma_set_mask - set the DMA mask
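
sn_dma_supported() becomes static because callers now reach it only through the ops table rather than the exported symbol. Per the comment above it, SN accepts only devices that can address 32 or more bits through the PMU; a sketch of that test (the exact threshold constant is an assumption, since the hunk does not show the condition line):

static int sketch_sn_dma_supported(struct device *dev, u64 mask)
{
        if (mask < 0x7fffffff)          /* fewer than ~32 usable address bits */
                return 0;               /* refuse: the PMU cannot reach the device */
        return 1;
}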
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
75 * queue for a SCSI controller). See Documentation/DMA-API.txt for 74 * queue for a SCSI controller). See Documentation/DMA-API.txt for
76 * more information. 75 * more information.
77 */ 76 */
78void *sn_dma_alloc_coherent(struct device *dev, size_t size, 77static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
79 dma_addr_t * dma_handle, gfp_t flags) 78 dma_addr_t * dma_handle, gfp_t flags)
80{ 79{
81 void *cpuaddr; 80 void *cpuaddr;
82 unsigned long phys_addr; 81 unsigned long phys_addr;
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
124 123
125 return cpuaddr; 124 return cpuaddr;
126} 125}
127EXPORT_SYMBOL(sn_dma_alloc_coherent);
128 126
129/** 127/**
130 * sn_pci_free_coherent - free memory associated with coherent DMAable region 128 * sn_pci_free_coherent - free memory associated with coherent DMAable region
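
The free path visible in the next hunk mirrors allocation: drop the bus-side mapping through the provider, then return the pages. A simplified sketch of the allocation half, with provider_map() as a hypothetical stand-in for the SN bus provider hook that these hunks only show in part:

static void *sketch_sn_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flags)
{
        /* page-granular, zeroed allocation; coherent buffers are long-lived */
        void *cpuaddr = (void *)__get_free_pages(flags | __GFP_ZERO,
                                                 get_order(size));
        if (!cpuaddr)
                return NULL;

        /* hand back the bus address the device will use */
        *dma_handle = provider_map(dev, virt_to_phys(cpuaddr), size);
        if (!*dma_handle) {
                free_pages((unsigned long)cpuaddr, get_order(size));
                return NULL;
        }
        return cpuaddr;
}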
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
136 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping 134 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
137 * any associated IOMMU mappings. 135 * any associated IOMMU mappings.
138 */ 136 */
139void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 137static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
140 dma_addr_t dma_handle) 138 dma_addr_t dma_handle)
141{ 139{
142 struct pci_dev *pdev = to_pci_dev(dev); 140 struct pci_dev *pdev = to_pci_dev(dev);
143 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 141 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
147 provider->dma_unmap(pdev, dma_handle, 0); 145 provider->dma_unmap(pdev, dma_handle, 0);
148 free_pages((unsigned long)cpu_addr, get_order(size)); 146 free_pages((unsigned long)cpu_addr, get_order(size));
149} 147}
150EXPORT_SYMBOL(sn_dma_free_coherent);
151 148
152/** 149/**
153 * sn_dma_map_single_attrs - map a single page for DMA 150 * sn_dma_map_single_attrs - map a single page for DMA
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
173 * TODO: simplify our interface; 170 * TODO: simplify our interface;
174 * figure out how to save dmamap handle so can use two step. 171 * figure out how to save dmamap handle so can use two step.
175 */ 172 */
176dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, 173static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
177 size_t size, int direction, 174 unsigned long offset, size_t size,
178 struct dma_attrs *attrs) 175 enum dma_data_direction dir,
176 struct dma_attrs *attrs)
179{ 177{
178 void *cpu_addr = page_address(page) + offset;
180 dma_addr_t dma_addr; 179 dma_addr_t dma_addr;
181 unsigned long phys_addr; 180 unsigned long phys_addr;
182 struct pci_dev *pdev = to_pci_dev(dev); 181 struct pci_dev *pdev = to_pci_dev(dev);
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
201 } 200 }
202 return dma_addr; 201 return dma_addr;
203} 202}
204EXPORT_SYMBOL(sn_dma_map_single_attrs);
205 203
206/** 204/**
207 * sn_dma_unmap_single_attrs - unamp a DMA mapped page 205 * sn_dma_unmap_single_attrs - unamp a DMA mapped page
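
The conversion to sn_dma_map_page() above is mechanical because ia64 has no highmem: page_address() is always valid, so the (page, offset) pair folds straight back into the linear address the old body expected. The adapter pattern in isolation, with legacy_map() standing in for the original address-based logic:

static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir)
{
        /* safe without kmap: no highmem on this architecture */
        void *cpu_addr = page_address(page) + offset;

        return legacy_map(dev, cpu_addr, size, dir);
}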
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs);
215 * by @dma_handle into the coherence domain. On SN, we're always cache 213 * by @dma_handle into the coherence domain. On SN, we're always cache
216 * coherent, so we just need to free any ATEs associated with this mapping. 214 * coherent, so we just need to free any ATEs associated with this mapping.
217 */ 215 */
218void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, 216static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
219 size_t size, int direction, 217 size_t size, enum dma_data_direction dir,
220 struct dma_attrs *attrs) 218 struct dma_attrs *attrs)
221{ 219{
222 struct pci_dev *pdev = to_pci_dev(dev); 220 struct pci_dev *pdev = to_pci_dev(dev);
223 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 221 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
224 222
225 BUG_ON(dev->bus != &pci_bus_type); 223 BUG_ON(dev->bus != &pci_bus_type);
226 224
227 provider->dma_unmap(pdev, dma_addr, direction); 225 provider->dma_unmap(pdev, dma_addr, dir);
228} 226}
229EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
230 227
231/** 228/**
232 * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist 229 * sn_dma_unmap_sg - unmap a DMA scatterlist
233 * @dev: device to unmap 230 * @dev: device to unmap
234 * @sg: scatterlist to unmap 231 * @sg: scatterlist to unmap
235 * @nhwentries: number of scatterlist entries 232 * @nhwentries: number of scatterlist entries
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
238 * 235 *
239 * Unmap a set of streaming mode DMA translations. 236 * Unmap a set of streaming mode DMA translations.
240 */ 237 */
241void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 238static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
242 int nhwentries, int direction, 239 int nhwentries, enum dma_data_direction dir,
243 struct dma_attrs *attrs) 240 struct dma_attrs *attrs)
244{ 241{
245 int i; 242 int i;
246 struct pci_dev *pdev = to_pci_dev(dev); 243 struct pci_dev *pdev = to_pci_dev(dev);
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
250 BUG_ON(dev->bus != &pci_bus_type); 247 BUG_ON(dev->bus != &pci_bus_type);
251 248
252 for_each_sg(sgl, sg, nhwentries, i) { 249 for_each_sg(sgl, sg, nhwentries, i) {
253 provider->dma_unmap(pdev, sg->dma_address, direction); 250 provider->dma_unmap(pdev, sg->dma_address, dir);
254 sg->dma_address = (dma_addr_t) NULL; 251 sg->dma_address = (dma_addr_t) NULL;
255 sg->dma_length = 0; 252 sg->dma_length = 0;
256 } 253 }
257} 254}
258EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
259 255
260/** 256/**
261 * sn_dma_map_sg_attrs - map a scatterlist for DMA 257 * sn_dma_map_sg - map a scatterlist for DMA
262 * @dev: device to map for 258 * @dev: device to map for
263 * @sg: scatterlist to map 259 * @sg: scatterlist to map
264 * @nhwentries: number of entries 260 * @nhwentries: number of entries
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
272 * 268 *
273 * Maps each entry of @sg for DMA. 269 * Maps each entry of @sg for DMA.
274 */ 270 */
275int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, 271static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
276 int nhwentries, int direction, struct dma_attrs *attrs) 272 int nhwentries, enum dma_data_direction dir,
273 struct dma_attrs *attrs)
277{ 274{
278 unsigned long phys_addr; 275 unsigned long phys_addr;
279 struct scatterlist *saved_sg = sgl, *sg; 276 struct scatterlist *saved_sg = sgl, *sg;
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
310 * Free any successfully allocated entries. 307 * Free any successfully allocated entries.
311 */ 308 */
312 if (i > 0) 309 if (i > 0)
313 sn_dma_unmap_sg_attrs(dev, saved_sg, i, 310 sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
314 direction, attrs);
315 return 0; 311 return 0;
316 } 312 }
317 313
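
The rollback above follows the DMA API contract for map_sg(): the operation is all-or-nothing, so when entry i fails, entries 0..i-1, which already hold live mappings, must be released before returning 0. The pattern in isolation, with map_one() and unmap_range() as stand-ins for the provider calls:

static int sketch_map_sg(struct device *dev, struct scatterlist *sgl,
                         int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = map_one(dev, sg, dir);
                if (!sg->dma_address) {
                        if (i > 0)      /* release entries 0..i-1 */
                                unmap_range(dev, sgl, i, dir);
                        return 0;       /* all-or-nothing failure */
                }
        }
        return nents;
}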
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
320 316
321 return nhwentries; 317 return nhwentries;
322} 318}
323EXPORT_SYMBOL(sn_dma_map_sg_attrs);
324 319
325void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 320static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
326 size_t size, int direction) 321 size_t size, enum dma_data_direction dir)
327{ 322{
328 BUG_ON(dev->bus != &pci_bus_type); 323 BUG_ON(dev->bus != &pci_bus_type);
329} 324}
330EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
331 325
332void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 326static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
333 size_t size, int direction) 327 size_t size,
328 enum dma_data_direction dir)
334{ 329{
335 BUG_ON(dev->bus != &pci_bus_type); 330 BUG_ON(dev->bus != &pci_bus_type);
336} 331}
337EXPORT_SYMBOL(sn_dma_sync_single_for_device);
338 332
339void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 333static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
340 int nelems, int direction) 334 int nelems, enum dma_data_direction dir)
341{ 335{
342 BUG_ON(dev->bus != &pci_bus_type); 336 BUG_ON(dev->bus != &pci_bus_type);
343} 337}
344EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
345 338
346void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 339static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
347 int nelems, int direction) 340 int nelems, enum dma_data_direction dir)
348{ 341{
349 BUG_ON(dev->bus != &pci_bus_type); 342 BUG_ON(dev->bus != &pci_bus_type);
350} 343}
351EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
352 344
353int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 345static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
354{ 346{
355 return 0; 347 return 0;
356} 348}
357EXPORT_SYMBOL(sn_dma_mapping_error);
358 349
359char *sn_pci_get_legacy_mem(struct pci_bus *bus) 350char *sn_pci_get_legacy_mem(struct pci_bus *bus)
360{ 351{
@@ -465,3 +456,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
465 out: 456 out:
466 return ret; 457 return ret;
467} 458}
459
460static struct dma_map_ops sn_dma_ops = {
461 .alloc_coherent = sn_dma_alloc_coherent,
462 .free_coherent = sn_dma_free_coherent,
463 .map_page = sn_dma_map_page,
464 .unmap_page = sn_dma_unmap_page,
465 .map_sg = sn_dma_map_sg,
466 .unmap_sg = sn_dma_unmap_sg,
467 .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
468 .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
469 .sync_single_for_device = sn_dma_sync_single_for_device,
470 .sync_sg_for_device = sn_dma_sync_sg_for_device,
471 .mapping_error = sn_dma_mapping_error,
472 .dma_supported = sn_dma_supported,
473};
474
475void sn_dma_init(void)
476{
477 dma_ops = &sn_dma_ops;
478}