Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/dig/Makefile                          |   4
-rw-r--r--  arch/ia64/dig/dig_vtd_iommu.c                   |  59
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c                | 165
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c                 |  79
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h             | 194
-rw-r--r--  arch/ia64/include/asm/machvec.h                 | 102
-rw-r--r--  arch/ia64/include/asm/machvec_dig_vtd.h         |  20
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1.h           |  23
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1_swiotlb.h   |  27
-rw-r--r--  arch/ia64/include/asm/machvec_sn2.h             |  27
-rw-r--r--  arch/ia64/kernel/Makefile                       |   4
-rw-r--r--  arch/ia64/kernel/dma-mapping.c                  |  13
-rw-r--r--  arch/ia64/kernel/machvec.c                      |   8
-rw-r--r--  arch/ia64/kernel/palinfo.c                      |   2
-rw-r--r--  arch/ia64/kernel/pci-dma.c                      |  46
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c                  |  30
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c         |   9
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                      |  99
18 files changed, 321 insertions(+), 590 deletions(-)
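
Note: this series converts ia64 from per-operation machvec DMA hooks to the generic struct dma_map_ops indirection. Each platform installs one ops table, and every DMA primitive dispatches through platform_dma_get_ops(). A minimal caller-side sketch; example_map and buf are hypothetical, while the ops calls match the new asm/dma-mapping.h below:

    #include <linux/dma-mapping.h>

    static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
    {
            /* platform_dma_get_ops() returns sba_dma_ops, swiotlb_dma_ops,
             * intel_dma_ops or the SN2 table, depending on the machvec. */
            struct dma_map_ops *ops = platform_dma_get_ops(dev);

            return ops->map_page(dev, virt_to_page(buf),
                                 (unsigned long)buf & ~PAGE_MASK, len,
                                 DMA_TO_DEVICE, NULL);
    }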
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd6..2f7caddf093e 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
 
 obj-y := setup.o
 ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
 else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index 1c8a079017a3..000000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                   gfp_t flags)
-{
-    return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
-                  dma_addr_t dma_handle)
-{
-    intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
-                     int dir, struct dma_attrs *attrs)
-{
-    return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                       int dir, struct dma_attrs *attrs)
-{
-    intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                 int dir, struct dma_attrs *attrs)
-{
-    return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-                   int nents, int dir, struct dma_attrs *attrs)
-{
-    intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-    return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2769dbfd03bf..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,49 +13,34 @@
  */
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
-
 #include <asm/machvec.h>
 
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
 
-/* hwiommu declarations & definitions: */
-
-extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported sba_dma_supported;
-extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
-
-#define hwiommu_alloc_coherent          sba_alloc_coherent
-#define hwiommu_free_coherent           sba_free_coherent
-#define hwiommu_map_single_attrs        sba_map_single_attrs
-#define hwiommu_unmap_single_attrs      sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs            sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs          sba_unmap_sg_attrs
-#define hwiommu_dma_supported           sba_dma_supported
-#define hwiommu_dma_mapping_error       sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu     machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu         machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device  machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device      machvec_dma_sync_sg
-
-
 /*
  * Note: we need to make the determination of whether or not to use
  * the sw I/O TLB based purely on the device structure.  Anything else
  * would be unreliable or would be too intrusive.
  */
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
 {
-    return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+    return dev && dev->dma_mask &&
+        !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+{
+    if (use_swiotlb(dev))
+        return &swiotlb_dma_ops;
+    return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
+
 void __init
 hwsw_init (void)
 {
@@ -71,125 +56,3 @@ hwsw_init (void)
 #endif
     }
 }
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
-    if (use_swiotlb(dev))
-        return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-    else
-        return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-    if (use_swiotlb(dev))
-        swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-    else
-        hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-                      struct dma_attrs *attrs)
-{
-    if (use_swiotlb(dev))
-        return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
-    else
-        return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                        int dir, struct dma_attrs *attrs)
-{
-    if (use_swiotlb(dev))
-        return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
-    else
-        return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                  int dir, struct dma_attrs *attrs)
-{
-    if (use_swiotlb(dev))
-        return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
-    else
-        return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                    int dir, struct dma_attrs *attrs)
-{
-    if (use_swiotlb(dev))
-        return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-    else
-        return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-    if (use_swiotlb(dev))
-        swiotlb_sync_single_for_cpu(dev, addr, size, dir);
-    else
-        hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-    if (use_swiotlb(dev))
-        swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
-    else
-        hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-    if (use_swiotlb(dev))
-        swiotlb_sync_single_for_device(dev, addr, size, dir);
-    else
-        hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-    if (use_swiotlb(dev))
-        swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
-    else
-        hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
-    if (hwiommu_dma_supported(dev, mask))
-        return 1;
-    return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-    return hwiommu_dma_mapping_error(dev, dma_addr) ||
-        swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
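
Note: the hwsw machvec previously wrapped every DMA entry point in its own use_swiotlb() branch; after this change the branch happens once, in hwsw_dma_get_ops(), and callers dispatch through whichever table comes back. A sketch under that assumption; example_rw is hypothetical, while hwsw_dma_get_ops() and the map_sg signature are as in the diff:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int example_rw(struct device *dev, struct scatterlist *sgl, int nents)
    {
            /* One dispatch point replaces ~130 lines of hwsw_* wrappers. */
            struct dma_map_ops *ops = hwsw_dma_get_ops(dev);

            return ops->map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL, NULL);
    }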
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 6d5e6c5630e3..56ceb68eb99d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -36,6 +36,7 @@
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
 #include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/delay.h>        /* ia64_get_itc() */
 #include <asm/io.h>
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-                     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+                               unsigned long poff, size_t size,
+                               enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
 {
     struct ioc *ioc;
+    void *addr = page_address(page) + poff;
     dma_addr_t iovp;
     dma_addr_t offset;
     u64 *pdir_start;
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 #endif
     return SBA_IOVA(ioc, iovp, offset);
 }
-EXPORT_SYMBOL(sba_map_single_attrs);
+
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+                                       size_t size, enum dma_data_direction dir,
+                                       struct dma_attrs *attrs)
+{
+    return sba_map_page(dev, virt_to_page(addr),
+                        (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-                            int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+                           enum dma_data_direction dir, struct dma_attrs *attrs)
 {
     struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
     spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-EXPORT_SYMBOL(sba_unmap_single_attrs);
+
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+                            enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+    sba_unmap_page(dev, iova, size, dir, attrs);
+}
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void *
+static void *
 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
     struct ioc *ioc;
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+                               dma_addr_t dma_handle)
 {
     sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
     free_pages((unsigned long) vaddr, get_order(size));
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-                     int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+                            int nents, enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
 {
     struct ioc *ioc;
     int coalesced, filled = 0;
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
 
     return filled;
 }
-EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
  * sba_unmap_sg_attrs - unmap Scatter/Gather list
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-                        int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+                               int nents, enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
     struct ioc *ioc;
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 #endif
 
 }
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
     },
 };
 
+extern struct dma_map_ops swiotlb_dma_ops;
+
 static int __init
 sba_init(void)
 {
@@ -2077,6 +2095,7 @@ sba_init(void)
      * a successful kdump kernel boot is to use the swiotlb.
      */
     if (is_kdump_kernel()) {
+        dma_ops = &swiotlb_dma_ops;
         if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
             panic("Unable to initialize software I/O TLB:"
                   " Try machvec=dig boot option");
@@ -2092,6 +2111,7 @@ sba_init(void)
      * If we didn't find something sba_iommu can claim, we
      * need to setup the swiotlb and switch to the dig machvec.
      */
+    dma_ops = &swiotlb_dma_ops;
     if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
         panic("Unable to find SBA IOMMU or initialize "
               "software I/O TLB: Try machvec=dig boot option");
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
     return 1;
 }
 
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
 {
     /* make sure it's at least 32bit capable */
     return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
 }
 
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
+struct dma_map_ops sba_dma_ops = {
+    .alloc_coherent         = sba_alloc_coherent,
+    .free_coherent          = sba_free_coherent,
+    .map_page               = sba_map_page,
+    .unmap_page             = sba_unmap_page,
+    .map_sg                 = sba_map_sg_attrs,
+    .unmap_sg               = sba_unmap_sg_attrs,
+    .sync_single_for_cpu    = machvec_dma_sync_single,
+    .sync_sg_for_cpu        = machvec_dma_sync_sg,
+    .sync_single_for_device = machvec_dma_sync_single,
+    .sync_sg_for_device     = machvec_dma_sync_sg,
+    .dma_supported          = sba_dma_supported,
+    .mapping_error          = sba_dma_mapping_error,
+};
+
+void sba_dma_init(void)
+{
+    dma_ops = &sba_dma_ops;
+}
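
Note: sba_map_page() becomes the primitive here because struct dma_map_ops is page-based; the old virtual-address entry point survives as a thin wrapper. The decomposition it relies on, sketched with a hypothetical kernel-virtual buffer buf:

    /* Given some kernel-virtual buffer address buf: */
    struct page *pg = virt_to_page(buf);                   /* containing page */
    unsigned long poff = (unsigned long)buf & ~PAGE_MASK;  /* intra-page offset */
    /* sba_map_page(dev, pg, poff, size, dir, attrs) recovers the same
     * address internally as page_address(pg) + poff, so the two entry
     * points stay equivalent. */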
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d927585..36c0009dbece 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-struct dma_mapping_ops {
-    int         (*mapping_error)(struct device *dev,
-                                 dma_addr_t dma_addr);
-    void*       (*alloc_coherent)(struct device *dev, size_t size,
-                                  dma_addr_t *dma_handle, gfp_t gfp);
-    void        (*free_coherent)(struct device *dev, size_t size,
-                                 void *vaddr, dma_addr_t dma_handle);
-    dma_addr_t  (*map_single)(struct device *hwdev, unsigned long ptr,
-                              size_t size, int direction);
-    void        (*unmap_single)(struct device *dev, dma_addr_t addr,
-                                size_t size, int direction);
-    void        (*sync_single_for_cpu)(struct device *hwdev,
-                                       dma_addr_t dma_handle, size_t size,
-                                       int direction);
-    void        (*sync_single_for_device)(struct device *hwdev,
-                                          dma_addr_t dma_handle, size_t size,
-                                          int direction);
-    void        (*sync_single_range_for_cpu)(struct device *hwdev,
-                                             dma_addr_t dma_handle, unsigned long offset,
-                                             size_t size, int direction);
-    void        (*sync_single_range_for_device)(struct device *hwdev,
-                                                dma_addr_t dma_handle, unsigned long offset,
-                                                size_t size, int direction);
-    void        (*sync_sg_for_cpu)(struct device *hwdev,
-                                   struct scatterlist *sg, int nelems,
-                                   int direction);
-    void        (*sync_sg_for_device)(struct device *hwdev,
-                                      struct scatterlist *sg, int nelems,
-                                      int direction);
-    int         (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-                          int nents, int direction);
-    void        (*unmap_sg)(struct device *hwdev,
-                            struct scatterlist *sg, int nents,
-                            int direction);
-    int         (*dma_supported_op)(struct device *hwdev, u64 mask);
-    int         is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)    \
-    platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+                                    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+                                enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+                                       dma_addr_t *daddr, gfp_t gfp)
 {
-    return dma_alloc_coherent(dev, size, dma_handle, flag);
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    return ops->alloc_coherent(dev, size, daddr, gfp);
 }
-#define dma_free_coherent    platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-                     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                                     void *caddr, dma_addr_t daddr)
+{
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+                                              void *caddr, size_t size,
+                                              enum dma_data_direction dir,
+                                              struct dma_attrs *attrs)
+{
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    return ops->map_page(dev, virt_to_page(caddr),
+                         (unsigned long)caddr & ~PAGE_MASK, size,
+                         dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+                                          size_t size,
+                                          enum dma_data_direction dir,
+                                          struct dma_attrs *attrs)
+{
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                                   int nents, enum dma_data_direction dir,
+                                   struct dma_attrs *attrs)
+{
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+                                      struct scatterlist *sgl, int nents,
+                                      enum dma_data_direction dir,
+                                      struct dma_attrs *attrs)
+{
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+                                           size_t size,
+                                           enum dma_data_direction dir)
 {
-    dma_free_coherent(dev, size, cpu_addr, dma_handle);
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs    platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-                                        size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+                                       struct scatterlist *sgl,
+                                       int nents, enum dma_data_direction dir)
 {
-    return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs    platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-                             int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+                                              dma_addr_t daddr,
+                                              size_t size,
+                                              enum dma_data_direction dir)
 {
-    return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs    platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-                                    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+                                          struct scatterlist *sgl,
+                                          int nents,
+                                          enum dma_data_direction dir)
 {
-    return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs    platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-                                int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                      size_t offset, size_t size,
+                                      enum dma_data_direction dir)
 {
-    return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu    platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu        platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device     platform_dma_sync_sg_for_device
-#define dma_mapping_error          platform_dma_mapping_error
 
-#define dma_map_page(dev, pg, off, size, dir)                \
-    dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)             \
-    dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+                                  size_t size, enum dma_data_direction dir)
+{
+    dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
     dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported        platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
+    return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)    (1)    /* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-    return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
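
Note: the header's mapping direction inverts. Before, dma_map_page() was a macro over dma_map_single(); now dma_map_single_attrs() is an inline over ops->map_page(), matching the generic dma_map_ops contract. A before/after sketch; both expansions restate lines from the diff:

    /* old: page form derived from the virtual-address form */
    dma_map_page(dev, pg, off, size, dir)
        => dma_map_single(dev, page_address(pg) + (off), (size), (dir))

    /* new: virtual-address form derived from the page form */
    dma_map_single_attrs(dev, caddr, size, dir, attrs)
        => ops->map_page(dev, virt_to_page(caddr),
                         (unsigned long)caddr & ~PAGE_MASK, size, dir, attrs)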
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b2121707..367d299d9938 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_global_tlb_purge    ia64_mv.global_tlb_purge
 #  define platform_tlb_migrate_finish    ia64_mv.tlb_migrate_finish
 #  define platform_dma_init        ia64_mv.dma_init
-#  define platform_dma_alloc_coherent    ia64_mv.dma_alloc_coherent
-#  define platform_dma_free_coherent    ia64_mv.dma_free_coherent
-#  define platform_dma_map_single_attrs    ia64_mv.dma_map_single_attrs
-#  define platform_dma_unmap_single_attrs    ia64_mv.dma_unmap_single_attrs
-#  define platform_dma_map_sg_attrs    ia64_mv.dma_map_sg_attrs
-#  define platform_dma_unmap_sg_attrs    ia64_mv.dma_unmap_sg_attrs
-#  define platform_dma_sync_single_for_cpu    ia64_mv.dma_sync_single_for_cpu
-#  define platform_dma_sync_sg_for_cpu    ia64_mv.dma_sync_sg_for_cpu
-#  define platform_dma_sync_single_for_device    ia64_mv.dma_sync_single_for_device
-#  define platform_dma_sync_sg_for_device    ia64_mv.dma_sync_sg_for_device
-#  define platform_dma_mapping_error    ia64_mv.dma_mapping_error
-#  define platform_dma_supported    ia64_mv.dma_supported
 #  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+#  define platform_dma_get_ops        ia64_mv.dma_get_ops
 #  define platform_irq_to_vector    ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq    ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem    ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
     ia64_mv_global_tlb_purge_t *global_tlb_purge;
     ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
     ia64_mv_dma_init *dma_init;
-    ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-    ia64_mv_dma_free_coherent *dma_free_coherent;
-    ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-    ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-    ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-    ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-    ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-    ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-    ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-    ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-    ia64_mv_dma_mapping_error *dma_mapping_error;
-    ia64_mv_dma_supported *dma_supported;
     ia64_mv_dma_get_required_mask *dma_get_required_mask;
+    ia64_mv_dma_get_ops *dma_get_ops;
     ia64_mv_irq_to_vector *irq_to_vector;
     ia64_mv_local_vector_to_irq *local_vector_to_irq;
     ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
     platform_global_tlb_purge,        \
     platform_tlb_migrate_finish,        \
     platform_dma_init,            \
-    platform_dma_alloc_coherent,        \
-    platform_dma_free_coherent,        \
-    platform_dma_map_single_attrs,        \
-    platform_dma_unmap_single_attrs,    \
-    platform_dma_map_sg_attrs,        \
-    platform_dma_unmap_sg_attrs,        \
-    platform_dma_sync_single_for_cpu,    \
-    platform_dma_sync_sg_for_cpu,        \
-    platform_dma_sync_single_for_device,    \
-    platform_dma_sync_sg_for_device,    \
-    platform_dma_mapping_error,        \
-    platform_dma_supported,            \
     platform_dma_get_required_mask,        \
+    platform_dma_get_ops,            \
     platform_irq_to_vector,            \
     platform_local_vector_to_irq,        \
     platform_pci_get_legacy_mem,        \
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 #  error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event    machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init        swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent    swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent    swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs    swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs    swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs    swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs    swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu    swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu    swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device    swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device    swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error        swiotlb_dma_mapping_error
+# define platform_dma_init        swiotlb_dma_init
 #endif
-#ifndef platform_dma_supported
-# define platform_dma_supported    swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops        dma_get_ops
 #endif
 #ifndef platform_dma_get_required_mask
 # define platform_dma_get_required_mask    ia64_dma_get_required_mask
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported iommu_dma_supported;
-extern ia64_mv_dma_mapping_error vtd_dma_mapping_error;
 extern ia64_mv_dma_init pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init pci_iommu_alloc;
 #define platform_name                "dig_vtd"
 #define platform_setup                dig_setup
 #define platform_dma_init            pci_iommu_alloc
-#define platform_dma_alloc_coherent        vtd_alloc_coherent
-#define platform_dma_free_coherent        vtd_free_coherent
-#define platform_dma_map_single_attrs        vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs        vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs        vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs        vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu    machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu        machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device    machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device        machvec_dma_sync_sg
-#define platform_dma_supported            iommu_dma_supported
-#define platform_dma_mapping_error        vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported sba_dma_supported;
-extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
+extern ia64_mv_dma_init sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
  */
 #define platform_name                "hpzx1"
 #define platform_setup                dig_setup
-#define platform_dma_init            machvec_noop
-#define platform_dma_alloc_coherent        sba_alloc_coherent
-#define platform_dma_free_coherent        sba_free_coherent
-#define platform_dma_map_single_attrs        sba_map_single_attrs
-#define platform_dma_unmap_single_attrs        sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs        sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs        sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu    machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu        machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device    machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device        machvec_dma_sync_sg
-#define platform_dma_supported            sba_dma_supported
-#define platform_dma_mapping_error        sba_dma_mapping_error
+#define platform_dma_init            sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827b..1091ac39740c 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops hwsw_dma_get_ops;
 
 /*
  * This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
  * the macros are used directly.
  */
 #define platform_name                "hpzx1_swiotlb"
-
 #define platform_setup                dig_setup
 #define platform_dma_init            machvec_noop
-#define platform_dma_alloc_coherent        hwsw_alloc_coherent
-#define platform_dma_free_coherent        hwsw_free_coherent
-#define platform_dma_map_single_attrs        hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs        hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs        hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs        hwsw_unmap_sg_attrs
-#define platform_dma_supported            hwsw_dma_supported
-#define platform_dma_mapping_error        hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu    hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu        hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device    hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device        hwsw_sync_sg_for_device
+#define platform_dma_get_ops            hwsw_dma_get_ops
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index f1a6e0d6dfa5..f061a30aac42 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent    sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent    sn_dma_free_coherent;
-extern ia64_mv_dma_map_single_attrs    sn_dma_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs    sn_dma_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs        sn_dma_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs    sn_dma_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu    sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu    sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device    sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error    sn_dma_mapping_error;
-extern ia64_mv_dma_supported        sn_dma_supported;
 extern ia64_mv_dma_get_required_mask    sn_dma_get_required_mask;
+extern ia64_mv_dma_init            sn_dma_init;
 extern ia64_mv_migrate_t        sn_migrate;
 extern ia64_mv_kernel_launch_event_t    sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t        sn_setup_msi_irq;
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem    sn_pci_get_legacy_mem
 #define platform_pci_legacy_read    sn_pci_legacy_read
 #define platform_pci_legacy_write    sn_pci_legacy_write
-#define platform_dma_init        machvec_noop
-#define platform_dma_alloc_coherent    sn_dma_alloc_coherent
-#define platform_dma_free_coherent    sn_dma_free_coherent
-#define platform_dma_map_single_attrs    sn_dma_map_single_attrs
-#define platform_dma_unmap_single_attrs    sn_dma_unmap_single_attrs
-#define platform_dma_map_sg_attrs    sn_dma_map_sg_attrs
-#define platform_dma_unmap_sg_attrs    sn_dma_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu    sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device    sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error    sn_dma_mapping_error
-#define platform_dma_supported        sn_dma_supported
 #define platform_dma_get_required_mask    sn_dma_get_required_mask
+#define platform_dma_init        sn_dma_init
 #define platform_migrate        sn_migrate
 #define platform_kernel_launch_event    sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index c381ea954892..f2778f2c4fd9 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
     irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
     salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
-    unwind.o mca.o mca_asm.o topology.o
+    unwind.o mca.o mca_asm.o topology.o dma-mapping.o
 
 obj-$(CONFIG_IA64_BRL_EMU)    += brl_emu.o
 obj-$(CONFIG_IA64_GENERIC)    += acpi-ext.o
@@ -43,9 +43,7 @@ ifneq ($(CONFIG_IA64_ESI),)
 obj-y                += esi_stub.o    # must be in kernel proper
 endif
 obj-$(CONFIG_DMAR)        += pci-dma.o
-ifeq ($(CONFIG_DMAR), y)
 obj-$(CONFIG_SWIOTLB)        += pci-swiotlb.o
-endif
 
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
new file mode 100644
index 000000000000..086a2aeb0404
--- /dev/null
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -0,0 +1,13 @@
+#include <linux/dma-mapping.h>
+
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly;
+
+struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+struct dma_map_ops *dma_get_ops(struct device *dev)
+{
+    return dma_ops;
+}
+EXPORT_SYMBOL(dma_get_ops);
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 7ccb228ceedc..d41a40ef80c0 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -1,5 +1,5 @@
 #include <linux/module.h>
-
+#include <linux/dma-mapping.h>
 #include <asm/machvec.h>
 #include <asm/system.h>
 
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
 EXPORT_SYMBOL(machvec_timer_interrupt);
 
 void
-machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
+            enum dma_data_direction dir)
 {
     mb();
 }
 EXPORT_SYMBOL(machvec_dma_sync_single);
 
 void
-machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
+            enum dma_data_direction dir)
 {
     mb();
 }
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index e5c57f413ca2..a4f19c70aadd 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -1002,8 +1002,6 @@ create_palinfo_proc_entries(unsigned int cpu)
         *pdir = create_proc_read_entry(
                 palinfo_entries[j].name, 0, cpu_dir,
                 palinfo_read_entry, (void *)f.value);
-        if (*pdir)
-            (*pdir)->owner = THIS_MODULE;
         pdir++;
     }
 }
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index d0ada067a4af..e4cb443bb988 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1;
 int force_iommu __read_mostly;
 #endif
 
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly;
-
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
@@ -44,18 +41,7 @@ struct device fallback_dev = {
     .dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-void __init pci_iommu_alloc(void)
-{
-    /*
-     * The order of these functions is important for
-     * fall-back/fail-over reasons
-     */
-    detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-    pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -79,15 +65,12 @@ iommu_dma_init(void)
     return;
 }
 
-struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-    struct dma_mapping_ops *ops = get_dma_ops(dev);
+    struct dma_map_ops *ops = platform_dma_get_ops(dev);
 
-    if (ops->dma_supported_op)
-        return ops->dma_supported_op(dev, mask);
+    if (ops->dma_supported)
+        return ops->dma_supported(dev, mask);
 
     /* Copied from i386. Doesn't make much sense, because it will
        only work for pci_alloc_coherent.
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(iommu_dma_supported);
 
+void __init pci_iommu_alloc(void)
+{
+    dma_ops = &intel_dma_ops;
+
+    dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+    dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+    dma_ops->sync_single_for_device = machvec_dma_sync_single;
+    dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+    dma_ops->dma_supported = iommu_dma_supported;
+
+    /*
+     * The order of these functions is important for
+     * fall-back/fail-over reasons
+     */
+    detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+    pci_swiotlb_init();
+#endif
+}
+
 #endif
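
Note: pci_iommu_alloc() now installs intel_dma_ops and patches it in place. ia64 needs only a memory barrier on sync, so the four sync hooks point at machvec_dma_sync_single()/machvec_dma_sync_sg() (each just mb()), and dma_supported is replaced with the ia64-aware iommu_dma_supported(). The design choice worth noting, restated from the diff:

    /* The shared table is patched, not copied, so the override is
     * global to every user of intel_dma_ops. */
    dma_ops = &intel_dma_ops;
    dma_ops->dma_supported = iommu_dma_supported;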
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 16c50516dbc1..573f02c39a00 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -13,23 +13,37 @@
13int swiotlb __read_mostly; 13int swiotlb __read_mostly;
14EXPORT_SYMBOL(swiotlb); 14EXPORT_SYMBOL(swiotlb);
15 15
16struct dma_mapping_ops swiotlb_dma_ops = { 16static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
17 .mapping_error = swiotlb_dma_mapping_error, 17 dma_addr_t *dma_handle, gfp_t gfp)
18 .alloc_coherent = swiotlb_alloc_coherent, 18{
19 if (dev->coherent_dma_mask != DMA_64BIT_MASK)
20 gfp |= GFP_DMA;
21 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
22}
23
24struct dma_map_ops swiotlb_dma_ops = {
25 .alloc_coherent = ia64_swiotlb_alloc_coherent,
19 .free_coherent = swiotlb_free_coherent, 26 .free_coherent = swiotlb_free_coherent,
20 .map_single = swiotlb_map_single, 27 .map_page = swiotlb_map_page,
21 .unmap_single = swiotlb_unmap_single, 28 .unmap_page = swiotlb_unmap_page,
29 .map_sg = swiotlb_map_sg_attrs,
30 .unmap_sg = swiotlb_unmap_sg_attrs,
22 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 31 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
23 .sync_single_for_device = swiotlb_sync_single_for_device, 32 .sync_single_for_device = swiotlb_sync_single_for_device,
24 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 33 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
25 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 34 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
26 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 35 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
27 .sync_sg_for_device = swiotlb_sync_sg_for_device, 36 .sync_sg_for_device = swiotlb_sync_sg_for_device,
28 .map_sg = swiotlb_map_sg, 37 .dma_supported = swiotlb_dma_supported,
29 .unmap_sg = swiotlb_unmap_sg, 38 .mapping_error = swiotlb_dma_mapping_error,
30 .dma_supported_op = swiotlb_dma_supported,
31}; 39};
32 40
41void __init swiotlb_dma_init(void)
42{
43 dma_ops = &swiotlb_dma_ops;
44 swiotlb_init();
45}
46
33void __init pci_swiotlb_init(void) 47void __init pci_swiotlb_init(void)
34{ 48{
35 if (!iommu_detected) { 49 if (!iommu_detected) {
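
Two things change in swiotlb_dma_ops above: the coherent allocator gains a GFP_DMA fallback for devices whose coherent_dma_mask is narrower than 64 bits, and the single-buffer entry points move to the page-based map_page/unmap_page hooks. A hedged sketch of how the old map_single semantics can be expressed through the new hook (map_single_via_page() is illustrative only, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <asm/page.h>

/* Hedged sketch: old map_single(dev, vaddr, size, dir) behaviour
 * rebuilt on the page-based hook this patch switches to. */
static dma_addr_t map_single_via_page(struct device *dev, void *vaddr,
				      size_t size,
				      enum dma_data_direction dir)
{
	return swiotlb_map_page(dev, virt_to_page(vaddr),
				offset_in_page(vaddr), size, dir, NULL);
}
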
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
index 4dcce3d0e04c..e63328818643 100644
--- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -225,7 +225,6 @@ static struct proc_dir_entry *sgi_prominfo_entry;
225int __init prominfo_init(void) 225int __init prominfo_init(void)
226{ 226{
227 struct proc_dir_entry **entp; 227 struct proc_dir_entry **entp;
228 struct proc_dir_entry *p;
229 cnodeid_t cnodeid; 228 cnodeid_t cnodeid;
230 unsigned long nasid; 229 unsigned long nasid;
231 int size; 230 int size;
@@ -246,14 +245,10 @@ int __init prominfo_init(void)
246 sprintf(name, "node%d", cnodeid); 245 sprintf(name, "node%d", cnodeid);
247 *entp = proc_mkdir(name, sgi_prominfo_entry); 246 *entp = proc_mkdir(name, sgi_prominfo_entry);
248 nasid = cnodeid_to_nasid(cnodeid); 247 nasid = cnodeid_to_nasid(cnodeid);
249 p = create_proc_read_entry("fit", 0, *entp, read_fit_entry, 248 create_proc_read_entry("fit", 0, *entp, read_fit_entry,
250 (void *)nasid); 249 (void *)nasid);
251 if (p) 250 create_proc_read_entry("version", 0, *entp,
252 p->owner = THIS_MODULE;
253 p = create_proc_read_entry("version", 0, *entp,
254 read_version_entry, (void *)nasid); 251 read_version_entry, (void *)nasid);
255 if (p)
256 p->owner = THIS_MODULE;
257 entp++; 252 entp++;
258 } 253 }
259 254
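
The prominfo hunk drops the p->owner = THIS_MODULE assignments: proc entries no longer carry a module owner this way, so built-in code can ignore the return value of create_proc_read_entry(). A hedged sketch of the resulting pattern (read_example() and the "example" entry are hypothetical):

#include <linux/kernel.h>
#include <linux/proc_fs.h>

/* Hedged sketch: create a read-only proc entry without keeping the
 * returned entry just to set ->owner.  read_example() is a
 * hypothetical read_proc_t callback. */
static int read_example(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	*eof = 1;
	return sprintf(page, "nasid %lu\n", (unsigned long)data);
}

static void __init example_proc_create(struct proc_dir_entry *parent,
				       unsigned long nasid)
{
	create_proc_read_entry("example", 0, parent, read_example,
			       (void *)nasid);
}
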
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 863f5017baae..8c130e8f00e1 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/dma-attrs.h> 13#include <linux/dma-mapping.h>
14#include <asm/dma.h> 14#include <asm/dma.h>
15#include <asm/sn/intr.h> 15#include <asm/sn/intr.h>
16#include <asm/sn/pcibus_provider_defs.h> 16#include <asm/sn/pcibus_provider_defs.h>
@@ -31,7 +31,7 @@
31 * this function. Of course, SN only supports devices that have 32 or more 31 * this function. Of course, SN only supports devices that have 32 or more
32 * address bits when using the PMU. 32 * address bits when using the PMU.
33 */ 33 */
34int sn_dma_supported(struct device *dev, u64 mask) 34static int sn_dma_supported(struct device *dev, u64 mask)
35{ 35{
36 BUG_ON(dev->bus != &pci_bus_type); 36 BUG_ON(dev->bus != &pci_bus_type);
37 37
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask)
39 return 0; 39 return 0;
40 return 1; 40 return 1;
41} 41}
42EXPORT_SYMBOL(sn_dma_supported);
43 42
44/** 43/**
45 * sn_dma_set_mask - set the DMA mask 44 * sn_dma_set_mask - set the DMA mask
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
75 * queue for a SCSI controller). See Documentation/DMA-API.txt for 74 * queue for a SCSI controller). See Documentation/DMA-API.txt for
76 * more information. 75 * more information.
77 */ 76 */
78void *sn_dma_alloc_coherent(struct device *dev, size_t size, 77static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
79 dma_addr_t * dma_handle, gfp_t flags) 78 dma_addr_t * dma_handle, gfp_t flags)
80{ 79{
81 void *cpuaddr; 80 void *cpuaddr;
82 unsigned long phys_addr; 81 unsigned long phys_addr;
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
124 123
125 return cpuaddr; 124 return cpuaddr;
126} 125}
127EXPORT_SYMBOL(sn_dma_alloc_coherent);
128 126
129/** 127/**
130 * sn_pci_free_coherent - free memory associated with coherent DMAable region 128 * sn_pci_free_coherent - free memory associated with coherent DMAable region
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
136 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping 134 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
137 * any associated IOMMU mappings. 135 * any associated IOMMU mappings.
138 */ 136 */
139void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 137static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
140 dma_addr_t dma_handle) 138 dma_addr_t dma_handle)
141{ 139{
142 struct pci_dev *pdev = to_pci_dev(dev); 140 struct pci_dev *pdev = to_pci_dev(dev);
143 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 141 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
147 provider->dma_unmap(pdev, dma_handle, 0); 145 provider->dma_unmap(pdev, dma_handle, 0);
148 free_pages((unsigned long)cpu_addr, get_order(size)); 146 free_pages((unsigned long)cpu_addr, get_order(size));
149} 147}
150EXPORT_SYMBOL(sn_dma_free_coherent);
151 148
152/** 149/**
153 * sn_dma_map_single_attrs - map a single page for DMA 150 * sn_dma_map_single_attrs - map a single page for DMA
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
173 * TODO: simplify our interface; 170 * TODO: simplify our interface;
174 * figure out how to save dmamap handle so can use two step. 171 * figure out how to save dmamap handle so can use two step.
175 */ 172 */
176dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, 173static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
177 size_t size, int direction, 174 unsigned long offset, size_t size,
178 struct dma_attrs *attrs) 175 enum dma_data_direction dir,
176 struct dma_attrs *attrs)
179{ 177{
178 void *cpu_addr = page_address(page) + offset;
180 dma_addr_t dma_addr; 179 dma_addr_t dma_addr;
181 unsigned long phys_addr; 180 unsigned long phys_addr;
182 struct pci_dev *pdev = to_pci_dev(dev); 181 struct pci_dev *pdev = to_pci_dev(dev);
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
201 } 200 }
202 return dma_addr; 201 return dma_addr;
203} 202}
204EXPORT_SYMBOL(sn_dma_map_single_attrs);
205 203
206/** 204/**
207 * sn_dma_unmap_single_attrs - unmap a DMA mapped page 205 * sn_dma_unmap_single_attrs - unmap a DMA mapped page
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs);
215 * by @dma_handle into the coherence domain. On SN, we're always cache 213 * by @dma_handle into the coherence domain. On SN, we're always cache
216 * coherent, so we just need to free any ATEs associated with this mapping. 214 * coherent, so we just need to free any ATEs associated with this mapping.
217 */ 215 */
218void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, 216static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
219 size_t size, int direction, 217 size_t size, enum dma_data_direction dir,
220 struct dma_attrs *attrs) 218 struct dma_attrs *attrs)
221{ 219{
222 struct pci_dev *pdev = to_pci_dev(dev); 220 struct pci_dev *pdev = to_pci_dev(dev);
223 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 221 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
224 222
225 BUG_ON(dev->bus != &pci_bus_type); 223 BUG_ON(dev->bus != &pci_bus_type);
226 224
227 provider->dma_unmap(pdev, dma_addr, direction); 225 provider->dma_unmap(pdev, dma_addr, dir);
228} 226}
229EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
230 227
231/** 228/**
232 * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist 229 * sn_dma_unmap_sg - unmap a DMA scatterlist
233 * @dev: device to unmap 230 * @dev: device to unmap
234 * @sg: scatterlist to unmap 231 * @sg: scatterlist to unmap
235 * @nhwentries: number of scatterlist entries 232 * @nhwentries: number of scatterlist entries
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
238 * 235 *
239 * Unmap a set of streaming mode DMA translations. 236 * Unmap a set of streaming mode DMA translations.
240 */ 237 */
241void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 238static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
242 int nhwentries, int direction, 239 int nhwentries, enum dma_data_direction dir,
243 struct dma_attrs *attrs) 240 struct dma_attrs *attrs)
244{ 241{
245 int i; 242 int i;
246 struct pci_dev *pdev = to_pci_dev(dev); 243 struct pci_dev *pdev = to_pci_dev(dev);
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
250 BUG_ON(dev->bus != &pci_bus_type); 247 BUG_ON(dev->bus != &pci_bus_type);
251 248
252 for_each_sg(sgl, sg, nhwentries, i) { 249 for_each_sg(sgl, sg, nhwentries, i) {
253 provider->dma_unmap(pdev, sg->dma_address, direction); 250 provider->dma_unmap(pdev, sg->dma_address, dir);
254 sg->dma_address = (dma_addr_t) NULL; 251 sg->dma_address = (dma_addr_t) NULL;
255 sg->dma_length = 0; 252 sg->dma_length = 0;
256 } 253 }
257} 254}
258EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
259 255
260/** 256/**
261 * sn_dma_map_sg_attrs - map a scatterlist for DMA 257 * sn_dma_map_sg - map a scatterlist for DMA
262 * @dev: device to map for 258 * @dev: device to map for
263 * @sg: scatterlist to map 259 * @sg: scatterlist to map
264 * @nhwentries: number of entries 260 * @nhwentries: number of entries
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
272 * 268 *
273 * Maps each entry of @sg for DMA. 269 * Maps each entry of @sg for DMA.
274 */ 270 */
275int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, 271static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
276 int nhwentries, int direction, struct dma_attrs *attrs) 272 int nhwentries, enum dma_data_direction dir,
273 struct dma_attrs *attrs)
277{ 274{
278 unsigned long phys_addr; 275 unsigned long phys_addr;
279 struct scatterlist *saved_sg = sgl, *sg; 276 struct scatterlist *saved_sg = sgl, *sg;
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
310 * Free any successfully allocated entries. 307 * Free any successfully allocated entries.
311 */ 308 */
312 if (i > 0) 309 if (i > 0)
313 sn_dma_unmap_sg_attrs(dev, saved_sg, i, 310 sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
314 direction, attrs);
315 return 0; 311 return 0;
316 } 312 }
317 313
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
320 316
321 return nhwentries; 317 return nhwentries;
322} 318}
323EXPORT_SYMBOL(sn_dma_map_sg_attrs);
324 319
325void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 320static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
326 size_t size, int direction) 321 size_t size, enum dma_data_direction dir)
327{ 322{
328 BUG_ON(dev->bus != &pci_bus_type); 323 BUG_ON(dev->bus != &pci_bus_type);
329} 324}
330EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
331 325
332void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 326static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
333 size_t size, int direction) 327 size_t size,
328 enum dma_data_direction dir)
334{ 329{
335 BUG_ON(dev->bus != &pci_bus_type); 330 BUG_ON(dev->bus != &pci_bus_type);
336} 331}
337EXPORT_SYMBOL(sn_dma_sync_single_for_device);
338 332
339void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 333static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
340 int nelems, int direction) 334 int nelems, enum dma_data_direction dir)
341{ 335{
342 BUG_ON(dev->bus != &pci_bus_type); 336 BUG_ON(dev->bus != &pci_bus_type);
343} 337}
344EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
345 338
346void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 339static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
347 int nelems, int direction) 340 int nelems, enum dma_data_direction dir)
348{ 341{
349 BUG_ON(dev->bus != &pci_bus_type); 342 BUG_ON(dev->bus != &pci_bus_type);
350} 343}
351EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
352 344
353int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 345static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
354{ 346{
355 return 0; 347 return 0;
356} 348}
357EXPORT_SYMBOL(sn_dma_mapping_error);
358 349
359u64 sn_dma_get_required_mask(struct device *dev) 350u64 sn_dma_get_required_mask(struct device *dev)
360{ 351{
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
471 out: 462 out:
472 return ret; 463 return ret;
473} 464}
465
466static struct dma_map_ops sn_dma_ops = {
467 .alloc_coherent = sn_dma_alloc_coherent,
468 .free_coherent = sn_dma_free_coherent,
469 .map_page = sn_dma_map_page,
470 .unmap_page = sn_dma_unmap_page,
471 .map_sg = sn_dma_map_sg,
472 .unmap_sg = sn_dma_unmap_sg,
473 .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
474 .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
475 .sync_single_for_device = sn_dma_sync_single_for_device,
476 .sync_sg_for_device = sn_dma_sync_sg_for_device,
477 .mapping_error = sn_dma_mapping_error,
478 .dma_supported = sn_dma_supported,
479};
480
481void sn_dma_init(void)
482{
483 dma_ops = &sn_dma_ops;
484}
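
With every sn_dma_* helper made static, the SN2 DMA interface is published only through the sn_dma_ops table, and sn_dma_init() installs it as the global dma_ops. A hedged sketch of a caller checking mapping errors portably against this table (example_map_and_check() is hypothetical; on SN2 the mapping_error hook always reports success):

#include <linux/errno.h>
#include <linux/dma-mapping.h>

/* Hedged sketch: portable error check against the ops table above.
 * example_map_and_check() is hypothetical. */
static int example_map_and_check(struct device *dev, struct page *page,
				 size_t size, dma_addr_t *out)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	dma_addr_t addr = ops->map_page(dev, page, 0, size,
					DMA_TO_DEVICE, NULL);

	if (ops->mapping_error && ops->mapping_error(dev, addr))
		return -EIO;	/* never taken on SN2: mapping_error is 0 */
	*out = addr;
	return 0;
}
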