| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 16:41:00 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 16:41:00 -0400 |
| commit | 712b0006bf3a9ed0b14a56c3291975e582127766 (patch) | |
| tree | aff33e947673137ae21734321e1f036600297223 | /arch |
| parent | e1c502482853f84606928f5a2f2eb6da1993cda1 (diff) | |
| parent | b0d44c0dbbd52effb731b1c0af9afd56215c48de (diff) | |
Merge branch 'iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (60 commits)
dma-debug: make memory range checks more consistent
dma-debug: warn of unmapping an invalid dma address
dma-debug: fix dma_debug_add_bus() definition for !CONFIG_DMA_API_DEBUG
dma-debug/x86: register pci bus for dma-debug leak detection
dma-debug: add a check for dma memory leaks
dma-debug: add checks for kernel text and rodata
dma-debug: print stacktrace of mapping path on unmap error
dma-debug: Documentation update
dma-debug: x86 architecture bindings
dma-debug: add function to dump dma mappings
dma-debug: add checks for sync_single_sg_*
dma-debug: add checks for sync_single_range_*
dma-debug: add checks for sync_single_*
dma-debug: add checking for [alloc|free]_coherent
dma-debug: add checking for map/unmap_sg
dma-debug: add checking for map/unmap_page/single
dma-debug: add core checking functions
dma-debug: add debugfs interface
dma-debug: add kernel command line parameters
dma-debug: add initialization code
...
Fix trivial conflicts due to whitespace changes in arch/x86/kernel/pci-nommu.c
Diffstat (limited to 'arch')
28 files changed, 502 insertions, 747 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 550dab22daa1..830c16a2b801 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -106,3 +106,5 @@ config HAVE_CLK
 	  The <linux/clk.h> calls support software clock gating and
 	  thus are a key power management tool on many systems.
 
+config HAVE_DMA_API_DEBUG
+	bool
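The new `HAVE_DMA_API_DEBUG` symbol is a prompt-less bool: an architecture opts in by selecting it and then handing dma-debug a pool of tracking entries once its DMA code is up (the series also wires up `dma_debug=off` and `dma_debug_entries=<n>` command line parameters). A minimal sketch of such an arch binding follows; the constant name, pool size, and initcall placement are illustrative, not taken from this merge:

```c
/* Hedged sketch of an arch-side dma-debug binding; the constant name,
 * pool size, and initcall level are illustrative assumptions. */
#include <linux/dma-debug.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	32768	/* example pool size */

static int __init example_dma_debug_setup(void)
{
	/* preallocate the entries dma-debug uses to track live mappings */
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(example_dma_debug_setup);
```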
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd6..2f7caddf093e 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
 
 obj-y := setup.o
 ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
 else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index 1c8a079017a3..000000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flags)
-{
-	return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-	intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
-		     int dir, struct dma_attrs *attrs)
-{
-	return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-		       int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		 int dir, struct dma_attrs *attrs)
-{
-	return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-		   int nents, int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
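With `struct dma_map_ops` in place, the `vtd_*` shim file above has no reason to exist: every wrapper just forwarded to an `intel_*` entry point, so the VT-d driver can expose an ops table and platform init can install it directly. A hedged sketch of the replacement pattern; `intel_dma_ops` is assumed to be the ops table provided by the intel-iommu driver, and the init function name is illustrative:

```c
/* Hedged sketch: install the IOMMU's ops table instead of keeping
 * one forwarding function per DMA operation.  intel_dma_ops is an
 * assumption about the intel-iommu driver's exported table. */
#include <linux/dma-mapping.h>

extern struct dma_map_ops intel_dma_ops;

void __init example_vtd_dma_init(void)
{
	dma_ops = &intel_dma_ops;	/* one assignment replaces the shim file */
}
```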
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2769dbfd03bf..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,49 +13,34 @@
  */
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
-
 #include <asm/machvec.h>
 
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
 
-/* hwiommu declarations & definitions: */
-
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
-
-#define hwiommu_alloc_coherent		sba_alloc_coherent
-#define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single_attrs	sba_map_single_attrs
-#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs		sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
-#define hwiommu_dma_supported		sba_dma_supported
-#define hwiommu_dma_mapping_error	sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu		machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device	machvec_dma_sync_sg
-
-
 /*
  * Note: we need to make the determination of whether or not to use
  * the sw I/O TLB based purely on the device structure.  Anything else
  * would be unreliable or would be too intrusive.
  */
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
 {
-	return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+	return dev && dev->dma_mask &&
+		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+{
+	if (use_swiotlb(dev))
+		return &swiotlb_dma_ops;
+	return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
+
 void __init
 hwsw_init (void)
 {
@@ -71,125 +56,3 @@ hwsw_init (void)
 #endif
 	}
 }
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	else
-		return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-	if (use_swiotlb(dev))
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	else
-		hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		      struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
-	else
-		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
-	else
-		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		  int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		    int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_cpu(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_device(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
-	if (hwiommu_dma_supported(dev, mask))
-		return 1;
-	return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return hwiommu_dma_mapping_error(dev, dma_addr) ||
-	       swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
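`hwsw_dma_get_ops()` collapses the dozen `hwsw_*` forwarding functions above into one per-device decision: resolve the ops table once, then call through it. A hedged sketch of the resulting call path; the wrapper function and buffer names are illustrative:

```c
/* Hedged sketch of the dispatch that replaces the hwsw_* wrappers;
 * example_map_buffer is illustrative, not code from this merge. */
#include <linux/dma-mapping.h>

static dma_addr_t example_map_buffer(struct device *dev, void *buf, size_t len)
{
	struct dma_map_ops *ops = hwsw_dma_get_ops(dev); /* swiotlb or SBA */

	/* map_page is the single mapping primitive in the new ops table */
	return ops->map_page(dev, virt_to_page(buf),
			     (unsigned long)buf & ~PAGE_MASK, len,
			     DMA_TO_DEVICE, NULL);
}
```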
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 6d5e6c5630e3..56ceb68eb99d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -36,6 +36,7 @@
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
 #include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+			       unsigned long poff, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
+	void *addr = page_address(page) + poff;
 	dma_addr_t iovp;
 	dma_addr_t offset;
 	u64 *pdir_start;
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
-EXPORT_SYMBOL(sba_map_single_attrs);
+
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+				       size_t size, enum dma_data_direction dir,
+				       struct dma_attrs *attrs)
+{
+	return sba_map_page(dev, virt_to_page(addr),
+			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			    int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+			   enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-EXPORT_SYMBOL(sba_unmap_single_attrs);
+
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	sba_unmap_page(dev, iova, size, dir, attrs);
+}
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void *
+static void *
 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
 	struct ioc *ioc;
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_handle)
 {
 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		     int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
 
 	return filled;
 }
-EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
  * sba_unmap_sg_attrs - unmap Scatter/Gather list
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 #endif
 
 }
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 	},
 };
 
+extern struct dma_map_ops swiotlb_dma_ops;
+
 static int __init
 sba_init(void)
 {
@@ -2077,6 +2095,7 @@ sba_init(void)
 	 * a successful kdump kernel boot is to use the swiotlb.
 	 */
 	if (is_kdump_kernel()) {
+		dma_ops = &swiotlb_dma_ops;
 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 			panic("Unable to initialize software I/O TLB:"
 			      " Try machvec=dig boot option");
@@ -2092,6 +2111,7 @@ sba_init(void)
 	 * If we didn't find something sba_iommu can claim, we
 	 * need to setup the swiotlb and switch to the dig machvec.
 	 */
+	dma_ops = &swiotlb_dma_ops;
 	if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 		panic("Unable to find SBA IOMMU or initialize "
 		      "software I/O TLB: Try machvec=dig boot option");
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
 	return 1;
 }
 
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
 {
 	/* make sure it's at least 32bit capable */
 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
 }
 
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
+struct dma_map_ops sba_dma_ops = {
+	.alloc_coherent		= sba_alloc_coherent,
+	.free_coherent		= sba_free_coherent,
+	.map_page		= sba_map_page,
+	.unmap_page		= sba_unmap_page,
+	.map_sg			= sba_map_sg_attrs,
+	.unmap_sg		= sba_unmap_sg_attrs,
+	.sync_single_for_cpu	= machvec_dma_sync_single,
+	.sync_sg_for_cpu	= machvec_dma_sync_sg,
+	.sync_single_for_device	= machvec_dma_sync_single,
+	.sync_sg_for_device	= machvec_dma_sync_sg,
+	.dma_supported		= sba_dma_supported,
+	.mapping_error		= sba_dma_mapping_error,
+};
+
+void sba_dma_init(void)
+{
+	dma_ops = &sba_dma_ops;
+}
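Two points in the sba conversion are worth spelling out: `sba_map_page()` becomes the sole mapping primitive, with `sba_map_single_attrs()` reduced to a `virt_to_page()` decomposition on top of it, and the old per-function exports collapse into a single `sba_dma_ops` table installed by `sba_dma_init()`. A hedged sketch of that decomposition; `example_ops` and the wrapper name stand in for any ops table and are not code from this merge:

```c
/* Hedged sketch of why map_page can be the one primitive: every kernel
 * virtual address splits into a containing page plus a sub-page offset,
 * which is exactly what sba_map_single_attrs() does above. */
#include <linux/dma-mapping.h>

static dma_addr_t example_map_vaddr(struct dma_map_ops *example_ops,
				    struct device *dev, void *vaddr,
				    size_t size, enum dma_data_direction dir)
{
	struct page *pg = virt_to_page(vaddr);		/* containing page */
	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK; /* sub-page offset */

	return example_ops->map_page(dev, pg, off, size, dir, NULL);
}
```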
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d927585..36c0009dbece 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*		(*alloc_coherent)(struct device *dev, size_t size,
-					  dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-					 void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
-				      size_t size, int direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
-					size_t size, int direction);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-					       dma_addr_t dma_handle, size_t size,
-					       int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-						  dma_addr_t dma_handle, size_t size,
-						  int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-						     dma_addr_t dma_handle, unsigned long offset,
-						     size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-							dma_addr_t dma_handle, unsigned long offset,
-							size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-					   struct scatterlist *sg, int nelems,
-					   int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-					      struct scatterlist *sg, int nelems,
-					      int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				  int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				    struct scatterlist *sg, int nents,
-				    int direction);
-	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)	\
-	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *daddr, gfp_t gfp)
 {
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->alloc_coherent(dev, size, daddr, gfp);
 }
-#define dma_free_coherent	platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *caddr, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+					      void *caddr, size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, virt_to_page(caddr),
+			     (unsigned long)caddr & ~PAGE_MASK, size,
+			     dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+					   size_t size,
+					   enum dma_data_direction dir)
 {
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs	platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sgl,
+				       int nents, enum dma_data_direction dir)
 {
-	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs	platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t daddr,
+					      size_t size,
+					      enum dma_data_direction dir)
 {
-	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-				    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sgl,
+					  int nents,
+					  enum dma_data_direction dir)
 {
-	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
 {
-	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device	platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
-#define dma_mapping_error		platform_dma_mapping_error
 
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported		platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
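From a driver's point of view nothing changes: the rewritten inlines keep the standard DMA API surface and merely route each call through `platform_dma_get_ops()`. For reference, the usual driver-side pattern against this API; the function and buffer names are illustrative:

```c
/* Hedged sketch of ordinary driver usage of the inlines above;
 * example_start_tx and its arguments are illustrative. */
#include <linux/dma-mapping.h>

static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -EIO;	/* mapping failed; nothing to undo */

	/* ... hand "handle" to the hardware, and once it completes: ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
```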
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b2121707..367d299d9938 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
 #  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
 #  define platform_dma_init		ia64_mv.dma_init
-#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
-#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-#  define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
-#  define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
-#  define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
-#  define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
-#  define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
-#  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
-#  define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
-#  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
-#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
-#  define platform_dma_supported	ia64_mv.dma_supported
 #  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+#  define platform_dma_get_ops		ia64_mv.dma_get_ops
 #  define platform_irq_to_vector	ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;
 	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 	ia64_mv_dma_init *dma_init;
-	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-	ia64_mv_dma_mapping_error *dma_mapping_error;
-	ia64_mv_dma_supported *dma_supported;
 	ia64_mv_dma_get_required_mask *dma_get_required_mask;
+	ia64_mv_dma_get_ops *dma_get_ops;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
 	platform_global_tlb_purge,		\
 	platform_tlb_migrate_finish,		\
 	platform_dma_init,			\
-	platform_dma_alloc_coherent,		\
-	platform_dma_free_coherent,		\
-	platform_dma_map_single_attrs,		\
-	platform_dma_unmap_single_attrs,	\
-	platform_dma_map_sg_attrs,		\
-	platform_dma_unmap_sg_attrs,		\
-	platform_dma_sync_single_for_cpu,	\
-	platform_dma_sync_sg_for_cpu,		\
-	platform_dma_sync_single_for_device,	\
-	platform_dma_sync_sg_for_device,	\
-	platform_dma_mapping_error,		\
-	platform_dma_supported,			\
 	platform_dma_get_required_mask,		\
+	platform_dma_get_ops,			\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event	machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init		swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent	swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+# define platform_dma_init		swiotlb_dma_init
 #endif
-#ifndef platform_dma_supported
-# define platform_dma_supported		swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops		dma_get_ops
 #endif
 #ifndef platform_dma_get_required_mask
 # define platform_dma_get_required_mask	ia64_dma_get_required_mask
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent	vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent	vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs	vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported		iommu_dma_supported;
-extern ia64_mv_dma_mapping_error	vtd_dma_mapping_error;
 extern ia64_mv_dma_init			pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init			pci_iommu_alloc;
 #define platform_name				"dig_vtd"
 #define platform_setup				dig_setup
 #define platform_dma_init			pci_iommu_alloc
-#define platform_dma_alloc_coherent		vtd_alloc_coherent
-#define platform_dma_free_coherent		vtd_free_coherent
-#define platform_dma_map_single_attrs		vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs		vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs		vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			iommu_dma_supported
-#define platform_dma_mapping_error		vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
+extern ia64_mv_dma_init			sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
 */
 #define platform_name				"hpzx1"
 #define platform_setup				dig_setup
-#define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		sba_alloc_coherent
-#define platform_dma_free_coherent		sba_free_coherent
-#define platform_dma_map_single_attrs		sba_map_single_attrs
-#define platform_dma_unmap_single_attrs		sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs		sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			sba_dma_supported
-#define platform_dma_mapping_error		sba_dma_mapping_error
+#define platform_dma_init			sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827b..1091ac39740c 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t				dig_setup;
-extern ia64_mv_dma_alloc_coherent		hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent		hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs		hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs		hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs			hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported			hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error		hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu		hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu		hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device	hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device		hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops			hwsw_dma_get_ops;
 
 /*
  * This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device		hwsw_sync_sg_for_device;
  * the macros are used directly.
  */
 #define platform_name				"hpzx1_swiotlb"
-
 #define platform_setup				dig_setup
 #define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		hwsw_alloc_coherent
-#define platform_dma_free_coherent		hwsw_free_coherent
-#define platform_dma_map_single_attrs		hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs		hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs		hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs
-#define platform_dma_supported			hwsw_dma_supported
-#define platform_dma_mapping_error		hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu		hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device	hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device		hwsw_sync_sg_for_device
+#define platform_dma_get_ops			hwsw_dma_get_ops
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h index f1a6e0d6dfa5..f061a30aac42 100644 --- a/arch/ia64/include/asm/machvec_sn2.h +++ b/arch/ia64/include/asm/machvec_sn2.h | |||
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed; | |||
55 | extern ia64_mv_readw_t __sn_readw_relaxed; | 55 | extern ia64_mv_readw_t __sn_readw_relaxed; |
56 | extern ia64_mv_readl_t __sn_readl_relaxed; | 56 | extern ia64_mv_readl_t __sn_readl_relaxed; |
57 | extern ia64_mv_readq_t __sn_readq_relaxed; | 57 | extern ia64_mv_readq_t __sn_readq_relaxed; |
58 | extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; | ||
59 | extern ia64_mv_dma_free_coherent sn_dma_free_coherent; | ||
60 | extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs; | ||
61 | extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs; | ||
62 | extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs; | ||
63 | extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs; | ||
64 | extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; | ||
65 | extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; | ||
66 | extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; | ||
67 | extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; | ||
68 | extern ia64_mv_dma_mapping_error sn_dma_mapping_error; | ||
69 | extern ia64_mv_dma_supported sn_dma_supported; | ||
70 | extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; | 58 | extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; |
59 | extern ia64_mv_dma_init sn_dma_init; | ||
71 | extern ia64_mv_migrate_t sn_migrate; | 60 | extern ia64_mv_migrate_t sn_migrate; |
72 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; | 61 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; |
73 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; | 62 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; |
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; | |||
111 | #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem | 100 | #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem |
112 | #define platform_pci_legacy_read sn_pci_legacy_read | 101 | #define platform_pci_legacy_read sn_pci_legacy_read |
113 | #define platform_pci_legacy_write sn_pci_legacy_write | 102 | #define platform_pci_legacy_write sn_pci_legacy_write |
114 | #define platform_dma_init machvec_noop | ||
115 | #define platform_dma_alloc_coherent sn_dma_alloc_coherent | ||
116 | #define platform_dma_free_coherent sn_dma_free_coherent | ||
117 | #define platform_dma_map_single_attrs sn_dma_map_single_attrs | ||
118 | #define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs | ||
119 | #define platform_dma_map_sg_attrs sn_dma_map_sg_attrs | ||
120 | #define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs | ||
121 | #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu | ||
122 | #define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu | ||
123 | #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device | ||
124 | #define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device | ||
125 | #define platform_dma_mapping_error sn_dma_mapping_error | ||
126 | #define platform_dma_supported sn_dma_supported | ||
127 | #define platform_dma_get_required_mask sn_dma_get_required_mask | 103 | #define platform_dma_get_required_mask sn_dma_get_required_mask |
104 | #define platform_dma_init sn_dma_init | ||
128 | #define platform_migrate sn_migrate | 105 | #define platform_migrate sn_migrate |
129 | #define platform_kernel_launch_event sn_kernel_launch_event | 106 | #define platform_kernel_launch_event sn_kernel_launch_event |
130 | #ifdef CONFIG_PCI_MSI | 107 | #ifdef CONFIG_PCI_MSI |
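The sn2 conversion above is the template for the whole series: a dozen per-operation machvec hooks collapse into a single dma_init hook plus the required-mask hook. A minimal sketch of the resulting pattern, using hypothetical my_* names that stand in for a platform's real implementations (nothing here is taken from the patch itself):

        /* sketch with hypothetical my_* helpers: a platform now publishes one
         * init hook that installs its whole struct dma_map_ops in one shot */
        static struct dma_map_ops my_dma_ops = {
                .alloc_coherent = my_alloc_coherent,
                .free_coherent  = my_free_coherent,
                .map_page       = my_map_page,
                .unmap_page     = my_unmap_page,
        };

        void my_dma_init(void)
        {
                dma_ops = &my_dma_ops;  /* global pointer, see dma-mapping.c below */
        }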
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index c381ea954892..f2778f2c4fd9 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ | 7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ |
8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ | 8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ |
9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ | 9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ |
10 | unwind.o mca.o mca_asm.o topology.o | 10 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o |
11 | 11 | ||
12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o | 12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o |
13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o | 13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o |
@@ -43,9 +43,7 @@ ifneq ($(CONFIG_IA64_ESI),) | |||
43 | obj-y += esi_stub.o # must be in kernel proper | 43 | obj-y += esi_stub.o # must be in kernel proper |
44 | endif | 44 | endif |
45 | obj-$(CONFIG_DMAR) += pci-dma.o | 45 | obj-$(CONFIG_DMAR) += pci-dma.o |
46 | ifeq ($(CONFIG_DMAR), y) | ||
47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 46 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
48 | endif | ||
49 | 47 | ||
50 | # The gate DSO image is built using a special linker script. | 48 | # The gate DSO image is built using a special linker script. |
51 | targets += gate.so gate-syms.o | 49 | targets += gate.so gate-syms.o |
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c new file mode 100644 index 000000000000..086a2aeb0404 --- /dev/null +++ b/arch/ia64/kernel/dma-mapping.c | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/dma-mapping.h> | ||
2 | |||
3 | /* Set this to 1 if there is a HW IOMMU in the system */ | ||
4 | int iommu_detected __read_mostly; | ||
5 | |||
6 | struct dma_map_ops *dma_ops; | ||
7 | EXPORT_SYMBOL(dma_ops); | ||
8 | |||
9 | struct dma_map_ops *dma_get_ops(struct device *dev) | ||
10 | { | ||
11 | return dma_ops; | ||
12 | } | ||
13 | EXPORT_SYMBOL(dma_get_ops); | ||
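The new dma-mapping.c gives ia64 the single global dma_ops pointer (plus the iommu_detected flag, moved out of pci-dma.c below) that generic code consults through dma_get_ops(). A minimal sketch of how a wrapper is expected to fetch and use it; example_map is a hypothetical helper, not part of the patch:

        #include <linux/dma-mapping.h>

        /* sketch: generic code asks dma_get_ops() for the device's ops table;
         * on ia64 every device currently shares the one global dma_ops */
        static dma_addr_t example_map(struct device *dev, struct page *page,
                                      size_t size, enum dma_data_direction dir)
        {
                struct dma_map_ops *ops = dma_get_ops(dev);

                return ops->map_page(dev, page, 0, size, dir, NULL);
        }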
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c index 7ccb228ceedc..d41a40ef80c0 100644 --- a/arch/ia64/kernel/machvec.c +++ b/arch/ia64/kernel/machvec.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | 2 | #include <linux/dma-mapping.h> | |
3 | #include <asm/machvec.h> | 3 | #include <asm/machvec.h> |
4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
5 | 5 | ||
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id) | |||
75 | EXPORT_SYMBOL(machvec_timer_interrupt); | 75 | EXPORT_SYMBOL(machvec_timer_interrupt); |
76 | 76 | ||
77 | void | 77 | void |
78 | machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir) | 78 | machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size, |
79 | enum dma_data_direction dir) | ||
79 | { | 80 | { |
80 | mb(); | 81 | mb(); |
81 | } | 82 | } |
82 | EXPORT_SYMBOL(machvec_dma_sync_single); | 83 | EXPORT_SYMBOL(machvec_dma_sync_single); |
83 | 84 | ||
84 | void | 85 | void |
85 | machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir) | 86 | machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n, |
87 | enum dma_data_direction dir) | ||
86 | { | 88 | { |
87 | mb(); | 89 | mb(); |
88 | } | 90 | } |
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index d0ada067a4af..e4cb443bb988 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c | |||
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1; | |||
32 | int force_iommu __read_mostly; | 32 | int force_iommu __read_mostly; |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* Set this to 1 if there is a HW IOMMU in the system */ | ||
36 | int iommu_detected __read_mostly; | ||
37 | |||
38 | /* Dummy device used for NULL arguments (normally ISA). Better would | 35 | /* Dummy device used for NULL arguments (normally ISA). Better would |
39 | be probably a smaller DMA mask, but this is bug-to-bug compatible | 36 | be probably a smaller DMA mask, but this is bug-to-bug compatible |
40 | to i386. */ | 37 | to i386. */ |
@@ -44,18 +41,7 @@ struct device fallback_dev = { | |||
44 | .dma_mask = &fallback_dev.coherent_dma_mask, | 41 | .dma_mask = &fallback_dev.coherent_dma_mask, |
45 | }; | 42 | }; |
46 | 43 | ||
47 | void __init pci_iommu_alloc(void) | 44 | extern struct dma_map_ops intel_dma_ops; |
48 | { | ||
49 | /* | ||
50 | * The order of these functions is important for | ||
51 | * fall-back/fail-over reasons | ||
52 | */ | ||
53 | detect_intel_iommu(); | ||
54 | |||
55 | #ifdef CONFIG_SWIOTLB | ||
56 | pci_swiotlb_init(); | ||
57 | #endif | ||
58 | } | ||
59 | 45 | ||
60 | static int __init pci_iommu_init(void) | 46 | static int __init pci_iommu_init(void) |
61 | { | 47 | { |
@@ -79,15 +65,12 @@ iommu_dma_init(void) | |||
79 | return; | 65 | return; |
80 | } | 66 | } |
81 | 67 | ||
82 | struct dma_mapping_ops *dma_ops; | ||
83 | EXPORT_SYMBOL(dma_ops); | ||
84 | |||
85 | int iommu_dma_supported(struct device *dev, u64 mask) | 68 | int iommu_dma_supported(struct device *dev, u64 mask) |
86 | { | 69 | { |
87 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 70 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
88 | 71 | ||
89 | if (ops->dma_supported_op) | 72 | if (ops->dma_supported) |
90 | return ops->dma_supported_op(dev, mask); | 73 | return ops->dma_supported(dev, mask); |
91 | 74 | ||
92 | /* Copied from i386. Doesn't make much sense, because it will | 75 | /* Copied from i386. Doesn't make much sense, because it will |
93 | only work for pci_alloc_coherent. | 76 | only work for pci_alloc_coherent. |
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask) | |||
116 | } | 99 | } |
117 | EXPORT_SYMBOL(iommu_dma_supported); | 100 | EXPORT_SYMBOL(iommu_dma_supported); |
118 | 101 | ||
102 | void __init pci_iommu_alloc(void) | ||
103 | { | ||
104 | dma_ops = &intel_dma_ops; | ||
105 | |||
106 | dma_ops->sync_single_for_cpu = machvec_dma_sync_single; | ||
107 | dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; | ||
108 | dma_ops->sync_single_for_device = machvec_dma_sync_single; | ||
109 | dma_ops->sync_sg_for_device = machvec_dma_sync_sg; | ||
110 | dma_ops->dma_supported = iommu_dma_supported; | ||
111 | |||
112 | /* | ||
113 | * The order of these functions is important for | ||
114 | * fall-back/fail-over reasons | ||
115 | */ | ||
116 | detect_intel_iommu(); | ||
117 | |||
118 | #ifdef CONFIG_SWIOTLB | ||
119 | pci_swiotlb_init(); | ||
120 | #endif | ||
121 | } | ||
122 | |||
119 | #endif | 123 | #endif |
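pci_iommu_alloc() moves below iommu_dma_supported() so it can patch intel_dma_ops in place: the four sync hooks become the machvec barrier stubs and dma_supported becomes the wrapper above, all before detect_intel_iommu() runs. A sketch of the dispatch that results; example_sync is a hypothetical caller:

        /* sketch: after pci_iommu_alloc() above, sync calls land in the
         * machvec mb() stubs while mapping calls stay with the Intel IOMMU */
        static void example_sync(struct device *dev, dma_addr_t handle, size_t size)
        {
                struct dma_map_ops *ops = platform_dma_get_ops(dev);

                /* resolves to machvec_dma_sync_single(), i.e. a memory barrier */
                ops->sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
        }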
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 16c50516dbc1..573f02c39a00 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -13,23 +13,37 @@ | |||
13 | int swiotlb __read_mostly; | 13 | int swiotlb __read_mostly; |
14 | EXPORT_SYMBOL(swiotlb); | 14 | EXPORT_SYMBOL(swiotlb); |
15 | 15 | ||
16 | struct dma_mapping_ops swiotlb_dma_ops = { | 16 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, |
17 | .mapping_error = swiotlb_dma_mapping_error, | 17 | dma_addr_t *dma_handle, gfp_t gfp) |
18 | .alloc_coherent = swiotlb_alloc_coherent, | 18 | { |
19 | if (dev->coherent_dma_mask != DMA_64BIT_MASK) | ||
20 | gfp |= GFP_DMA; | ||
21 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); | ||
22 | } | ||
23 | |||
24 | struct dma_map_ops swiotlb_dma_ops = { | ||
25 | .alloc_coherent = ia64_swiotlb_alloc_coherent, | ||
19 | .free_coherent = swiotlb_free_coherent, | 26 | .free_coherent = swiotlb_free_coherent, |
20 | .map_single = swiotlb_map_single, | 27 | .map_page = swiotlb_map_page, |
21 | .unmap_single = swiotlb_unmap_single, | 28 | .unmap_page = swiotlb_unmap_page, |
29 | .map_sg = swiotlb_map_sg_attrs, | ||
30 | .unmap_sg = swiotlb_unmap_sg_attrs, | ||
22 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | 31 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, |
23 | .sync_single_for_device = swiotlb_sync_single_for_device, | 32 | .sync_single_for_device = swiotlb_sync_single_for_device, |
24 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 33 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
25 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 34 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
26 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 35 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
27 | .sync_sg_for_device = swiotlb_sync_sg_for_device, | 36 | .sync_sg_for_device = swiotlb_sync_sg_for_device, |
28 | .map_sg = swiotlb_map_sg, | 37 | .dma_supported = swiotlb_dma_supported, |
29 | .unmap_sg = swiotlb_unmap_sg, | 38 | .mapping_error = swiotlb_dma_mapping_error, |
30 | .dma_supported_op = swiotlb_dma_supported, | ||
31 | }; | 39 | }; |
32 | 40 | ||
41 | void __init swiotlb_dma_init(void) | ||
42 | { | ||
43 | dma_ops = &swiotlb_dma_ops; | ||
44 | swiotlb_init(); | ||
45 | } | ||
46 | |||
33 | void __init pci_swiotlb_init(void) | 47 | void __init pci_swiotlb_init(void) |
34 | { | 48 | { |
35 | if (!iommu_detected) { | 49 | if (!iommu_detected) { |
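The ia64_swiotlb_alloc_coherent() wrapper exists because a device that cannot address all 64 bits must be served coherent memory from ZONE_DMA. A sketch of the caller-visible effect, assuming a hypothetical device whose coherent mask is only 32 bits:

        /* sketch: what the wrapper above decides for two kinds of device */
        static void *example_alloc(struct device *dev, size_t size, dma_addr_t *handle)
        {
                gfp_t gfp = GFP_KERNEL;

                if (dev->coherent_dma_mask != DMA_64BIT_MASK)   /* e.g. a 32-bit mask */
                        gfp |= GFP_DMA;         /* force ZONE_DMA for limited devices */

                return swiotlb_alloc_coherent(dev, size, handle, gfp);
        }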
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 863f5017baae..8c130e8f00e1 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/dma-attrs.h> | 13 | #include <linux/dma-mapping.h> |
14 | #include <asm/dma.h> | 14 | #include <asm/dma.h> |
15 | #include <asm/sn/intr.h> | 15 | #include <asm/sn/intr.h> |
16 | #include <asm/sn/pcibus_provider_defs.h> | 16 | #include <asm/sn/pcibus_provider_defs.h> |
@@ -31,7 +31,7 @@ | |||
31 | * this function. Of course, SN only supports devices that have 32 or more | 31 | * this function. Of course, SN only supports devices that have 32 or more |
32 | * address bits when using the PMU. | 32 | * address bits when using the PMU. |
33 | */ | 33 | */ |
34 | int sn_dma_supported(struct device *dev, u64 mask) | 34 | static int sn_dma_supported(struct device *dev, u64 mask) |
35 | { | 35 | { |
36 | BUG_ON(dev->bus != &pci_bus_type); | 36 | BUG_ON(dev->bus != &pci_bus_type); |
37 | 37 | ||
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask) | |||
39 | return 0; | 39 | return 0; |
40 | return 1; | 40 | return 1; |
41 | } | 41 | } |
42 | EXPORT_SYMBOL(sn_dma_supported); | ||
43 | 42 | ||
44 | /** | 43 | /** |
45 | * sn_dma_set_mask - set the DMA mask | 44 | * sn_dma_set_mask - set the DMA mask |
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask); | |||
75 | * queue for a SCSI controller). See Documentation/DMA-API.txt for | 74 | * queue for a SCSI controller). See Documentation/DMA-API.txt for |
76 | * more information. | 75 | * more information. |
77 | */ | 76 | */ |
78 | void *sn_dma_alloc_coherent(struct device *dev, size_t size, | 77 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, |
79 | dma_addr_t * dma_handle, gfp_t flags) | 78 | dma_addr_t * dma_handle, gfp_t flags) |
80 | { | 79 | { |
81 | void *cpuaddr; | 80 | void *cpuaddr; |
82 | unsigned long phys_addr; | 81 | unsigned long phys_addr; |
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
124 | 123 | ||
125 | return cpuaddr; | 124 | return cpuaddr; |
126 | } | 125 | } |
127 | EXPORT_SYMBOL(sn_dma_alloc_coherent); | ||
128 | 126 | ||
129 | /** | 127 | /** |
130 | * sn_pci_free_coherent - free memory associated with coherent DMAable region | 128 | * sn_pci_free_coherent - free memory associated with coherent DMAable region |
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent); | |||
136 | * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping | 134 | * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping |
137 | * any associated IOMMU mappings. | 135 | * any associated IOMMU mappings. |
138 | */ | 136 | */ |
139 | void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 137 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
140 | dma_addr_t dma_handle) | 138 | dma_addr_t dma_handle) |
141 | { | 139 | { |
142 | struct pci_dev *pdev = to_pci_dev(dev); | 140 | struct pci_dev *pdev = to_pci_dev(dev); |
143 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 141 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | |||
147 | provider->dma_unmap(pdev, dma_handle, 0); | 145 | provider->dma_unmap(pdev, dma_handle, 0); |
148 | free_pages((unsigned long)cpu_addr, get_order(size)); | 146 | free_pages((unsigned long)cpu_addr, get_order(size)); |
149 | } | 147 | } |
150 | EXPORT_SYMBOL(sn_dma_free_coherent); | ||
151 | 148 | ||
152 | /** | 149 | /** |
153 | * sn_dma_map_single_attrs - map a single page for DMA | 150 | * sn_dma_map_single_attrs - map a single page for DMA |
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent); | |||
173 | * TODO: simplify our interface; | 170 | * TODO: simplify our interface; |
174 | * figure out how to save dmamap handle so can use two step. | 171 | * figure out how to save dmamap handle so can use two step. |
175 | */ | 172 | */ |
176 | dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, | 173 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, |
177 | size_t size, int direction, | 174 | unsigned long offset, size_t size, |
178 | struct dma_attrs *attrs) | 175 | enum dma_data_direction dir, |
176 | struct dma_attrs *attrs) | ||
179 | { | 177 | { |
178 | void *cpu_addr = page_address(page) + offset; | ||
180 | dma_addr_t dma_addr; | 179 | dma_addr_t dma_addr; |
181 | unsigned long phys_addr; | 180 | unsigned long phys_addr; |
182 | struct pci_dev *pdev = to_pci_dev(dev); | 181 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, | |||
201 | } | 200 | } |
202 | return dma_addr; | 201 | return dma_addr; |
203 | } | 202 | } |
204 | EXPORT_SYMBOL(sn_dma_map_single_attrs); | ||
205 | 203 | ||
206 | /** | 204 | /** |
207 | * sn_dma_unmap_single_attrs - unmap a DMA mapped page | 205 |
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs); | |||
215 | * by @dma_handle into the coherence domain. On SN, we're always cache | 213 | * by @dma_handle into the coherence domain. On SN, we're always cache |
216 | * coherent, so we just need to free any ATEs associated with this mapping. | 214 | * coherent, so we just need to free any ATEs associated with this mapping. |
217 | */ | 215 | */ |
218 | void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, | 216 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
219 | size_t size, int direction, | 217 | size_t size, enum dma_data_direction dir, |
220 | struct dma_attrs *attrs) | 218 | struct dma_attrs *attrs) |
221 | { | 219 | { |
222 | struct pci_dev *pdev = to_pci_dev(dev); | 220 | struct pci_dev *pdev = to_pci_dev(dev); |
223 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 221 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
224 | 222 | ||
225 | BUG_ON(dev->bus != &pci_bus_type); | 223 | BUG_ON(dev->bus != &pci_bus_type); |
226 | 224 | ||
227 | provider->dma_unmap(pdev, dma_addr, direction); | 225 | provider->dma_unmap(pdev, dma_addr, dir); |
228 | } | 226 | } |
229 | EXPORT_SYMBOL(sn_dma_unmap_single_attrs); | ||
230 | 227 | ||
231 | /** | 228 | /** |
232 | * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist | 229 | * sn_dma_unmap_sg - unmap a DMA scatterlist |
233 | * @dev: device to unmap | 230 | * @dev: device to unmap |
234 | * @sg: scatterlist to unmap | 231 | * @sg: scatterlist to unmap |
235 | * @nhwentries: number of scatterlist entries | 232 | * @nhwentries: number of scatterlist entries |
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs); | |||
238 | * | 235 | * |
239 | * Unmap a set of streaming mode DMA translations. | 236 | * Unmap a set of streaming mode DMA translations. |
240 | */ | 237 | */ |
241 | void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | 238 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, |
242 | int nhwentries, int direction, | 239 | int nhwentries, enum dma_data_direction dir, |
243 | struct dma_attrs *attrs) | 240 | struct dma_attrs *attrs) |
244 | { | 241 | { |
245 | int i; | 242 | int i; |
246 | struct pci_dev *pdev = to_pci_dev(dev); | 243 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
250 | BUG_ON(dev->bus != &pci_bus_type); | 247 | BUG_ON(dev->bus != &pci_bus_type); |
251 | 248 | ||
252 | for_each_sg(sgl, sg, nhwentries, i) { | 249 | for_each_sg(sgl, sg, nhwentries, i) { |
253 | provider->dma_unmap(pdev, sg->dma_address, direction); | 250 | provider->dma_unmap(pdev, sg->dma_address, dir); |
254 | sg->dma_address = (dma_addr_t) NULL; | 251 | sg->dma_address = (dma_addr_t) NULL; |
255 | sg->dma_length = 0; | 252 | sg->dma_length = 0; |
256 | } | 253 | } |
257 | } | 254 | } |
258 | EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); | ||
259 | 255 | ||
260 | /** | 256 | /** |
261 | * sn_dma_map_sg_attrs - map a scatterlist for DMA | 257 | * sn_dma_map_sg - map a scatterlist for DMA |
262 | * @dev: device to map for | 258 | * @dev: device to map for |
263 | * @sg: scatterlist to map | 259 | * @sg: scatterlist to map |
264 | * @nhwentries: number of entries | 260 | * @nhwentries: number of entries |
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); | |||
272 | * | 268 | * |
273 | * Maps each entry of @sg for DMA. | 269 | * Maps each entry of @sg for DMA. |
274 | */ | 270 | */ |
275 | int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 271 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, |
276 | int nhwentries, int direction, struct dma_attrs *attrs) | 272 | int nhwentries, enum dma_data_direction dir, |
273 | struct dma_attrs *attrs) | ||
277 | { | 274 | { |
278 | unsigned long phys_addr; | 275 | unsigned long phys_addr; |
279 | struct scatterlist *saved_sg = sgl, *sg; | 276 | struct scatterlist *saved_sg = sgl, *sg; |
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
310 | * Free any successfully allocated entries. | 307 | * Free any successfully allocated entries. |
311 | */ | 308 | */ |
312 | if (i > 0) | 309 | if (i > 0) |
313 | sn_dma_unmap_sg_attrs(dev, saved_sg, i, | 310 | sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs); |
314 | direction, attrs); | ||
315 | return 0; | 311 | return 0; |
316 | } | 312 | } |
317 | 313 | ||
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
320 | 316 | ||
321 | return nhwentries; | 317 | return nhwentries; |
322 | } | 318 | } |
323 | EXPORT_SYMBOL(sn_dma_map_sg_attrs); | ||
324 | 319 | ||
325 | void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 320 | static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
326 | size_t size, int direction) | 321 | size_t size, enum dma_data_direction dir) |
327 | { | 322 | { |
328 | BUG_ON(dev->bus != &pci_bus_type); | 323 | BUG_ON(dev->bus != &pci_bus_type); |
329 | } | 324 | } |
330 | EXPORT_SYMBOL(sn_dma_sync_single_for_cpu); | ||
331 | 325 | ||
332 | void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | 326 | static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, |
333 | size_t size, int direction) | 327 | size_t size, |
328 | enum dma_data_direction dir) | ||
334 | { | 329 | { |
335 | BUG_ON(dev->bus != &pci_bus_type); | 330 | BUG_ON(dev->bus != &pci_bus_type); |
336 | } | 331 | } |
337 | EXPORT_SYMBOL(sn_dma_sync_single_for_device); | ||
338 | 332 | ||
339 | void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 333 | static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
340 | int nelems, int direction) | 334 | int nelems, enum dma_data_direction dir) |
341 | { | 335 | { |
342 | BUG_ON(dev->bus != &pci_bus_type); | 336 | BUG_ON(dev->bus != &pci_bus_type); |
343 | } | 337 | } |
344 | EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu); | ||
345 | 338 | ||
346 | void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 339 | static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
347 | int nelems, int direction) | 340 | int nelems, enum dma_data_direction dir) |
348 | { | 341 | { |
349 | BUG_ON(dev->bus != &pci_bus_type); | 342 | BUG_ON(dev->bus != &pci_bus_type); |
350 | } | 343 | } |
351 | EXPORT_SYMBOL(sn_dma_sync_sg_for_device); | ||
352 | 344 | ||
353 | int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 345 | static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
354 | { | 346 | { |
355 | return 0; | 347 | return 0; |
356 | } | 348 | } |
357 | EXPORT_SYMBOL(sn_dma_mapping_error); | ||
358 | 349 | ||
359 | u64 sn_dma_get_required_mask(struct device *dev) | 350 | u64 sn_dma_get_required_mask(struct device *dev) |
360 | { | 351 | { |
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) | |||
471 | out: | 462 | out: |
472 | return ret; | 463 | return ret; |
473 | } | 464 | } |
465 | |||
466 | static struct dma_map_ops sn_dma_ops = { | ||
467 | .alloc_coherent = sn_dma_alloc_coherent, | ||
468 | .free_coherent = sn_dma_free_coherent, | ||
469 | .map_page = sn_dma_map_page, | ||
470 | .unmap_page = sn_dma_unmap_page, | ||
471 | .map_sg = sn_dma_map_sg, | ||
472 | .unmap_sg = sn_dma_unmap_sg, | ||
473 | .sync_single_for_cpu = sn_dma_sync_single_for_cpu, | ||
474 | .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu, | ||
475 | .sync_single_for_device = sn_dma_sync_single_for_device, | ||
476 | .sync_sg_for_device = sn_dma_sync_sg_for_device, | ||
477 | .mapping_error = sn_dma_mapping_error, | ||
478 | .dma_supported = sn_dma_supported, | ||
479 | }; | ||
480 | |||
481 | void sn_dma_init(void) | ||
482 | { | ||
483 | dma_ops = &sn_dma_ops; | ||
484 | } | ||
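With every sn_dma_* implementation now static, the SN code is reachable only through sn_dma_ops, installed by sn_dma_init() via the platform_dma_init machvec hook shown earlier. A sketch of the call path a driver now takes; example_map_buf is hypothetical:

        #include <linux/pci.h>
        #include <linux/dma-mapping.h>

        /* sketch: a driver never names sn_dma_map_page(); the generic API
         * dispatches into it through the installed ops table */
        static dma_addr_t example_map_buf(struct pci_dev *pdev, void *buf, size_t len)
        {
                /* ends up in sn_dma_map_page() via dma_ops->map_page */
                return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        }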
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 34bc3a89228b..45161b816313 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -40,6 +40,7 @@ config X86 | |||
40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 | 40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 |
41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
42 | select USER_STACKTRACE_SUPPORT | 42 | select USER_STACKTRACE_SUPPORT |
43 | select HAVE_DMA_API_DEBUG | ||
43 | select HAVE_KERNEL_GZIP | 44 | select HAVE_KERNEL_GZIP |
44 | select HAVE_KERNEL_BZIP2 | 45 | select HAVE_KERNEL_BZIP2 |
45 | select HAVE_KERNEL_LZMA | 46 | select HAVE_KERNEL_LZMA |
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 3c034f48fdb0..4994a20acbcb 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h | |||
@@ -6,7 +6,7 @@ struct dev_archdata { | |||
6 | void *acpi_handle; | 6 | void *acpi_handle; |
7 | #endif | 7 | #endif |
8 | #ifdef CONFIG_X86_64 | 8 | #ifdef CONFIG_X86_64 |
9 | struct dma_mapping_ops *dma_ops; | 9 | struct dma_map_ops *dma_ops; |
10 | #endif | 10 | #endif |
11 | #ifdef CONFIG_DMAR | 11 | #ifdef CONFIG_DMAR |
12 | void *iommu; /* hook for IOMMU specific extension */ | 12 | void *iommu; /* hook for IOMMU specific extension */ |
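dev_archdata keeps its x86-64-only dma_ops slot, merely retyped; this is what lets get_dma_ops() in the next header prefer a per-device table over the global one. A sketch of how an IOMMU driver points a device at its own ops; example_adopt is a hypothetical helper:

        /* sketch: per-device override used on x86-64; 32-bit always takes
         * the global dma_ops */
        static void example_adopt(struct device *dev, struct dma_map_ops *ops)
        {
        #ifdef CONFIG_X86_64
                dev->archdata.dma_ops = ops;    /* get_dma_ops(dev) returns this */
        #endif
        }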
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 132a134d12f2..cea7b74963e9 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -7,6 +7,8 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/scatterlist.h> | 9 | #include <linux/scatterlist.h> |
10 | #include <linux/dma-debug.h> | ||
11 | #include <linux/dma-attrs.h> | ||
10 | #include <asm/io.h> | 12 | #include <asm/io.h> |
11 | #include <asm/swiotlb.h> | 13 | #include <asm/swiotlb.h> |
12 | #include <asm-generic/dma-coherent.h> | 14 | #include <asm-generic/dma-coherent.h> |
@@ -16,47 +18,9 @@ extern int iommu_merge; | |||
16 | extern struct device x86_dma_fallback_dev; | 18 | extern struct device x86_dma_fallback_dev; |
17 | extern int panic_on_overflow; | 19 | extern int panic_on_overflow; |
18 | 20 | ||
19 | struct dma_mapping_ops { | 21 | extern struct dma_map_ops *dma_ops; |
20 | int (*mapping_error)(struct device *dev, | 22 | |
21 | dma_addr_t dma_addr); | 23 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
22 | void* (*alloc_coherent)(struct device *dev, size_t size, | ||
23 | dma_addr_t *dma_handle, gfp_t gfp); | ||
24 | void (*free_coherent)(struct device *dev, size_t size, | ||
25 | void *vaddr, dma_addr_t dma_handle); | ||
26 | dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr, | ||
27 | size_t size, int direction); | ||
28 | void (*unmap_single)(struct device *dev, dma_addr_t addr, | ||
29 | size_t size, int direction); | ||
30 | void (*sync_single_for_cpu)(struct device *hwdev, | ||
31 | dma_addr_t dma_handle, size_t size, | ||
32 | int direction); | ||
33 | void (*sync_single_for_device)(struct device *hwdev, | ||
34 | dma_addr_t dma_handle, size_t size, | ||
35 | int direction); | ||
36 | void (*sync_single_range_for_cpu)(struct device *hwdev, | ||
37 | dma_addr_t dma_handle, unsigned long offset, | ||
38 | size_t size, int direction); | ||
39 | void (*sync_single_range_for_device)(struct device *hwdev, | ||
40 | dma_addr_t dma_handle, unsigned long offset, | ||
41 | size_t size, int direction); | ||
42 | void (*sync_sg_for_cpu)(struct device *hwdev, | ||
43 | struct scatterlist *sg, int nelems, | ||
44 | int direction); | ||
45 | void (*sync_sg_for_device)(struct device *hwdev, | ||
46 | struct scatterlist *sg, int nelems, | ||
47 | int direction); | ||
48 | int (*map_sg)(struct device *hwdev, struct scatterlist *sg, | ||
49 | int nents, int direction); | ||
50 | void (*unmap_sg)(struct device *hwdev, | ||
51 | struct scatterlist *sg, int nents, | ||
52 | int direction); | ||
53 | int (*dma_supported)(struct device *hwdev, u64 mask); | ||
54 | int is_phys; | ||
55 | }; | ||
56 | |||
57 | extern struct dma_mapping_ops *dma_ops; | ||
58 | |||
59 | static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) | ||
60 | { | 24 | { |
61 | #ifdef CONFIG_X86_32 | 25 | #ifdef CONFIG_X86_32 |
62 | return dma_ops; | 26 | return dma_ops; |
@@ -71,7 +35,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) | |||
71 | /* Make sure we keep the same behaviour */ | 35 | /* Make sure we keep the same behaviour */ |
72 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 36 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
73 | { | 37 | { |
74 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 38 | struct dma_map_ops *ops = get_dma_ops(dev); |
75 | if (ops->mapping_error) | 39 | if (ops->mapping_error) |
76 | return ops->mapping_error(dev, dma_addr); | 40 | return ops->mapping_error(dev, dma_addr); |
77 | 41 | ||
@@ -90,137 +54,167 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
90 | 54 | ||
91 | static inline dma_addr_t | 55 | static inline dma_addr_t |
92 | dma_map_single(struct device *hwdev, void *ptr, size_t size, | 56 | dma_map_single(struct device *hwdev, void *ptr, size_t size, |
93 | int direction) | 57 | enum dma_data_direction dir) |
94 | { | 58 | { |
95 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 59 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
96 | 60 | dma_addr_t addr; | |
97 | BUG_ON(!valid_dma_direction(direction)); | 61 | |
98 | return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); | 62 | BUG_ON(!valid_dma_direction(dir)); |
63 | addr = ops->map_page(hwdev, virt_to_page(ptr), | ||
64 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
65 | dir, NULL); | ||
66 | debug_dma_map_page(hwdev, virt_to_page(ptr), | ||
67 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
68 | dir, addr, true); | ||
69 | return addr; | ||
99 | } | 70 | } |
100 | 71 | ||
101 | static inline void | 72 | static inline void |
102 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, | 73 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, |
103 | int direction) | 74 | enum dma_data_direction dir) |
104 | { | 75 | { |
105 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 76 | struct dma_map_ops *ops = get_dma_ops(dev); |
106 | 77 | ||
107 | BUG_ON(!valid_dma_direction(direction)); | 78 | BUG_ON(!valid_dma_direction(dir)); |
108 | if (ops->unmap_single) | 79 | if (ops->unmap_page) |
109 | ops->unmap_single(dev, addr, size, direction); | 80 | ops->unmap_page(dev, addr, size, dir, NULL); |
81 | debug_dma_unmap_page(dev, addr, size, dir, true); | ||
110 | } | 82 | } |
111 | 83 | ||
112 | static inline int | 84 | static inline int |
113 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, | 85 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, |
114 | int nents, int direction) | 86 | int nents, enum dma_data_direction dir) |
115 | { | 87 | { |
116 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 88 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
89 | int ents; | ||
90 | |||
91 | BUG_ON(!valid_dma_direction(dir)); | ||
92 | ents = ops->map_sg(hwdev, sg, nents, dir, NULL); | ||
93 | debug_dma_map_sg(hwdev, sg, nents, ents, dir); | ||
117 | 94 | ||
118 | BUG_ON(!valid_dma_direction(direction)); | 95 | return ents; |
119 | return ops->map_sg(hwdev, sg, nents, direction); | ||
120 | } | 96 | } |
121 | 97 | ||
122 | static inline void | 98 | static inline void |
123 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | 99 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, |
124 | int direction) | 100 | enum dma_data_direction dir) |
125 | { | 101 | { |
126 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 102 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
127 | 103 | ||
128 | BUG_ON(!valid_dma_direction(direction)); | 104 | BUG_ON(!valid_dma_direction(dir)); |
105 | debug_dma_unmap_sg(hwdev, sg, nents, dir); | ||
129 | if (ops->unmap_sg) | 106 | if (ops->unmap_sg) |
130 | ops->unmap_sg(hwdev, sg, nents, direction); | 107 | ops->unmap_sg(hwdev, sg, nents, dir, NULL); |
131 | } | 108 | } |
132 | 109 | ||
133 | static inline void | 110 | static inline void |
134 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 111 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
135 | size_t size, int direction) | 112 | size_t size, enum dma_data_direction dir) |
136 | { | 113 | { |
137 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 114 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
138 | 115 | ||
139 | BUG_ON(!valid_dma_direction(direction)); | 116 | BUG_ON(!valid_dma_direction(dir)); |
140 | if (ops->sync_single_for_cpu) | 117 | if (ops->sync_single_for_cpu) |
141 | ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); | 118 | ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); |
119 | debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir); | ||
142 | flush_write_buffers(); | 120 | flush_write_buffers(); |
143 | } | 121 | } |
144 | 122 | ||
145 | static inline void | 123 | static inline void |
146 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, | 124 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, |
147 | size_t size, int direction) | 125 | size_t size, enum dma_data_direction dir) |
148 | { | 126 | { |
149 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 127 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
150 | 128 | ||
151 | BUG_ON(!valid_dma_direction(direction)); | 129 | BUG_ON(!valid_dma_direction(dir)); |
152 | if (ops->sync_single_for_device) | 130 | if (ops->sync_single_for_device) |
153 | ops->sync_single_for_device(hwdev, dma_handle, size, direction); | 131 | ops->sync_single_for_device(hwdev, dma_handle, size, dir); |
132 | debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir); | ||
154 | flush_write_buffers(); | 133 | flush_write_buffers(); |
155 | } | 134 | } |
156 | 135 | ||
157 | static inline void | 136 | static inline void |
158 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 137 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
159 | unsigned long offset, size_t size, int direction) | 138 | unsigned long offset, size_t size, |
139 | enum dma_data_direction dir) | ||
160 | { | 140 | { |
161 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 141 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
162 | 142 | ||
163 | BUG_ON(!valid_dma_direction(direction)); | 143 | BUG_ON(!valid_dma_direction(dir)); |
164 | if (ops->sync_single_range_for_cpu) | 144 | if (ops->sync_single_range_for_cpu) |
165 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, | 145 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, |
166 | size, direction); | 146 | size, dir); |
147 | debug_dma_sync_single_range_for_cpu(hwdev, dma_handle, | ||
148 | offset, size, dir); | ||
167 | flush_write_buffers(); | 149 | flush_write_buffers(); |
168 | } | 150 | } |
169 | 151 | ||
170 | static inline void | 152 | static inline void |
171 | dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, | 153 | dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, |
172 | unsigned long offset, size_t size, | 154 | unsigned long offset, size_t size, |
173 | int direction) | 155 | enum dma_data_direction dir) |
174 | { | 156 | { |
175 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 157 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
176 | 158 | ||
177 | BUG_ON(!valid_dma_direction(direction)); | 159 | BUG_ON(!valid_dma_direction(dir)); |
178 | if (ops->sync_single_range_for_device) | 160 | if (ops->sync_single_range_for_device) |
179 | ops->sync_single_range_for_device(hwdev, dma_handle, | 161 | ops->sync_single_range_for_device(hwdev, dma_handle, |
180 | offset, size, direction); | 162 | offset, size, dir); |
163 | debug_dma_sync_single_range_for_device(hwdev, dma_handle, | ||
164 | offset, size, dir); | ||
181 | flush_write_buffers(); | 165 | flush_write_buffers(); |
182 | } | 166 | } |
183 | 167 | ||
184 | static inline void | 168 | static inline void |
185 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | 169 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
186 | int nelems, int direction) | 170 | int nelems, enum dma_data_direction dir) |
187 | { | 171 | { |
188 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 172 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
189 | 173 | ||
190 | BUG_ON(!valid_dma_direction(direction)); | 174 | BUG_ON(!valid_dma_direction(dir)); |
191 | if (ops->sync_sg_for_cpu) | 175 | if (ops->sync_sg_for_cpu) |
192 | ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); | 176 | ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); |
177 | debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir); | ||
193 | flush_write_buffers(); | 178 | flush_write_buffers(); |
194 | } | 179 | } |
195 | 180 | ||
196 | static inline void | 181 | static inline void |
197 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | 182 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
198 | int nelems, int direction) | 183 | int nelems, enum dma_data_direction dir) |
199 | { | 184 | { |
200 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 185 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
201 | 186 | ||
202 | BUG_ON(!valid_dma_direction(direction)); | 187 | BUG_ON(!valid_dma_direction(dir)); |
203 | if (ops->sync_sg_for_device) | 188 | if (ops->sync_sg_for_device) |
204 | ops->sync_sg_for_device(hwdev, sg, nelems, direction); | 189 | ops->sync_sg_for_device(hwdev, sg, nelems, dir); |
190 | debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir); | ||
205 | 191 | ||
206 | flush_write_buffers(); | 192 | flush_write_buffers(); |
207 | } | 193 | } |
208 | 194 | ||
209 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | 195 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, |
210 | size_t offset, size_t size, | 196 | size_t offset, size_t size, |
211 | int direction) | 197 | enum dma_data_direction dir) |
212 | { | 198 | { |
213 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 199 | struct dma_map_ops *ops = get_dma_ops(dev); |
200 | dma_addr_t addr; | ||
214 | 201 | ||
215 | BUG_ON(!valid_dma_direction(direction)); | 202 | BUG_ON(!valid_dma_direction(dir)); |
216 | return ops->map_single(dev, page_to_phys(page) + offset, | 203 | addr = ops->map_page(dev, page, offset, size, dir, NULL); |
217 | size, direction); | 204 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); |
205 | |||
206 | return addr; | ||
218 | } | 207 | } |
219 | 208 | ||
220 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | 209 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, |
221 | size_t size, int direction) | 210 | size_t size, enum dma_data_direction dir) |
222 | { | 211 | { |
223 | dma_unmap_single(dev, addr, size, direction); | 212 | struct dma_map_ops *ops = get_dma_ops(dev); |
213 | |||
214 | BUG_ON(!valid_dma_direction(dir)); | ||
215 | if (ops->unmap_page) | ||
216 | ops->unmap_page(dev, addr, size, dir, NULL); | ||
217 | debug_dma_unmap_page(dev, addr, size, dir, false); | ||
224 | } | 218 | } |
225 | 219 | ||
226 | static inline void | 220 | static inline void |
@@ -266,7 +260,7 @@ static inline void * | |||
266 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | 260 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, |
267 | gfp_t gfp) | 261 | gfp_t gfp) |
268 | { | 262 | { |
269 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 263 | struct dma_map_ops *ops = get_dma_ops(dev); |
270 | void *memory; | 264 | void *memory; |
271 | 265 | ||
272 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 266 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
@@ -285,20 +279,24 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
285 | if (!ops->alloc_coherent) | 279 | if (!ops->alloc_coherent) |
286 | return NULL; | 280 | return NULL; |
287 | 281 | ||
288 | return ops->alloc_coherent(dev, size, dma_handle, | 282 | memory = ops->alloc_coherent(dev, size, dma_handle, |
289 | dma_alloc_coherent_gfp_flags(dev, gfp)); | 283 | dma_alloc_coherent_gfp_flags(dev, gfp)); |
284 | debug_dma_alloc_coherent(dev, size, *dma_handle, memory); | ||
285 | |||
286 | return memory; | ||
290 | } | 287 | } |
291 | 288 | ||
292 | static inline void dma_free_coherent(struct device *dev, size_t size, | 289 | static inline void dma_free_coherent(struct device *dev, size_t size, |
293 | void *vaddr, dma_addr_t bus) | 290 | void *vaddr, dma_addr_t bus) |
294 | { | 291 | { |
295 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 292 | struct dma_map_ops *ops = get_dma_ops(dev); |
296 | 293 | ||
297 | WARN_ON(irqs_disabled()); /* for portability */ | 294 | WARN_ON(irqs_disabled()); /* for portability */ |
298 | 295 | ||
299 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) | 296 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) |
300 | return; | 297 | return; |
301 | 298 | ||
299 | debug_dma_free_coherent(dev, size, vaddr, bus); | ||
302 | if (ops->free_coherent) | 300 | if (ops->free_coherent) |
303 | ops->free_coherent(dev, size, vaddr, bus); | 301 | ops->free_coherent(dev, size, vaddr, bus); |
304 | } | 302 | } |
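Every wrapper in the reworked header follows the same three-step shape: validate the direction, dispatch to the ops hook, then report the result to dma-debug. The final bool handed to debug_dma_map_page()/debug_dma_unmap_page() records whether the mapping was created through dma_map_single(), so mixing the single and page variants of map/unmap can be flagged. A condensed sketch of the shape, not a verbatim copy of any one wrapper:

        /* sketch of the recurring wrapper pattern:
         * 1) sanity-check, 2) dispatch, 3) tell dma-debug about it */
        static inline dma_addr_t pattern_map(struct device *dev, struct page *page,
                                             size_t off, size_t size,
                                             enum dma_data_direction dir)
        {
                struct dma_map_ops *ops = get_dma_ops(dev);
                dma_addr_t addr;

                BUG_ON(!valid_dma_direction(dir));
                addr = ops->map_page(dev, page, off, size, dir, NULL);
                debug_dma_map_page(dev, page, off, size, dir, addr, false);
                return addr;
        }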
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index a6ee9e6f530f..af326a2975b5 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | extern void pci_iommu_shutdown(void); | 4 | extern void pci_iommu_shutdown(void); |
5 | extern void no_iommu_init(void); | 5 | extern void no_iommu_init(void); |
6 | extern struct dma_mapping_ops nommu_dma_ops; | 6 | extern struct dma_map_ops nommu_dma_ops; |
7 | extern int force_iommu, no_iommu; | 7 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 8 | extern int iommu_detected; |
9 | 9 | ||
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 6e9c1f320acf..c611ad64137f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -105,7 +105,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o | |||
105 | 105 | ||
106 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o | 106 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o |
107 | 107 | ||
108 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64 | 108 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
109 | 109 | ||
110 | ### | 110 | ### |
111 | # 64 bit specific files | 111 | # 64 bit specific files |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 5113c080f0c4..c5962fe3796f 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -22,10 +22,9 @@ | |||
22 | #include <linux/bitops.h> | 22 | #include <linux/bitops.h> |
23 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
24 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
25 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/iommu-helper.h> | 26 | #include <linux/iommu-helper.h> |
26 | #ifdef CONFIG_IOMMU_API | ||
27 | #include <linux/iommu.h> | 27 | #include <linux/iommu.h> |
28 | #endif | ||
29 | #include <asm/proto.h> | 28 | #include <asm/proto.h> |
30 | #include <asm/iommu.h> | 29 | #include <asm/iommu.h> |
31 | #include <asm/gart.h> | 30 | #include <asm/gart.h> |
@@ -1297,8 +1296,10 @@ static void __unmap_single(struct amd_iommu *iommu, | |||
1297 | /* | 1296 | /* |
1298 | * The exported map_single function for dma_ops. | 1297 | * The exported map_single function for dma_ops. |
1299 | */ | 1298 | */ |
1300 | static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, | 1299 | static dma_addr_t map_page(struct device *dev, struct page *page, |
1301 | size_t size, int dir) | 1300 | unsigned long offset, size_t size, |
1301 | enum dma_data_direction dir, | ||
1302 | struct dma_attrs *attrs) | ||
1302 | { | 1303 | { |
1303 | unsigned long flags; | 1304 | unsigned long flags; |
1304 | struct amd_iommu *iommu; | 1305 | struct amd_iommu *iommu; |
@@ -1306,6 +1307,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, | |||
1306 | u16 devid; | 1307 | u16 devid; |
1307 | dma_addr_t addr; | 1308 | dma_addr_t addr; |
1308 | u64 dma_mask; | 1309 | u64 dma_mask; |
1310 | phys_addr_t paddr = page_to_phys(page) + offset; | ||
1309 | 1311 | ||
1310 | INC_STATS_COUNTER(cnt_map_single); | 1312 | INC_STATS_COUNTER(cnt_map_single); |
1311 | 1313 | ||
@@ -1340,8 +1342,8 @@ out: | |||
1340 | /* | 1342 | /* |
1341 | * The exported unmap_single function for dma_ops. | 1343 | * The exported unmap_single function for dma_ops. |
1342 | */ | 1344 | */ |
1343 | static void unmap_single(struct device *dev, dma_addr_t dma_addr, | 1345 | static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
1344 | size_t size, int dir) | 1346 | enum dma_data_direction dir, struct dma_attrs *attrs) |
1345 | { | 1347 | { |
1346 | unsigned long flags; | 1348 | unsigned long flags; |
1347 | struct amd_iommu *iommu; | 1349 | struct amd_iommu *iommu; |
@@ -1390,7 +1392,8 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, | |||
1390 | * lists). | 1392 | * lists). |
1391 | */ | 1393 | */ |
1392 | static int map_sg(struct device *dev, struct scatterlist *sglist, | 1394 | static int map_sg(struct device *dev, struct scatterlist *sglist, |
1393 | int nelems, int dir) | 1395 | int nelems, enum dma_data_direction dir, |
1396 | struct dma_attrs *attrs) | ||
1394 | { | 1397 | { |
1395 | unsigned long flags; | 1398 | unsigned long flags; |
1396 | struct amd_iommu *iommu; | 1399 | struct amd_iommu *iommu; |
@@ -1457,7 +1460,8 @@ unmap: | |||
1457 | * lists). | 1460 | * lists). |
1458 | */ | 1461 | */ |
1459 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, | 1462 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, |
1460 | int nelems, int dir) | 1463 | int nelems, enum dma_data_direction dir, |
1464 | struct dma_attrs *attrs) | ||
1461 | { | 1465 | { |
1462 | unsigned long flags; | 1466 | unsigned long flags; |
1463 | struct amd_iommu *iommu; | 1467 | struct amd_iommu *iommu; |
@@ -1644,11 +1648,11 @@ static void prealloc_protection_domains(void) | |||
1644 | } | 1648 | } |
1645 | } | 1649 | } |
1646 | 1650 | ||
1647 | static struct dma_mapping_ops amd_iommu_dma_ops = { | 1651 | static struct dma_map_ops amd_iommu_dma_ops = { |
1648 | .alloc_coherent = alloc_coherent, | 1652 | .alloc_coherent = alloc_coherent, |
1649 | .free_coherent = free_coherent, | 1653 | .free_coherent = free_coherent, |
1650 | .map_single = map_single, | 1654 | .map_page = map_page, |
1651 | .unmap_single = unmap_single, | 1655 | .unmap_page = unmap_page, |
1652 | .map_sg = map_sg, | 1656 | .map_sg = map_sg, |
1653 | .unmap_sg = unmap_sg, | 1657 | .unmap_sg = unmap_sg, |
1654 | .dma_supported = amd_iommu_dma_supported, | 1658 | .dma_supported = amd_iommu_dma_supported, |
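The converted hooks all gain a struct dma_attrs * argument that the AMD IOMMU simply ignores for now; the generic x86 wrappers pass NULL. A sketch of how an attribute-aware caller could use the new hook, assuming DMA_ATTR_WRITE_BARRIER (the only attribute defined at this point) and a hypothetical example_attrs helper:

        #include <linux/dma-attrs.h>
        #include <linux/dma-mapping.h>

        /* sketch: passing attributes through the new map_page hook;
         * implementations that do not care (like amd_iommu here) ignore them */
        static dma_addr_t example_attrs(struct device *dev, struct page *page,
                                        size_t len)
        {
                struct dma_map_ops *ops = get_dma_ops(dev);
                DEFINE_DMA_ATTRS(attrs);

                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
                return ops->map_page(dev, page, 0, len, DMA_TO_DEVICE, &attrs);
        }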
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index d28bbdc35e4e..755c21e906f3 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -380,8 +380,9 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) | |||
380 | return tbl; | 380 | return tbl; |
381 | } | 381 | } |
382 | 382 | ||
383 | static void calgary_unmap_sg(struct device *dev, | 383 | static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, |
384 | struct scatterlist *sglist, int nelems, int direction) | 384 | int nelems, enum dma_data_direction dir, |
385 | struct dma_attrs *attrs) | ||
385 | { | 386 | { |
386 | struct iommu_table *tbl = find_iommu_table(dev); | 387 | struct iommu_table *tbl = find_iommu_table(dev); |
387 | struct scatterlist *s; | 388 | struct scatterlist *s; |
@@ -404,7 +405,8 @@ static void calgary_unmap_sg(struct device *dev, | |||
404 | } | 405 | } |
405 | 406 | ||
406 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | 407 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, |
407 | int nelems, int direction) | 408 | int nelems, enum dma_data_direction dir, |
409 | struct dma_attrs *attrs) | ||
408 | { | 410 | { |
409 | struct iommu_table *tbl = find_iommu_table(dev); | 411 | struct iommu_table *tbl = find_iommu_table(dev); |
410 | struct scatterlist *s; | 412 | struct scatterlist *s; |
@@ -429,15 +431,14 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
429 | s->dma_address = (entry << PAGE_SHIFT) | s->offset; | 431 | s->dma_address = (entry << PAGE_SHIFT) | s->offset; |
430 | 432 | ||
431 | /* insert into HW table */ | 433 | /* insert into HW table */ |
432 | tce_build(tbl, entry, npages, vaddr & PAGE_MASK, | 434 | tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir); |
433 | direction); | ||
434 | 435 | ||
435 | s->dma_length = s->length; | 436 | s->dma_length = s->length; |
436 | } | 437 | } |
437 | 438 | ||
438 | return nelems; | 439 | return nelems; |
439 | error: | 440 | error: |
440 | calgary_unmap_sg(dev, sg, nelems, direction); | 441 | calgary_unmap_sg(dev, sg, nelems, dir, NULL); |
441 | for_each_sg(sg, s, nelems, i) { | 442 | for_each_sg(sg, s, nelems, i) { |
442 | sg->dma_address = bad_dma_address; | 443 | sg->dma_address = bad_dma_address; |
443 | sg->dma_length = 0; | 444 | sg->dma_length = 0; |
@@ -445,10 +446,12 @@ error: | |||
445 | return 0; | 446 | return 0; |
446 | } | 447 | } |
447 | 448 | ||
448 | static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, | 449 | static dma_addr_t calgary_map_page(struct device *dev, struct page *page, |
449 | size_t size, int direction) | 450 | unsigned long offset, size_t size, |
451 | enum dma_data_direction dir, | ||
452 | struct dma_attrs *attrs) | ||
450 | { | 453 | { |
451 | void *vaddr = phys_to_virt(paddr); | 454 | void *vaddr = page_address(page) + offset; |
452 | unsigned long uaddr; | 455 | unsigned long uaddr; |
453 | unsigned int npages; | 456 | unsigned int npages; |
454 | struct iommu_table *tbl = find_iommu_table(dev); | 457 | struct iommu_table *tbl = find_iommu_table(dev); |
@@ -456,17 +459,18 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, | |||
456 | uaddr = (unsigned long)vaddr; | 459 | uaddr = (unsigned long)vaddr; |
457 | npages = iommu_num_pages(uaddr, size, PAGE_SIZE); | 460 | npages = iommu_num_pages(uaddr, size, PAGE_SIZE); |
458 | 461 | ||
459 | return iommu_alloc(dev, tbl, vaddr, npages, direction); | 462 | return iommu_alloc(dev, tbl, vaddr, npages, dir); |
460 | } | 463 | } |
461 | 464 | ||
462 | static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, | 465 | static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, |
463 | size_t size, int direction) | 466 | size_t size, enum dma_data_direction dir, |
467 | struct dma_attrs *attrs) | ||
464 | { | 468 | { |
465 | struct iommu_table *tbl = find_iommu_table(dev); | 469 | struct iommu_table *tbl = find_iommu_table(dev); |
466 | unsigned int npages; | 470 | unsigned int npages; |
467 | 471 | ||
468 | npages = iommu_num_pages(dma_handle, size, PAGE_SIZE); | 472 | npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); |
469 | iommu_free(tbl, dma_handle, npages); | 473 | iommu_free(tbl, dma_addr, npages); |
470 | } | 474 | } |
471 | 475 | ||
472 | static void* calgary_alloc_coherent(struct device *dev, size_t size, | 476 | static void* calgary_alloc_coherent(struct device *dev, size_t size, |
@@ -515,13 +519,13 @@ static void calgary_free_coherent(struct device *dev, size_t size, | |||
515 | free_pages((unsigned long)vaddr, get_order(size)); | 519 | free_pages((unsigned long)vaddr, get_order(size)); |
516 | } | 520 | } |
517 | 521 | ||
518 | static struct dma_mapping_ops calgary_dma_ops = { | 522 | static struct dma_map_ops calgary_dma_ops = { |
519 | .alloc_coherent = calgary_alloc_coherent, | 523 | .alloc_coherent = calgary_alloc_coherent, |
520 | .free_coherent = calgary_free_coherent, | 524 | .free_coherent = calgary_free_coherent, |
521 | .map_single = calgary_map_single, | ||
522 | .unmap_single = calgary_unmap_single, | ||
523 | .map_sg = calgary_map_sg, | 525 | .map_sg = calgary_map_sg, |
524 | .unmap_sg = calgary_unmap_sg, | 526 | .unmap_sg = calgary_unmap_sg, |
527 | .map_page = calgary_map_page, | ||
528 | .unmap_page = calgary_unmap_page, | ||
525 | }; | 529 | }; |
526 | 530 | ||
527 | static inline void __iomem * busno_to_bbar(unsigned char num) | 531 | static inline void __iomem * busno_to_bbar(unsigned char num) |
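calgary_map_page() recovers the kernel virtual address with page_address(page) + offset, which is always legal on x86-64 because there is no highmem: every page has a permanent mapping. That is what makes converting the old phys_addr_t based map_single into map_page mechanical here, as in the sn conversion above. A sketch of the equivalence being relied on; same_target is hypothetical:

        #include <linux/mm.h>
        #include <asm/io.h>

        /* sketch: with no highmem both expressions name the same byte,
         * so phys-based and page-based entry points are interchangeable */
        static bool same_target(struct page *page, unsigned long offset)
        {
                phys_addr_t a = page_to_phys(page) + offset;
                phys_addr_t b = virt_to_phys(page_address(page) + offset);

                return a == b;
        }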
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index b25428533141..c7c4776ff630 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/dma-mapping.h> | 1 | #include <linux/dma-mapping.h> |
2 | #include <linux/dma-debug.h> | ||
2 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
3 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
4 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
@@ -12,7 +13,7 @@ | |||
12 | 13 | ||
13 | static int forbid_dac __read_mostly; | 14 | static int forbid_dac __read_mostly; |
14 | 15 | ||
15 | struct dma_mapping_ops *dma_ops; | 16 | struct dma_map_ops *dma_ops; |
16 | EXPORT_SYMBOL(dma_ops); | 17 | EXPORT_SYMBOL(dma_ops); |
17 | 18 | ||
18 | static int iommu_sac_force __read_mostly; | 19 | static int iommu_sac_force __read_mostly; |
@@ -44,6 +45,9 @@ struct device x86_dma_fallback_dev = { | |||
44 | }; | 45 | }; |
45 | EXPORT_SYMBOL(x86_dma_fallback_dev); | 46 | EXPORT_SYMBOL(x86_dma_fallback_dev); |
46 | 47 | ||
48 | /* Number of entries preallocated for DMA-API debugging */ | ||
49 | #define PREALLOC_DMA_DEBUG_ENTRIES 32768 | ||
50 | |||
47 | int dma_set_mask(struct device *dev, u64 mask) | 51 | int dma_set_mask(struct device *dev, u64 mask) |
48 | { | 52 | { |
49 | if (!dev->dma_mask || !dma_supported(dev, mask)) | 53 | if (!dev->dma_mask || !dma_supported(dev, mask)) |
@@ -224,7 +228,7 @@ early_param("iommu", iommu_setup); | |||
224 | 228 | ||
225 | int dma_supported(struct device *dev, u64 mask) | 229 | int dma_supported(struct device *dev, u64 mask) |
226 | { | 230 | { |
227 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 231 | struct dma_map_ops *ops = get_dma_ops(dev); |
228 | 232 | ||
229 | #ifdef CONFIG_PCI | 233 | #ifdef CONFIG_PCI |
230 | if (mask > 0xffffffff && forbid_dac > 0) { | 234 | if (mask > 0xffffffff && forbid_dac > 0) { |
@@ -265,6 +269,12 @@ EXPORT_SYMBOL(dma_supported); | |||
265 | 269 | ||
266 | static int __init pci_iommu_init(void) | 270 | static int __init pci_iommu_init(void) |
267 | { | 271 | { |
272 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
273 | |||
274 | #ifdef CONFIG_PCI | ||
275 | dma_debug_add_bus(&pci_bus_type); | ||
276 | #endif | ||
277 | |||
268 | calgary_iommu_init(); | 278 | calgary_iommu_init(); |
269 | 279 | ||
270 | intel_iommu_init(); | 280 | intel_iommu_init(); |
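pci_iommu_init() now brings dma-debug up with 32768 preallocated entries, since the tracker must work from the atomic contexts in which mappings are made, and registers the PCI bus so unbinding a driver with live mappings can be reported as a leak. A sketch of the class of bug these checks catch; buggy_unmap is a deliberately wrong hypothetical:

        /* sketch: a DMA-API violation dma-debug will now report, complete
         * with the stack trace of the original mapping */
        static void buggy_unmap(struct device *dev, void *buf)
        {
                dma_addr_t addr = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);

                /* size and direction disagree with the mapping: flagged */
                dma_unmap_single(dev, addr, 2048, DMA_FROM_DEVICE);
        }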
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index d5768b1af080..b284b58c035c 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -255,10 +255,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | |||
255 | } | 255 | } |
256 | 256 | ||
257 | /* Map a single area into the IOMMU */ | 257 | /* Map a single area into the IOMMU */ |
258 | static dma_addr_t | 258 | static dma_addr_t gart_map_page(struct device *dev, struct page *page, |
259 | gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir) | 259 | unsigned long offset, size_t size, |
260 | enum dma_data_direction dir, | ||
261 | struct dma_attrs *attrs) | ||
260 | { | 262 | { |
261 | unsigned long bus; | 263 | unsigned long bus; |
264 | phys_addr_t paddr = page_to_phys(page) + offset; | ||
262 | 265 | ||
263 | if (!dev) | 266 | if (!dev) |
264 | dev = &x86_dma_fallback_dev; | 267 | dev = &x86_dma_fallback_dev; |
@@ -275,8 +278,9 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 /*
  * Free a DMA mapping.
  */
-static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			      size_t size, int direction)
+static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
+			    size_t size, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	unsigned long iommu_page;
 	int npages;
@@ -298,8 +302,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
  */
-static void
-gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -307,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 	for_each_sg(sg, s, nents, i) {
 		if (!s->dma_length || !s->length)
 			break;
-		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
+		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
 	}
 }
 
@@ -329,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		addr = dma_map_area(dev, addr, s->length, dir, 0);
 		if (addr == bad_dma_address) {
 			if (i > 0)
-				gart_unmap_sg(dev, sg, i, dir);
+				gart_unmap_sg(dev, sg, i, dir, NULL);
 			nents = 0;
 			sg[0].dma_length = 0;
 			break;
@@ -400,8 +404,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
  * DMA map all entries in a scatterlist.
  * Merge chunks that have page aligned sizes into a continuous mapping.
  */
-static int
-gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *ps, *start_sg, *sgmap;
 	int need = 0, nextneed, i, out, start;
@@ -468,7 +472,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
 error:
 	flush_gart();
-	gart_unmap_sg(dev, sg, out, dir);
+	gart_unmap_sg(dev, sg, out, dir, NULL);
 
 	/* When it was forced or merged try again in a dumb way */
 	if (force_iommu || iommu_merge) {
@@ -521,7 +525,7 @@ static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 		   dma_addr_t dma_addr)
 {
-	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
@@ -707,11 +711,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	return -1;
 }
 
-static struct dma_mapping_ops gart_dma_ops = {
-	.map_single			= gart_map_single,
-	.unmap_single			= gart_unmap_single,
+static struct dma_map_ops gart_dma_ops = {
 	.map_sg				= gart_map_sg,
 	.unmap_sg			= gart_unmap_sg,
+	.map_page			= gart_map_page,
+	.unmap_page			= gart_unmap_page,
 	.alloc_coherent			= gart_alloc_coherent,
 	.free_coherent			= gart_free_coherent,
 };
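
With the ops table converted to struct dma_map_ops, drivers keep using the generic scatterlist API and land in gart_map_sg() above, which may merge page-aligned chunks into a single contiguous IOMMU range. A hypothetical driver-side fragment, not from this patch (example_map_sglist is an invented name):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_sglist(struct device *dev, struct scatterlist *sgl,
			      int nents)
{
	/* dma_map_sg() returns 0 when gart_map_sg() fails outright */
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (!mapped)
		return -EIO;

	/* ... program the device with the (possibly merged) entries ... */

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
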
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 8b02a3936d42..c6d703b39326 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -25,19 +25,19 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 	return 1;
 }
 
-static dma_addr_t
-nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
-		 int direction)
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	dma_addr_t bus = paddr;
+	dma_addr_t bus = page_to_phys(page) + offset;
 	WARN_ON(size == 0);
-	if (!check_addr("map_single", hwdev, bus, size))
+	if (!check_addr("map_single", dev, bus, size))
 		return bad_dma_address;
 	flush_write_buffers();
 	return bus;
 }
 
-
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scatter-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
@@ -54,7 +54,8 @@ nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
  * the same here.
  */
 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
-			int nents, int direction)
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -78,11 +79,11 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
-struct dma_mapping_ops nommu_dma_ops = {
+struct dma_map_ops nommu_dma_ops = {
 	.alloc_coherent = dma_generic_alloc_coherent,
 	.free_coherent = nommu_free_coherent,
-	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
+	.map_page = nommu_map_page,
 	.is_phys = 1,
 };
 
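
nommu_map_page() keeps the identity mapping (is_phys = 1): the bus address is simply the physical address, so check_addr() only has to verify that the buffer lies below the device's DMA mask. A condensed sketch of that test — a hypothetical helper; the real check_addr() additionally prints a one-shot warning:

#include <linux/device.h>
#include <linux/types.h>

static inline int example_addr_ok(struct device *dev, dma_addr_t bus,
				  size_t size)
{
	/* Identity-mapped DMA can only fail when the buffer ends
	 * beyond what the device's DMA mask can address. */
	return dev->dma_mask && bus + size <= *dev->dma_mask + 1;
}
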
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb.c
index d59c91747665..34f12e9996ed 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -33,18 +33,11 @@ phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
 	return baddr;
 }
 
-int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
 {
 	return 0;
 }
 
-static dma_addr_t
-swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
-			int direction)
-{
-	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
-}
-
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -57,20 +50,20 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
-	.map_single = swiotlb_map_single_phys,
-	.unmap_single = swiotlb_unmap_single,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.map_sg = swiotlb_map_sg,
-	.unmap_sg = swiotlb_unmap_sg,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
 	.dma_supported = NULL,
 };
 
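
The x86 map_single wrapper is dropped because swiotlb now exports the generic map_page/unmap_page and *_attrs scatterlist hooks directly. swiotlb keeps its full set of sync hooks since it may bounce a mapping through its own pool; data the device wrote only becomes visible to the CPU after a sync. A hypothetical receive-completion fragment, not from this patch (example_complete_rx is an invented name):

#include <linux/dma-mapping.h>

static void example_complete_rx(struct device *dev, dma_addr_t handle,
				void *buf, size_t len)
{
	/* Copies from the swiotlb bounce slot back into buf when one
	 * is in use for this mapping. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... the CPU may now read the received data in buf ... */

	/* Hand ownership back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
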