Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/device.h      |   2 +-
-rw-r--r--	arch/x86/include/asm/dma-mapping.h | 146 ++++++++++++-------------
-rw-r--r--	arch/x86/include/asm/iommu.h       |   2 +-
3 files changed, 55 insertions(+), 95 deletions(-)
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 3c034f48fdb0..4994a20acbcb 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,7 +6,7 @@ struct dev_archdata {
 	void *acpi_handle;
 #endif
 #ifdef CONFIG_X86_64
-	struct dma_mapping_ops *dma_ops;
+	struct dma_map_ops *dma_ops;
 #endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
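
For context: the dev->archdata.dma_ops hook retyped above is what get_dma_ops() consults on x86_64. The body of that branch falls outside the hunks shown below; the following is a reconstructed sketch of how such a lookup typically reads, not part of this patch:

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	/* Sketch: prefer the per-device ops, fall back to the global
	 * dma_ops when none are set (actual body not shown in this diff). */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	return dev->archdata.dma_ops;
#endif
}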
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index b81f82268a16..5a347805a6c7 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -17,50 +17,9 @@ extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 
-struct dma_mapping_ops {
-	int             (*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*           (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void            (*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	void            (*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void            (*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void            (*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void            (*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void            (*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void            (*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void            (*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	dma_addr_t      (*map_page)(struct device *dev, struct page *page,
-				unsigned long offset, size_t size,
-				enum dma_data_direction dir,
-				struct dma_attrs *attrs);
-	void            (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
-				size_t size, enum dma_data_direction dir,
-				struct dma_attrs *attrs);
-	int             (*dma_supported)(struct device *hwdev, u64 mask);
-	int             is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_X86_32
 	return dma_ops;
@@ -75,7 +34,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
@@ -94,138 +53,139 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
+	       enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	return ops->map_page(hwdev, virt_to_page(ptr),
 			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     direction, NULL);
+			     dir, NULL);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 int direction)
+		 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, direction, NULL);
+		ops->unmap_page(dev, addr, size, dir, NULL);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, int direction)
+	   int nents, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_sg(hwdev, sg, nents, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
+	     enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, direction);
+		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
+			size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
+			   size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_cpu)
 		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, direction);
+					       size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
-				 int direction)
+				 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_device)
 		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, direction);
+						  offset, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
+		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
+		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
 
 	flush_write_buffers();
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction)
+				      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_page(dev, page, offset, size, direction, NULL);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, int direction)
+				  size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, addr, size, direction);
+	dma_unmap_single(dev, addr, size, dir);
 }
 
 static inline void
@@ -271,7 +231,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -297,7 +257,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 static inline void dma_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t bus)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	WARN_ON(irqs_disabled());       /* for portability */
 
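
Taken together, the wrappers in dma-mapping.h keep their external behaviour but now take the strongly typed enum dma_data_direction and dispatch through the shared struct dma_map_ops. A minimal caller-side sketch; the device pointer mydev, buffer buf and length len are illustrative names, not from this patch:

	/* Sketch: mapping a buffer for device reads with the converted API. */
	dma_addr_t handle;

	handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mydev, handle))
		return -ENOMEM;		/* mapping failed; nothing to unmap */
	/* ... start DMA, wait for completion ... */
	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);

Existing drivers passing a plain int direction keep building, since the int converts implicitly to the enum.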
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index a6ee9e6f530f..af326a2975b5 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -3,7 +3,7 @@
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
-extern struct dma_mapping_ops nommu_dma_ops;
+extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
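
With nommu_dma_ops retyped to the generic struct dma_map_ops, an implementation fills in only the hooks it provides; NULL hooks are skipped by the inline wrappers above (see the ops->unmap_page and ops->unmap_sg checks). A sketch of how such an ops table is typically populated; the helpers nommu_map_page, nommu_map_sg and nommu_free_coherent are assumed names for the pci-nommu.c implementation, which this diff does not include:

	struct dma_map_ops nommu_dma_ops = {
		.alloc_coherent	= dma_generic_alloc_coherent,
		.free_coherent	= nommu_free_coherent,	/* assumed helper */
		.map_page	= nommu_map_page,	/* assumed helper */
		.map_sg		= nommu_map_sg,		/* assumed helper */
		.is_phys	= 1,	/* no IOMMU: DMA addresses are physical */
	};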