author    Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:17:24 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:17:24 -0400
commit    250c22777fe1ccd7ac588579a6c16db4c0161cc5 (patch)
tree      55c317efb7d792ec6fdae1d1937c67a502c48dec /arch/x86/kernel/pci-dma_64.c
parent    2db55d344e529492545cb3b755c7e9ba8e4fa94e (diff)
x86_64: move kernel
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/pci-dma_64.c')
-rw-r--r--    arch/x86/kernel/pci-dma_64.c    346
1 files changed, 346 insertions, 0 deletions
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
new file mode 100644
index 000000000000..29711445c818
--- /dev/null
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -0,0 +1,346 @@
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/calgary.h>

int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

static int iommu_sac_force __read_mostly = 0;

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* Dummy device used for NULL arguments (normally ISA). A smaller
   DMA mask would probably be better, but this is bug-to-bug
   compatible with i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		node = pcibus_to_node(to_pci_dev(dev)->bus);
	else
#endif
		node = numa_node_id();

	if (node < first_node(node_online_map))
		node = first_node(node_online_map);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	unsigned long dma_mask = 0;
	u64 bus;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA capable */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;

again:
	memory = dma_alloc_pages(dev, gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;
		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, memory, size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
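
/*
 * Illustrative sketch of how a driver typically pairs the two calls
 * above. The function name, device pointer and buffer size are
 * hypothetical, not from this file; guarded by #if 0 so it is never
 * compiled.
 */
#if 0
static int example_alloc_ring(struct pci_dev *pdev)
{
	dma_addr_t ring_bus;
	void *ring;

	/* One page of coherent memory, visible to CPU and device alike */
	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_bus,
				  GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_bus into the hardware, access ring from the CPU ... */

	/* Once the device is quiesced, release the mapping */
	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_bus);
	return 0;
}
#endif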

static int forbid_dac __read_mostly;

int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
		       dev->bus_id);
		return 0;
	}
#endif

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
		       dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
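
/*
 * Illustrative sketch: negotiating the widest usable mask with
 * dma_set_mask(), falling back from 64-bit (DAC) to 32-bit (SAC)
 * addressing. The function name and pci_dev pointer are hypothetical;
 * guarded by #if 0 so it is never compiled.
 */
#if 0
static int example_negotiate_mask(struct pci_dev *pdev)
{
	/* Try full 64-bit addressing first; dma_supported() may veto it */
	if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) == 0)
		return 0;
	/* Fall back to 32-bit single address cycles */
	return dma_set_mask(&pdev->dev, DMA_32BIT_MASK);
}
#endif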

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
__init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;

#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
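
/*
 * Example boot parameters handled by the loop above (options are
 * comma separated and parsed in order):
 *   iommu=off          - disable IOMMU handling (no_iommu = 1)
 *   iommu=force,merge  - always remap and let the block layer merge
 *   iommu=soft         - use the swiotlb bounce buffers (CONFIG_SWIOTLB)
 */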

void __init pci_iommu_alloc(void)
{
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}

static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

#ifdef CONFIG_IOMMU
	gart_iommu_init();
#endif

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);