author    Glauber Costa <gcosta@redhat.com>    2008-04-09 12:18:10 -0400
committer Ingo Molnar <mingo@elte.hu>          2008-04-19 13:19:58 -0400
commit    098cb7f27ed69276e4db560a444b94b982e4bb8f (patch)
tree      6c6a26d9423d3320632e0fd029d9244a07e760da /arch/x86
parent    bb8ada95a7c11adf3dad4e8d5c55ef1650560592 (diff)
x86: integrate pci-dma.c

The 32-bit and 64-bit versions in pci-dma_{32,64}.c are now
sufficiently close to each other. We merge them into a single
pci-dma.c.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
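Both files implemented the same consistent-allocation API, so callers are
unaffected by the merge. As a minimal sketch of that API from the caller's
side (the probe function, device and buffer size below are hypothetical,
not part of this commit):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	#define EXAMPLE_BUF_SIZE 4096		/* hypothetical size */

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		dma_addr_t bus;
		void *vaddr;

		/* Served by the merged dma_alloc_coherent() on both
		   32-bit and 64-bit x86. */
		vaddr = dma_alloc_coherent(&pdev->dev, EXAMPLE_BUF_SIZE,
					   &bus, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* ... hand 'bus' to the device, touch 'vaddr' from the
		   CPU, then release the mapping ... */

		dma_free_coherent(&pdev->dev, EXAMPLE_BUF_SIZE, vaddr, bus);
		return 0;
	}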
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/Makefile      |   2
-rw-r--r--  arch/x86/kernel/pci-dma.c     | 175
-rw-r--r--  arch/x86/kernel/pci-dma_32.c  | 173
-rw-r--r--  arch/x86/kernel/pci-dma_64.c  | 154
4 files changed, 176 insertions(+), 328 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 307aee5e8c5b..90e092d0af0c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -22,7 +22,7 @@ obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o setup64.o
-obj-y			+= pci-dma_$(BITS).o bootflag.o e820_$(BITS).o
+obj-y			+= bootflag.o e820_$(BITS).o
 obj-y			+= pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
 obj-y			+= alternative.o i8253.o pci-nommu.o
 obj-$(CONFIG_X86_64)	+= bugs_64.o
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 00527e74e49c..388b113a7d88 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -38,6 +38,15 @@ EXPORT_SYMBOL(iommu_bio_merge);
 dma_addr_t bad_dma_address __read_mostly = 0;
 EXPORT_SYMBOL(bad_dma_address);
 
+/* Dummy device used for NULL arguments (normally ISA). Better would
+   be probably a smaller DMA mask, but this is bug-to-bug compatible
+   to older i386. */
+struct device fallback_dev = {
+	.bus_id = "fallback device",
+	.coherent_dma_mask = DMA_32BIT_MASK,
+	.dma_mask = &fallback_dev.coherent_dma_mask,
+};
+
 int dma_set_mask(struct device *dev, u64 mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, mask))
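The fallback_dev hunk above is what keeps a NULL device argument legal:
dma_alloc_coherent() substitutes fallback_dev and its 32-bit coherent
mask. A hypothetical ISA-style caller, for illustration only:

	dma_addr_t handle;
	void *buf = dma_alloc_coherent(NULL, PAGE_SIZE, &handle, GFP_KERNEL);
	/* buf, if non-NULL, lies within fallback_dev's 32-bit mask */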
@@ -267,6 +276,43 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 	return mem->virt_base + (pos << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
+				       dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
+
+	if (mem) {
+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
+						   order);
+		if (page >= 0) {
+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
+			*ret = mem->virt_base + (page << PAGE_SHIFT);
+			memset(*ret, 0, size);
+		}
+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+			*ret = NULL;
+	}
+	return (mem != NULL);
+}
+
+static int dma_release_coherent(struct device *dev, int order, void *vaddr)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr <
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+		return 1;
+	}
+	return 0;
+}
+#else
+#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
+#define dma_release_coherent(dev, order, vaddr) (0)
 #endif /* CONFIG_X86_32 */
 
 int dma_supported(struct device *dev, u64 mask)
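The two helpers added above only ever find memory that was previously
attached to the device via dma_declare_coherent_memory(); on 64-bit
builds the #else stubs compile them away. A sketch of the declaration
side, assuming a hypothetical device with an on-board memory window
(the bus address and size are invented):

	/* Hypothetical 64KB device-local window at bus address 0x80000000. */
	if (!dma_declare_coherent_memory(dev, 0x80000000, 0x80000000,
					 0x10000, DMA_MEMORY_MAP))
		return -ENOMEM;

	/* dma_alloc_coherent(dev, ...) is now satisfied from that window
	   through dma_alloc_from_coherent_mem() before hitting the page
	   allocator. */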
@@ -310,6 +356,135 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
+/* Allocate DMA memory on node near device */
+noinline struct page *
+dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+{
+	int node;
+
+	node = dev_to_node(dev);
+
+	return alloc_pages_node(node, gfp, order);
+}
+
+/*
+ * Allocate memory for a coherent mapping.
+ */
+void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t gfp)
+{
+	void *memory = NULL;
+	struct page *page;
+	unsigned long dma_mask = 0;
+	dma_addr_t bus;
+
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+		return memory;
+
+	if (!dev)
+		dev = &fallback_dev;
+	dma_mask = dev->coherent_dma_mask;
+	if (dma_mask == 0)
+		dma_mask = DMA_32BIT_MASK;
+
+	/* Device not DMA able */
+	if (dev->dma_mask == NULL)
+		return NULL;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+#ifdef CONFIG_X86_64
+	/* Why <=? Even when the mask is smaller than 4GB it is often
+	   larger than 16MB and in this case we have a chance of
+	   finding fitting memory in the next higher zone first. If
+	   not retry with true GFP_DMA. -AK */
+	if (dma_mask <= DMA_32BIT_MASK)
+		gfp |= GFP_DMA32;
+#endif
+
+ again:
+	page = dma_alloc_pages(dev, gfp, get_order(size));
+	if (page == NULL)
+		return NULL;
+
+	{
+		int high, mmu;
+		bus = page_to_phys(page);
+		memory = page_address(page);
+		high = (bus + size) >= dma_mask;
+		mmu = high;
+		if (force_iommu && !(gfp & GFP_DMA))
+			mmu = 1;
+		else if (high) {
+			free_pages((unsigned long)memory,
+				   get_order(size));
+
+			/* Don't use the 16MB ZONE_DMA unless absolutely
+			   needed. It's better to use remapping first. */
+			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
+				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+				goto again;
+			}
+
+			/* Let low level make its own zone decisions */
+			gfp &= ~(GFP_DMA32|GFP_DMA);
+
+			if (dma_ops->alloc_coherent)
+				return dma_ops->alloc_coherent(dev, size,
+							       dma_handle, gfp);
+			return NULL;
+		}
+
+		memset(memory, 0, size);
+		if (!mmu) {
+			*dma_handle = bus;
+			return memory;
+		}
+	}
+
+	if (dma_ops->alloc_coherent) {
+		free_pages((unsigned long)memory, get_order(size));
+		gfp &= ~(GFP_DMA|GFP_DMA32);
+		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+	}
+
+	if (dma_ops->map_simple) {
+		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+						  size,
+						  PCI_DMA_BIDIRECTIONAL);
+		if (*dma_handle != bad_dma_address)
+			return memory;
+	}
+
+	if (panic_on_overflow)
+		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
+		      (unsigned long)size);
+	free_pages((unsigned long)memory, get_order(size));
+	return NULL;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/*
+ * Unmap coherent memory.
+ * The caller must ensure that the device has finished accessing the mapping.
+ */
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t bus)
+{
+	int order = get_order(size);
+	WARN_ON(irqs_disabled());	/* for portability */
+	if (dma_release_coherent(dev, order, vaddr))
+		return;
+	if (dma_ops->unmap_single)
+		dma_ops->unmap_single(dev, bus, size, 0);
+	free_pages((unsigned long)vaddr, order);
+}
+EXPORT_SYMBOL(dma_free_coherent);
 
 static int __init pci_iommu_init(void)
 {
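Upstream of the merged allocator, a driver first negotiates how much of
the address space its device can reach; the mask then drives the
GFP_DMA32/GFP_DMA retry logic in dma_alloc_coherent() above. A hedged
sketch (the 28-bit mask is an invented example of a device that cannot
reach all of RAM):

	#include <linux/dma-mapping.h>

	static int example_setup_dma(struct device *dev)
	{
		/* A 28-bit device: allocations that land above 256MB take
		   the "again:" retry path into the 16MB ZONE_DMA. */
		if (dma_set_mask(dev, DMA_28BIT_MASK))
			return -EIO;
		dev->coherent_dma_mask = DMA_28BIT_MASK;
		return 0;
	}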
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
deleted file mode 100644
index d2f70744a93a..000000000000
--- a/arch/x86/kernel/pci-dma_32.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * On i386 there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <asm/io.h>
-
-/* Dummy device used for NULL arguments (normally ISA). Better would
-   be probably a smaller DMA mask, but this is bug-to-bug compatible
-   to i386. */
-struct device fallback_dev = {
-	.bus_id = "fallback device",
-	.coherent_dma_mask = DMA_32BIT_MASK,
-	.dma_mask = &fallback_dev.coherent_dma_mask,
-};
-
-
-static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
-{
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-	int order = get_order(size);
-
-	if (mem) {
-		int page = bitmap_find_free_region(mem->bitmap, mem->size,
-						   order);
-		if (page >= 0) {
-			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-			*ret = mem->virt_base + (page << PAGE_SHIFT);
-			memset(*ret, 0, size);
-		}
-		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-			*ret = NULL;
-	}
-	return (mem != NULL);
-}
-
-static int dma_release_coherent(struct device *dev, int order, void *vaddr)
-{
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
-	if (mem && vaddr >= mem->virt_base && vaddr <
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-		bitmap_release_region(mem->bitmap, page, order);
-		return 1;
-	}
-	return 0;
-}
-
-/* Allocate DMA memory on node near device */
-noinline struct page *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
-	int node;
-
-	node = dev_to_node(dev);
-
-	return alloc_pages_node(node, gfp, order);
-}
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t gfp)
-{
-	void *ret = NULL;
-	struct page *page;
-	dma_addr_t bus;
-	int order = get_order(size);
-	unsigned long dma_mask = 0;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
-	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
-		return ret;
-
-	if (!dev)
-		dev = &fallback_dev;
-
-	dma_mask = dev->coherent_dma_mask;
-	if (dma_mask == 0)
-		dma_mask = DMA_32BIT_MASK;
-
-	if (dev->dma_mask == NULL)
-		return NULL;
-
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
-again:
-	page = dma_alloc_pages(dev, gfp, order);
-	if (page == NULL)
-		return NULL;
-
-	{
-		int high, mmu;
-		bus = page_to_phys(page);
-		ret = page_address(page);
-		high = (bus + size) >= dma_mask;
-		mmu = high;
-		if (force_iommu && !(gfp & GFP_DMA))
-			mmu = 1;
-		else if (high) {
-			free_pages((unsigned long)ret,
-				   get_order(size));
-
-			/* Don't use the 16MB ZONE_DMA unless absolutely
-			   needed. It's better to use remapping first. */
-			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-				goto again;
-			}
-
-			/* Let low level make its own zone decisions */
-			gfp &= ~(GFP_DMA32|GFP_DMA);
-
-			if (dma_ops->alloc_coherent)
-				return dma_ops->alloc_coherent(dev, size,
-							       dma_handle, gfp);
-			return NULL;
-
-		}
-		memset(ret, 0, size);
-		if (!mmu) {
-			*dma_handle = bus;
-			return ret;
-		}
-	}
-
-	if (dma_ops->alloc_coherent) {
-		free_pages((unsigned long)ret, get_order(size));
-		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
-	}
-
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(ret),
-						  size,
-						  PCI_DMA_BIDIRECTIONAL);
-		if (*dma_handle != bad_dma_address)
-			return ret;
-	}
-
-	if (panic_on_overflow)
-		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
-		      (unsigned long)size);
-	free_pages((unsigned long)ret, get_order(size));
-	return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
-{
-	int order = get_order(size);
-
-	WARN_ON(irqs_disabled());	/* for portability */
-	if (dma_release_coherent(dev, order, vaddr))
-		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, dma_handle, size, 0);
-	free_pages((unsigned long)vaddr, order);
-}
-EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
deleted file mode 100644
index 596c8c88f36d..000000000000
--- a/arch/x86/kernel/pci-dma_64.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/dmar.h>
-#include <linux/bootmem.h>
-#include <asm/proto.h>
-#include <asm/io.h>
-#include <asm/gart.h>
-#include <asm/calgary.h>
-
-
-/* Dummy device used for NULL arguments (normally ISA). Better would
-   be probably a smaller DMA mask, but this is bug-to-bug compatible
-   to i386. */
-struct device fallback_dev = {
-	.bus_id = "fallback device",
-	.coherent_dma_mask = DMA_32BIT_MASK,
-	.dma_mask = &fallback_dev.coherent_dma_mask,
-};
-
-/* Allocate DMA memory on node near device */
-noinline static void *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
-	int node;
-
-	node = dev_to_node(dev);
-
-	return alloc_pages_node(node, gfp, order);
-}
-
-#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
-#define dma_release_coherent(dev, order, vaddr) (0)
-/*
- * Allocate memory for a coherent mapping.
- */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t gfp)
-{
-	void *memory;
-	struct page *page;
-	unsigned long dma_mask = 0;
-	u64 bus;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
-	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
-		return memory;
-
-	if (!dev)
-		dev = &fallback_dev;
-	dma_mask = dev->coherent_dma_mask;
-	if (dma_mask == 0)
-		dma_mask = DMA_32BIT_MASK;
-
-	/* Device not DMA able */
-	if (dev->dma_mask == NULL)
-		return NULL;
-
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
-
-	/* Why <=? Even when the mask is smaller than 4GB it is often
-	   larger than 16MB and in this case we have a chance of
-	   finding fitting memory in the next higher zone first. If
-	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK)
-		gfp |= GFP_DMA32;
-
- again:
-	page = dma_alloc_pages(dev, gfp, get_order(size));
-	if (page == NULL)
-		return NULL;
-
-	{
-		int high, mmu;
-		bus = page_to_phys(page);
-		memory = page_address(page);
-		high = (bus + size) >= dma_mask;
-		mmu = high;
-		if (force_iommu && !(gfp & GFP_DMA))
-			mmu = 1;
-		else if (high) {
-			free_pages((unsigned long)memory,
-				   get_order(size));
-
-			/* Don't use the 16MB ZONE_DMA unless absolutely
-			   needed. It's better to use remapping first. */
-			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-				goto again;
-			}
-
-			/* Let low level make its own zone decisions */
-			gfp &= ~(GFP_DMA32|GFP_DMA);
-
-			if (dma_ops->alloc_coherent)
-				return dma_ops->alloc_coherent(dev, size,
-							       dma_handle, gfp);
-			return NULL;
-		}
-
-		memset(memory, 0, size);
-		if (!mmu) {
-			*dma_handle = bus;
-			return memory;
-		}
-	}
-
-	if (dma_ops->alloc_coherent) {
-		free_pages((unsigned long)memory, get_order(size));
-		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
-	}
-
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
-						  size,
-						  PCI_DMA_BIDIRECTIONAL);
-		if (*dma_handle != bad_dma_address)
-			return memory;
-	}
-
-	if (panic_on_overflow)
-		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
-	free_pages((unsigned long)memory, get_order(size));
-	return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-/*
- * Unmap coherent memory.
- * The caller must ensure that the device has finished accessing the mapping.
- */
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t bus)
-{
-	int order = get_order(size);
-	WARN_ON(irqs_disabled());	/* for portability */
-	if (dma_release_coherent(dev, order, vaddr))
-		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
-	free_pages((unsigned long)vaddr, order);
-}
-EXPORT_SYMBOL(dma_free_coherent);