author		Glauber Costa <gcosta@redhat.com>	2008-04-09 12:18:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:19:58 -0400
commit		098cb7f27ed69276e4db560a444b94b982e4bb8f (patch)
tree		6c6a26d9423d3320632e0fd029d9244a07e760da /arch/x86/kernel/pci-dma_64.c
parent		bb8ada95a7c11adf3dad4e8d5c55ef1650560592 (diff)
x86: integrate pci-dma.c
The code in pci-dma_{32,64}.c is now sufficiently
close; merge it into pci-dma.c.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/pci-dma_64.c')
-rw-r--r--	arch/x86/kernel/pci-dma_64.c	154
1 file changed, 0 insertions(+), 154 deletions(-)
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
deleted file mode 100644
index 596c8c88f36d..000000000000
--- a/arch/x86/kernel/pci-dma_64.c
+++ /dev/null
@@ -1,154 +0,0 @@
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>


/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

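	/* dev_to_node() can return -1 (no node affinity); in that case
	   alloc_pages_node() falls back to the current node. */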
	return alloc_pages_node(node, gfp, order);
}

#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
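/*
 * Note: x86_64 has no per-device coherent memory pool, so these stubs
 * always report "not handled"; they keep the allocation path below
 * structurally in step with the i386 version being merged.
 */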
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	struct page *page;
	unsigned long dma_mask = 0;
	u64 bus;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;

again:
	page = dma_alloc_pages(dev, gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;
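		/* high: the allocation reaches above the device's DMA
		   mask; mmu: the buffer must be remapped through an
		   IOMMU to be reachable by the device. */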
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
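		/* If the pages landed below the mask, no remapping is
		   needed: on the direct-mapped path the bus address is
		   simply the physical address. */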
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

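	/* panic_on_overflow is set by the iommu=panic boot option */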
	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
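	/* direction 0 is DMA_BIDIRECTIONAL, matching how the buffer
	   was mapped in dma_alloc_coherent() */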
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
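
For reference, a minimal sketch of how a driver consumes the two exports
above. The helper name, device pointer, and buffer size are illustrative
assumptions; only dma_alloc_coherent() and dma_free_coherent() come from
the file being removed:

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate, use, and free one page of coherent
 * DMA memory for a device the caller owns. */
static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_bus;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_bus into the hardware and access 'ring' from
	 * the CPU; both sides see the same memory without explicit
	 * sync calls ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
	return 0;
}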