Diffstat (limited to 'arch/x86/kernel/pci-dma_32.c')
-rw-r--r--	arch/x86/kernel/pci-dma_32.c	173
1 file changed, 0 insertions, 173 deletions
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
deleted file mode 100644
index d2f70744a93a..000000000000
--- a/arch/x86/kernel/pci-dma_32.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * On i386 there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <asm/io.h>
-
-/* Dummy device used for NULL arguments (normally ISA). Better would
-   be probably a smaller DMA mask, but this is bug-to-bug compatible
-   to i386. */
-struct device fallback_dev = {
-	.bus_id = "fallback device",
-	.coherent_dma_mask = DMA_32BIT_MASK,
-	.dma_mask = &fallback_dev.coherent_dma_mask,
-};
-
-
-static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
-{
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-	int order = get_order(size);
-
-	if (mem) {
-		int page = bitmap_find_free_region(mem->bitmap, mem->size,
-						   order);
-		if (page >= 0) {
-			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-			*ret = mem->virt_base + (page << PAGE_SHIFT);
-			memset(*ret, 0, size);
-		}
-		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-			*ret = NULL;
-	}
-	return (mem != NULL);
-}
-
-static int dma_release_coherent(struct device *dev, int order, void *vaddr)
-{
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
-	if (mem && vaddr >= mem->virt_base && vaddr <
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-		bitmap_release_region(mem->bitmap, page, order);
-		return 1;
-	}
-	return 0;
-}
-
-/* Allocate DMA memory on node near device */
-noinline struct page *
-dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
-	int node;
-
-	node = dev_to_node(dev);
-
-	return alloc_pages_node(node, gfp, order);
-}
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t gfp)
-{
-	void *ret = NULL;
-	struct page *page;
-	dma_addr_t bus;
-	int order = get_order(size);
-	unsigned long dma_mask = 0;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
-	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
-		return ret;
-
-	if (!dev)
-		dev = &fallback_dev;
-
-	dma_mask = dev->coherent_dma_mask;
-	if (dma_mask == 0)
-		dma_mask = DMA_32BIT_MASK;
-
-	if (dev->dma_mask == NULL)
-		return NULL;
-
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
-again:
-	page = dma_alloc_pages(dev, gfp, order);
-	if (page == NULL)
-		return NULL;
-
-	{
-		int high, mmu;
-		bus = page_to_phys(page);
-		ret = page_address(page);
-		high = (bus + size) >= dma_mask;
-		mmu = high;
-		if (force_iommu && !(gfp & GFP_DMA))
-			mmu = 1;
-		else if (high) {
-			free_pages((unsigned long)ret,
-				   get_order(size));
-
-			/* Don't use the 16MB ZONE_DMA unless absolutely
-			   needed. It's better to use remapping first. */
-			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
-				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-				goto again;
-			}
-
-			/* Let low level make its own zone decisions */
-			gfp &= ~(GFP_DMA32|GFP_DMA);
-
-			if (dma_ops->alloc_coherent)
-				return dma_ops->alloc_coherent(dev, size,
-							       dma_handle, gfp);
-			return NULL;
-
-		}
-		memset(ret, 0, size);
-		if (!mmu) {
-			*dma_handle = bus;
-			return ret;
-		}
-	}
-
-	if (dma_ops->alloc_coherent) {
-		free_pages((unsigned long)ret, get_order(size));
-		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
-	}
-
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(ret),
-						  size,
-						  PCI_DMA_BIDIRECTIONAL);
-		if (*dma_handle != bad_dma_address)
-			return ret;
-	}
-
-	if (panic_on_overflow)
-		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
-		      (unsigned long)size);
-	free_pages((unsigned long)ret, get_order(size));
-	return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
-{
-	int order = get_order(size);
-
-	WARN_ON(irqs_disabled());	/* for portability */
-	if (dma_release_coherent(dev, order, vaddr))
-		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, dma_handle, size, 0);
-	free_pages((unsigned long)vaddr, order);
-}
-EXPORT_SYMBOL(dma_free_coherent);
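
For reference, below is a minimal sketch (not part of the patch) of how a driver typically consumed the interface implemented in the deleted file. The device pointer, the ring-buffer size and the register-programming step are illustrative assumptions; only dma_alloc_coherent() and dma_free_coherent() themselves come from the code above.

/* Minimal usage sketch, assuming a hypothetical 4 KiB descriptor ring. */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define MY_RING_BYTES	4096		/* assumed descriptor ring size */

static void *ring_cpu;			/* kernel virtual address of the ring */
static dma_addr_t ring_dma;		/* bus address handed to the device */

static int my_ring_setup(struct device *my_dev)
{
	/*
	 * Returns a cache-coherent buffer and fills in the bus address;
	 * on i386 this boils down to the page allocation path shown in
	 * the deleted file above.
	 */
	ring_cpu = dma_alloc_coherent(my_dev, MY_RING_BYTES, &ring_dma,
				      GFP_KERNEL);
	if (!ring_cpu)
		return -ENOMEM;

	/* program ring_dma into the (hypothetical) device registers here */
	return 0;
}

static void my_ring_teardown(struct device *my_dev)
{
	/* must run with interrupts enabled, see the WARN_ON() above */
	dma_free_coherent(my_dev, MY_RING_BYTES, ring_cpu, ring_dma);
}

As the deleted code shows, passing a NULL device falls back to fallback_dev with a 32-bit coherent DMA mask, and requests that exceed the device's coherent mask are retried from ZONE_DMA or handed to dma_ops->alloc_coherent().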