Diffstat (limited to 'arch/mips/mm/dma-coherent.c')
-rw-r--r--  arch/mips/mm/dma-coherent.c | 254
1 file changed, 0 insertions, 254 deletions
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
deleted file mode 100644
index 5697c6e250a3..000000000000
--- a/arch/mips/mm/dma-coherent.c
+++ /dev/null
@@ -1,254 +0,0 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
        __attribute__((alias("dma_alloc_noncoherent")));

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));

EXPORT_SYMBOL(dma_free_coherent);

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        return __pa(ptr);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset;
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
        struct page *page, unsigned long offset, int direction)
{
        return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */