author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/parisc/kernel/pci-dma.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/parisc/kernel/pci-dma.c')
-rw-r--r--	arch/parisc/kernel/pci-dma.c	578
1 file changed, 578 insertions, 0 deletions
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
new file mode 100644
index 000000000000..368cc095c99f
--- /dev/null
+++ b/arch/parisc/kernel/pci-dma.c
@@ -0,0 +1,578 @@
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>	/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>


static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
static unsigned long pcxl_used_pages = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static int pa11_dma_supported( struct device *dev, u64 mask)
{
	return 1;
}

static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		purge_tlb_start();
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end();
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}
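
/*
** The three helpers above walk the kernel page tables (pgd -> pmd -> pte)
** and install PAGE_KERNEL_UNC (uncached) translations for the physical
** pages backing a consistent DMA buffer, purging the kernel data TLB
** entry for each virtual page as it is mapped.
*/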

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;
		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start();
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end();
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}

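/*
** pcxl_res_map is a bitmap with one bit per page of the pcxl DMA window.
** The macros above look for a free run of pages_needed bits within an
** 8-, 16- or 32-bit word, starting at the rotating pcxl_res_hint and
** wrapping back to the start of the map if nothing is free past the hint.
*/
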
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
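
/*
** res_idx is a byte offset into the resource map and each byte covers
** eight pages, so the returned address is pcxl_dma_start plus
** res_idx * 8 * PAGE_SIZE -- hence the "<< (PAGE_SHIFT + 3)" above.
*/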

#define PCXL_FREE_MAPPINGS(idx, m, size) \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
	/* BUG_ON((*res_ptr & m) != m); */ \
	*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", 0);
	create_proc_info_entry("dino", 0, proc_gsc_root, pcxl_proc_info);
	return 0;
}

__initcall(pcxl_dma_init);

static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}
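
/*
** Consistent (coherent) allocation path: reserve a range in the uncached
** pcxl window, grab real pages with __get_free_pages(), flush them out of
** the data cache, then map them uncached at the reserved virtual address.
** The DMA handle handed back to the driver is the physical address of
** those pages.
*/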

static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	if (direction == DMA_TO_DEVICE)
	    return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
	    BUG();

	for (i = 0; i < nents; i++, sglist++ ) {
		unsigned long vaddr = sg_virt_addr(sglist);
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range(vaddr, sglist->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
	    BUG();

	if (direction == DMA_TO_DEVICE)
	    return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
	return;
}

static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
	    BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
	    BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported =		pa11_dma_supported,
	.alloc_consistent =		pa11_dma_alloc_consistent,
	.alloc_noncoherent =		pa11_dma_alloc_consistent,
	.free_consistent =		pa11_dma_free_consistent,
	.map_single =			pa11_dma_map_single,
	.unmap_single =			pa11_dma_unmap_single,
	.map_sg =			pa11_dma_map_sg,
	.unmap_sg =			pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};

static void *fail_alloc_consistent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, int flag)
{
	return NULL;
}

static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, int flag)
{
	void *addr = NULL;

	/* rely on kmalloc to be cacheline aligned */
	addr = kmalloc(size, flag);
	if(addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t iova)
{
	kfree(vaddr);
	return;
}

struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported =		pa11_dma_supported,
	.alloc_consistent =		fail_alloc_consistent,
	.alloc_noncoherent =		pa11_dma_alloc_noncoherent,
	.free_consistent =		pa11_dma_free_noncoherent,
	.map_single =			pa11_dma_map_single,
	.unmap_single =			pa11_dma_unmap_single,
	.map_sg =			pa11_dma_map_sg,
	.unmap_sg =			pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
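
/*
** Two dma_ops tables are exported: pcxl_dma_ops provides true consistent
** memory by remapping pages uncached in the pcxl window, while pcx_dma_ops
** refuses consistent allocations (fail_alloc_consistent) and offers only
** cacheable, kmalloc-backed noncoherent memory.
*/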


static int pcxl_proc_info(char *buf, char **start, off_t offset, int len)
{
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
	unsigned long total_pages = pcxl_res_size << 3;	/* 8 bits per byte */

	sprintf(buf, "\nDMA Mapping Area size : %d bytes (%d pages)\n",
		PCXL_DMA_MAP_SIZE,
		(pcxl_res_size << 3) ); /* 1 bit per page */

	sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
		buf, pcxl_res_size, pcxl_res_size << 3);	/* 8 bits per byte */

	strcat(buf, "     total:    free:    used:   % used:\n");
	sprintf(buf, "%sblocks  %8d %8ld %8ld %8ld%%\n", buf, pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	sprintf(buf, "%spages   %8ld %8ld %8ld %8ld%%\n", buf, total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

	strcat(buf, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    strcat(buf,"\n   ");
		sprintf(buf, "%s %08lx", buf, *res_ptr);
	}
	strcat(buf, "\n");
	return strlen(buf);
}