Diffstat (limited to 'arch/ppc/kernel/dma-mapping.c')
-rw-r--r--  arch/ppc/kernel/dma-mapping.c | 447
1 file changed, 447 insertions(+), 0 deletions(-)
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
new file mode 100644
index 000000000000..e0c631cf96b0
--- /dev/null
+++ b/arch/ppc/kernel/dma-mapping.c
@@ -0,0 +1,447 @@
/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function returns
 * the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>

int map_page(unsigned long va, phys_addr_t pa, int flags);

#include <asm/tlbflush.h>

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
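
/*
 * Worked example (values hypothetical): with PAGE_SHIFT = 12 and
 * CONFIG_CONSISTENT_START = 0xff100000, a consistent-area address of
 * 0xff102000 gives CONSISTENT_OFFSET() = (0xff102000 - 0xff100000) >> 12
 * = 2, i.e. the third pte in the consistent_pte table below.
 */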

/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, int gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
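
/*
 * Illustration of the first-fit walk above (addresses hypothetical):
 * if the list already holds regions [CONSISTENT_BASE, CONSISTENT_BASE +
 * 0x2000) and [CONSISTENT_BASE + 0x3000, CONSISTENT_BASE + 0x4000), a
 * 0x1000-byte request walks past the first region and lands in the gap
 * at CONSISTENT_BASE + 0x2000, with the new entry linked in before the
 * region that bounds that gap.
 */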

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit; /* ISA default */

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_bus(page);

		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
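
#if 0
/*
 * Illustrative sketch, not part of this commit: on a platform with
 * CONFIG_NOT_COHERENT_CACHE set, the generic dma_alloc_coherent()
 * wrapper resolves to __dma_alloc_coherent() above.  The function
 * name, device pointer and buffer size here are hypothetical.
 */
static void *example_get_ring_buffer(struct device *dev, dma_addr_t *dma)
{
	/* Returns an uncached mapping inside CONSISTENT_BASE..CONSISTENT_END;
	 * *dma receives the bus address suitable for programming hardware.
	 * The backing pages were zeroed and flushed before being remapped. */
	return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
}
#endif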

/*
 * Free a region of coherent memory as allocated by the routine above.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
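
#if 0
/*
 * Illustrative counterpart to the allocation sketch above (again
 * hypothetical): the size passed in must match the size that was
 * allocated, since __dma_free_coherent() looks the region up by its
 * virtual address and sanity-checks the length.
 */
static void example_put_ring_buffer(struct device *dev, void *ring,
				    dma_addr_t dma)
{
	dma_free_coherent(dev, PAGE_SIZE, ring, dma);
}
#endif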

/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	spin_lock(&init_mm.page_table_lock);

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	spin_unlock(&init_mm.page_table_lock);

	return ret;
}

core_initcall(dma_alloc_init);

/*
 * Make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:	/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
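
#if 0
/*
 * Illustrative sketch (hypothetical helper and buffer): streaming DMA
 * on a noncoherent platform brackets each transfer with __dma_sync().
 * The arch/ppc dma_map_single()/dma_sync_single_for_*() helpers
 * typically perform this call on the driver's behalf.
 */
static dma_addr_t example_map_for_device(void *buf, size_t len)
{
	/* Write back dirty cache lines before the device reads the buffer. */
	__dma_sync(buf, len, DMA_TO_DEVICE);
	return virt_to_bus(buf);
}
#endif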

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep, so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	/* First segment runs from 'offset' to the end of the first page,
	 * or to 'size' if the buffer is smaller than that. */
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	/* One segment for the (possibly partial) first page, plus one per
	 * remaining page's worth of data. */
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
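
/*
 * Worked example for the segmentation above (numbers hypothetical,
 * PAGE_SIZE = 0x1000): for offset = 0x800 and size = 0x1800, the first
 * segment covers the last 0x800 bytes of page 0, and nr_segs =
 * 1 + (0x1000 + 0xfff)/0x1000 = 2, so the second segment covers all
 * 0x1000 bytes of page 1 -- 0x1800 bytes in total, each segment synced
 * through its own kmap_atomic() window.
 */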

/*
 * __dma_sync_page() makes memory consistent; identical to __dma_sync(),
 * but takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
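
#if 0
/*
 * Illustrative sketch (hypothetical helper): page-based mappings such
 * as dma_map_page()/dma_map_sg() use __dma_sync_page() so that highmem
 * pages, which may have no permanent kernel mapping, are still synced
 * correctly before the bus address is handed to the device.
 */
static dma_addr_t example_map_page(struct page *pg, unsigned long off,
				   size_t len, int dir)
{
	__dma_sync_page(pg, off, len, dir);
	return page_to_bus(pg) + off;
}
#endif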