author     Russell King <rmk+kernel@arm.linux.org.uk>   2009-11-19 11:31:39 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-11-24 12:41:35 -0500
commit     695ae0af5a52df09dffcc2ce2d625d56ef36ce14 (patch)
tree       c2114e79fac53f9eb29e0ed74b5aa4fa5a39b607 /arch/arm/mm/dma-mapping.c
parent     04da56943b416dd9fe7058abf8d5b9153164b3e9 (diff)
ARM: dma-mapping: factor dma_free_coherent() common code
We effectively have three implementations of dma_free_coherent() mixed up
in the code: the incoherent MMU, coherent MMU and noMMU versions.
The coherent MMU and noMMU versions are actually functionally identical.
The incoherent MMU version is almost the same, but with the additional
step of unmapping the secondary mapping.
Separate out this additional step into __dma_free_remap() and simplify
the resulting dma_free_coherent() code.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Greg Ungerer <gerg@uclinux.org>
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--   arch/arm/mm/dma-mapping.c   141
1 file changed, 68 insertions, 73 deletions
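For orientation, this is roughly the shape of dma_free_coherent() after the change, condensed from the new side of the diff below (on noMMU builds __dma_free_remap() is stubbed out as an empty macro, and on a coherent-MMU system the arch_is_coherent() test skips the remap step):

void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	/* per-device coherent memory, if any, is released first as before */
	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	/* only the incoherent-MMU case has a secondary mapping to tear down */
	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}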
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 62b4240b34b3..6b24e5ed85c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -207,7 +207,70 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = ~0;
 	return NULL;
 }
+
+static void __dma_free_remap(void *cpu_addr, size_t size)
+{
+	struct arm_vmregion *c;
+	unsigned long addr;
+	pte_t *ptep;
+	int idx;
+	u32 off;
+
+	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
+	if (!c) {
+		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+		       __func__, cpu_addr);
+		dump_stack();
+		return;
+	}
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	idx = CONSISTENT_PTE_INDEX(c->vm_start);
+	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+	ptep = consistent_pte[idx] + off;
+	addr = c->vm_start;
+	do {
+		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+		unsigned long pfn;
+
+		ptep++;
+		addr += PAGE_SIZE;
+		off++;
+		if (off >= PTRS_PER_PTE) {
+			off = 0;
+			ptep = consistent_pte[++idx];
+		}
+
+		if (!pte_none(pte) && pte_present(pte)) {
+			pfn = pte_pfn(pte);
+
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+
+				/*
+				 * x86 does not mark the pages reserved...
+				 */
+				ClearPageReserved(page);
+				continue;
+			}
+		}
+		printk(KERN_CRIT "%s: bad page in kernel page table\n",
+		       __func__);
+	} while (size -= PAGE_SIZE);
+
+	flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+	arm_vmregion_free(&consistent_head, c);
+}
+
 #else	/* !CONFIG_MMU */
+
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
@@ -224,6 +287,9 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = page_to_dma(dev, page);
 	return page_address(page);
 }
+
+#define __dma_free_remap(addr, size)	do { } while (0)
+
 #endif	/* CONFIG_MMU */
 
 /*
@@ -317,15 +383,8 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
-#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct arm_vmregion *c;
-	unsigned long addr;
-	pte_t *ptep;
-	int idx;
-	u32 off;
-
 	WARN_ON(irqs_disabled());
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
@@ -333,75 +392,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent()) {
-		__dma_free_buffer(dma_to_page(dev, handle), size);
-		return;
-	}
-
-	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
-	if (!c)
-		goto no_area;
-
-	if ((c->vm_end - c->vm_start) != size) {
-		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
-		       __func__, c->vm_end - c->vm_start, size);
-		dump_stack();
-		size = c->vm_end - c->vm_start;
-	}
-
-	idx = CONSISTENT_PTE_INDEX(c->vm_start);
-	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-	ptep = consistent_pte[idx] + off;
-	addr = c->vm_start;
-	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-		unsigned long pfn;
-
-		ptep++;
-		addr += PAGE_SIZE;
-		off++;
-		if (off >= PTRS_PER_PTE) {
-			off = 0;
-			ptep = consistent_pte[++idx];
-		}
-
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				/*
-				 * x86 does not mark the pages reserved...
-				 */
-				ClearPageReserved(page);
-				continue;
-			}
-		}
-		printk(KERN_CRIT "%s: bad page in kernel page table\n",
-		       __func__);
-	} while (size -= PAGE_SIZE);
-
-	flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-	arm_vmregion_free(&consistent_head, c);
+	if (!arch_is_coherent())
+		__dma_free_remap(cpu_addr, size);
 
 	__dma_free_buffer(dma_to_page(dev, handle), size);
-	return;
-
- no_area:
-	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
-	       __func__, cpu_addr);
-	dump_stack();
 }
-#else	/* !CONFIG_MMU */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
-{
-	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-		return;
-	__dma_free_buffer(dma_to_page(dev, handle), PAGE_ALIGN(size));
-}
-#endif	/* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*