 arch/arm/mm/dma-mapping.c | 141 ++++++++++++++++++++++------------------------
 1 file changed, 68 insertions(+), 73 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 62b4240b34b3..6b24e5ed85c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -207,7 +207,70 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = ~0;
 	return NULL;
 }
+
+static void __dma_free_remap(void *cpu_addr, size_t size)
+{
+	struct arm_vmregion *c;
+	unsigned long addr;
+	pte_t *ptep;
+	int idx;
+	u32 off;
+
+	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
+	if (!c) {
+		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+		       __func__, cpu_addr);
+		dump_stack();
+		return;
+	}
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	idx = CONSISTENT_PTE_INDEX(c->vm_start);
+	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+	ptep = consistent_pte[idx] + off;
+	addr = c->vm_start;
+	do {
+		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+		unsigned long pfn;
+
+		ptep++;
+		addr += PAGE_SIZE;
+		off++;
+		if (off >= PTRS_PER_PTE) {
+			off = 0;
+			ptep = consistent_pte[++idx];
+		}
+
+		if (!pte_none(pte) && pte_present(pte)) {
+			pfn = pte_pfn(pte);
+
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+
+				/*
+				 * x86 does not mark the pages reserved...
+				 */
+				ClearPageReserved(page);
+				continue;
+			}
+		}
+		printk(KERN_CRIT "%s: bad page in kernel page table\n",
+		       __func__);
+	} while (size -= PAGE_SIZE);
+
+	flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+	arm_vmregion_free(&consistent_head, c);
+}
+
 #else	/* !CONFIG_MMU */
+
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
@@ -224,6 +287,9 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = page_to_dma(dev, page);
 	return page_address(page);
 }
+
+#define __dma_free_remap(addr, size)	do { } while (0)
+
 #endif	/* CONFIG_MMU */
 
 /*
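The do { } while (0) stub above is the kernel's usual way to compile a function-like macro down to nothing while keeping it a single statement that requires a trailing semicolon, so call sites build identically with and without CONFIG_MMU. A minimal self-contained illustration of the idiom, using hypothetical names rather than anything from this patch:

#include <stdio.h>

#ifdef HAVE_REMAP			/* hypothetical stand-in for CONFIG_MMU */
static void demo_free_remap(void *addr, unsigned long size)
{
	printf("unmapping %lu bytes at %p\n", size, addr);
}
#else
/* Stub in the same do { } while (0) style: it expands to one full
 * statement and still demands its trailing ';', so callers compile
 * the same way whether or not the real function exists. */
#define demo_free_remap(addr, size)	do { } while (0)
#endif

int main(void)
{
	char buf[64];

	demo_free_remap(buf, sizeof(buf));	/* no-op unless HAVE_REMAP */
	return 0;
}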
@@ -317,15 +383,8 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
-#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct arm_vmregion *c;
-	unsigned long addr;
-	pte_t *ptep;
-	int idx;
-	u32 off;
-
 	WARN_ON(irqs_disabled());
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
@@ -333,75 +392,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent()) {
-		__dma_free_buffer(dma_to_page(dev, handle), size);
-		return;
-	}
-
-	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
-	if (!c)
-		goto no_area;
-
-	if ((c->vm_end - c->vm_start) != size) {
-		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
-		       __func__, c->vm_end - c->vm_start, size);
-		dump_stack();
-		size = c->vm_end - c->vm_start;
-	}
-
-	idx = CONSISTENT_PTE_INDEX(c->vm_start);
-	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-	ptep = consistent_pte[idx] + off;
-	addr = c->vm_start;
-	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-		unsigned long pfn;
-
-		ptep++;
-		addr += PAGE_SIZE;
-		off++;
-		if (off >= PTRS_PER_PTE) {
-			off = 0;
-			ptep = consistent_pte[++idx];
-		}
-
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				/*
-				 * x86 does not mark the pages reserved...
-				 */
-				ClearPageReserved(page);
-				continue;
-			}
-		}
-		printk(KERN_CRIT "%s: bad page in kernel page table\n",
-		       __func__);
-	} while (size -= PAGE_SIZE);
-
-	flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-	arm_vmregion_free(&consistent_head, c);
+	if (!arch_is_coherent())
+		__dma_free_remap(cpu_addr, size);
 
 	__dma_free_buffer(dma_to_page(dev, handle), size);
-	return;
-
- no_area:
-	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
-	       __func__, cpu_addr);
-	dump_stack();
 }
-#else	/* !CONFIG_MMU */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
-{
-	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-		return;
-	__dma_free_buffer(dma_to_page(dev, handle), PAGE_ALIGN(size));
-}
-#endif	/* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
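Net effect: the MMU-only page-table teardown moves into __dma_free_remap(), and the two dma_free_coherent() definitions collapse into one. For reference, the function as it should read once the patch applies, assembled from the context and '+' lines above (a reading aid, not authoritative kernel source):

/* dma_free_coherent() after this patch: one definition for both
 * CONFIG_MMU and !CONFIG_MMU builds. */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);	/* no-op stub on !CONFIG_MMU */

	__dma_free_buffer(dma_to_page(dev, handle), size);
}

On coherent or MMU-less configurations the remap step drops out, leaving only the buffer free, which is the path the removed #else branch used to open-code.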