Diffstat (limited to 'arch/arm/mm/dma-mapping.c')

 arch/arm/mm/dma-mapping.c | 56 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 45 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4bc43e535d3b..82a093cee09a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -148,6 +149,7 @@ static int __init consistent_init(void)
 {
 	int ret = 0;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	int i = 0;
@@ -155,7 +157,15 @@ static int __init consistent_init(void)
 
 	do {
 		pgd = pgd_offset(&init_mm, base);
-		pmd = pmd_alloc(&init_mm, pgd, base);
+
+		pud = pud_alloc(&init_mm, pgd, base);
+		if (!pud) {
+			printk(KERN_ERR "%s: no pud tables\n", __func__);
+			ret = -ENOMEM;
+			break;
+		}
+
+		pmd = pmd_alloc(&init_mm, pud, base);
 		if (!pmd) {
 			printk(KERN_ERR "%s: no pmd tables\n", __func__);
 			ret = -ENOMEM;
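The change above makes consistent_init() walk pgd -> pud -> pmd, matching the kernel's generic four-level page-table API; on ARM's two-level layout the pud folds back onto the pgd, so pud_alloc() is effectively free. A minimal userspace sketch of the same alloc-or-fail walk pattern (simulated types and names, not kernel code):

/*
 * Userspace sketch (not kernel code) of the alloc-or-fail table walk
 * the patch extends: each level is allocated on demand, and a failure
 * at any level aborts the walk.  pmd_alloc_sim() is a made-up name.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

struct level2 { unsigned long pte[ENTRIES]; };
struct level1 { struct level2 *pmd[ENTRIES]; };

static struct level2 *pmd_alloc_sim(struct level1 *pud, unsigned int idx)
{
	if (!pud->pmd[idx])
		pud->pmd[idx] = calloc(1, sizeof(struct level2));
	return pud->pmd[idx];	/* NULL on allocation failure */
}

int main(void)
{
	struct level1 pud = { { 0 } };
	struct level2 *pmd = pmd_alloc_sim(&pud, 1);

	if (!pmd) {
		fprintf(stderr, "no pmd tables\n");
		return 1;
	}
	pmd->pte[0] = 0xdeadbeef;
	printf("pte[0] = %#lx\n", pmd->pte[0]);
	return 0;
}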
@@ -198,7 +208,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	 * fragmentation of the DMA space, and also prevents allocations
 	 * smaller than a section from crossing a section boundary.
 	 */
-	bit = fls(size - 1) + 1;
+	bit = fls(size - 1);
 	if (bit > SECTION_SHIFT)
 		bit = SECTION_SHIFT;
 	align = 1 << bit;
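This alignment fix is worth a worked example: 1 << fls(size - 1) is already the smallest power of two >= size, so the removed "+ 1" doubled every allocation's alignment and wasted consistent-mapping space. A runnable userspace check, with fls() approximated via __builtin_clzl (kernel 1-based semantics assumed):

/*
 * Userspace demonstration of the alignment fix: the old code's
 * "fls(size - 1) + 1" rounded one power of two too far.
 */
#include <stdio.h>

#define SECTION_SHIFT 20	/* 1 MiB sections on ARM */

static int fls_long(unsigned long x)
{
	return x ? 8 * (int)sizeof(x) - __builtin_clzl(x) : 0;
}

int main(void)
{
	unsigned long size = 0x2000;		/* 8 KiB request */
	int bit = fls_long(size - 1);		/* new code: 13 */

	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	printf("size %#lx -> align %#lx (old code: %#lx)\n",
	       size, 1UL << bit, 1UL << (bit + 1));
	return 0;
}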
@@ -311,7 +321,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		addr = page_address(page);
 
 	if (addr)
-		*handle = page_to_dma(dev, page);
+		*handle = pfn_to_dma(dev, page_to_pfn(page));
 
 	return addr;
 }
@@ -406,7 +416,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	if (!arch_is_coherent())
 		__dma_free_remap(cpu_addr, size);
 
-	__dma_free_buffer(dma_to_page(dev, handle), size);
+	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
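Both hunks above replace the page_to_dma()/dma_to_page() helpers with PFN-based conversions, keeping struct page out of the bus-address math. A hedged userspace sketch of the shift-and-offset arithmetic these helpers reduce to on a platform with a linear DMA mapping (the bus offset is invented for the demo; real platforms define their own translation):

/*
 * Userspace sketch of PFN-based DMA address translation.  On a linear
 * mapping, pfn_to_dma()/dma_to_pfn() reduce to arithmetic like this.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static const uint64_t bus_offset = 0x80000000;	/* hypothetical */

static uint64_t pfn_to_dma_sim(uint64_t pfn)
{
	return (pfn << PAGE_SHIFT) + bus_offset;
}

static uint64_t dma_to_pfn_sim(uint64_t handle)
{
	return (handle - bus_offset) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t pfn = 0x1234;
	uint64_t handle = pfn_to_dma_sim(pfn);

	printf("pfn %#llx -> dma %#llx -> pfn %#llx\n",
	       (unsigned long long)pfn, (unsigned long long)handle,
	       (unsigned long long)dma_to_pfn_sim(handle));
	return 0;
}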
@@ -480,10 +490,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			vaddr = kmap_atomic(page);
 			op(vaddr + offset, len, dir);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(vaddr);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
@@ -523,6 +533,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
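The new PG_dcache_clean marking records that, once a device-to-CPU transfer covering the whole page has been invalidated, the D-cache holds nothing stale for that page, so later flush_dcache_page() calls can skip the work. A userspace sketch of the flag bookkeeping (bit number and types invented for the demo; the kernel uses atomic set_bit() on page->flags):

/*
 * Userspace sketch of per-page "dcache clean" bookkeeping.  Only a
 * whole-page, device-writable transfer may mark the page clean.
 */
#include <stdio.h>

#define PG_DCACHE_CLEAN	3		/* arbitrary bit for the demo */
#define PAGE_SIZE	4096UL

struct fake_page { unsigned long flags; };

static void dev_to_cpu_done(struct fake_page *page, unsigned long off,
			    unsigned long size, int to_device)
{
	if (!to_device && off == 0 && size >= PAGE_SIZE)
		page->flags |= 1UL << PG_DCACHE_CLEAN;
}

int main(void)
{
	struct fake_page page = { 0 };

	dev_to_cpu_done(&page, 0, PAGE_SIZE, 0);
	printf("dcache clean: %lu\n",
	       (page.flags >> PG_DCACHE_CLEAN) & 1);
	return 0;
}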
@@ -548,17 +564,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i, j;
 
+	BUG_ON(!valid_dma_direction(dir));
+
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
+	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
 bad_mapping:
 	for_each_sg(sg, s, i, j)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
 EXPORT_SYMBOL(dma_map_sg);
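The bad_mapping label above is the classic partial-failure unwind: for_each_sg(sg, s, i, j) revisits only the i entries that were successfully mapped before the failure. A runnable userspace analogue of the same pattern (hypothetical resources, not the DMA API):

/*
 * Userspace analogue of the bad_mapping unwind: on failure at entry i,
 * release only the entries already set up, then report total failure.
 */
#include <stdio.h>
#include <stdlib.h>

#define NENTS 4

int main(void)
{
	void *mapped[NENTS] = { 0 };
	int i;

	for (i = 0; i < NENTS; i++) {
		mapped[i] = (i == 2) ? NULL : malloc(64);  /* fail at 2 */
		if (!mapped[i])
			goto bad_mapping;
	}
	return 0;

bad_mapping:
	fprintf(stderr, "mapping failed at entry %d; unwinding\n", i);
	while (i-- > 0)			/* free only what was mapped */
		free(mapped[i]);
	return 1;
}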
@@ -567,7 +586,7 @@ EXPORT_SYMBOL(dma_map_sg);
  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
- * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  *
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
@@ -579,8 +598,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i;
 
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+
 	for_each_sg(sg, s, nents, i)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
 EXPORT_SYMBOL(dma_unmap_sg);
 
@@ -605,6 +626,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
@@ -629,5 +652,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES	4096
+
+static int __init dma_debug_do_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(dma_debug_do_init);
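fs_initcall() arranges for dma_debug_do_init() to run once at a fixed point during boot, preallocating the dma-debug tracking pool. As a rough userspace analogue (not the kernel mechanism), a GCC/Clang constructor attribute gives similar run-before-main registration:

/*
 * Userspace analogue of the fs_initcall() registration: a constructor
 * runs before main(), much as an initcall runs during boot.  The pool
 * size mirrors the patch; the "pool" itself is simulated.
 */
#include <stdio.h>

#define PREALLOC_DMA_DEBUG_ENTRIES 4096

static int debug_entries;

__attribute__((constructor))
static void dma_debug_do_init_sim(void)
{
	debug_entries = PREALLOC_DMA_DEBUG_ENTRIES;	/* preallocate */
}

int main(void)
{
	printf("debug pool: %d entries\n", debug_entries);
	return 0;
}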