diff options
 arch/arc/mm/dma.c | 32 ++++++++++++--------------
 1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index c0b49399225d..c75d5c3470e3 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -24,30 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	phys_addr_t paddr;
 	void *kvaddr;
-	int need_coh = 1, need_kvaddr = 0;
+	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
+
+	/*
+	 * __GFP_HIGHMEM flag is cleared by upper layer functions
+	 * (in include/linux/dma-mapping.h) so we should never get a
+	 * __GFP_HIGHMEM here.
+	 */
+	BUG_ON(gfp & __GFP_HIGHMEM);
 
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
 
-	if (attrs & DMA_ATTR_NON_CONSISTENT)
-		need_coh = 0;
-
-	/*
-	 * - A coherent buffer needs MMU mapping to enforce non-cachability
-	 * - A highmem page needs a virtual handle (hence MMU mapping)
-	 *   independent of cachability
-	 */
-	if (PageHighMem(page) || need_coh)
-		need_kvaddr = 1;
-
 	/* This is linear addr (0x8000_0000 based) */
 	paddr = page_to_phys(page);
 
 	*dma_handle = paddr;
 
-	/* This is kernel Virtual address (0x7000_0000 based) */
-	if (need_kvaddr) {
+	/*
+	 * A coherent buffer needs MMU mapping to enforce non-cachability.
+	 * kvaddr is kernel Virtual address (0x7000_0000 based).
+	 */
+	if (need_coh) {
 		kvaddr = ioremap_nocache(paddr, size);
 		if (kvaddr == NULL) {
 			__free_pages(page, order);
@@ -78,11 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 {
 	phys_addr_t paddr = dma_handle;
 	struct page *page = virt_to_page(paddr);
-	int is_non_coh = 1;
-
-	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT);
 
-	if (PageHighMem(page) || !is_non_coh)
+	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
 		iounmap((void __force __iomem *)vaddr);
 
 	__free_pages(page, get_order(size));
