 arch/arm/mm/dma-mapping.c | 165 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 128 insertions(+), 37 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 696f6ee259ee..deac58d5f1f7 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -42,9 +42,33 @@
 #include "dma.h"
 #include "mm.h"
 
+struct arm_dma_alloc_args {
+	struct device *dev;
+	size_t size;
+	gfp_t gfp;
+	pgprot_t prot;
+	const void *caller;
+	bool want_vaddr;
+};
+
+struct arm_dma_free_args {
+	struct device *dev;
+	size_t size;
+	void *cpu_addr;
+	struct page *page;
+	bool want_vaddr;
+};
+
+struct arm_dma_allocator {
+	void *(*alloc)(struct arm_dma_alloc_args *args,
+		       struct page **ret_page);
+	void (*free)(struct arm_dma_free_args *args);
+};
+
 struct arm_dma_buffer {
 	struct list_head list;
 	void *virt;
+	struct arm_dma_allocator *allocator;
 };
 
 static LIST_HEAD(arm_dma_bufs);
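The hunk above is the kernel's usual ops-table pattern: two argument bundles give every backend one call signature, an ops struct pairs each alloc with its matching free, and the per-buffer tracking record remembers which table produced it. A minimal, self-contained sketch of the same technique (plain userspace C; every name here is illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Argument bundles: one signature fits every backend. */
struct alloc_args { size_t size; };
struct free_args  { void *ptr; size_t size; };

/* Ops table: one alloc/free pair per backing strategy. */
struct allocator {
	void *(*alloc)(struct alloc_args *args);
	void (*free)(struct free_args *args);
};

static void *heap_alloc(struct alloc_args *args) { return malloc(args->size); }
static void heap_free(struct free_args *args) { free(args->ptr); }

static const struct allocator heap_allocator = {
	.alloc = heap_alloc,
	.free = heap_free,
};

/* A tracked buffer remembers which table produced it. */
struct buffer {
	void *ptr;
	const struct allocator *allocator;
};

int main(void)
{
	struct buffer buf = { .allocator = &heap_allocator };
	struct alloc_args aa = { .size = 64 };
	struct free_args fa;

	buf.ptr = buf.allocator->alloc(&aa);	/* dispatch through the table */
	fa = (struct free_args){ .ptr = buf.ptr, .size = 64 };
	buf.allocator->free(&fa);		/* guaranteed matching free */
	return 0;
}

Recording the table in the buffer is what later lets the free path dispatch without re-testing any conditions.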
@@ -617,7 +641,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
 #define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
-#define __free_from_pool(cpu_addr, size)			0
+#define __free_from_pool(cpu_addr, size)			do { } while (0)
 #define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
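One detail worth noting in this hunk: the !CONFIG_MMU stub for __free_from_pool() changes from the expression 0 to the statement do { } while (0). The old free path tested the helper's return value, so the stub had to be an expression; the refactored pool_allocator_free() below calls it and discards the result, so the stub becomes the conventional no-op statement, matching its neighbours. A small compilable illustration (the macro names are made up):

/* Expression stub: only usable where the result is consumed. */
#define FREE_STUB_EXPR(addr, size)	0
/* Statement stub: the conventional no-op for a void-like macro. */
#define FREE_STUB_STMT(addr, size)	do { } while (0)

static int old_style(void *addr)
{
	if (FREE_STUB_EXPR(addr, 16))	/* fine: result is tested */
		return 1;
	return 0;
}

static void new_style(void *addr)
{
	FREE_STUB_STMT(addr, 16);	/* fine: behaves like a statement;
					 * a bare `0;` here would trip
					 * -Wunused-value */
}

int main(void)
{
	old_style(0);
	new_style(0);
	return 0;
}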
@@ -635,7 +659,78 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 	return page_address(page);
 }
 
+static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
+				    struct page **ret_page)
+{
+	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
+				     ret_page);
+}
+
+static void simple_allocator_free(struct arm_dma_free_args *args)
+{
+	__dma_free_buffer(args->page, args->size);
+}
+
+static struct arm_dma_allocator simple_allocator = {
+	.alloc = simple_allocator_alloc,
+	.free = simple_allocator_free,
+};
+
+static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
+				 struct page **ret_page)
+{
+	return __alloc_from_contiguous(args->dev, args->size, args->prot,
+				       ret_page, args->caller,
+				       args->want_vaddr);
+}
+
+static void cma_allocator_free(struct arm_dma_free_args *args)
+{
+	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
+			       args->size, args->want_vaddr);
+}
+
+static struct arm_dma_allocator cma_allocator = {
+	.alloc = cma_allocator_alloc,
+	.free = cma_allocator_free,
+};
 
+static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
+				  struct page **ret_page)
+{
+	return __alloc_from_pool(args->size, ret_page);
+}
+
+static void pool_allocator_free(struct arm_dma_free_args *args)
+{
+	__free_from_pool(args->cpu_addr, args->size);
+}
+
+static struct arm_dma_allocator pool_allocator = {
+	.alloc = pool_allocator_alloc,
+	.free = pool_allocator_free,
+};
+
+static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
+				   struct page **ret_page)
+{
+	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
+				    args->prot, ret_page, args->caller,
+				    args->want_vaddr);
+}
+
+static void remap_allocator_free(struct arm_dma_free_args *args)
+{
+	if (args->want_vaddr)
+		__dma_free_remap(args->cpu_addr, args->size);
+
+	__dma_free_buffer(args->page, args->size);
+}
+
+static struct arm_dma_allocator remap_allocator = {
+	.alloc = remap_allocator_alloc,
+	.free = remap_allocator_free,
+};
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
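Each wrapper above is a thin adapter: the four pre-existing helpers all take different parameters, and the wrappers map the shared (args, ret_page) form onto whichever fields a backend actually needs — pool_allocator_alloc() reads only args->size, while remap_allocator_alloc() consumes nearly the whole bundle. A sketch of that adapter shape (userspace C with hypothetical backends):

#include <stdio.h>
#include <stdlib.h>

struct alloc_args {
	size_t size;
	unsigned int flags;
	const void *caller;
};

/* Backends with different natural signatures... */
static void *backend_small(size_t size)
{
	return malloc(size);
}

static void *backend_traced(size_t size, unsigned int flags,
			    const void *caller)
{
	printf("alloc %zu (flags %#x) for %p\n", size, flags, (void *)caller);
	return malloc(size);
}

/* ...adapted to one function-pointer type by picking fields out of
 * the shared argument bundle. */
static void *small_alloc(struct alloc_args *args)
{
	return backend_small(args->size);
}

static void *traced_alloc(struct alloc_args *args)
{
	return backend_traced(args->size, args->flags, args->caller);
}

int main(void)
{
	struct alloc_args args = { .size = 32, .flags = 0x1, .caller = "demo" };
	void *(*alloc)(struct alloc_args *) = traced_alloc;

	free(alloc(&args));
	free(small_alloc(&args));
	return 0;
}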
@@ -644,8 +739,16 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page = NULL;
 	void *addr;
-	bool want_vaddr;
+	bool allowblock, cma;
 	struct arm_dma_buffer *buf;
+	struct arm_dma_alloc_args args = {
+		.dev = dev,
+		.size = PAGE_ALIGN(size),
+		.gfp = gfp,
+		.prot = prot,
+		.caller = caller,
+		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+	};
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -674,29 +777,28 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	 * platform; see CONFIG_HUGETLBFS.
 	 */
 	gfp &= ~(__GFP_COMP);
+	args.gfp = gfp;
 
 	*handle = DMA_ERROR_CODE;
-	size = PAGE_ALIGN(size);
-	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
+	allowblock = gfpflags_allow_blocking(gfp);
+	cma = allowblock ? dev_get_cma_area(dev) : false;
 
-	if (nommu())
-		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
-		addr = __alloc_from_contiguous(dev, size, prot, &page,
-					       caller, want_vaddr);
-	else if (is_coherent)
-		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (!gfpflags_allow_blocking(gfp))
-		addr = __alloc_from_pool(size, &page);
+	if (cma)
+		buf->allocator = &cma_allocator;
+	else if (nommu() || is_coherent)
+		buf->allocator = &simple_allocator;
+	else if (allowblock)
+		buf->allocator = &remap_allocator;
 	else
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
-					    caller, want_vaddr);
+		buf->allocator = &pool_allocator;
+
+	addr = buf->allocator->alloc(&args, &page);
 
 	if (page) {
 		unsigned long flags;
 
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
-		buf->virt = want_vaddr ? addr : page;
+		buf->virt = args.want_vaddr ? addr : page;
 
 		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
 		list_add(&buf->list, &arm_dma_bufs);
@@ -705,7 +807,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		kfree(buf);
 	}
 
-	return want_vaddr ? addr : page;
+	return args.want_vaddr ? addr : page;
 }
 
 /*
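After this hunk, __dma_alloc() picks an allocator exactly once, with the precedence: CMA when blocking is allowed and the device has a CMA area; the simple page allocator on !MMU or for coherent devices; the remap allocator for blocking non-coherent allocations; the atomic pool otherwise. Note that dev_get_cma_area() is now consulted only when gfpflags_allow_blocking() holds. The same decision, restated as a self-contained helper (illustrative only; the real code assigns buf->allocator inline):

#include <stdbool.h>
#include <stdio.h>

enum allocator_kind { ALLOC_CMA, ALLOC_SIMPLE, ALLOC_REMAP, ALLOC_POOL };

static enum allocator_kind choose_allocator(bool allowblock, bool have_cma,
					    bool nommu, bool is_coherent)
{
	bool cma = allowblock ? have_cma : false;	/* CMA needs a blocking context */

	if (cma)
		return ALLOC_CMA;	/* contiguous region via CMA */
	if (nommu || is_coherent)
		return ALLOC_SIMPLE;	/* plain pages, no remapping */
	if (allowblock)
		return ALLOC_REMAP;	/* pages plus a non-cacheable remap */
	return ALLOC_POOL;		/* atomic pool: cannot sleep */
}

int main(void)
{
	/* e.g. an atomic, non-coherent allocation falls back to the pool */
	printf("%d\n", choose_allocator(false, true, false, false) == ALLOC_POOL);
	return 0;
}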
@@ -781,31 +883,20 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			   bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
-	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 	struct arm_dma_buffer *buf;
+	struct arm_dma_free_args args = {
+		.dev = dev,
+		.size = PAGE_ALIGN(size),
+		.cpu_addr = cpu_addr,
+		.page = page,
+		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+	};
 
 	buf = arm_dma_buffer_find(cpu_addr);
 	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
 		return;
 
-	size = PAGE_ALIGN(size);
-
-	if (nommu()) {
-		__dma_free_buffer(page, size);
-	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
-		return;
-	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr && !is_coherent)
-			__dma_free_remap(cpu_addr, size);
-		__dma_free_buffer(page, size);
-	} else {
-		/*
-		 * Non-atomic allocations cannot be freed with IRQs disabled
-		 */
-		WARN_ON(irqs_disabled());
-		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
-	}
-
+	buf->allocator->free(&args);
 	kfree(buf);
 }
 

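With the strategy pinned in buf->allocator at allocation time, the free path collapses to a lookup plus one indirect call, instead of re-deriving the nommu()/pool/CMA conditions that previously had to stay in sync with the alloc-side branch. The lookup is arm_dma_buffer_find(), which searches the arm_dma_bufs list seen earlier under its spinlock; a minimal sketch of that kind of address-keyed tracking (singly linked and lock-free here purely for brevity):

#include <stdio.h>

/* A tracking record keyed by the CPU address handed back at alloc time. */
struct buffer {
	struct buffer *next;
	void *virt;
};

static struct buffer *bufs;

static struct buffer *buffer_find(void *virt)
{
	struct buffer *b;

	for (b = bufs; b; b = b->next)
		if (b->virt == virt)
			return b;
	return NULL;	/* caller WARNs: freeing an invalid buffer */
}

int main(void)
{
	struct buffer b1 = { .next = NULL, .virt = (void *)0x1000 };

	bufs = &b1;
	printf("%s\n", buffer_find((void *)0x1000) ? "found" : "missing");
	return 0;
}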