 arch/arm/Kconfig                 |   8
 arch/arm/include/asm/device.h    |   3
 arch/arm/include/asm/dma-iommu.h |  34
 arch/arm/mm/dma-mapping.c        | 712
 arch/arm/mm/vmregion.h           |   2
 5 files changed, 747 insertions(+), 12 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c8111c58a982..97478a5d316f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -46,6 +46,14 @@ config ARM
 config ARM_HAS_SG_CHAIN
        bool
 
+config NEED_SG_DMA_LENGTH
+       bool
+
+config ARM_DMA_USE_IOMMU
+       select NEED_SG_DMA_LENGTH
+       select ARM_HAS_SG_CHAIN
+       bool
+
 config HAVE_PWM
        bool
 
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 6e2cb0ee770d..b69c0d3285f8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -14,6 +14,9 @@ struct dev_archdata {
 #ifdef CONFIG_IOMMU_API
        void *iommu; /* private IOMMU data */
 #endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+       struct dma_iommu_mapping *mapping;
+#endif
 };
 
 struct omap_device;
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..799b09409fad
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+       /* iommu specific data */
+       struct iommu_domain     *domain;
+
+       void                    *bitmap;
+       size_t                  bits;
+       unsigned int            order;
+       dma_addr_t              base;
+
+       spinlock_t              lock;
+       struct kref             kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                        int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+                           struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
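
The header above only declares the new API. Below is a minimal usage sketch, assuming a hypothetical platform driver probe and illustrative base/size values (these names and numbers are not part of the patch), of how a device gets wired to an IOMMU-backed DMA address space:

/* Sketch only: 'example_probe', the 128 MiB window at 0x80000000 and the
 * order-0 granularity are made-up example values. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/sizes.h>
#include <asm/dma-iommu.h>

static int example_probe(struct platform_device *pdev)
{
        struct dma_iommu_mapping *mapping;
        int err;

        /* Reserve a 128 MiB IO address window, managed in 1-page chunks. */
        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x80000000, SZ_128M, 0);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        /* Replace the device's dma_map_ops with the IOMMU-aware version. */
        err = arm_iommu_attach_device(&pdev->dev, mapping);
        if (err) {
                arm_iommu_release_mapping(mapping);
                return err;
        }

        /* From here on, dma_alloc_coherent()/dma_map_sg() on &pdev->dev
         * allocate IO virtual addresses from this mapping. */
        return 0;
}
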
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 2501866a904c..3ac47604a6c0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -19,6 +19,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -26,6 +28,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 #include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
 
 #include "mm.h"
 
@@ -155,6 +158,21 @@ static u64 get_coherent_dma_mask(struct device *dev)
        return mask;
 }
 
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+       void *ptr;
+       /*
+        * Ensure that the allocated pages are zeroed, and that any data
+        * lurking in the kernel direct-mapped region is invalidated.
+        */
+       ptr = page_address(page);
+       if (ptr) {
+               memset(ptr, 0, size);
+               dmac_flush_range(ptr, ptr + size);
+               outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       }
+}
+
 /*
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask. Note that 'size' must be page aligned.
@@ -163,7 +181,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 {
        unsigned long order = get_order(size);
        struct page *page, *p, *e;
-       void *ptr;
        u64 mask = get_coherent_dma_mask(dev);
 
 #ifdef CONFIG_DMA_API_DEBUG
@@ -192,14 +209,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);
 
-       /*
-        * Ensure that the allocated pages are zeroed, and that any data
-        * lurking in the kernel direct-mapped region is invalidated.
-        */
-       ptr = page_address(page);
-       memset(ptr, 0, size);
-       dmac_flush_range(ptr, ptr + size);
-       outer_flush_range(__pa(ptr), __pa(ptr) + size);
+       __dma_clear_buffer(page, size);
 
        return page;
 }
@@ -348,7 +358,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
                u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
                pte = consistent_pte[idx] + off;
-               c->vm_pages = page;
+               c->priv = page;
 
                do {
                        BUG_ON(!pte_none(*pte));
@@ -509,13 +519,14 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
        if (c) {
                unsigned long off = vma->vm_pgoff;
+               struct page *pages = c->priv;
 
                kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
 
                if (off < kern_size &&
                    user_size <= (kern_size - off)) {
                        ret = remap_pfn_range(vma, vma->vm_start,
-                                             page_to_pfn(c->vm_pages) + off,
+                                             page_to_pfn(pages) + off,
                                              user_size << PAGE_SHIFT,
                                              vma->vm_page_prot);
                }
@@ -654,6 +665,9 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        int i, j;
 
        for_each_sg(sg, s, nents, i) {
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+               s->dma_length = s->length;
+#endif
                s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
                                                s->length, dir, attrs);
                if (dma_mapping_error(dev, s->dma_address))
@@ -762,3 +776,679 @@ static int __init dma_debug_do_init(void)
        return 0;
 }
 fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+                                     size_t size)
+{
+       unsigned int order = get_order(size);
+       unsigned int align = 0;
+       unsigned int count, start;
+       unsigned long flags;
+
+       count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+                (1 << mapping->order) - 1) >> mapping->order;
+
+       if (order > mapping->order)
+               align = (1 << (order - mapping->order)) - 1;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+                                          count, align);
+       if (start > mapping->bits) {
+               spin_unlock_irqrestore(&mapping->lock, flags);
+               return DMA_ERROR_CODE;
+       }
+
+       bitmap_set(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+
+       return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+                              dma_addr_t addr, size_t size)
+{
+       unsigned int start = (addr - mapping->base) >>
+                            (mapping->order + PAGE_SHIFT);
+       unsigned int count = ((size >> PAGE_SHIFT) +
+                             (1 << mapping->order) - 1) >> mapping->order;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mapping->lock, flags);
+       bitmap_clear(mapping->bitmap, start, count);
+       spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+       struct page **pages;
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i = 0;
+
+       if (array_size <= PAGE_SIZE)
+               pages = kzalloc(array_size, gfp);
+       else
+               pages = vzalloc(array_size);
+       if (!pages)
+               return NULL;
+
+       while (count) {
+               int j, order = __ffs(count);
+
+               pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+               while (!pages[i] && order)
+                       pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+               if (!pages[i])
+                       goto error;
+
+               if (order)
+                       split_page(pages[i], order);
+               j = 1 << order;
+               while (--j)
+                       pages[i + j] = pages[i] + j;
+
+               __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+               i += 1 << order;
+               count -= 1 << order;
+       }
+
+       return pages;
+error:
+       while (--i)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size < PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i;
+       for (i = 0; i < count; i++)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size < PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return 0;
+}
+
+/*
+ * Create a CPU mapping for a specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+       struct arm_vmregion *c;
+       size_t align;
+       size_t count = size >> PAGE_SHIFT;
+       int bit;
+
+       if (!consistent_pte[0]) {
+               pr_err("%s: not initialised\n", __func__);
+               dump_stack();
+               return NULL;
+       }
+
+       /*
+        * Align the virtual region allocation - maximum alignment is
+        * a section size, minimum is a page size. This helps reduce
+        * fragmentation of the DMA space, and also prevents allocations
+        * smaller than a section from crossing a section boundary.
+        */
+       bit = fls(size - 1);
+       if (bit > SECTION_SHIFT)
+               bit = SECTION_SHIFT;
+       align = 1 << bit;
+
+       /*
+        * Allocate a virtual address in the consistent mapping region.
+        */
+       c = arm_vmregion_alloc(&consistent_head, align, size,
+                           gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+       if (c) {
+               pte_t *pte;
+               int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+               int i = 0;
+               u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+               pte = consistent_pte[idx] + off;
+               c->priv = pages;
+
+               do {
+                       BUG_ON(!pte_none(*pte));
+
+                       set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+                       pte++;
+                       off++;
+                       i++;
+                       if (off >= PTRS_PER_PTE) {
+                               off = 0;
+                               pte = consistent_pte[++idx];
+                       }
+               } while (i < count);
+
+               dsb();
+
+               return (void *)c->vm_start;
+       }
+       return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       dma_addr_t dma_addr, iova;
+       int i, ret = DMA_ERROR_CODE;
+
+       dma_addr = __alloc_iova(mapping, size);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       iova = dma_addr;
+       for (i = 0; i < count; ) {
+               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+               phys_addr_t phys = page_to_phys(pages[i]);
+               unsigned int len, j;
+
+               for (j = i + 1; j < count; j++, next_pfn++)
+                       if (page_to_pfn(pages[j]) != next_pfn)
+                               break;
+
+               len = (j - i) << PAGE_SHIFT;
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               iova += len;
+               i = j;
+       }
+       return dma_addr;
+fail:
+       iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+       __free_iova(mapping, dma_addr, size);
+       return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+       /*
+        * add optional in-page offset from iova to size and align
+        * result to page size
+        */
+       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+       iova &= PAGE_MASK;
+
+       iommu_unmap(mapping->domain, iova, size);
+       __free_iova(mapping, iova, size);
+       return 0;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+           dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+       struct page **pages;
+       void *addr = NULL;
+
+       *handle = DMA_ERROR_CODE;
+       size = PAGE_ALIGN(size);
+
+       pages = __iommu_alloc_buffer(dev, size, gfp);
+       if (!pages)
+               return NULL;
+
+       *handle = __iommu_create_mapping(dev, pages, size);
+       if (*handle == DMA_ERROR_CODE)
+               goto err_buffer;
+
+       addr = __iommu_alloc_remap(pages, size, gfp, prot);
+       if (!addr)
+               goto err_mapping;
+
+       return addr;
+
+err_mapping:
+       __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+       __iommu_free_buffer(dev, pages, size);
+       return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                   struct dma_attrs *attrs)
+{
+       struct arm_vmregion *c;
+
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+       if (c) {
+               struct page **pages = c->priv;
+
+               unsigned long uaddr = vma->vm_start;
+               unsigned long usize = vma->vm_end - vma->vm_start;
+               int i = 0;
+
+               do {
+                       int ret;
+
+                       ret = vm_insert_page(vma, uaddr, pages[i++]);
+                       if (ret) {
+                               pr_err("Remapping memory, error: %d\n", ret);
+                               return ret;
+                       }
+
+                       uaddr += PAGE_SIZE;
+                       usize -= PAGE_SIZE;
+               } while (usize > 0);
+       }
+       return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+                         dma_addr_t handle, struct dma_attrs *attrs)
+{
+       struct arm_vmregion *c;
+       size = PAGE_ALIGN(size);
+
+       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+       if (c) {
+               struct page **pages = c->priv;
+               __dma_free_remap(cpu_addr, size);
+               __iommu_remove_mapping(dev, handle, size);
+               __iommu_free_buffer(dev, pages, size);
+       }
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+                         size_t size, dma_addr_t *handle,
+                         enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova, iova_base;
+       int ret = 0;
+       unsigned int count;
+       struct scatterlist *s;
+
+       size = PAGE_ALIGN(size);
+       *handle = DMA_ERROR_CODE;
+
+       iova_base = iova = __alloc_iova(mapping, size);
+       if (iova == DMA_ERROR_CODE)
+               return -ENOMEM;
+
+       for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+               phys_addr_t phys = page_to_phys(sg_page(s));
+               unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               if (ret < 0)
+                       goto fail;
+               count += len >> PAGE_SHIFT;
+               iova += len;
+       }
+       *handle = iova_base;
+
+       return 0;
+fail:
+       iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+       __free_iova(mapping, iova_base, size);
+       return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+                    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s = sg, *dma = sg, *start = sg;
+       int i, count = 0;
+       unsigned int offset = s->offset;
+       unsigned int size = s->offset + s->length;
+       unsigned int max = dma_get_max_seg_size(dev);
+
+       for (i = 1; i < nents; i++) {
+               s = sg_next(s);
+
+               s->dma_address = DMA_ERROR_CODE;
+               s->dma_length = 0;
+
+               if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+                       if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+                           dir) < 0)
+                               goto bad_mapping;
+
+                       dma->dma_address += offset;
+                       dma->dma_length = size - offset;
+
+                       size = offset = s->offset;
+                       start = s;
+                       dma = sg_next(dma);
+                       count += 1;
+               }
+               size += s->length;
+       }
+       if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+               goto bad_mapping;
+
+       dma->dma_address += offset;
+       dma->dma_length = size - offset;
+
+       return count+1;
+
+bad_mapping:
+       for_each_sg(sg, s, count, i)
+               __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+       return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               if (sg_dma_len(s))
+                       __iommu_remove_mapping(dev, sg_dma_address(s),
+                                              sg_dma_len(s));
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset,
+                                             s->length, dir);
+       }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                       int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               if (!arch_is_coherent())
+                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t dma_addr;
+       int ret, len = PAGE_ALIGN(size + offset);
+
+       if (!arch_is_coherent())
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+
+       dma_addr = __alloc_iova(mapping, len);
+       if (dma_addr == DMA_ERROR_CODE)
+               return dma_addr;
+
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+       if (ret < 0)
+               goto fail;
+
+       return dma_addr + offset;
+fail:
+       __free_iova(mapping, dma_addr, len);
+       return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       int offset = handle & ~PAGE_MASK;
+       int len = PAGE_ALIGN(size + offset);
+
+       if (!iova)
+               return;
+
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+
+       iommu_unmap(mapping->domain, iova, len);
+       __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       if (!arch_is_coherent())
+               __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+       unsigned int offset = handle & ~PAGE_MASK;
+
+       if (!iova)
+               return;
+
+       __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+       .alloc                  = arm_iommu_alloc_attrs,
+       .free                   = arm_iommu_free_attrs,
+       .mmap                   = arm_iommu_mmap_attrs,
+
+       .map_page               = arm_iommu_map_page,
+       .unmap_page             = arm_iommu_unmap_page,
+       .sync_single_for_cpu    = arm_iommu_sync_single_for_cpu,
+       .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+       .map_sg                 = arm_iommu_map_sg,
+       .unmap_sg               = arm_iommu_unmap_sg,
+       .sync_sg_for_cpu        = arm_iommu_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: accuracy of the IO addresses allocations
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device need to be attached to the mapping with
+ * arm_iommu_attach_device function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                        int order)
+{
+       unsigned int count = size >> (PAGE_SHIFT + order);
+       unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+       struct dma_iommu_mapping *mapping;
+       int err = -ENOMEM;
+
+       if (!count)
+               return ERR_PTR(-EINVAL);
+
+       mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+       if (!mapping)
+               goto err;
+
+       mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!mapping->bitmap)
+               goto err2;
+
+       mapping->base = base;
+       mapping->bits = BITS_PER_BYTE * bitmap_size;
+       mapping->order = order;
+       spin_lock_init(&mapping->lock);
+
+       mapping->domain = iommu_domain_alloc(bus);
+       if (!mapping->domain)
+               goto err3;
+
+       kref_init(&mapping->kref);
+       return mapping;
+err3:
+       kfree(mapping->bitmap);
+err2:
+       kfree(mapping);
+err:
+       return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+       struct dma_iommu_mapping *mapping =
+               container_of(kref, struct dma_iommu_mapping, kref);
+
+       iommu_domain_free(mapping->domain);
+       kfree(mapping->bitmap);
+       kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+       if (mapping)
+               kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *     arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+                           struct dma_iommu_mapping *mapping)
+{
+       int err;
+
+       err = iommu_attach_device(mapping->domain, dev);
+       if (err)
+               return err;
+
+       kref_get(&mapping->kref);
+       dev->archdata.mapping = mapping;
+       set_dma_ops(dev, &iommu_ops);
+
+       pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       return 0;
+}
+
+#endif
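
For reference, the bitmap managed by __alloc_iova()/__free_iova() above tracks blocks of (1 << order) pages, so every request is rounded up to whole blocks. A standalone user-space sketch (not kernel code; the page size and example values are assumptions for illustration) of that rounding:

/* Illustration of the __alloc_iova() block arithmetic only. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Number of bitmap bits a request of 'size' bytes consumes when each bit
 * covers (1 << order) pages -- the same rounding done in __alloc_iova(). */
static unsigned int iova_blocks(size_t size, unsigned int order)
{
        unsigned int pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        return (pages + (1u << order) - 1) >> order;
}

int main(void)
{
        /* With order 1 (2-page blocks), a 5-page request takes 3 bits,
         * i.e. 6 pages of IO address space are reserved. */
        printf("%u\n", iova_blocks(5 * PAGE_SIZE, 1));  /* prints 3 */
        return 0;
}
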
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be662c088..bf312c354a21 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@ struct arm_vmregion {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
-       struct page             *vm_pages;
+       void                    *priv;
        int                     vm_active;
        const void              *caller;
 };