-rw-r--r--  arch/arm/include/asm/dma-iommu.h  |   8
-rw-r--r--  arch/arm/mm/dma-mapping.c         | 123
2 files changed, 110 insertions(+), 21 deletions(-)
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index a8c56acc8c98..686797cf5618 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -13,8 +13,12 @@ struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
 
-	void			*bitmap;
-	size_t			bits;
+	unsigned long		**bitmaps;	/* array of bitmaps */
+	unsigned int		nr_bitmaps;	/* nr of elements in array */
+	unsigned int		extensions;
+	size_t			bitmap_size;	/* size of a single bitmap */
+	size_t			bits;		/* per bitmap */
+	unsigned int		size;		/* per bitmap */
 	unsigned int		order;
 	dma_addr_t		base;
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 11b3914660d2..4d06295b8e3e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1069,6 +1069,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 				      size_t size)
 {
@@ -1076,6 +1078,8 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int align = 0;
 	unsigned int count, start;
 	unsigned long flags;
+	dma_addr_t iova;
+	int i;
 
 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
@@ -1087,30 +1091,78 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 		align = (1 << (order - mapping->order)) - 1;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
+	for (i = 0; i < mapping->nr_bitmaps; i++) {
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits)
+			continue;
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+		break;
 	}
 
-	bitmap_set(mapping->bitmap, start, count);
+	/*
+	 * No unused range found. Try to extend the existing mapping
+	 * and perform a second attempt to reserve an IO virtual
+	 * address range of size bytes.
+	 */
+	if (i == mapping->nr_bitmaps) {
+		if (extend_iommu_mapping(mapping)) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+	iova = mapping->base + (mapping->size * i);
+	iova += start << (mapping->order + PAGE_SHIFT);
+
+	return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
-	unsigned int start = (addr - mapping->base) >>
-			     (mapping->order + PAGE_SHIFT);
-	unsigned int count = ((size >> PAGE_SHIFT) +
-			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned int start, count;
 	unsigned long flags;
+	dma_addr_t bitmap_base;
+	u32 bitmap_index;
+
+	if (!size)
+		return;
+
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+	bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+	start = (addr - bitmap_base) >> (mapping->order + PAGE_SHIFT);
+
+	if (addr + size > bitmap_base + mapping->size) {
+		/*
+		 * The address range to be freed reaches into the iova
+		 * range of the next bitmap. This should not happen as
+		 * we don't allow this in __alloc_iova (at the
+		 * moment).
+		 */
+		BUG();
+	} else
+		count = ((size >> PAGE_SHIFT) +
+			(1 << mapping->order) - 1) >> mapping->order;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
+	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
@@ -1890,8 +1942,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 			 int order)
 {
 	unsigned int count = size >> (PAGE_SHIFT + order);
-	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
+	int extensions = 0;
 	int err = -ENOMEM;
 
 	if (!count)
@@ -1901,23 +1953,35 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!mapping->bitmap)
+	mapping->bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	mapping->bitmaps = kzalloc((extensions + 1) * sizeof(unsigned long *),
+				GFP_KERNEL);
+	if (!mapping->bitmaps)
 		goto err2;
 
+	mapping->bitmaps[0] = kzalloc(mapping->bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
 	mapping->base = base;
-	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->size = size;
 	mapping->order = order;
+	mapping->bits = BITS_PER_BYTE * mapping->bitmap_size;
+
 	spin_lock_init(&mapping->lock);
 
 	mapping->domain = iommu_domain_alloc(bus);
 	if (!mapping->domain)
-		goto err3;
+		goto err4;
 
 	kref_init(&mapping->kref);
 	return mapping;
+err4:
+	kfree(mapping->bitmaps[0]);
 err3:
-	kfree(mapping->bitmap);
+	kfree(mapping->bitmaps);
 err2:
 	kfree(mapping);
 err:
@@ -1927,14 +1991,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+	int i;
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
 	kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+	int next_bitmap;
+
+	if (mapping->nr_bitmaps > mapping->extensions)
+		return -EINVAL;
+
+	next_bitmap = mapping->nr_bitmaps;
+	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+						GFP_ATOMIC);
+	if (!mapping->bitmaps[next_bitmap])
+		return -ENOMEM;
+
+	mapping->nr_bitmaps++;
+
+	return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)