author		Andreas Herrmann <andreas.herrmann@calxeda.com>	2014-02-25 07:09:53 -0500
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2014-02-28 05:55:18 -0500
commit		4d852ef8c2544ce21ae41414099a7504c61164a0 (patch)
tree		cbaa9c31df099c84e78a7d4a8139ca2b9ccf7a27 /arch/arm/mm
parent		cfbf8d4857c26a8a307fb7cd258074c9dcd8c691 (diff)
arm: dma-mapping: Add support to extend DMA IOMMU mappings
Instead of using just one bitmap to keep track of IO virtual addresses
(handed out for IOMMU use), introduce an array of bitmaps. This allows
us to extend existing mappings when running out of iova space in the
initial mapping.

If there is not enough space in the mapping to service an IO virtual
address allocation request, __alloc_iova() tries to extend the mapping
-- by allocating another bitmap -- and makes a second allocation
attempt using the freshly allocated bitmap. This allows arm iommu
drivers to start with a decent initial size when a dma_iommu_mapping
is created and still avoid running out of IO virtual addresses for
the mapping.

Signed-off-by: Andreas Herrmann <andreas.herrmann@calxeda.com>
[mszyprow: removed the extensions parameter to the
 arm_iommu_create_mapping() function, which will be modified in the
 next patch anyway, and dropped some debug messages about extending
 the bitmap]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
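For orientation: with the array of bitmaps, an iova decomposes into a
(bitmap index, bit offset) pair. Each bitmap covers mapping->size bytes
of IO virtual address space; __free_iova recovers the pair by a divide
and a shift, and __alloc_iova computes the inverse. Below is a minimal,
self-contained userspace sketch of that arithmetic -- the base, size,
order, and addr values are made-up examples, not taken from the patch.

/*
 * Hypothetical userspace sketch of the iova <-> (bitmap, bit offset)
 * arithmetic used by __alloc_iova/__free_iova. All constants below
 * are assumed example values, not kernel API.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t base  = 0x80000000;	/* mapping->base (assumed) */
	uint32_t size  = 0x10000000;	/* mapping->size: 256 MiB per bitmap (assumed) */
	unsigned order = 0;		/* mapping->order (assumed) */

	/* An iova that falls into the second (extended) bitmap. */
	uint32_t addr = 0x92345000;

	/* Same decomposition as __free_iova: bitmap index, then bit offset. */
	uint32_t bitmap_index = (addr - base) / size;
	uint32_t bitmap_base  = base + size * bitmap_index;
	unsigned start = (addr - bitmap_base) >> (order + PAGE_SHIFT);

	/* Inverse, as computed at the end of __alloc_iova. */
	uint32_t iova = base + size * bitmap_index +
			((uint32_t)start << (order + PAGE_SHIFT));

	printf("bitmap_index=%u start=%u iova=0x%08x round-trips=%s\n",
	       bitmap_index, start, iova, iova == addr ? "yes" : "no");
	return 0;
}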
Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--	arch/arm/mm/dma-mapping.c | 123
 1 file changed, 104 insertions(+), 19 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 11b3914660d2..4d06295b8e3e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1069,6 +1069,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 				      size_t size)
 {
@@ -1076,6 +1078,8 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int align = 0;
 	unsigned int count, start;
 	unsigned long flags;
+	dma_addr_t iova;
+	int i;
 
 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
@@ -1087,30 +1091,78 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 		align = (1 << (order - mapping->order)) - 1;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
+	for (i = 0; i < mapping->nr_bitmaps; i++) {
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits)
+			continue;
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+		break;
 	}
 
-	bitmap_set(mapping->bitmap, start, count);
+	/*
+	 * No unused range found. Try to extend the existing mapping
+	 * and perform a second attempt to reserve an IO virtual
+	 * address range of size bytes.
+	 */
+	if (i == mapping->nr_bitmaps) {
+		if (extend_iommu_mapping(mapping)) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+	iova = mapping->base + (mapping->size * i);
+	iova += start << (mapping->order + PAGE_SHIFT);
+
+	return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
-	unsigned int start = (addr - mapping->base) >>
-			     (mapping->order + PAGE_SHIFT);
-	unsigned int count = ((size >> PAGE_SHIFT) +
-			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned int start, count;
 	unsigned long flags;
+	dma_addr_t bitmap_base;
+	u32 bitmap_index;
+
+	if (!size)
+		return;
+
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+	bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+	start = (addr - bitmap_base) >> (mapping->order + PAGE_SHIFT);
+
+	if (addr + size > bitmap_base + mapping->size) {
+		/*
+		 * The address range to be freed reaches into the iova
+		 * range of the next bitmap. This should not happen as
+		 * we don't allow this in __alloc_iova (at the
+		 * moment).
+		 */
+		BUG();
+	} else
+		count = ((size >> PAGE_SHIFT) +
+			(1 << mapping->order) - 1) >> mapping->order;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
+	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
@@ -1890,8 +1942,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 			 int order)
 {
 	unsigned int count = size >> (PAGE_SHIFT + order);
-	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
+	int extensions = 0;
 	int err = -ENOMEM;
 
 	if (!count)
@@ -1901,23 +1953,35 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!mapping->bitmap)
+	mapping->bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	mapping->bitmaps = kzalloc((extensions + 1) * sizeof(unsigned long *),
+				GFP_KERNEL);
+	if (!mapping->bitmaps)
 		goto err2;
 
+	mapping->bitmaps[0] = kzalloc(mapping->bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
 	mapping->base = base;
-	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->size = size;
 	mapping->order = order;
+	mapping->bits = BITS_PER_BYTE * mapping->bitmap_size;
+
 	spin_lock_init(&mapping->lock);
 
 	mapping->domain = iommu_domain_alloc(bus);
 	if (!mapping->domain)
-		goto err3;
+		goto err4;
 
 	kref_init(&mapping->kref);
 	return mapping;
+err4:
+	kfree(mapping->bitmaps[0]);
 err3:
-	kfree(mapping->bitmap);
+	kfree(mapping->bitmaps);
 err2:
 	kfree(mapping);
 err:
@@ -1927,14 +1991,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+	int i;
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
 	kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+	int next_bitmap;
+
+	if (mapping->nr_bitmaps > mapping->extensions)
+		return -EINVAL;
+
+	next_bitmap = mapping->nr_bitmaps;
+	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+						GFP_ATOMIC);
+	if (!mapping->bitmaps[next_bitmap])
+		return -ENOMEM;
+
+	mapping->nr_bitmaps++;
+
+	return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)