Diffstat (limited to 'lib/iommu-common.c')
-rw-r--r--	lib/iommu-common.c	270
1 file changed, 270 insertions(+), 0 deletions(-)
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
new file mode 100644
index 000000000000..df30632f0bef
--- /dev/null
+++ b/lib/iommu-common.c
@@ -0,0 +1,270 @@
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>

#ifndef DMA_ERROR_CODE
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif

static unsigned long iommu_large_alloc = 15;

static DEFINE_PER_CPU(unsigned int, iommu_hash_common);

static inline bool need_flush(struct iommu_map_table *iommu)
{
	return (iommu->lazy_flush != NULL &&
		(iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
	iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
	iommu->flags &= ~IOMMU_NEED_FLUSH;
}

static void setup_iommu_pool_hash(void)
{
	unsigned int i;
	static bool do_once;

	if (do_once)
		return;
	do_once = true;
	for_each_possible_cpu(i)
		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
			 unsigned long num_entries,
			 u32 table_shift,
			 void (*lazy_flush)(struct iommu_map_table *),
			 bool large_pool, u32 npools,
			 bool skip_span_boundary_check)
{
	unsigned int start, i;
	struct iommu_pool *p = &(iommu->large_pool);

	setup_iommu_pool_hash();
	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;
	BUG_ON(npools > IOMMU_NR_POOLS);

	iommu->table_shift = table_shift;
	iommu->lazy_flush = lazy_flush;
	start = 0;
	if (skip_span_boundary_check)
		iommu->flags |= IOMMU_NO_SPAN_BOUND;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->pools[i].lock));
		iommu->pools[i].start = start;
		iommu->pools[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->pools[i].end = start - 1;
	}
	if (!large_pool)
		return;
	/* initialize large_pool */
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
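
/*
 * Illustrative usage sketch, not part of this patch: a hypothetical
 * driver sets up a 64K-entry map with the default pool count and a
 * large pool. `my_iommu', the DVMA base, the 8K page size (shift 13)
 * and the kzalloc()'d bitmap are assumptions for the example only.
 */
static struct iommu_map_table my_iommu;

static int my_iommu_map_init(void)
{
	my_iommu.table_map_base = 0x80000000UL;	/* example DVMA base */
	my_iommu.map = kzalloc(BITS_TO_LONGS(65536) * sizeof(unsigned long),
			       GFP_KERNEL);	/* needs <linux/slab.h> */
	if (!my_iommu.map)
		return -ENOMEM;
	iommu_tbl_pool_init(&my_iommu, 65536, 13,
			    NULL,	/* no lazy_flush callback */
			    true,	/* reserve top 1/4 as the large pool */
			    0,		/* 0 selects IOMMU_NR_POOLS pools */
			    false);	/* keep span-boundary checking */
	return 0;
}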

unsigned long iommu_tbl_range_alloc(struct device *dev,
				    struct iommu_map_table *iommu,
				    unsigned long npages,
				    unsigned long *handle,
				    unsigned long mask,
				    unsigned int align_order)
{
	unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *pool;
	int pass = 0;
	unsigned int pool_nr;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > iommu_large_alloc);
	unsigned long shift;
	unsigned long align_mask = 0;

	if (align_order > 0)
		align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* Sanity check */
	if (unlikely(npages == 0)) {
		WARN_ON_ONCE(1);
		return DMA_ERROR_CODE;
	}

	if (largealloc) {
		pool = &(iommu->large_pool);
		pool_nr = 0; /* to keep compiler happy */
	} else {
		/* pick out pool_nr */
		pool_nr = pool_hash & (npools - 1);
		pool = &(iommu->pools[pool_nr]);
	}
	spin_lock_irqsave(&pool->lock, flags);

again:
	if (pass == 0 && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the beginning. If a
	 * flush is needed, it will get done based on the return value
	 * from iommu_area_alloc() below.
	 */
	if (start >= limit)
		start = pool->start;
	shift = iommu->table_map_base >> iommu->table_shift;
	if (limit + shift > mask) {
		limit = mask - shift + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(iommu->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << iommu->table_shift);
	else
		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);

	boundary_size = boundary_size >> iommu->table_shift;
	/*
	 * If skip_span_boundary_check was set during init, we set
	 * things up so that iommu_is_span_boundary() merely checks
	 * whether (index + npages) < num_tsb_entries.
	 */
	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	}
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure, rescan from the beginning. */
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			pool = &(iommu->pools[pool_nr]);
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else {
			/* give up */
			n = DMA_ERROR_CODE;
			goto bail;
		}
	}
	if (n < pool->hint || need_flush(iommu)) {
		clear_flush(iommu);
		iommu->lazy_flush(iommu);
	}

	end = n + npages;
	pool->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;
bail:
	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
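
/*
 * Illustrative allocation sketch, not part of this patch: turn the
 * returned map entry into a DVMA address using the hypothetical
 * `my_iommu' table from the init sketch above.
 */
static dma_addr_t my_map_range(struct device *dev, unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &my_iommu, npages,
				      NULL,	/* no scatterlist handle */
				      ~0UL,	/* no DMA mask constraint */
				      0);	/* no extra alignment */
	if (entry == DMA_ERROR_CODE)
		return DMA_ERROR_CODE;	/* all pools exhausted */
	return my_iommu.table_map_base + (entry << my_iommu.table_shift);
}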

static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

	/* The large pool is the last pool at the top of the table */
	if (large_pool && entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}
	return p;
}
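
/*
 * Worked example, illustrative only: with num_entries = 1024,
 * nr_pools = 16 and a large pool, poolsize = (1024 * 3 / 4) / 16 = 48.
 * Entries 0..767 belong to the sixteen small pools (entry 100 falls in
 * pool 100 / 48 = 2), while entries 768..1023 map to the large pool.
 */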

/* The caller supplies the index of the entry into the iommu map table
 * directly when the mapping from dma_addr to the entry is not the
 * default addr->entry conversion below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
			  unsigned long npages, unsigned long entry)
{
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
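
/*
 * Illustrative release sketch, not part of this patch: undo the
 * hypothetical my_map_range() above. Passing DMA_ERROR_CODE as the
 * entry selects the default dma_addr -> entry conversion.
 */
static void my_unmap_range(dma_addr_t dma_addr, unsigned long npages)
{
	iommu_tbl_range_free(&my_iommu, dma_addr, npages, DMA_ERROR_CODE);
}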
