Diffstat (limited to 'lib/iommu-common.c')
-rw-r--r--	lib/iommu-common.c	267
1 file changed, 0 insertions(+), 267 deletions(-)
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
deleted file mode 100644
index 55b00de106b5..000000000000
--- a/lib/iommu-common.c
+++ /dev/null
@@ -1,267 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>

static unsigned long iommu_large_alloc = 15;

static DEFINE_PER_CPU(unsigned int, iommu_hash_common);

static inline bool need_flush(struct iommu_map_table *iommu)
{
	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
	iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
	iommu->flags &= ~IOMMU_NEED_FLUSH;
}

static void setup_iommu_pool_hash(void)
{
	unsigned int i;
	static bool do_once;

	if (do_once)
		return;
	do_once = true;
	for_each_possible_cpu(i)
		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}
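
/*
 * Note: each CPU gets a fixed hash value the first time any map table
 * is initialized; iommu_tbl_range_alloc() masks that value with
 * (nr_pools - 1), so different CPUs tend to start their searches in
 * different pools, spreading contention across the per-pool locks.
 */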

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
			 unsigned long num_entries,
			 u32 table_shift,
			 void (*lazy_flush)(struct iommu_map_table *),
			 bool large_pool, u32 npools,
			 bool skip_span_boundary_check)
{
	unsigned int start, i;
	struct iommu_pool *p = &(iommu->large_pool);

	setup_iommu_pool_hash();
	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;
	BUG_ON(npools > IOMMU_NR_POOLS);

	iommu->table_shift = table_shift;
	iommu->lazy_flush = lazy_flush;
	start = 0;
	if (skip_span_boundary_check)
		iommu->flags |= IOMMU_NO_SPAN_BOUND;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->pools[i].lock));
		iommu->pools[i].start = start;
		iommu->pools[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->pools[i].end = start - 1;
	}
	if (!large_pool)
		return;
	/* initialize large_pool */
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
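
/*
 * Usage sketch (hypothetical caller; "tbl", "atu", the 64K table size,
 * and IO_PAGE_SHIFT are illustrative names, not defined in this file):
 * carve a 64K-entry table into the default number of pools, reserving
 * the top quarter for large allocations, with no lazy-flush callback:
 *
 *	tbl->table_map_base = atu->base;
 *	tbl->map = kzalloc(BITS_TO_LONGS(64 * 1024) * sizeof(long),
 *			   GFP_KERNEL);
 *	iommu_tbl_pool_init(tbl, 64 * 1024, IO_PAGE_SHIFT,
 *			    NULL, true, 0, false);
 */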

unsigned long iommu_tbl_range_alloc(struct device *dev,
				    struct iommu_map_table *iommu,
				    unsigned long npages,
				    unsigned long *handle,
				    unsigned long mask,
				    unsigned int align_order)
{
	unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *pool;
	int pass = 0;
	unsigned int pool_nr;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > iommu_large_alloc);
	unsigned long shift;
	unsigned long align_mask = 0;

	if (align_order > 0)
		align_mask = ~0ul >> (BITS_PER_LONG - align_order);
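	/*
	 * Worked example: align_order == 4 gives align_mask == 0xf, so
	 * iommu_area_alloc() below only returns an index that is a
	 * multiple of 16 entries.
	 */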

	/* Sanity check */
	if (unlikely(npages == 0)) {
		WARN_ON_ONCE(1);
		return IOMMU_ERROR_CODE;
	}

	if (largealloc) {
		pool = &(iommu->large_pool);
		pool_nr = 0; /* to keep compiler happy */
	} else {
		/* pick out pool_nr */
		pool_nr = pool_hash & (npools - 1);
		pool = &(iommu->pools[pool_nr]);
	}
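	/*
	 * Note: the masking above assumes nr_pools is a power of two;
	 * IOMMU_NR_POOLS is defined as 1 << IOMMU_POOL_HASHBITS in the
	 * header, which keeps the per-cpu hash uniform over the pools.
	 */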
	spin_lock_irqsave(&pool->lock, flags);

again:
	if (pass == 0 && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the beginning. If a
	 * flush is needed, it will get done based on the return value
	 * from iommu_area_alloc() below.
	 */
	if (start >= limit)
		start = pool->start;
	shift = iommu->table_map_base >> iommu->table_shift;
	if (limit + shift > mask) {
		limit = mask - shift + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(iommu->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}
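	/*
	 * Note: shift and mask are both in table-entry units here (shift
	 * is table_map_base converted to an entry index), so the clamp
	 * above keeps every candidate entry inside the caller's
	 * addressable range.
	 */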

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << iommu->table_shift);
	else
		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);

	boundary_size = boundary_size >> iommu->table_shift;
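	/*
	 * Worked example: a 4GB segment boundary with table_shift == 13
	 * (8K pages) gives boundary_size == (1ULL << 32) >> 13 == 0x80000
	 * entries, i.e. no allocation may straddle a 512K-entry boundary.
	 */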
	/*
	 * If skip_span_boundary_check was set during init, we set things
	 * up so that iommu_is_span_boundary() merely checks whether
	 * (index + npages) < num_tsb_entries.
	 */
	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	}
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure, rescan from the beginning. */
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			pool = &(iommu->pools[pool_nr]);
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else {
			/* give up */
			n = IOMMU_ERROR_CODE;
			goto bail;
		}
	}
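	/*
	 * n < pool->hint means the search wrapped back below the previous
	 * allocation point, so freed entries may be reused; give the
	 * owner a chance to flush stale IOMMU mappings first.
	 */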
	if (iommu->lazy_flush &&
	    (n < pool->hint || need_flush(iommu))) {
		clear_flush(iommu);
		iommu->lazy_flush(iommu);
	}

	end = n + npages;
	pool->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;
bail:
	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
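
/*
 * Usage sketch (hypothetical caller; "tbl", "npages", and
 * handle_failure() are illustrative): allocate npages contiguous
 * entries with no address-range constraint (mask of -1) and no extra
 * alignment, then turn the returned entry index into a DMA address:
 *
 *	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 *				      (unsigned long)(-1), 0);
 *	if (entry == IOMMU_ERROR_CODE)
 *		return handle_failure();
 *	dma_addr = tbl->table_map_base + (entry << tbl->table_shift);
 */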

static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

	/* The large pool is the last pool at the top of the table */
	if (large_pool && entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}
	return p;
}
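
/*
 * Worked example: with poolsize == 16384 entries, entry 40000 resolves
 * to pool 40000 / 16384 == 2; any entry at or above large_pool.start
 * resolves to the large pool instead.
 */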

/* The caller supplies the entry's index into the iommu map table
 * directly when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
			  unsigned long npages, unsigned long entry)
{
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
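
/*
 * Usage sketch (hypothetical caller, continuing the allocation example
 * above): passing IOMMU_ERROR_CODE as the entry tells the function to
 * derive the index from dma_addr itself:
 *
 *	iommu_tbl_range_free(tbl, dma_addr, npages, IOMMU_ERROR_CODE);
 */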
