Diffstat (limited to 'arch/powerpc/mm/mmu_context_iommu.c')
 arch/powerpc/mm/mmu_context_iommu.c | 110
 1 file changed, 92 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 56c2234cc6ae..a712a650a8b6 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -36,6 +36,8 @@ struct mm_iommu_table_group_mem_t {
 	u64 ua;			/* userspace address */
 	u64 entries;		/* number of entries in hpas[] */
 	u64 *hpas;		/* vmalloc'ed */
+#define MM_IOMMU_TABLE_INVALID_HPA	((uint64_t)-1)
+	u64 dev_hpa;		/* Device memory base address */
 };
 
 static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
@@ -126,7 +128,8 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	return 0;
 }
 
-long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
+		unsigned long entries, unsigned long dev_hpa,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
 	struct mm_iommu_table_group_mem_t *mem;
@@ -140,12 +143,6 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 
 	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
 			next) {
-		if ((mem->ua == ua) && (mem->entries == entries)) {
-			++mem->used;
-			*pmem = mem;
-			goto unlock_exit;
-		}
-
 		/* Overlap? */
 		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
 				(ua < (mem->ua +
@@ -156,11 +153,13 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 
 	}
 
-	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
-	if (ret)
-		goto unlock_exit;
+	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
+		if (ret)
+			goto unlock_exit;
 
-	locked_entries = entries;
+		locked_entries = entries;
+	}
 
 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	if (!mem) {
@@ -168,6 +167,13 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		goto unlock_exit;
 	}
 
+	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
+		mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
+		mem->dev_hpa = dev_hpa;
+		goto good_exit;
+	}
+	mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;
+
 	/*
 	 * For a starting point for a maximum page size calculation
 	 * we use @ua and @entries natural alignment to allow IOMMU pages
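The device-memory branch above derives mem->pageshift from the natural alignment of the region: __ffs() of dev_hpa | (entries << PAGE_SHIFT) picks the lowest set bit of either the base address or the size, i.e. the largest IOMMU page size both of them are aligned to. A minimal userspace sketch of the same arithmetic (the values and the 64K PAGE_SHIFT are assumptions for illustration, with __builtin_ctzll standing in for the kernel's __ffs()):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 16	/* 64K pages, typical for ppc64 server configs */

int main(void)
{
	/* Hypothetical GPU RAM window: 128GB-aligned base, 16GB long. */
	uint64_t dev_hpa = 0x2000000000ULL;			/* 2^37 */
	uint64_t entries = (16ULL << 30) >> PAGE_SHIFT;		/* 16GB of 64K pages */

	/* Same expression mm_iommu_do_alloc() uses for mem->pageshift. */
	unsigned int pageshift = __builtin_ctzll(dev_hpa | (entries << PAGE_SHIFT));

	printf("pageshift = %u -> max IOMMU page = %llu MB\n",
	       pageshift, (1ULL << pageshift) >> 20);
	return 0;
}

With these numbers the size (2^34 bytes) is the limiting factor, so the region could be mapped with IOMMU pages of up to 16GB.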
@@ -236,6 +242,7 @@ populate:
 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
 	}
 
+good_exit:
 	atomic64_set(&mem->mapped, 1);
 	mem->used = 1;
 	mem->ua = ua;
@@ -252,13 +259,31 @@ unlock_exit:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(mm_iommu_get);
+
+long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+		struct mm_iommu_table_group_mem_t **pmem)
+{
+	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
+			pmem);
+}
+EXPORT_SYMBOL_GPL(mm_iommu_new);
+
+long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
+		unsigned long entries, unsigned long dev_hpa,
+		struct mm_iommu_table_group_mem_t **pmem)
+{
+	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
+}
+EXPORT_SYMBOL_GPL(mm_iommu_newdev);
 
 static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
 {
 	long i;
 	struct page *page = NULL;
 
+	if (!mem->hpas)
+		return;
+
 	for (i = 0; i < mem->entries; ++i) {
 		if (!mem->hpas[i])
 			continue;
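The split keeps the pinned-pages path and the device-memory path behind two thin exported wrappers. A hedged caller-side sketch of how the pair is meant to be used, assuming kernel context, that MM_IOMMU_TABLE_INVALID_HPA and the helper prototypes are visible to the caller (via the matching mmu_context.h declarations, not part of this file), and with a made-up function name:

/* Hypothetical helper, for illustration only. */
static long preregister_region(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa)
{
	struct mm_iommu_table_group_mem_t *mem;
	long ret;

	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
		/* Device memory: nothing is pinned, locked_vm is untouched. */
		ret = mm_iommu_newdev(mm, ua, entries, dev_hpa, &mem);
	else
		/* Normal memory: pages get pinned and charged to locked_vm. */
		ret = mm_iommu_new(mm, ua, entries, &mem);

	if (ret)
		return ret;

	/*
	 * ... translate with mm_iommu_ua_to_hpa() while the region is in use,
	 * then drop the reference taken at creation time:
	 */
	return mm_iommu_put(mm, mem);
}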
@@ -300,6 +325,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
+	unsigned long entries, dev_hpa;
 
 	mutex_lock(&mem_list_mutex);
 
@@ -321,9 +347,12 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 	}
 
 	/* @mapped became 0 so now mappings are disabled, release the region */
+	entries = mem->entries;
+	dev_hpa = mem->dev_hpa;
 	mm_iommu_release(mem);
 
-	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+		mm_iommu_adjust_locked_vm(mm, entries, false);
 
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
@@ -368,27 +397,32 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
 	return ret;
 }
 
-struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
+	mutex_lock(&mem_list_mutex);
+
 	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			ret = mem;
+			++mem->used;
 			break;
 		}
 	}
 
+	mutex_unlock(&mem_list_mutex);
+
 	return ret;
 }
-EXPORT_SYMBOL_GPL(mm_iommu_find);
+EXPORT_SYMBOL_GPL(mm_iommu_get);
 
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
-	u64 *va = &mem->hpas[entry];
+	u64 *va;
 
 	if (entry >= mem->entries)
 		return -EFAULT;
@@ -396,6 +430,12 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 	if (pageshift > mem->pageshift)
 		return -EFAULT;
 
+	if (!mem->hpas) {
+		*hpa = mem->dev_hpa + (ua - mem->ua);
+		return 0;
+	}
+
+	va = &mem->hpas[entry];
 	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
 
 	return 0;
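For a region created with a device base there is no hpas[] array, so the !mem->hpas branch above is plain offset arithmetic: the host physical address is dev_hpa plus the offset of @ua into the region. A self-contained illustration with made-up addresses:

#include <stdio.h>

int main(void)
{
	/* Hypothetical preregistered device-memory region. */
	unsigned long mem_ua  = 0x7fff80000000UL;	/* userspace start (mem->ua) */
	unsigned long dev_hpa = 0x2000000000UL;		/* device base (mem->dev_hpa) */

	/* Translate an address 1MB into the region, as the !mem->hpas path does. */
	unsigned long ua  = mem_ua + 0x100000;
	unsigned long hpa = dev_hpa + (ua - mem_ua);

	printf("ua 0x%lx -> hpa 0x%lx\n", ua, hpa);	/* prints hpa 0x2000100000 */
	return 0;
}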
@@ -406,7 +446,6 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
-	void *va = &mem->hpas[entry];
 	unsigned long *pa;
 
 	if (entry >= mem->entries)
@@ -415,7 +454,12 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 	if (pageshift > mem->pageshift)
 		return -EFAULT;
 
-	pa = (void *) vmalloc_to_phys(va);
+	if (!mem->hpas) {
+		*hpa = mem->dev_hpa + (ua - mem->ua);
+		return 0;
+	}
+
+	pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
 	if (!pa)
 		return -EFAULT;
 
@@ -435,6 +479,9 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
 	if (!mem)
 		return;
 
+	if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
+		return;
+
 	entry = (ua - mem->ua) >> PAGE_SHIFT;
 	va = &mem->hpas[entry];
 
@@ -445,6 +492,33 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
 	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
 }
 
+bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+		unsigned int pageshift, unsigned long *size)
+{
+	struct mm_iommu_table_group_mem_t *mem;
+	unsigned long end;
+
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+		if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+			continue;
+
+		end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
+		if ((mem->dev_hpa <= hpa) && (hpa < end)) {
+			/*
+			 * Since the IOMMU page size might be bigger than
+			 * PAGE_SIZE, the amount of preregistered memory
+			 * starting from @hpa might be smaller than 1<<pageshift
+			 * and the caller needs to distinguish this situation.
+			 */
+			*size = min(1UL << pageshift, end - hpa);
+			return true;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);
+
 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
 {
 	if (atomic64_inc_not_zero(&mem->mapped))
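mm_iommu_is_devmem() is the query side of the new device-memory regions: it tells a caller whether a host physical address lies inside one of them and, via *size, how much of the requested IOMMU page is actually covered. A hedged sketch of the kind of check a TCE-building caller could make (kernel context assumed, function name made up; the real callers are outside this file):

/* Hypothetical helper, for illustration only. */
static bool hpa_needs_page_refcount(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift)
{
	unsigned long size = 0;

	/*
	 * Device memory has no struct pages to pin; skip refcounting only
	 * when the whole IOMMU page is backed by a preregistered region.
	 */
	if (mm_iommu_is_devmem(mm, hpa, pageshift, &size) &&
			size == (1UL << pageshift))
		return false;

	return true;
}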