author	Alexey Kardashevskiy <aik@ozlabs.ru>	2016-11-30 01:52:04 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-12-01 22:38:33 -0500
commit	bc82d122ae4a0e9f971f13403995898fcfa0c09e (patch)
tree	f95d3369a1b62cb2324e4ad6d5234c8d0b1322e3
parent	d9c728949ddc9de5734bf3b12ea906ca8a77f2a0 (diff)
vfio/spapr: Reference mm in tce_container
In some situations the userspace memory context may live longer than
the userspace process itself, so if we need to do proper memory context
cleanup, we better have tce_container take a reference to mm_struct and
use it later when the process is gone (@current or @current->mm is
NULL).

This references mm and stores the pointer in the container; this is
done in a new helper - tce_iommu_mm_set() - when one of the following
happens:
- a container is enabled (IOMMU v1);
- a first attempt to pre-register memory is made (IOMMU v2);
- a DMA window is created (IOMMU v2).
The @mm stays referenced till the container is destroyed.

This replaces current->mm with container->mm everywhere except debug
prints.

This adds a check that current->mm is the same as the one stored in the
container, to prevent userspace from making changes to a memory context
of other processes.

DMA map/unmap ioctls() do not check for @mm as they already check for
@enabled, which is set after tce_iommu_mm_set() is called.

This does not reference a task, as multiple threads within the same mm
are allowed to ioctl() to vfio, and supposedly they will have the same
limits and capabilities; if they do not, we'll just fail with no harm
done.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--	drivers/vfio/vfio_iommu_spapr_tce.c	160
1 file changed, 100 insertions(+), 60 deletions(-)
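Before the diff itself, the lifetime rule is worth distilling: the container pins the caller's mm_struct once, checks every later caller against it, and drops the pin at teardown. Below is a standalone sketch of that pattern, not part of the patch; example_container and the example_* helpers are hypothetical names, while atomic_inc(&mm->mm_count) and mmdrop() are the real 4.9-era primitives (later kernels spell the pin mmgrab()).

#include <linux/sched.h>	/* current, struct mm_struct, mmdrop() */
#include <linux/errno.h>

struct example_container {
	struct mm_struct *mm;	/* pinned on first privileged ioctl */
};

/* Bind the container to the caller's mm on first use; afterwards only
 * callers sharing that mm may proceed. Mirrors tce_iommu_mm_set(). */
static long example_mm_set(struct example_container *c)
{
	if (c->mm)
		return (c->mm == current->mm) ? 0 : -EPERM;

	c->mm = current->mm;
	/* Pin the mm_struct itself (mm_count), not the address space
	 * (mm_users), so the struct outlives the process. */
	atomic_inc(&c->mm->mm_count);
	return 0;
}

/* Teardown is safe even after the process has exited, because the pin
 * above keeps the mm_struct allocated until mmdrop(). */
static void example_release(struct example_container *c)
{
	if (c->mm)
		mmdrop(c->mm);	/* pairs with atomic_inc(&mm->mm_count) */
}

Since only mm_count is held, the user address space may already be torn down by the time release runs; the cleanup paths in this patch accordingly touch only bookkeeping such as locked_vm, taken under mmap_sem.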
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 88622be0d6b5..4c03c8525c26 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
 static void tce_iommu_detach_group(void *iommu_data,
 		struct iommu_group *iommu_group);
 
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 {
 	long ret = 0, locked, lock_limit;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
+	if (WARN_ON_ONCE(!mm))
+		return -EPERM;
 
 	if (!npages)
 		return 0;
 
-	down_write(&current->mm->mmap_sem);
-	locked = current->mm->locked_vm + npages;
+	down_write(&mm->mmap_sem);
+	locked = mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 		ret = -ENOMEM;
 	else
-		current->mm->locked_vm += npages;
+		mm->locked_vm += npages;
 
 	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK),
 			ret ? " - exceeded" : "");
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 
 	return ret;
 }
 
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
 {
-	if (!current || !current->mm || !npages)
-		return; /* process exited */
+	if (!mm || !npages)
+		return;
 
-	down_write(&current->mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
-		npages = current->mm->locked_vm;
-	current->mm->locked_vm -= npages;
+	down_write(&mm->mmap_sem);
+	if (WARN_ON_ONCE(npages > mm->locked_vm))
+		npages = mm->locked_vm;
+	mm->locked_vm -= npages;
 	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK));
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 }
 
 /*
@@ -99,26 +99,38 @@ struct tce_container {
 	bool v2;
 	bool def_window_pending;
 	unsigned long locked_pages;
+	struct mm_struct *mm;
 	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
 	struct list_head group_list;
 };
 
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+	if (container->mm) {
+		if (container->mm == current->mm)
+			return 0;
+		return -EPERM;
+	}
+	BUG_ON(!current->mm);
+	container->mm = current->mm;
+	atomic_inc(&container->mm->mm_count);
+
+	return 0;
+}
+
 static long tce_iommu_unregister_pages(struct tce_container *container,
 		__u64 vaddr, __u64 size)
 {
 	struct mm_iommu_table_group_mem_t *mem;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
 
-	mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT);
+	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
 	if (!mem)
 		return -ENOENT;
 
-	return mm_iommu_put(current->mm, mem);
+	return mm_iommu_put(container->mm, mem);
 }
 
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -128,14 +140,11 @@ static long tce_iommu_register_pages(struct tce_container *container,
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	unsigned long entries = size >> PAGE_SHIFT;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
 			((vaddr + size) < vaddr))
 		return -EINVAL;
 
-	ret = mm_iommu_get(current->mm, vaddr, entries, &mem);
+	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
 	if (ret)
 		return ret;
 
@@ -144,7 +153,8 @@ static long tce_iommu_register_pages(struct tce_container *container,
 	return 0;
 }
 
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -153,13 +163,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
 
 	BUG_ON(tbl->it_userspace);
 
-	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
 	uas = vzalloc(cb);
 	if (!uas) {
-		decrement_locked_vm(cb >> PAGE_SHIFT);
+		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
 	tbl->it_userspace = uas;
@@ -167,7 +177,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
 	return 0;
 }
 
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -177,7 +188,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
 
 	vfree(tbl->it_userspace);
 	tbl->it_userspace = NULL;
-	decrement_locked_vm(cb >> PAGE_SHIFT);
+	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 }
 
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -237,9 +248,6 @@ static int tce_iommu_enable(struct tce_container *container)
 	struct iommu_table_group *table_group;
 	struct tce_iommu_group *tcegrp;
 
-	if (!current->mm)
-		return -ESRCH; /* process exited */
-
 	if (container->enabled)
 		return -EBUSY;
 
@@ -284,8 +292,12 @@ static int tce_iommu_enable(struct tce_container *container)
 	if (!table_group->tce32_size)
 		return -EPERM;
 
+	ret = tce_iommu_mm_set(container);
+	if (ret)
+		return ret;
+
 	locked = table_group->tce32_size >> PAGE_SHIFT;
-	ret = try_increment_locked_vm(locked);
+	ret = try_increment_locked_vm(container->mm, locked);
 	if (ret)
 		return ret;
 
@@ -303,10 +315,8 @@ static void tce_iommu_disable(struct tce_container *container)
 
 	container->enabled = false;
 
-	if (!current->mm)
-		return;
-
-	decrement_locked_vm(container->locked_pages);
+	BUG_ON(!container->mm);
+	decrement_locked_vm(container->mm, container->locked_pages);
 }
 
 static void *tce_iommu_open(unsigned long arg)
311 321
312static void *tce_iommu_open(unsigned long arg) 322static void *tce_iommu_open(unsigned long arg)
@@ -333,7 +343,8 @@ static void *tce_iommu_open(unsigned long arg)
 static int tce_iommu_clear(struct tce_container *container,
 		struct iommu_table *tbl,
 		unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl);
 
 static void tce_iommu_release(void *iommu_data)
 {
@@ -358,10 +369,12 @@ static void tce_iommu_release(void *iommu_data)
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_free_table(tbl);
+		tce_iommu_free_table(container, tbl);
 	}
 
 	tce_iommu_disable(container);
+	if (container->mm)
+		mmdrop(container->mm);
 	mutex_destroy(&container->lock);
 
 	kfree(container);
@@ -376,13 +389,14 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 	put_page(page);
 }
 
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+		unsigned long tce, unsigned long size,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(current->mm, tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, size);
 	if (!mem)
 		return -EINVAL;
 
@@ -395,18 +409,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
 	return 0;
 }
 
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
-		unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+		struct iommu_table *tbl, unsigned long entry)
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
 	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 
-	if (!pua || !current || !current->mm)
+	if (!pua)
 		return;
 
-	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
 			&hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -436,7 +450,7 @@ static int tce_iommu_clear(struct tce_container *container,
 			continue;
 
 		if (container->v2) {
-			tce_iommu_unuse_page_v2(tbl, entry);
+			tce_iommu_unuse_page_v2(container, tbl, entry);
 			continue;
 		}
 
@@ -517,7 +531,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 	enum dma_data_direction dirtmp;
 
 	if (!tbl->it_userspace) {
-		ret = tce_iommu_userspace_view_alloc(tbl);
+		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
 		if (ret)
 			return ret;
 	}
@@ -527,8 +541,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
 				entry + i);
 
-		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
-				&hpa, &mem);
+		ret = tce_iommu_prereg_ua_to_hpa(container,
+				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
 		if (ret)
 			break;
 
@@ -549,7 +563,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
 					__func__, entry << tbl->it_page_shift,
 					tce, ret);
@@ -557,7 +571,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		}
 
 		if (dirtmp != DMA_NONE)
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 
 		*pua = tce;
 
@@ -585,7 +599,7 @@ static long tce_iommu_create_table(struct tce_container *container,
 	if (!table_size)
 		return -EINVAL;
 
-	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
@@ -598,13 +612,14 @@ static long tce_iommu_create_table(struct tce_container *container,
 	return ret;
 }
 
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl)
 {
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
-	tce_iommu_userspace_view_free(tbl);
+	tce_iommu_userspace_view_free(tbl, container->mm);
 	tbl->it_ops->free(tbl);
-	decrement_locked_vm(pages);
+	decrement_locked_vm(container->mm, pages);
 }
 
 static long tce_iommu_create_window(struct tce_container *container,
610static long tce_iommu_create_window(struct tce_container *container, 625static long tce_iommu_create_window(struct tce_container *container,
@@ -667,7 +682,7 @@ unset_exit:
 		table_group = iommu_group_get_iommudata(tcegrp->grp);
 		table_group->ops->unset_window(table_group, num);
 	}
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 
 	return ret;
 }
@@ -705,7 +720,7 @@ static long tce_iommu_remove_window(struct tce_container *container,
 
 	/* Free table */
 	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 	container->tables[num] = NULL;
 
 	return 0;
@@ -760,7 +775,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		}
 
 		return (ret < 0) ? 0 : ret;
+	}
+
+	/*
+	 * Sanity check to prevent one userspace from manipulating
+	 * another userspace mm.
+	 */
+	BUG_ON(!container);
+	if (container->mm && container->mm != current->mm)
+		return -EPERM;
 
+	switch (cmd) {
 	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
 		struct vfio_iommu_spapr_tce_info info;
 		struct tce_iommu_group *tcegrp;
@@ -929,6 +954,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (copy_from_user(&param, (void __user *)arg, minsz))
 			return -EFAULT;
 
@@ -952,6 +981,9 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (!container->v2)
 			break;
 
+		if (!container->mm)
+			return -EPERM;
+
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
@@ -1010,6 +1042,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -1048,6 +1084,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -1093,7 +1133,7 @@ static void tce_iommu_release_ownership(struct tce_container *container,
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_userspace_view_free(tbl);
+		tce_iommu_userspace_view_free(tbl, container->mm);
 		if (tbl->it_map)
 			iommu_release_ownership(tbl);
 