author	Alexey Kardashevskiy <aik@ozlabs.ru>	2018-07-04 02:13:47 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-07-16 08:53:10 -0400
commit	090bad39b237aad92d8e01baa033699cf0c81cbe (patch)
tree	e9586d1bdb4a7eaf03714c1d3c7eca391e4235fa /drivers
parent	00a5c58d9499bd0c290b57205f43a70f2e69d3f6 (diff)
powerpc/powernv: Add indirect levels to it_userspace
We want to support sparse memory and therefore huge chunks of DMA windows do not need to be mapped. If a DMA window is big enough to require 2 or more indirect levels, and the window is used to map all RAM (which is the default case for a 64-bit window), we can save some memory by not allocating TCEs for regions which we are not going to map anyway.

The hardware tables already support indirect levels, but we also keep a host-physical-to-userspace translation array which is allocated by vmalloc() and is a flat array which might use quite some memory.

This converts it_userspace from a vmalloc'ed array to a multi-level table.

As the format becomes platform dependent, this replaces the direct access to it_userspace with an iommu_table_ops::useraddrptr hook which returns a pointer to the userspace copy of a TCE; a future extension will return NULL if the level was not allocated.

This should not change non-KVM handling of TCE tables, and it_userspace will not be allocated for non-KVM tables.

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
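For illustration only, below is a minimal user-space C sketch of the multi-level idea the message describes: a lookup returns a pointer to the stored userspace address for a TCE index, or NULL when the covering level was never allocated, so sparse regions cost no memory. All names here (ulevel_table, ulevel_ptr, ENTRIES_PER_LEVEL) are hypothetical and do not appear in the patch.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define ENTRIES_PER_LEVEL 512	/* e.g. one 4K page of 8-byte entries */

struct ulevel_table {
	uint64_t **levels;	/* first level: pointers to leaf pages */
	unsigned long nlevels;	/* number of first-level slots */
};

/* Return the slot for @idx; optionally allocate the missing leaf. */
static uint64_t *ulevel_ptr(struct ulevel_table *t, unsigned long idx,
			    int alloc)
{
	unsigned long l1 = idx / ENTRIES_PER_LEVEL;
	unsigned long l2 = idx % ENTRIES_PER_LEVEL;

	if (l1 >= t->nlevels)
		return NULL;
	if (!t->levels[l1]) {
		if (!alloc)
			return NULL;	/* sparse region, never mapped */
		t->levels[l1] = calloc(ENTRIES_PER_LEVEL, sizeof(uint64_t));
		if (!t->levels[l1])
			return NULL;
	}
	return &t->levels[l1][l2];
}

int main(void)
{
	struct ulevel_table t = {
		.levels = calloc(4, sizeof(uint64_t *)),
		.nlevels = 4,
	};
	uint64_t *p, *q;

	if (!t.levels)
		return 1;

	p = ulevel_ptr(&t, 1000, 1);	/* allocates exactly one leaf */
	if (p)
		*p = 0x7f0000001000ULL;

	/* index 10 lies in a leaf that was never allocated: NULL */
	printf("idx 10   -> %p\n", (void *)ulevel_ptr(&t, 10, 0));
	q = ulevel_ptr(&t, 1000, 0);
	printf("idx 1000 -> 0x%llx\n", q ? (unsigned long long)*q : 0ULL);
	return 0;
}

Indexes 10 and 1000 land in different leaves (1000 / 512 = 1), so only the leaf covering index 1000 is ever allocated; the NULL return for index 10 is what lets the table stay sparse.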
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/vfio/vfio_iommu_spapr_tce.c	46
1 file changed, 0 insertions(+), 46 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8ab124a67311..54ae6c2be1b7 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -211,44 +211,6 @@ static long tce_iommu_register_pages(struct tce_container *container,
 	return 0;
 }
 
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
-		struct mm_struct *mm)
-{
-	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
-			tbl->it_size, PAGE_SIZE);
-	unsigned long *uas;
-	long ret;
-
-	BUG_ON(tbl->it_userspace);
-
-	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
-	if (ret)
-		return ret;
-
-	uas = vzalloc(cb);
-	if (!uas) {
-		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
-		return -ENOMEM;
-	}
-	tbl->it_userspace = (__be64 *) uas;
-
-	return 0;
-}
-
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
-		struct mm_struct *mm)
-{
-	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
-			tbl->it_size, PAGE_SIZE);
-
-	if (!tbl->it_userspace)
-		return;
-
-	vfree(tbl->it_userspace);
-	tbl->it_userspace = NULL;
-	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
-}
-
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)
 {
 	/*
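A rough sense of scale for the flat array removed in the hunk above: its cost is _ALIGN_UP(it_size * sizeof(__be64), PAGE_SIZE) bytes of vmalloc'ed memory, all charged to locked_vm up front. The window geometry in the sketch below is an assumed example, not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 64 * 1024;		/* 64K IOMMU pages */
	unsigned long window = 1UL << 40;		/* 1 TiB 64-bit window */
	unsigned long it_size = window / page_size;	/* number of TCEs */
	unsigned long cb = it_size * 8;			/* 8 bytes per entry */

	cb = (cb + page_size - 1) & ~(page_size - 1);	/* _ALIGN_UP */
	printf("flat it_userspace: %lu MiB locked up front\n", cb >> 20);
	return 0;
}

For this geometry the flat view costs 128 MiB per table even if almost nothing in the window is ever mapped, which is the memory the multi-level conversion avoids.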
@@ -599,12 +561,6 @@ static long tce_iommu_build_v2(struct tce_container *container,
 	unsigned long hpa;
 	enum dma_data_direction dirtmp;
 
-	if (!tbl->it_userspace) {
-		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
-		if (ret)
-			return ret;
-	}
-
 	for (i = 0; i < pages; ++i) {
 		struct mm_iommu_table_group_mem_t *mem = NULL;
 		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
@@ -685,7 +641,6 @@ static void tce_iommu_free_table(struct tce_container *container,
 {
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
-	tce_iommu_userspace_view_free(tbl, container->mm);
 	iommu_tce_table_put(tbl);
 	decrement_locked_vm(container->mm, pages);
 }
@@ -1200,7 +1155,6 @@ static void tce_iommu_release_ownership(struct tce_container *container,
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_userspace_view_free(tbl, container->mm);
 		if (tbl->it_map)
 			iommu_release_ownership(tbl);
 