summaryrefslogtreecommitdiffstats
path: root/drivers/vfio
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-08-17 14:32:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-08-17 14:32:50 -0400
commit5e2d059b52e397d9ac42f4c4d9d9a841887b5818 (patch)
treec8cd8fd7187113be33e29fcc75f45a8bbc27e6b2 /drivers/vfio
parentd190775206d06397a9309421cac5ba2f2c243521 (diff)
parenta2dc009afa9ae8b92305be7728676562a104cb40 (diff)
Merge tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman: "Notable changes: - A fix for a bug in our page table fragment allocator, where a page table page could be freed and reallocated for something else while still in use, leading to memory corruption etc. The fix reuses pt_mm in struct page (x86 only) for a powerpc only refcount. - Fixes to our pkey support. Several are user-visible changes, but bring us in to line with x86 behaviour and/or fix outright bugs. Thanks to Florian Weimer for reporting many of these. - A series to improve the hvc driver & related OPAL console code, which have been seen to cause hardlockups at times. The hvc driver changes in particular have been in linux-next for ~month. - Increase our MAX_PHYSMEM_BITS to 128TB when SPARSEMEM_VMEMMAP=y. - Remove Power8 DD1 and Power9 DD1 support, neither chip should be in use anywhere other than as a paper weight. - An optimised memcmp implementation using Power7-or-later VMX instructions - Support for barrier_nospec on some NXP CPUs. - Support for flushing the count cache on context switch on some IBM CPUs (controlled by firmware), as a Spectre v2 mitigation. - A series to enhance the information we print on unhandled signals to bring it into line with other arches, including showing the offending VMA and dumping the instructions around the fault. Thanks to: Aaro Koskinen, Akshay Adiga, Alastair D'Silva, Alexey Kardashevskiy, Alexey Spirkov, Alistair Popple, Andrew Donnellan, Aneesh Kumar K.V, Anju T Sudhakar, Arnd Bergmann, Bartosz Golaszewski, Benjamin Herrenschmidt, Bharat Bhushan, Bjoern Noetel, Boqun Feng, Breno Leitao, Bryant G. Ly, Camelia Groza, Christophe Leroy, Christoph Hellwig, Cyril Bur, Dan Carpenter, Daniel Klamt, Darren Stevens, Dave Young, David Gibson, Diana Craciun, Finn Thain, Florian Weimer, Frederic Barrat, Gautham R. 
Shenoy, Geert Uytterhoeven, Geoff Levand, Guenter Roeck, Gustavo Romero, Haren Myneni, Hari Bathini, Joel Stanley, Jonathan Neuschäfer, Kees Cook, Madhavan Srinivasan, Mahesh Salgaonkar, Markus Elfring, Mathieu Malaterre, Mauro S. M. Rodrigues, Michael Hanselmann, Michael Neuling, Michael Schmitz, Mukesh Ojha, Murilo Opsfelder Araujo, Nicholas Piggin, Parth Y Shah, Paul Mackerras, Paul Menzel, Ram Pai, Randy Dunlap, Rashmica Gupta, Reza Arbab, Rodrigo R. Galvao, Russell Currey, Sam Bobroff, Scott Wood, Shilpasri G Bhat, Simon Guo, Souptick Joarder, Stan Johnson, Thiago Jung Bauermann, Tyrel Datwyler, Vaibhav Jain, Vasant Hegde, Venkat Rao, zhong jiang" * tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (234 commits) powerpc/mm/book3s/radix: Add mapping statistics powerpc/uaccess: Enable get_user(u64, *p) on 32-bit powerpc/mm/hash: Remove unnecessary do { } while(0) loop powerpc/64s: move machine check SLB flushing to mm/slb.c powerpc/powernv/idle: Fix build error powerpc/mm/tlbflush: update the mmu_gather page size while iterating address range powerpc/mm: remove warning about ‘type’ being set powerpc/32: Include setup.h header file to fix warnings powerpc: Move `path` variable inside DEBUG_PROM powerpc/powermac: Make some functions static powerpc/powermac: Remove variable x that's never read cxl: remove a dead branch powerpc/powermac: Add missing include of header pmac.h powerpc/kexec: Use common error handling code in setup_new_fdt() powerpc/xmon: Add address lookup for percpu symbols powerpc/mm: remove huge_pte_offset_and_shift() prototype powerpc/lib: Use patch_site to patch copy_32 functions once cache is enabled powerpc/pseries: Fix endianness while restoring of r3 in MCE handler. powerpc/fadump: merge adjacent memory ranges to reduce PT_LOAD segements powerpc/fadump: handle crash memory ranges array index overflow ...
Diffstat (limited to 'drivers/vfio')
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c65
1 file changed, 9 insertions, 56 deletions
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 7cd63b0c1a46..96721b154454 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -211,44 +211,6 @@ static long tce_iommu_register_pages(struct tce_container *container,
211 return 0; 211 return 0;
212} 212}
213 213
214static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
215 struct mm_struct *mm)
216{
217 unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
218 tbl->it_size, PAGE_SIZE);
219 unsigned long *uas;
220 long ret;
221
222 BUG_ON(tbl->it_userspace);
223
224 ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
225 if (ret)
226 return ret;
227
228 uas = vzalloc(cb);
229 if (!uas) {
230 decrement_locked_vm(mm, cb >> PAGE_SHIFT);
231 return -ENOMEM;
232 }
233 tbl->it_userspace = uas;
234
235 return 0;
236}
237
238static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
239 struct mm_struct *mm)
240{
241 unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
242 tbl->it_size, PAGE_SIZE);
243
244 if (!tbl->it_userspace)
245 return;
246
247 vfree(tbl->it_userspace);
248 tbl->it_userspace = NULL;
249 decrement_locked_vm(mm, cb >> PAGE_SHIFT);
250}
251
252static bool tce_page_is_contained(struct page *page, unsigned page_shift) 214static bool tce_page_is_contained(struct page *page, unsigned page_shift)
253{ 215{
254 /* 216 /*
@@ -482,20 +444,20 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
482 struct mm_iommu_table_group_mem_t *mem = NULL; 444 struct mm_iommu_table_group_mem_t *mem = NULL;
483 int ret; 445 int ret;
484 unsigned long hpa = 0; 446 unsigned long hpa = 0;
485 unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); 447 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
486 448
487 if (!pua) 449 if (!pua)
488 return; 450 return;
489 451
490 ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift, 452 ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
491 &hpa, &mem); 453 tbl->it_page_shift, &hpa, &mem);
492 if (ret) 454 if (ret)
493 pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", 455 pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
494 __func__, *pua, entry, ret); 456 __func__, be64_to_cpu(*pua), entry, ret);
495 if (mem) 457 if (mem)
496 mm_iommu_mapped_dec(mem); 458 mm_iommu_mapped_dec(mem);
497 459
498 *pua = 0; 460 *pua = cpu_to_be64(0);
499} 461}
500 462
501static int tce_iommu_clear(struct tce_container *container, 463static int tce_iommu_clear(struct tce_container *container,
@@ -599,16 +561,9 @@ static long tce_iommu_build_v2(struct tce_container *container,
599 unsigned long hpa; 561 unsigned long hpa;
600 enum dma_data_direction dirtmp; 562 enum dma_data_direction dirtmp;
601 563
602 if (!tbl->it_userspace) {
603 ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
604 if (ret)
605 return ret;
606 }
607
608 for (i = 0; i < pages; ++i) { 564 for (i = 0; i < pages; ++i) {
609 struct mm_iommu_table_group_mem_t *mem = NULL; 565 struct mm_iommu_table_group_mem_t *mem = NULL;
610 unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, 566 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
611 entry + i);
612 567
613 ret = tce_iommu_prereg_ua_to_hpa(container, 568 ret = tce_iommu_prereg_ua_to_hpa(container,
614 tce, tbl->it_page_shift, &hpa, &mem); 569 tce, tbl->it_page_shift, &hpa, &mem);
@@ -642,7 +597,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
642 if (dirtmp != DMA_NONE) 597 if (dirtmp != DMA_NONE)
643 tce_iommu_unuse_page_v2(container, tbl, entry + i); 598 tce_iommu_unuse_page_v2(container, tbl, entry + i);
644 599
645 *pua = tce; 600 *pua = cpu_to_be64(tce);
646 601
647 tce += IOMMU_PAGE_SIZE(tbl); 602 tce += IOMMU_PAGE_SIZE(tbl);
648 } 603 }
@@ -676,7 +631,7 @@ static long tce_iommu_create_table(struct tce_container *container,
676 page_shift, window_size, levels, ptbl); 631 page_shift, window_size, levels, ptbl);
677 632
678 WARN_ON(!ret && !(*ptbl)->it_ops->free); 633 WARN_ON(!ret && !(*ptbl)->it_ops->free);
679 WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size)); 634 WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));
680 635
681 return ret; 636 return ret;
682} 637}
@@ -686,7 +641,6 @@ static void tce_iommu_free_table(struct tce_container *container,
686{ 641{
687 unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; 642 unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
688 643
689 tce_iommu_userspace_view_free(tbl, container->mm);
690 iommu_tce_table_put(tbl); 644 iommu_tce_table_put(tbl);
691 decrement_locked_vm(container->mm, pages); 645 decrement_locked_vm(container->mm, pages);
692} 646}
@@ -1201,7 +1155,6 @@ static void tce_iommu_release_ownership(struct tce_container *container,
1201 continue; 1155 continue;
1202 1156
1203 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); 1157 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
1204 tce_iommu_userspace_view_free(tbl, container->mm);
1205 if (tbl->it_map) 1158 if (tbl->it_map)
1206 iommu_release_ownership(tbl); 1159 iommu_release_ownership(tbl);
1207 1160