about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorJanosch Frank <frankja@linux.vnet.ibm.com>2017-03-02 09:23:42 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2017-03-02 11:17:16 -0500
commit2e4d88009f57057df7672fa69a32b5224af54d37 (patch)
tree06e098c4ba6b3f091a3262b60ff562069f73ee11
parent7afbeb6df2aa5f9e3a0fc228817a85c16dea0faa (diff)
KVM: s390: Fix guest migration for huge guests resulting in panic
While we can technically not run huge page guests right now, we can
setup a guest with huge pages. Trying to migrate it will trigger a
VM_BUG_ON and, if the kernel is not configured to panic on a BUG, it
will happily try to work on non-existing page table entries.

With this patch, we always return "dirty" if we encounter a large page
when migrating. This at least fixes the immediate problem until we
have proper handling for both kind of pages.

Fixes: 15f36eb ("KVM: s390: Add proper dirty bitmap support to S390 kvm.")
Cc: <stable@vger.kernel.org> # 3.16+
Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--  arch/s390/mm/pgtable.c | 19 +++++++++++++++++++-
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b48dc5f1900b..463e5ef02304 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 {
 	spinlock_t *ptl;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 	pgste_t pgste;
 	pte_t *ptep;
 	pte_t pte;
 	bool dirty;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		return false;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return false;
+	/* We can't run guests backed by huge pages, but userspace can
+	 * still set them up and then try to migrate them without any
+	 * migration support.
+	 */
+	if (pmd_large(*pmd))
+		return true;
+
+	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (unlikely(!ptep))
 		return false;
 