author	Janosch Frank <frankja@linux.vnet.ibm.com>	2017-03-02 09:23:42 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-03-18 07:14:34 -0400
commit	9d89c20f3b8f82ddb1d7ef63748ad74691549e80
tree	f20e87391c5a60fd9dc91e6d17197618c8f1e2cb
parent	ce8ab5f168f654101ecba3f4443b12d4afdee841
KVM: s390: Fix guest migration for huge guests resulting in panic
commit 2e4d88009f57057df7672fa69a32b5224af54d37 upstream.

While we technically cannot run huge page guests right now, we can set up a guest with huge pages. Trying to migrate it will trigger a VM_BUG_ON and, if the kernel is not configured to panic on a BUG, it will happily try to work on non-existing page table entries.

With this patch, we always return "dirty" if we encounter a large page when migrating. This at least fixes the immediate problem until we have proper handling for both kinds of pages.

Fixes: 15f36eb ("KVM: s390: Add proper dirty bitmap support to S390 kvm.")
Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	arch/s390/mm/pgtable.c	19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index d56ef26d4681..7678f7956409 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -606,12 +606,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 {
 	spinlock_t *ptl;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 	pgste_t pgste;
 	pte_t *ptep;
 	pte_t pte;
 	bool dirty;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		return false;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return false;
+	/* We can't run guests backed by huge pages, but userspace can
+	 * still set them up and then try to migrate them without any
+	 * migration support.
+	 */
+	if (pmd_large(*pmd))
+		return true;
+
+	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (unlikely(!ptep))
 		return false;
 
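
The core of the change is easier to see stripped of the kernel page-table plumbing. The following is a minimal userspace sketch, not the kernel code: the types guest_mapping and mapping_level are hypothetical stand-ins. It only models the control flow the patch establishes in test_and_clear_guest_dirty(): a missing pud/pmd level is treated as not dirty, a huge-page (pmd_large) mapping is conservatively reported dirty because there is no PTE to consult, and a normal mapping falls through to its per-PTE dirty bit.

/*
 * Minimal userspace sketch of the decision the patch introduces, using
 * hypothetical stand-in types (guest_mapping, mapping_level); this is
 * not the kernel code.  A missing pud/pmd level means "not dirty", a
 * huge-page mapping is conservatively reported dirty because no PTE
 * exists to consult, and a normal 4K mapping falls through to its
 * dirty bit.
 */
#include <stdbool.h>
#include <stdio.h>

enum mapping_level {
	LEVEL_NONE,		/* pud/pmd not populated */
	LEVEL_PMD_LARGE,	/* backed by a huge page: no PTE level */
	LEVEL_PTE,		/* normal 4K mapping with a PTE */
};

struct guest_mapping {
	enum mapping_level level;
	bool pte_dirty;		/* software dirty bit, valid for LEVEL_PTE only */
};

/* Models the control flow of test_and_clear_guest_dirty() after the patch. */
static bool test_and_clear_dirty(struct guest_mapping *m)
{
	bool dirty;

	if (m->level == LEVEL_NONE)
		return false;		/* nothing mapped at this address */

	if (m->level == LEVEL_PMD_LARGE)
		return true;		/* no PTE to inspect: assume dirty */

	/* Normal case: report and clear the per-PTE dirty state. */
	dirty = m->pte_dirty;
	m->pte_dirty = false;
	return dirty;
}

int main(void)
{
	struct guest_mapping huge = { .level = LEVEL_PMD_LARGE };
	struct guest_mapping page = { .level = LEVEL_PTE, .pte_dirty = true };

	printf("huge page -> %d (always dirty)\n", test_and_clear_dirty(&huge));
	printf("4K page   -> %d (dirty once)\n", test_and_clear_dirty(&page));
	printf("4K page   -> %d (clean after clearing)\n",
	       test_and_clear_dirty(&page));
	return 0;
}

Returning true for the large-page case is the safe default: over-reporting dirtiness only costs an extra page transfer during migration, whereas descending into a non-existent PTE table is what triggered the VM_BUG_ON described in the commit message.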