author     Marc Zyngier <marc.zyngier@arm.com>             2015-03-10 15:06:59 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org>  2015-03-11 09:23:20 -0400
commit     a987370f8e7a1677ae385042644326d9cd145a20 (patch)
tree       be11b7e7eac43314ff7ba8715c72bbdecdc905fb /arch/arm/kvm
parent     bfb8fb4775d3397908ae3a7ff65807097d81d713 (diff)
arm64: KVM: Fix stage-2 PGD allocation to have per-page refcounting
We're using __get_free_pages() to allocate the guest's stage-2
PGD. The standard behaviour of this function is to return a set of
pages where only the head page has a valid refcount.
This behaviour gets us into trouble when we're trying to increment
the refcount on a non-head page:
page:ffff7c00cfb693c0 count:0 mapcount:0 mapping: (null) index:0x0
flags: 0x4000000000000000()
page dumped because: VM_BUG_ON_PAGE((*({ __attribute__((unused)) typeof((&page->_count)->counter) __var = ( typeof((&page->_count)->counter)) 0; (volatile typeof((&page->_count)->counter) *)&((&page->_count)->counter); })) <= 0)
BUG: failure at include/linux/mm.h:548/get_page()!
Kernel panic - not syncing: BUG!
CPU: 1 PID: 1695 Comm: kvm-vcpu-0 Not tainted 4.0.0-rc1+ #3825
Hardware name: APM X-Gene Mustang board (DT)
Call trace:
[<ffff80000008a09c>] dump_backtrace+0x0/0x13c
[<ffff80000008a1e8>] show_stack+0x10/0x1c
[<ffff800000691da8>] dump_stack+0x74/0x94
[<ffff800000690d78>] panic+0x100/0x240
[<ffff8000000a0bc4>] stage2_get_pmd+0x17c/0x2bc
[<ffff8000000a1dc4>] kvm_handle_guest_abort+0x4b4/0x6b0
[<ffff8000000a420c>] handle_exit+0x58/0x180
[<ffff80000009e7a4>] kvm_arch_vcpu_ioctl_run+0x114/0x45c
[<ffff800000099df4>] kvm_vcpu_ioctl+0x2e0/0x754
[<ffff8000001c0a18>] do_vfs_ioctl+0x424/0x5c8
[<ffff8000001c0bfc>] SyS_ioctl+0x40/0x78
CPU0: stopping
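
To make the failure mode concrete, here is a minimal sketch of the refcount
asymmetry (illustrative only, not part of the patch; the helper name
refcount_demo is invented and a kernel-module context is assumed):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>

/*
 * Illustrative only: a high-order block from __get_free_pages() has a
 * valid refcount on its head page alone, so get_page() on any tail
 * page trips the VM_BUG_ON_PAGE() shown above. alloc_pages_exact()
 * hands back individually refcounted pages instead.
 */
static void refcount_demo(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	void *buf;

	if (addr) {
		/* Tail page of the order-1 block: refcount is 0. */
		struct page *tail = virt_to_page(addr + PAGE_SIZE);

		pr_info("tail refcount: %d\n", page_count(tail));
		/* get_page(tail) here would hit the BUG above. */
		free_pages(addr, 1);
	}

	buf = alloc_pages_exact(2 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
	if (buf) {
		/* The second page has its own refcount of 1. */
		struct page *second = virt_to_page(buf + PAGE_SIZE);

		pr_info("second refcount: %d\n", page_count(second));
		free_pages_exact(buf, 2 * PAGE_SIZE);
	}
}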
A possible approach for this is to split the compound page using
split_page() at allocation time, and change the teardown path to
free one page at a time. It turns out that alloc_pages_exact() and
free_pages_exact() do exactly that.
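
As a rough sketch of that split-and-free idea (illustrative; alloc_exact_sketch
is an invented name, and this is a simplification of what alloc_pages_exact()
does internally, not the verbatim mm/page_alloc.c implementation):

/*
 * Illustrative sketch: allocate a high-order block, use split_page()
 * to give every page its own refcount, then free the pages past the
 * requested size.
 */
static void *alloc_exact_sketch(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr = __get_free_pages(gfp_mask, order);

	if (addr) {
		unsigned long end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page(addr), order);
		while (used < end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}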
While we're at it, the PGD allocation code is reworked to reduce
duplication.
This has been tested on an X-Gene platform with a 4kB/48bit-VA host
kernel, and kvmtool hacked to place memory in the second page of
the hardware PGD (PUD for the host kernel). Also regression-tested
on a Cubietruck (Cortex-A7).
[ Reworked to use alloc_pages_exact() and free_pages_exact() and to
return pointers directly instead of by reference as arguments
- Christoffer ]
Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'arch/arm/kvm')
 arch/arm/kvm/mmu.c | 67 ++++++++++++++++++++++++----------
 1 file changed, 49 insertions(+), 18 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859bc3e11..a48a73c6b866 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
+/* Free the HW pgd, one page at a time */
+static void kvm_free_hwpgd(void *hwpgd)
+{
+	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
+}
+
+/* Allocate the HW PGD, making sure that each page gets its own refcount */
+static void *kvm_alloc_hwpgd(void)
+{
+	unsigned int size = kvm_get_hwpgd_size();
+
+	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+}
+
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
-	int ret;
 	pgd_t *pgd;
+	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
+	hwpgd = kvm_alloc_hwpgd();
+	if (!hwpgd)
+		return -ENOMEM;
+
+	/* When the kernel uses more levels of page tables than the
+	 * guest, we allocate a fake PGD and pre-populate it to point
+	 * to the next-level page table, which will be the real
+	 * initial page table pointed to by the VTTBR.
+	 *
+	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+	 * the PMD and the kernel will use folded pud.
+	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+	 * pages.
+	 */
 	if (KVM_PREALLOC_LEVEL > 0) {
+		int i;
+
 		/*
 		 * Allocate fake pgd for the page table manipulation macros to
 		 * work.  This is not used by the hardware and we have no
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 		 */
 		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
 				       GFP_KERNEL | __GFP_ZERO);
+
+		if (!pgd) {
+			kvm_free_hwpgd(hwpgd);
+			return -ENOMEM;
+		}
+
+		/* Plug the HW PGD into the fake one. */
+		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+			if (KVM_PREALLOC_LEVEL == 1)
+				pgd_populate(NULL, pgd + i,
+					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+			else if (KVM_PREALLOC_LEVEL == 2)
+				pud_populate(NULL, pud_offset(pgd, 0) + i,
+					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+		}
 	} else {
 		/*
 		 * Allocate actual first-level Stage-2 page table used by the
 		 * hardware for Stage-2 page table walks.
 		 */
-		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+		pgd = (pgd_t *)hwpgd;
 	}
 
-	if (!pgd)
-		return -ENOMEM;
-
-	ret = kvm_prealloc_hwpgd(kvm, pgd);
-	if (ret)
-		goto out_err;
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
-out_err:
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(pgd);
-	else
-		free_pages((unsigned long)pgd, S2_PGD_ORDER);
-	return ret;
 }
 
 /**
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm);
+	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
 	if (KVM_PREALLOC_LEVEL > 0)
 		kfree(kvm->arch.pgd);
-	else
-		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+
 	kvm->arch.pgd = NULL;
 }
 