| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-03-30 10:37:08 -0400 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-03-30 10:37:08 -0400 |
| commit | 6e0aa8018f9c676b115b7ca6c20a056fc57c68a9 | |
| tree | b35393574b48dcc05c99599add7f8b0015de3913 /arch/arm/kvm | |
| parent | 1ff27a3443addb19af609245b218fa510c7d37dd | |
| parent | e42391cd048809d903291d07f86ed3934ce138e9 | |
Merge tag 'v4.0-rc6' into drm-intel-next
Backmerge Linux 4.0-rc6 because conflicts are (again) getting out of
hand. To make sure we don't lose any bugfixes from the 4.0-rc5/-rc6
flurry of patches, we've applied them all to -next too.
Conflicts:
drivers/gpu/drm/i915/intel_display.c
Always take the version from -next; we've already handled all
conflicts with explicit cherry-picking.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'arch/arm/kvm')
| -rw-r--r-- | arch/arm/kvm/mmu.c | 75 |
1 file changed, 53 insertions(+), 22 deletions(-)
```diff
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859bc3e11..5656d79c5a44 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
-	pgd = pgdp + pgd_index(addr);
+	pgd = pgdp + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		if (!pgd_none(*pgd))
```
```diff
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		stage2_flush_puds(kvm, pgd, addr, next);
```
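The pgd-indexing hunks in this file (two above, two more below) all make the same substitution: pgd_index() becomes kvm_pgd_index(). The stage-2 PGD is sized by the guest IPA space rather than the host VA space, so indexing it with the host's pgd_index() can walk past the end of the table. A hedged sketch of the helper these hunks switch to — assumed from the same patch series, not part of this diff; the authoritative definitions live in asm/kvm_mmu.h and may differ between kernel versions:

```c
/*
 * Assumed sketch of kvm_pgd_index(); consult asm/kvm_mmu.h for the
 * real definitions.
 */
#ifdef CONFIG_ARM64
/* The stage-2 PGD covers the IPA space, so it gets its own mask. */
#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
#else
/* On 32-bit ARM the stage-2 PGD layout matches the host's. */
#define kvm_pgd_index(addr)	pgd_index(addr)
#endif
```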
```diff
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
+/* Free the HW pgd, one page at a time */
+static void kvm_free_hwpgd(void *hwpgd)
+{
+	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
+}
+
+/* Allocate the HW PGD, making sure that each page gets its own refcount */
+static void *kvm_alloc_hwpgd(void)
+{
+	unsigned int size = kvm_get_hwpgd_size();
+
+	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+}
+
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
```
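These two helpers carry the substance of the fix. The old code allocated the hardware PGD with __get_free_pages(), which returns a high-order block whose refcount lives only on its head page; once stage-2 table pages are refcounted individually, taking or dropping references through a tail page is unsafe. alloc_pages_exact() splits the high-order block so every constituent page carries its own refcount. A minimal sketch of the contrast, with the size chosen purely for illustration:

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only -- not from the patch. */
static int allocator_contrast(void)
{
	size_t size = 2 * PAGE_SIZE;	/* assumed size for the example */
	unsigned long block;
	void *buf;

	/* Old pattern: one order-1 block; only the head page is refcounted,
	 * so it must be freed as a whole with a matching order. */
	block = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!block)
		return -ENOMEM;
	free_pages(block, 1);

	/* New pattern: the same amount of memory, but split into order-0
	 * pages, each with its own refcount, so individual pages can be
	 * pinned and released safely. */
	buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		return -ENOMEM;
	free_pages_exact(buf, size);
	return 0;
}
```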
```diff
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
-	int ret;
 	pgd_t *pgd;
+	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
+	hwpgd = kvm_alloc_hwpgd();
+	if (!hwpgd)
+		return -ENOMEM;
+
+	/* When the kernel uses more levels of page tables than the
+	 * guest, we allocate a fake PGD and pre-populate it to point
+	 * to the next-level page table, which will be the real
+	 * initial page table pointed to by the VTTBR.
+	 *
+	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+	 * the PMD and the kernel will use folded pud.
+	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+	 * pages.
+	 */
 	if (KVM_PREALLOC_LEVEL > 0) {
+		int i;
+
 		/*
 		 * Allocate fake pgd for the page table manipulation macros to
 		 * work. This is not used by the hardware and we have no
```
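The block comment above pins down the hwpgd sizing: with a fake PGD, each of its PTRS_PER_S2_PGD entries must point at one full page of next-level (PUD or PMD) entries; without one, the hardware walks the stage-2 PGD directly. A hedged sketch of kvm_get_hwpgd_size() consistent with that comment — assumed from the same series, with the authoritative copy in asm/kvm_mmu.h:

```c
/* Assumed sketch, not part of this diff. */
static inline unsigned int kvm_get_hwpgd_size(void)
{
	/* One page of next-level entries per fake-PGD slot... */
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	/* ...otherwise the hardware uses the stage-2 PGD itself. */
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
```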
```diff
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 		 */
 		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
 				       GFP_KERNEL | __GFP_ZERO);
+
+		if (!pgd) {
+			kvm_free_hwpgd(hwpgd);
+			return -ENOMEM;
+		}
+
+		/* Plug the HW PGD into the fake one. */
+		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+			if (KVM_PREALLOC_LEVEL == 1)
+				pgd_populate(NULL, pgd + i,
+					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+			else if (KVM_PREALLOC_LEVEL == 2)
+				pud_populate(NULL, pud_offset(pgd, 0) + i,
+					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+		}
 	} else {
 		/*
 		 * Allocate actual first-level Stage-2 page table used by the
 		 * hardware for Stage-2 page table walks.
 		 */
-		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+		pgd = (pgd_t *)hwpgd;
 	}
 
-	if (!pgd)
-		return -ENOMEM;
-
-	ret = kvm_prealloc_hwpgd(kvm, pgd);
-	if (ret)
-		goto out_err;
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
-out_err:
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(pgd);
-	else
-		free_pages((unsigned long)pgd, S2_PGD_ORDER);
-	return ret;
 }
 
 /**
```
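The populate loop stitches the two tables together: fake-PGD entry i is pointed at page i of the hwpgd region (a PUD page when KVM_PREALLOC_LEVEL == 1, a PMD page when it is 2). Expanding one assumed configuration makes the stride visible:

```c
/*
 * Illustrative expansion for KVM_PREALLOC_LEVEL == 1 with
 * PTRS_PER_S2_PGD == 2 (values assumed for the example):
 *
 *	pgd_populate(NULL, pgd + 0, (pud_t *)hwpgd);			// page 0
 *	pgd_populate(NULL, pgd + 1, (pud_t *)hwpgd + PTRS_PER_PUD);	// page 1
 *
 * PTRS_PER_PUD pud_t entries fill exactly one page, so a stride of
 * i * PTRS_PER_PUD advances one hwpgd page per fake-PGD entry.
 */
```

Wiring everything up front is also why the old out_err unwinding disappears: once kvm_alloc_hwpgd() has succeeded, the only remaining failure is the kmalloc() of the fake PGD, which frees hwpgd and returns on the spot.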
```diff
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm);
+	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
 	if (KVM_PREALLOC_LEVEL > 0)
 		kfree(kvm->arch.pgd);
-	else
-		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+
 	kvm->arch.pgd = NULL;
 }
 
```
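On teardown, kvm->arch.pgd may be the fake PGD, so the freeing path now asks kvm_get_hwpgd() for the table the hardware actually walks before handing it to kvm_free_hwpgd(). A hedged sketch of that accessor — assumed from the same series; see asm/kvm_mmu.h for the real one:

```c
/* Assumed sketch: recover the hardware table from the (possibly fake)
 * stage-2 PGD by following its first entry down the pre-populated
 * levels. */
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}
```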
```diff
@@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	if (WARN_ON(pgd_none(*pgd))) {
 		if (!cache)
 			return NULL;
```
```diff
@@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
```
