author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 14:58:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 14:58:29 -0400
commit    714d8e7e27197dd39b2550e762a6a6fcf397a471 (patch)
tree      bc989a2a0e14f21912943e56d0002a26a2b7793e /arch/arm/kvm
parent    d19d5efd8c8840aa4f38a6dfbfe500d8cc27de46 (diff)
parent    6d1966dfd6e0ad2f8aa4b664ae1a62e33abe1998 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Here are the core arm64 updates for 4.1.

  Highlights include a significant rework to head.S (allowing us to
  boot on machines with physical memory at a really high address), an
  AES performance boost on Cortex-A57 and the ability to run a 32-bit
  userspace with 64k pages (although this requires said userspace to
  be built with a recent binutils).

  The head.S rework spilt over into KVM, so there are some changes
  under arch/arm/ which have been acked by Marc Zyngier (KVM
  co-maintainer). In particular, the linker script changes caused us
  some issues in -next, so there are a few merge commits where we had
  to apply fixes on top of a stable branch.

  Other changes include:

   - AES performance boost for Cortex-A57

   - AArch32 (compat) userspace with 64k pages

   - Cortex-A53 erratum workaround for #845719

   - defconfig updates (new platforms, PCI, ...)"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (39 commits)
  arm64: fix midr range for Cortex-A57 erratum 832075
  arm64: errata: add workaround for cortex-a53 erratum #845719
  arm64: Use bool function return values of true/false not 1/0
  arm64: defconfig: updates for 4.1
  arm64: Extract feature parsing code from cpu_errata.c
  arm64: alternative: Allow immediate branch as alternative instruction
  arm64: insn: Add aarch64_insn_decode_immediate
  ARM: kvm: round HYP section to page size instead of log2 upper bound
  ARM: kvm: assert on HYP section boundaries not actual code size
  arm64: head.S: ensure idmap_t0sz is visible
  arm64: pmu: add support for interrupt-affinity property
  dt: pmu: extend ARM PMU binding to allow for explicit interrupt affinity
  arm64: head.S: ensure visibility of page tables
  arm64: KVM: use ID map with increased VA range if required
  arm64: mm: increase VA range of identity map
  ARM: kvm: implement replacement for ld's LOG2CEIL()
  arm64: proc: remove unused cpu_get_pgd macro
  arm64: enforce x1|x2|x3 == 0 upon kernel entry as per boot protocol
  arm64: remove __calc_phys_offset
  arm64: merge __enable_mmu and __turn_mmu_on
  ...
Diffstat (limited to 'arch/arm/kvm')
 -rw-r--r--  arch/arm/kvm/mmu.c | 69
 1 file changed, 30 insertions(+), 39 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 15b050d46fc9..1d5accbd3dcf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -35,9 +35,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static pgd_t *boot_hyp_pgd;
 static pgd_t *hyp_pgd;
+static pgd_t *merged_hyp_pgd;
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
-static void *init_bounce_page;
 static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
@@ -405,9 +405,6 @@ void free_boot_hyp_pgd(void)
 	if (hyp_pgd)
 		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-	free_page((unsigned long)init_bounce_page);
-	init_bounce_page = NULL;
-
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
 
@@ -438,6 +435,11 @@ void free_hyp_pgds(void)
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
 	}
+	if (merged_hyp_pgd) {
+		clear_page(merged_hyp_pgd);
+		free_page((unsigned long)merged_hyp_pgd);
+		merged_hyp_pgd = NULL;
+	}
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
@@ -1622,12 +1624,18 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 
 phys_addr_t kvm_mmu_get_httbr(void)
 {
-	return virt_to_phys(hyp_pgd);
+	if (__kvm_cpu_uses_extended_idmap())
+		return virt_to_phys(merged_hyp_pgd);
+	else
+		return virt_to_phys(hyp_pgd);
 }
 
 phys_addr_t kvm_mmu_get_boot_httbr(void)
 {
-	return virt_to_phys(boot_hyp_pgd);
+	if (__kvm_cpu_uses_extended_idmap())
+		return virt_to_phys(merged_hyp_pgd);
+	else
+		return virt_to_phys(boot_hyp_pgd);
 }
 
 phys_addr_t kvm_get_idmap_vector(void)
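Both base-register getters now funnel through the same predicate: once the extended idmap is in use, the boot and runtime HYP page tables are both reached through the single merged top level, so either caller must be handed that one table. A standalone sketch of the selection pattern (__kvm_cpu_uses_extended_idmap() is the kernel's real per-architecture predicate; every other name here is illustrative):

    #include <stdint.h>

    typedef uint64_t phys_addr_t;

    /* Stand-ins for the three page-table globals in mmu.c. */
    static phys_addr_t boot_pgd_pa, runtime_pgd_pa, merged_pgd_pa;
    static int extended_idmap;	/* models __kvm_cpu_uses_extended_idmap() */

    /* Models kvm_mmu_get_httbr()/kvm_mmu_get_boot_httbr(): with an
     * extended idmap both callers get the merged table, since that is
     * the only top level installed in the base register. */
    static phys_addr_t httbr(int boot)
    {
    	if (extended_idmap)
    		return merged_pgd_pa;
    	return boot ? boot_pgd_pa : runtime_pgd_pa;
    }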
@@ -1643,39 +1651,11 @@ int kvm_mmu_init(void)
 	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
 	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
 
-	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
-		/*
-		 * Our init code is crossing a page boundary. Allocate
-		 * a bounce page, copy the code over and use that.
-		 */
-		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
-		phys_addr_t phys_base;
-
-		init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
-		if (!init_bounce_page) {
-			kvm_err("Couldn't allocate HYP init bounce page\n");
-			err = -ENOMEM;
-			goto out;
-		}
-
-		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
-		/*
-		 * Warning: the code we just copied to the bounce page
-		 * must be flushed to the point of coherency.
-		 * Otherwise, the data may be sitting in L2, and HYP
-		 * mode won't be able to observe it as it runs with
-		 * caches off at that point.
-		 */
-		kvm_flush_dcache_to_poc(init_bounce_page, len);
-
-		phys_base = kvm_virt_to_phys(init_bounce_page);
-		hyp_idmap_vector += phys_base - hyp_idmap_start;
-		hyp_idmap_start = phys_base;
-		hyp_idmap_end = phys_base + len;
-
-		kvm_info("Using HYP init bounce page @%lx\n",
-			 (unsigned long)phys_base);
-	}
+	/*
+	 * We rely on the linker script to ensure at build time that the HYP
+	 * init code does not cross a page boundary.
+	 */
+	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
 
 	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
 	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
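The new assertion packs the page-boundary test into one XOR: two byte addresses share a page exactly when XOR-ing them leaves nothing above the page-offset bits, and using hyp_idmap_end - 1 (the last byte of the section, rather than the one-past-the-end address) keeps a section ending exactly on a page boundary from tripping the check. A standalone sketch of the idiom, assuming 4 KiB pages (fits_in_one_page is a made-up name):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12			/* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* True if the byte range [start, end) lies within a single page. */
    static bool fits_in_one_page(uintptr_t start, uintptr_t end)
    {
    	/* Same condition the BUG_ON inverts: XOR the first and last
    	 * byte of the range; any surviving bit above the page offset
    	 * means the two bytes sit on different pages. */
    	return ((start ^ (end - 1)) & PAGE_MASK) == 0;
    }

    int main(void)
    {
    	assert(fits_in_one_page(0x1000, 0x2000));	/* exactly one page */
    	assert(fits_in_one_page(0x1f00, 0x2000));	/* ends on a boundary */
    	assert(!fits_in_one_page(0x1f00, 0x2001));	/* one byte too far */
    	puts("page-boundary checks behave as expected");
    	return 0;
    }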
@@ -1698,6 +1678,17 @@ int kvm_mmu_init(void)
 		goto out;
 	}
 
+	if (__kvm_cpu_uses_extended_idmap()) {
+		merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!merged_hyp_pgd) {
+			kvm_err("Failed to allocate extra HYP pgd\n");
+			goto out;
+		}
+		__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
+				    hyp_idmap_start);
+		return 0;
+	}
+
 	/* Map the very same page at the trampoline VA */
 	err = __create_hyp_mappings(boot_hyp_pgd,
 				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
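The extended-idmap branch bypasses the trampoline mapping entirely: __kvm_extend_hypmap() (defined outside this diff) populates the freshly allocated page as an extra translation level whose entries reach the existing boot and runtime pgds, which is why both HTTBR getters above return merged_hyp_pgd. A toy model of that shape, with made-up names and a pretend four-entry top level instead of real hardware descriptors:

    #include <stdio.h>

    #define TOP_ENTRIES 4	/* pretend top level with four slots */

    /* A toy "descriptor" is just a pointer to the next-level table. */
    struct toy_pgd { void *next_table[TOP_ENTRIES]; };

    /* Models the idea behind __kvm_extend_hypmap(): hang the runtime
     * pgd and the boot/idmap pgd off one new top level, so a single
     * table base register covers both the HYP VA range and the
     * identity-mapped init code. Slot choices are illustrative only. */
    static void toy_extend_hypmap(struct toy_pgd *merged,
    			      void *boot_pgd, void *runtime_pgd,
    			      unsigned int idmap_slot)
    {
    	merged->next_table[0] = runtime_pgd;		/* HYP VAs */
    	merged->next_table[idmap_slot] = boot_pgd;	/* idmap VAs */
    }

    int main(void)
    {
    	static long boot[512], runtime[512];
    	struct toy_pgd merged = { { 0 } };

    	toy_extend_hypmap(&merged, boot, runtime, 2);
    	printf("slot 0 -> runtime pgd %p, slot 2 -> boot pgd %p\n",
    	       merged.next_table[0], merged.next_table[2]);
    	return 0;
    }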