diff options
author | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2015-03-19 12:42:26 -0400 |
---|---|---|
committer | Will Deacon <will.deacon@arm.com> | 2015-03-19 15:21:56 -0400 |
commit | 06f75a1f6200042aa36ad40afb44dd72107b25d6 (patch) | |
tree | 84f134705f009d74aba8637f25da3f887ea40c08 | |
parent | 06e5801b8cb3fc057d88cb4dc03c0b64b2744cda (diff) |
ARM, arm64: kvm: get rid of the bounce page
The HYP init bounce page is a runtime construct that ensures that the
HYP init code does not cross a page boundary. However, this is something
we can do perfectly well at build time, by aligning the code appropriately.
For arm64, we just align to 4 KB, and enforce that the code size is less
than 4 KB, regardless of the chosen page size.
For ARM, the whole code is less than 256 bytes, so we tweak the linker
script to align at a power of 2 upper bound of the code size.
Note that this also fixes a benign off-by-one error in the original bounce
page code, where a bounce page would be allocated unnecessarily if the code
was exactly 1 page in size.
On ARM, it also fixes an issue with very large kernels reported by Arnd
Bergmann, where stub sections with linker emitted veneers could erroneously
trigger the size/alignment ASSERT() in the linker script.
Tested-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r-- | arch/arm/kernel/vmlinux.lds.S | 18 | ||||
-rw-r--r-- | arch/arm/kvm/init.S | 3 | ||||
-rw-r--r-- | arch/arm/kvm/mmu.c | 42 | ||||
-rw-r--r-- | arch/arm64/kernel/vmlinux.lds.S | 17 |
4 files changed, 34 insertions, 46 deletions
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index b31aa73e8076..ba65f1217310 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -23,11 +23,20 @@ | |||
23 | VMLINUX_SYMBOL(__idmap_text_start) = .; \ | 23 | VMLINUX_SYMBOL(__idmap_text_start) = .; \ |
24 | *(.idmap.text) \ | 24 | *(.idmap.text) \ |
25 | VMLINUX_SYMBOL(__idmap_text_end) = .; \ | 25 | VMLINUX_SYMBOL(__idmap_text_end) = .; \ |
26 | . = ALIGN(32); \ | 26 | . = ALIGN(1 << LOG2CEIL(__hyp_idmap_size)); \ |
27 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ | 27 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ |
28 | *(.hyp.idmap.text) \ | 28 | *(.hyp.idmap.text) \ |
29 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; | 29 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; |
30 | 30 | ||
31 | /* | ||
32 | * If the HYP idmap .text section is populated, it needs to be positioned | ||
33 | * such that it will not cross a page boundary in the final output image. | ||
34 | * So align it to the section size rounded up to the next power of 2. | ||
35 | * If __hyp_idmap_size is undefined, the section will be empty so define | ||
36 | * it as 0 in that case. | ||
37 | */ | ||
38 | PROVIDE(__hyp_idmap_size = 0); | ||
39 | |||
31 | #ifdef CONFIG_HOTPLUG_CPU | 40 | #ifdef CONFIG_HOTPLUG_CPU |
32 | #define ARM_CPU_DISCARD(x) | 41 | #define ARM_CPU_DISCARD(x) |
33 | #define ARM_CPU_KEEP(x) x | 42 | #define ARM_CPU_KEEP(x) x |
@@ -346,8 +355,11 @@ SECTIONS | |||
346 | */ | 355 | */ |
347 | ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") | 356 | ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") |
348 | ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") | 357 | ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") |
358 | |||
349 | /* | 359 | /* |
350 | * The HYP init code can't be more than a page long. | 360 | * The HYP init code can't be more than a page long, |
361 | * and should not cross a page boundary. | ||
351 | * The above comment applies as well. | 362 | * The above comment applies as well. |
352 | */ | 363 | */ |
353 | ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big") | 364 | ASSERT((__hyp_idmap_text_start & ~PAGE_MASK) + __hyp_idmap_size <= PAGE_SIZE, |
365 | "HYP init code too big or misaligned") | ||
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 3988e72d16ff..11fb1d56f449 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S | |||
@@ -157,3 +157,6 @@ target: @ We're now in the trampoline code, switch page tables | |||
157 | __kvm_hyp_init_end: | 157 | __kvm_hyp_init_end: |
158 | 158 | ||
159 | .popsection | 159 | .popsection |
160 | |||
161 | .global __hyp_idmap_size | ||
162 | .set __hyp_idmap_size, __kvm_hyp_init_end - __kvm_hyp_init | ||
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859bc3e11..42a24d6b003b 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -37,7 +37,6 @@ static pgd_t *boot_hyp_pgd; | |||
37 | static pgd_t *hyp_pgd; | 37 | static pgd_t *hyp_pgd; |
38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); | 38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
39 | 39 | ||
40 | static void *init_bounce_page; | ||
41 | static unsigned long hyp_idmap_start; | 40 | static unsigned long hyp_idmap_start; |
42 | static unsigned long hyp_idmap_end; | 41 | static unsigned long hyp_idmap_end; |
43 | static phys_addr_t hyp_idmap_vector; | 42 | static phys_addr_t hyp_idmap_vector; |
@@ -405,9 +404,6 @@ void free_boot_hyp_pgd(void) | |||
405 | if (hyp_pgd) | 404 | if (hyp_pgd) |
406 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 405 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
407 | 406 | ||
408 | free_page((unsigned long)init_bounce_page); | ||
409 | init_bounce_page = NULL; | ||
410 | |||
411 | mutex_unlock(&kvm_hyp_pgd_mutex); | 407 | mutex_unlock(&kvm_hyp_pgd_mutex); |
412 | } | 408 | } |
413 | 409 | ||
@@ -1498,39 +1494,11 @@ int kvm_mmu_init(void) | |||
1498 | hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); | 1494 | hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); |
1499 | hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); | 1495 | hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); |
1500 | 1496 | ||
1501 | if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) { | 1497 | /* |
1502 | /* | 1498 | * We rely on the linker script to ensure at build time that the HYP |
1503 | * Our init code is crossing a page boundary. Allocate | 1499 | * init code does not cross a page boundary. |
1504 | * a bounce page, copy the code over and use that. | 1500 | */ |
1505 | */ | 1501 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); |
1506 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; | ||
1507 | phys_addr_t phys_base; | ||
1508 | |||
1509 | init_bounce_page = (void *)__get_free_page(GFP_KERNEL); | ||
1510 | if (!init_bounce_page) { | ||
1511 | kvm_err("Couldn't allocate HYP init bounce page\n"); | ||
1512 | err = -ENOMEM; | ||
1513 | goto out; | ||
1514 | } | ||
1515 | |||
1516 | memcpy(init_bounce_page, __hyp_idmap_text_start, len); | ||
1517 | /* | ||
1518 | * Warning: the code we just copied to the bounce page | ||
1519 | * must be flushed to the point of coherency. | ||
1520 | * Otherwise, the data may be sitting in L2, and HYP | ||
1521 | * mode won't be able to observe it as it runs with | ||
1522 | * caches off at that point. | ||
1523 | */ | ||
1524 | kvm_flush_dcache_to_poc(init_bounce_page, len); | ||
1525 | |||
1526 | phys_base = kvm_virt_to_phys(init_bounce_page); | ||
1527 | hyp_idmap_vector += phys_base - hyp_idmap_start; | ||
1528 | hyp_idmap_start = phys_base; | ||
1529 | hyp_idmap_end = phys_base + len; | ||
1530 | |||
1531 | kvm_info("Using HYP init bounce page @%lx\n", | ||
1532 | (unsigned long)phys_base); | ||
1533 | } | ||
1534 | 1502 | ||
1535 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); | 1503 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
1536 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); | 1504 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 5d9d2dca530d..a2c29865c3fe 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -23,10 +23,14 @@ jiffies = jiffies_64; | |||
23 | 23 | ||
24 | #define HYPERVISOR_TEXT \ | 24 | #define HYPERVISOR_TEXT \ |
25 | /* \ | 25 | /* \ |
26 | * Force the alignment to be compatible with \ | 26 | * Align to 4 KB so that \ |
27 | * the vectors requirements \ | 27 | * a) the HYP vector table is at its minimum \ |
28 | * alignment of 2048 bytes \ | ||
29 | * b) the HYP init code will not cross a page \ | ||
30 | * boundary if its size does not exceed \ | ||
31 | * 4 KB (see related ASSERT() below) \ | ||
28 | */ \ | 32 | */ \ |
29 | . = ALIGN(2048); \ | 33 | . = ALIGN(SZ_4K); \ |
30 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ | 34 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ |
31 | *(.hyp.idmap.text) \ | 35 | *(.hyp.idmap.text) \ |
32 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ | 36 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ |
@@ -163,10 +167,11 @@ SECTIONS | |||
163 | } | 167 | } |
164 | 168 | ||
165 | /* | 169 | /* |
166 | * The HYP init code can't be more than a page long. | 170 | * The HYP init code can't be more than a page long, |
171 | * and should not cross a page boundary. | ||
167 | */ | 172 | */ |
168 | ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), | 173 | ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, |
169 | "HYP init code too big") | 174 | "HYP init code too big or misaligned") |
170 | 175 | ||
171 | /* | 176 | /* |
172 | * If padding is applied before .head.text, virt<->phys conversions will fail. | 177 | * If padding is applied before .head.text, virt<->phys conversions will fail. |