author     Ard Biesheuvel <ard.biesheuvel@linaro.org>   2015-03-19 12:42:26 -0400
committer  Will Deacon <will.deacon@arm.com>            2015-03-19 15:21:56 -0400
commit     06f75a1f6200042aa36ad40afb44dd72107b25d6
tree       84f134705f009d74aba8637f25da3f887ea40c08  /arch/arm/kernel/vmlinux.lds.S
parent     06e5801b8cb3fc057d88cb4dc03c0b64b2744cda
ARM, arm64: kvm: get rid of the bounce page
The HYP init bounce page is a runtime construct that ensures that the
HYP init code does not cross a page boundary. However, this is something
we can do perfectly well at build time, by aligning the code appropriately.
For arm64, we just align to 4 KB, and enforce that the code size is less
than 4 KB, regardless of the chosen page size.
For ARM, the whole code is less than 256 bytes, so we tweak the linker
script to align at a power-of-2 upper bound of the code size.
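
A minimal user-space sketch of the reasoning (hypothetical sizes, 4 KB PAGE_SIZE assumed; not code from the patch): rounding the alignment up to the next power of two of the code size is enough to keep the code within a single page.

/*
 * Illustration only, not from the patch: if a blob of 'size' bytes is
 * placed at an alignment equal to the next power of two >= 'size' (what
 * the linker does via ALIGN(1 << LOG2CEIL(__hyp_idmap_size))), it can
 * never straddle a page boundary, as long as that power of two is no
 * larger than PAGE_SIZE. The arm64 case is the same argument with the
 * alignment fixed at 4 KB.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* next power of two >= size, mirroring 1 << LOG2CEIL(size); size > 0 */
static unsigned long pow2_roundup(unsigned long size)
{
	unsigned long p = 1;

	while (p < size)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long size = 0x12c;	/* hypothetical ~300-byte HYP init blob */
	unsigned long align = pow2_roundup(size);	/* 512 for 300 bytes */
	unsigned long start;

	/* every aligned placement within a page keeps the blob inside it */
	for (start = 0; start < PAGE_SIZE; start += align)
		assert(start + size <= PAGE_SIZE);

	printf("size %#lx at alignment %#lx never crosses a %lu-byte page\n",
	       size, align, PAGE_SIZE);
	return 0;
}
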
Note that this also fixes a benign off-by-one error in the original bounce
page code, where a bounce page would be allocated unnecessarily if the code
was exactly 1 page in size.
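
The exact form of the removed runtime check is not quoted here; the following sketch is an assumption, showing how comparing against an exclusive end address produces that kind of off-by-one.

/*
 * Assumed shape of the old runtime test, not the exact kernel code:
 * comparing the page of the start address with the page of the exclusive
 * end address flags a crossing even when a page-aligned blob ends exactly
 * on the boundary of its own single page.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* buggy: 'end' is exclusive, so a page-aligned, page-sized blob trips it */
static int needs_bounce_page(unsigned long start, unsigned long end)
{
	return ((start ^ end) & PAGE_MASK) != 0;
}

/* fixed: compare against the last byte actually occupied */
static int needs_bounce_page_fixed(unsigned long start, unsigned long end)
{
	return ((start ^ (end - 1)) & PAGE_MASK) != 0;
}

int main(void)
{
	/* exactly one page of code, starting on a page boundary */
	unsigned long start = 0x8000, end = start + PAGE_SIZE;

	printf("buggy check: %d (bounce page allocated needlessly)\n",
	       needs_bounce_page(start, end));
	printf("fixed check: %d\n", needs_bounce_page_fixed(start, end));
	return 0;
}
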
On ARM, it also fixes an issue with very large kernels reported by Arnd
Bergmann, where stub sections with linker-emitted veneers could erroneously
trigger the size/alignment ASSERT() in the linker script.
Tested-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel/vmlinux.lds.S')
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S  18
1 file changed, 15 insertions, 3 deletions
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b31aa73e8076..ba65f1217310 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -23,11 +23,20 @@
 	VMLINUX_SYMBOL(__idmap_text_start) = .;			\
 	*(.idmap.text)						\
 	VMLINUX_SYMBOL(__idmap_text_end) = .;			\
-	. = ALIGN(32);						\
+	. = ALIGN(1 << LOG2CEIL(__hyp_idmap_size));		\
 	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;		\
 	*(.hyp.idmap.text)					\
 	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
+/*
+ * If the HYP idmap .text section is populated, it needs to be positioned
+ * such that it will not cross a page boundary in the final output image.
+ * So align it to the section size rounded up to the next power of 2.
+ * If __hyp_idmap_size is undefined, the section will be empty so define
+ * it as 0 in that case.
+ */
+PROVIDE(__hyp_idmap_size = 0);
+
 #ifdef CONFIG_HOTPLUG_CPU
 #define ARM_CPU_DISCARD(x)
 #define ARM_CPU_KEEP(x)	x
@@ -346,8 +355,11 @@ SECTIONS
  */
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
 /*
- * The HYP init code can't be more than a page long.
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
  * The above comment applies as well.
  */
-ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
+ASSERT((__hyp_idmap_text_start & ~PAGE_MASK) + __hyp_idmap_size <= PAGE_SIZE,
+	"HYP init code too big or misaligned")
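
A small user-space sketch (hypothetical addresses, 4 KB PAGE_SIZE assumed) of the arithmetic behind the replacement ASSERT: the code's offset within its page, plus its size, must not exceed PAGE_SIZE.

/*
 * The replacement ASSERT checks that the code's offset within its page,
 * plus its size, fits inside a single page. The PAGE_SIZE value and the
 * addresses below are assumptions for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int hyp_init_fits_in_one_page(unsigned long start, unsigned long size)
{
	return ((start & ~PAGE_MASK) + size) <= PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", hyp_init_fits_in_one_page(0x8000, 0x12c)); /* 1: aligned, fits */
	printf("%d\n", hyp_init_fits_in_one_page(0x8f80, 0x12c)); /* 0: would cross a page */
	return 0;
}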