aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 arch/arm/include/asm/kvm_asm.h    |  2 ++
 arch/arm/kvm/arm.c                |  8 +++++---
 arch/arm64/include/asm/kvm_asm.h  | 17 +++++++++++++++++
 arch/arm64/include/asm/kvm_host.h |  8 +++++---
 arch/arm64/kvm/hyp.S              |  6 +++---
 5 files changed, 32 insertions(+), 9 deletions(-)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 194c91b610ff..c35c349da069 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -79,6 +79,8 @@
 #define rr_lo_hi(a1, a2) a1, a2
 #endif
 
+#define kvm_ksym_ref(kva)	(kva)
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index dda1959f0dde..975da6cfbf59 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -982,7 +982,7 @@ static void cpu_init_hyp_mode(void *dummy)
 	pgd_ptr = kvm_mmu_get_httbr();
 	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
-	vector_ptr = (unsigned long)__kvm_hyp_vector;
+	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 
 	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
 
@@ -1074,13 +1074,15 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the Hyp-code called directly from the host
 	 */
-	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+	err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
+				  kvm_ksym_ref(__kvm_hyp_code_end));
 	if (err) {
 		kvm_err("Cannot map world-switch code\n");
 		goto out_free_mappings;
 	}
 
-	err = create_hyp_mappings(__start_rodata, __end_rodata);
+	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
+				  kvm_ksym_ref(__end_rodata));
 	if (err) {
 		kvm_err("Cannot map rodata section\n");
 		goto out_free_mappings;
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 52b777b7d407..31b56008f412 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -26,7 +26,24 @@
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
 #define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+#define kvm_ksym_ref(sym)	((void *)&sym + kvm_ksym_shift)
+
 #ifndef __ASSEMBLY__
+#if __GNUC__ > 4
+#define kvm_ksym_shift		(PAGE_OFFSET - KIMAGE_VADDR)
+#else
+/*
+ * GCC versions 4.9 and older will fold the constant below into the addend of
+ * the reference to 'sym' above if kvm_ksym_shift is declared static or if the
+ * constant is used directly. However, since we use the small code model for
+ * the core kernel, the reference to 'sym' will be emitted as a adrp/add pair,
+ * with a +/- 4 GB range, resulting in linker relocation errors if the shift
+ * is sufficiently large. So prevent the compiler from folding the shift into
+ * the addend, by making the shift a variable with external linkage.
+ */
+__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
+#endif
+
 struct kvm;
 struct kvm_vcpu;
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 689d4c95e12f..e3d67ff8798b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -307,7 +307,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
-u64 kvm_call_hyp(void *hypfn, ...);
+u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
@@ -328,8 +328,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
 	 * Call initialization code, and switch to the full blown
 	 * HYP code.
 	 */
-	kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
-		     hyp_stack_ptr, vector_ptr);
+	__kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+		       hyp_stack_ptr, vector_ptr);
 }
 
 static inline void kvm_arch_hardware_disable(void) {}
@@ -343,4 +343,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
 
+#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 0ccdcbbef3c2..870578f84b1c 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -20,7 +20,7 @@
 #include <asm/assembler.h>
 
 /*
- * u64 kvm_call_hyp(void *hypfn, ...);
+ * u64 __kvm_call_hyp(void *hypfn, ...);
  *
  * This is not really a variadic function in the classic C-way and care must
  * be taken when calling this to ensure parameters are passed in registers
@@ -37,7 +37,7 @@
  * used to implement __hyp_get_vectors in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
  */
-ENTRY(kvm_call_hyp)
+ENTRY(__kvm_call_hyp)
 	hvc	#0
 	ret
-ENDPROC(kvm_call_hyp)
+ENDPROC(__kvm_call_hyp)