author	Marc Zyngier <marc.zyngier@arm.com>	2018-02-01 06:07:35 -0500
committer	Russell King <rmk+kernel@armlinux.org.uk>	2018-05-31 06:09:03 -0400
commit	3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f (patch)
tree	cb2e36e2ad4908ce0b3daf776c8857a729a0fc31
parent	c44f366ea7c85e1be27d08f2f0880f4120698125 (diff)
ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
In order to avoid aliasing attacks against the branch predictor,
let's invalidate the BTB on guest exit. This is made complicated by
the fact that we cannot take a branch before invalidating the BTB.

We only apply this to A12 and A17, which are the only two ARM cores
on which this is useful.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
-rw-r--r--	arch/arm/include/asm/kvm_asm.h	 2
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h	17
-rw-r--r--	arch/arm/kvm/hyp/hyp-entry.S	71
3 files changed, 85 insertions(+), 5 deletions(-)
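The subtlety the patch works around is visible in hyp-entry.S below: since no branch may be taken before the BPIALL, each vector slot falls through a run of "add sp, sp, #1" instructions, leaving a small per-exception value in the low three bits of the (8-byte-aligned) SP, which is only decoded after the invalidation. The following is a minimal userspace C sketch of that encode/decode trick; it is illustrative only, not part of the patch, and the names (vect_slot, encode, vect_match) are invented for the example.

#include <assert.h>
#include <stdint.h>

/* Vector table slot order (illustrative names only). */
enum vect_slot { RESET, UNDEF, SVC, PABT, DABT, HVC, IRQ, FIQ };

/*
 * Entering the table at slot 'k' falls through the remaining
 * "W(add) sp, sp, #1" instructions (the FIQ slot is a nop), so an
 * 8-byte-aligned SP ends up carrying (7 - k) in its low three bits.
 */
static uintptr_t encode(uintptr_t sp, enum vect_slot k)
{
        assert((sp & 7) == 0);          /* entry code guarantees 8-byte alignment */
        return sp + (7u - (unsigned)k);
}

/*
 * Mirror of the ARM-mode vect_br macro: XOR a candidate value into SP;
 * if the low bits are now clear we have both identified the exception
 * and restored the original SP in one step, otherwise undo the XOR.
 */
static int vect_match(uintptr_t *sp, unsigned val)
{
        *sp ^= val;
        if (!(*sp & 7))
                return 1;               /* matched: SP is back to its aligned value */
        *sp ^= val;                     /* no match: undo and let the next test run */
        return 0;
}

int main(void)
{
        uintptr_t sp = encode(0x8000, IRQ);     /* IRQ entry -> low bits hold 1 */
        unsigned val;

        for (val = 0; val < 8; val++)
                if (vect_match(&sp, val))
                        break;

        assert(val == 1 && sp == 0x8000);       /* decoded as IRQ, SP restored */
        return 0;
}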
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 36dd2962a42d..df24ed48977d 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index de1b919404e4..d08ce9c41df4 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -297,7 +297,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 
 static inline void *kvm_get_hyp_vector(void)
 {
-	return kvm_ksym_ref(__kvm_hyp_vector);
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A17:
+	{
+		extern char __kvm_hyp_vector_bp_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+	}
+
+#endif
+	default:
+	{
+		extern char __kvm_hyp_vector[];
+		return kvm_ksym_ref(__kvm_hyp_vector);
+	}
+	}
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 95a2faefc070..e789f52a5129 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
 	W(b)	hyp_irq
 	W(b)	hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	.align 5
+__kvm_hyp_vector_bp_inv:
+	.global __kvm_hyp_vector_bp_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset          7 */
+	W(add)	sp, sp, #1	/* Undef          6 */
+	W(add)	sp, sp, #1	/* Syscall        5 */
+	W(add)	sp, sp, #1	/* Prefetch abort 4 */
+	W(add)	sp, sp, #1	/* Data abort     3 */
+	W(add)	sp, sp, #1	/* HVC            2 */
+	W(add)	sp, sp, #1	/* IRQ            1 */
+	W(nop)			/* FIQ            0 */
+
+	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
+	isb
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * Yet another silly hack: Use VPIDR as a temp register.
+	 * Thumb2 is really a pain, as SP cannot be used with most
+	 * of the bitwise instructions. The vect_br macro ensures
+	 * things gets cleaned-up.
+	 */
+	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mov	r0, sp
+	and	r0, r0, #7
+	sub	sp, sp, r0
+	push	{r1, r2}
+	mov	r1, r0
+	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
+	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(	eor	sp, sp, #\val	)
+ARM(	tst	sp, #7		)
+ARM(	eorne	sp, sp, #\val	)
+
+THUMB(	cmp	r1, #\val	)
+THUMB(	popeq	{r1, r2}	)
+
+	beq	\targ
+.endm
+
+	vect_br	0, hyp_fiq
+	vect_br	1, hyp_irq
+	vect_br	2, hyp_hvc
+	vect_br	3, hyp_dabt
+	vect_br	4, hyp_pabt
+	vect_br	5, hyp_svc
+	vect_br	6, hyp_undef
+	vect_br	7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
 	.align
 \label:	mov	r0, #\cause
@@ -149,7 +209,14 @@ hyp_hvc:
 	bx	ip
 
 1:
-	push	{lr}
+	/*
+	 * Pushing r2 here is just a way of keeping the stack aligned to
+	 * 8 bytes on any path that can trigger a HYP exception. Here,
+	 * we may well be about to jump into the guest, and the guest
+	 * exit would otherwise be badly decoded by our fancy
+	 * "decode-exception-without-a-branch" code...
+	 */
+	push	{r2, lr}
 
 	mov	lr, r0
 	mov	r0, r1
@@ -159,7 +226,7 @@ hyp_hvc:
 THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
-	pop	{lr}
+	pop	{r2, lr}
 	eret
 
 guest_trap: