author     Linus Torvalds <torvalds@linux-foundation.org>  2016-02-08 13:32:30 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-02-08 13:32:30 -0500
commit     765bdb406de4b6132e349c5d4e077866536a9cc0
tree       1fa9510a516c49edaa050b984cd3550227da715a
parent     92e6edd685a83f8fa922f3274c362a583deea784
parent     afc60743811828dfccc7aff94ac527c857630d4b
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "KVM-ARM fixes, mostly coming from the PMU work"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  arm64: KVM: Fix guest dead loop when register accessor returns false
  arm64: KVM: Fix comments of the CP handler
  arm64: KVM: Fix wrong use of the CPSR MODE mask for 32bit guests
  arm64: KVM: Obey RES0/1 reserved bits when setting CPTR_EL2
  arm64: KVM: Fix AArch64 guest userspace exception injection
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h       1
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h   8
-rw-r--r--  arch/arm64/kvm/hyp/switch.c            8
-rw-r--r--  arch/arm64/kvm/inject_fault.c         38
-rw-r--r--  arch/arm64/kvm/sys_regs.c              9
5 files changed, 52 insertions, 12 deletions
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 738a95f93e49..bef6e9243c63 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -182,6 +182,7 @@
 #define CPTR_EL2_TCPAC	(1 << 31)
 #define CPTR_EL2_TTA	(1 << 20)
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT	0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA		(1 << 11)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3066328cd86b..779a5872a2c5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+	u32 mode;
 
-	if (vcpu_mode_is_32bit(vcpu))
+	if (vcpu_mode_is_32bit(vcpu)) {
+		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
 		return mode > COMPAT_PSR_MODE_USR;
+	}
+
+	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
 	return mode != PSR_MODE_EL0t;
 }
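
Why this hunk matters: AArch32 encodes the processor mode in a five-bit field, while PSR_MODE_MASK only covers the four-bit AArch64 field, so masking a 32-bit guest's CPSR with PSR_MODE_MASK drops bit 4 and makes every privileged compat mode compare as user mode. A standalone sketch of the old and new checks; the constant values are assumed from the arm64 ptrace headers and are not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Values assumed from the arm64 ptrace headers, not part of this patch. */
#define PSR_MODE_MASK		0x0000000f	/* AArch64: 4-bit mode field */
#define COMPAT_PSR_MODE_USR	0x00000010
#define COMPAT_PSR_MODE_SVC	0x00000013
#define COMPAT_PSR_MODE_MASK	0x0000001f	/* AArch32: 5-bit mode field */

int main(void)
{
	uint32_t cpsr = COMPAT_PSR_MODE_SVC;	/* a privileged 32-bit guest mode */

	/* Old check: the 4-bit mask turns SVC (0x13) into 0x03, which is not
	 * greater than COMPAT_PSR_MODE_USR (0x10), so SVC looks unprivileged. */
	printf("old: %d\n", (cpsr & PSR_MODE_MASK) > COMPAT_PSR_MODE_USR);

	/* New check: the 5-bit compat mask keeps the whole mode, 0x13 > 0x10. */
	printf("new: %d\n", (cpsr & COMPAT_PSR_MODE_MASK) > COMPAT_PSR_MODE_USR);
	return 0;
}

Built with any C compiler, this prints "old: 0" and "new: 1" for SVC mode, which is the bug the hunk fixes.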
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca8f5a5e2f96..f0e7bdfae134 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
-	write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+
+	val = CPTR_EL2_DEFAULT;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+	write_sysreg(val, cptr_el2);
+
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
@@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(HCR_RW, hcr_el2);
 	write_sysreg(0, hstr_el2);
 	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-	write_sysreg(0, cptr_el2);
+	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
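
CPTR_EL2_DEFAULT is used here so that the reserved-1 bits of CPTR_EL2 stay set instead of being blindly written to 0. A quick arithmetic check that 0x000033ff is exactly bits [9:0] plus [13:12]; the bit positions are assumed from the ARMv8 description of CPTR_EL2:

#include <stdio.h>

int main(void)
{
	/* CPTR_EL2 bits [9:0] and [13:12] are assumed RES1 here. */
	unsigned int res1 = (0x3ffu << 0) | (0x3u << 12);

	printf("0x%08x\n", res1);	/* prints 0x000033ff == CPTR_EL2_DEFAULT */
	return 0;
}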
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 648112e90ed5..4d1ac81870d2 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -27,7 +27,11 @@
 
 #define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
 				 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET	0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 	*fsr = 0x14;
 }
 
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+	u64 exc_offset;
+
+	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+	case PSR_MODE_EL1t:
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+		break;
+	case PSR_MODE_EL1h:
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+		break;
+	case PSR_MODE_EL0t:
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+		break;
+	default:
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+	}
+
+	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	/*
 	 * Build an unknown exception, depending on the instruction
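
get_except_vector() replaces the hard-coded EL1_EXCEPT_SYNC_OFFSET so that an exception injected while the guest runs at EL0 lands in the lower-EL vectors rather than the current-EL ones. A minimal sketch of the same VBAR_EL1 + base-offset + type arithmetic; the VBAR_EL1 value below is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same offsets as the patch; the VBAR_EL1 value is hypothetical. */
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define EXCEPT_TYPE_SYNC		0x000

int main(void)
{
	uint64_t vbar_el1 = 0xffff000008a00000ULL;

	/* Sync exception while the guest runs at EL1 with SP_EL1: VBAR + 0x200. */
	printf("EL1h sync: 0x%llx\n", (unsigned long long)
	       (vbar_el1 + CURRENT_EL_SP_ELx_VECTOR + EXCEPT_TYPE_SYNC));

	/* Same exception while the guest runs at EL0 (AArch64): VBAR + 0x400. */
	printf("EL0t sync: 0x%llx\n", (unsigned long long)
	       (vbar_el1 + LOWER_EL_AArch64_VECTOR + EXCEPT_TYPE_SYNC));
	return 0;
}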
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index eec3598b4184..2e90371cfb37 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			/* Handled */
+			return 0;
 		}
-
-		/* Handled */
-		return 0;
 	}
 
 	/* Not handled */
@@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 }
 
 /**
- * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
@@ -1095,7 +1094,7 @@ out:
 }
 
 /**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
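
The emulate_cp() change above is what fixes the guest dead loop: a trap now only counts as handled when the register accessor returns true, so a false return falls through to the not-handled path instead of silently returning success and letting the guest re-execute the same trapping instruction forever. A toy sketch of the control-flow difference, with simplified names that are not the kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the register accessor and the instruction skip. */
static bool accessor_result;
static bool access_reg(void) { return accessor_result; }
static void skip_instr(void) { printf("  skip guest instruction\n"); }

/* 0 means the trap was emulated; 1 means the caller must inject an undef. */
static int emulate(void)
{
	if (access_reg()) {
		skip_instr();
		/* Handled: success is reported only on this path now. */
		return 0;
	}
	/* Not handled: the old code returned 0 here as well, without skipping,
	 * so the guest kept re-executing the same trapping instruction. */
	return 1;
}

int main(void)
{
	accessor_result = true;
	printf("accessor true  -> %d\n", emulate());
	accessor_result = false;
	printf("accessor false -> %d\n", emulate());
	return 0;
}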