diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2016-01-03 06:26:01 -0500 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2016-02-29 13:34:12 -0500 |
commit | fb32a52a1d4487f3ac5b7ccb659d0beb11ec504f (patch) | |
tree | ae4c1660133ab040018cbbd657f08a631f084063 | |
parent | 0ca5565df8ef7534c0d85ec87e6c74f8ebe86e88 (diff) |
ARM: KVM: Move CP15 array into the CPU context structure
Continuing our rework of the CPU context, we now move the CP15
array into the CPU context structure. As this causes quite a bit
of churn, we introduce the vcpu_cp15() macro that abstracts the
location of the actual array. This will probably help next time
we have to revisit that code.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r-- | arch/arm/include/asm/kvm_emulate.h | 2 | ||||
-rw-r--r-- | arch/arm/include/asm/kvm_host.h | 6 | ||||
-rw-r--r-- | arch/arm/include/asm/kvm_mmu.h | 2 | ||||
-rw-r--r-- | arch/arm/kernel/asm-offsets.c | 2 | ||||
-rw-r--r-- | arch/arm/kvm/coproc.c | 32 | ||||
-rw-r--r-- | arch/arm/kvm/coproc.h | 16 | ||||
-rw-r--r-- | arch/arm/kvm/emulate.c | 22 | ||||
-rw-r--r-- | arch/arm/kvm/interrupts_head.S | 3 |
8 files changed, 43 insertions, 42 deletions
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 3095df091ff8..32bb52a489d0 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -192,7 +192,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) | |||
192 | 192 | ||
193 | static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) | 193 | static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) |
194 | { | 194 | { |
195 | return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK; | 195 | return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK; |
196 | } | 196 | } |
197 | 197 | ||
198 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) | 198 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index b64ac8e4adaa..4203701cc7f4 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -90,6 +90,7 @@ struct kvm_vcpu_fault_info { | |||
90 | 90 | ||
91 | struct kvm_cpu_context { | 91 | struct kvm_cpu_context { |
92 | struct vfp_hard_struct vfp; | 92 | struct vfp_hard_struct vfp; |
93 | u32 cp15[NR_CP15_REGS]; | ||
93 | }; | 94 | }; |
94 | 95 | ||
95 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 96 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
@@ -102,9 +103,6 @@ struct kvm_vcpu_arch { | |||
102 | int target; /* Processor target */ | 103 | int target; /* Processor target */ |
103 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | 104 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); |
104 | 105 | ||
105 | /* System control coprocessor (cp15) */ | ||
106 | u32 cp15[NR_CP15_REGS]; | ||
107 | |||
108 | /* The CPU type we expose to the VM */ | 106 | /* The CPU type we expose to the VM */ |
109 | u32 midr; | 107 | u32 midr; |
110 | 108 | ||
@@ -161,6 +159,8 @@ struct kvm_vcpu_stat { | |||
161 | u64 exits; | 159 | u64 exits; |
162 | }; | 160 | }; |
163 | 161 | ||
162 | #define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r] | ||
163 | |||
164 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); | 164 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); |
165 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | 165 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
166 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | 166 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); |
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index a520b7987a29..da44be9db4fa 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
@@ -179,7 +179,7 @@ struct kvm; | |||
179 | 179 | ||
180 | static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) | 180 | static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) |
181 | { | 181 | { |
182 | return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; | 182 | return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101; |
183 | } | 183 | } |
184 | 184 | ||
185 | static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, | 185 | static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 346bfca29720..43f8b01072c1 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -172,10 +172,10 @@ int main(void) | |||
172 | #ifdef CONFIG_KVM_ARM_HOST | 172 | #ifdef CONFIG_KVM_ARM_HOST |
173 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); | 173 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); |
174 | DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); | 174 | DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); |
175 | DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); | ||
176 | DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); | 175 | DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); |
177 | DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); | 176 | DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); |
178 | DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); | 177 | DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); |
178 | DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); | ||
179 | DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); | 179 | DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); |
180 | DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); | 180 | DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); |
181 | DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); | 181 | DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 1a643f38031d..e3e86c4cfed2 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -54,8 +54,8 @@ static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, | |||
54 | const struct coproc_reg *r, | 54 | const struct coproc_reg *r, |
55 | u64 val) | 55 | u64 val) |
56 | { | 56 | { |
57 | vcpu->arch.cp15[r->reg] = val & 0xffffffff; | 57 | vcpu_cp15(vcpu, r->reg) = val & 0xffffffff; |
58 | vcpu->arch.cp15[r->reg + 1] = val >> 32; | 58 | vcpu_cp15(vcpu, r->reg + 1) = val >> 32; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, | 61 | static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, |
@@ -63,9 +63,9 @@ static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, | |||
63 | { | 63 | { |
64 | u64 val; | 64 | u64 val; |
65 | 65 | ||
66 | val = vcpu->arch.cp15[r->reg + 1]; | 66 | val = vcpu_cp15(vcpu, r->reg + 1); |
67 | val = val << 32; | 67 | val = val << 32; |
68 | val = val | vcpu->arch.cp15[r->reg]; | 68 | val = val | vcpu_cp15(vcpu, r->reg); |
69 | return val; | 69 | return val; |
70 | } | 70 | } |
71 | 71 | ||
@@ -104,7 +104,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | |||
104 | * vcpu_id, but we read the 'U' bit from the underlying | 104 | * vcpu_id, but we read the 'U' bit from the underlying |
105 | * hardware directly. | 105 | * hardware directly. |
106 | */ | 106 | */ |
107 | vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | | 107 | vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | |
108 | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | | 108 | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | |
109 | (vcpu->vcpu_id & 3)); | 109 | (vcpu->vcpu_id & 3)); |
110 | } | 110 | } |
@@ -117,7 +117,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu, | |||
117 | if (p->is_write) | 117 | if (p->is_write) |
118 | return ignore_write(vcpu, p); | 118 | return ignore_write(vcpu, p); |
119 | 119 | ||
120 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; | 120 | *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR); |
121 | return true; | 121 | return true; |
122 | } | 122 | } |
123 | 123 | ||
@@ -139,7 +139,7 @@ static bool access_l2ctlr(struct kvm_vcpu *vcpu, | |||
139 | if (p->is_write) | 139 | if (p->is_write) |
140 | return ignore_write(vcpu, p); | 140 | return ignore_write(vcpu, p); |
141 | 141 | ||
142 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; | 142 | *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR); |
143 | return true; | 143 | return true; |
144 | } | 144 | } |
145 | 145 | ||
@@ -156,7 +156,7 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | |||
156 | ncores = min(ncores, 3U); | 156 | ncores = min(ncores, 3U); |
157 | l2ctlr |= (ncores & 3) << 24; | 157 | l2ctlr |= (ncores & 3) << 24; |
158 | 158 | ||
159 | vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; | 159 | vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr; |
160 | } | 160 | } |
161 | 161 | ||
162 | static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | 162 | static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
@@ -171,7 +171,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | |||
171 | else | 171 | else |
172 | actlr &= ~(1U << 6); | 172 | actlr &= ~(1U << 6); |
173 | 173 | ||
174 | vcpu->arch.cp15[c1_ACTLR] = actlr; | 174 | vcpu_cp15(vcpu, c1_ACTLR) = actlr; |
175 | } | 175 | } |
176 | 176 | ||
177 | /* | 177 | /* |
@@ -218,9 +218,9 @@ bool access_vm_reg(struct kvm_vcpu *vcpu, | |||
218 | 218 | ||
219 | BUG_ON(!p->is_write); | 219 | BUG_ON(!p->is_write); |
220 | 220 | ||
221 | vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); | 221 | vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1); |
222 | if (p->is_64bit) | 222 | if (p->is_64bit) |
223 | vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); | 223 | vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2); |
224 | 224 | ||
225 | kvm_toggle_cache(vcpu, was_enabled); | 225 | kvm_toggle_cache(vcpu, was_enabled); |
226 | return true; | 226 | return true; |
@@ -1030,7 +1030,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
1030 | val = vcpu_cp15_reg64_get(vcpu, r); | 1030 | val = vcpu_cp15_reg64_get(vcpu, r); |
1031 | ret = reg_to_user(uaddr, &val, reg->id); | 1031 | ret = reg_to_user(uaddr, &val, reg->id); |
1032 | } else if (KVM_REG_SIZE(reg->id) == 4) { | 1032 | } else if (KVM_REG_SIZE(reg->id) == 4) { |
1033 | ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); | 1033 | ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id); |
1034 | } | 1034 | } |
1035 | 1035 | ||
1036 | return ret; | 1036 | return ret; |
@@ -1060,7 +1060,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
1060 | if (!ret) | 1060 | if (!ret) |
1061 | vcpu_cp15_reg64_set(vcpu, r, val); | 1061 | vcpu_cp15_reg64_set(vcpu, r, val); |
1062 | } else if (KVM_REG_SIZE(reg->id) == 4) { | 1062 | } else if (KVM_REG_SIZE(reg->id) == 4) { |
1063 | ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); | 1063 | ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id); |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | return ret; | 1066 | return ret; |
@@ -1248,7 +1248,7 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) | |||
1248 | const struct coproc_reg *table; | 1248 | const struct coproc_reg *table; |
1249 | 1249 | ||
1250 | /* Catch someone adding a register without putting in reset entry. */ | 1250 | /* Catch someone adding a register without putting in reset entry. */ |
1251 | memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); | 1251 | memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15)); |
1252 | 1252 | ||
1253 | /* Generic chip reset first (so target could override). */ | 1253 | /* Generic chip reset first (so target could override). */ |
1254 | reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); | 1254 | reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); |
@@ -1257,6 +1257,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) | |||
1257 | reset_coproc_regs(vcpu, table, num); | 1257 | reset_coproc_regs(vcpu, table, num); |
1258 | 1258 | ||
1259 | for (num = 1; num < NR_CP15_REGS; num++) | 1259 | for (num = 1; num < NR_CP15_REGS; num++) |
1260 | if (vcpu->arch.cp15[num] == 0x42424242) | 1260 | if (vcpu_cp15(vcpu, num) == 0x42424242) |
1261 | panic("Didn't reset vcpu->arch.cp15[%zi]", num); | 1261 | panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); |
1262 | } | 1262 | } |
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 88d24a3a9778..27351323871d 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h | |||
@@ -47,7 +47,7 @@ struct coproc_reg { | |||
47 | /* Initialization for vcpu. */ | 47 | /* Initialization for vcpu. */ |
48 | void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); | 48 | void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); |
49 | 49 | ||
50 | /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ | 50 | /* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */ |
51 | unsigned long reg; | 51 | unsigned long reg; |
52 | 52 | ||
53 | /* Value (usually reset value) */ | 53 | /* Value (usually reset value) */ |
@@ -104,25 +104,25 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu, | |||
104 | const struct coproc_reg *r) | 104 | const struct coproc_reg *r) |
105 | { | 105 | { |
106 | BUG_ON(!r->reg); | 106 | BUG_ON(!r->reg); |
107 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); | 107 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); |
108 | vcpu->arch.cp15[r->reg] = 0xdecafbad; | 108 | vcpu_cp15(vcpu, r->reg) = 0xdecafbad; |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | 111 | static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
112 | { | 112 | { |
113 | BUG_ON(!r->reg); | 113 | BUG_ON(!r->reg); |
114 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); | 114 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); |
115 | vcpu->arch.cp15[r->reg] = r->val; | 115 | vcpu_cp15(vcpu, r->reg) = r->val; |
116 | } | 116 | } |
117 | 117 | ||
118 | static inline void reset_unknown64(struct kvm_vcpu *vcpu, | 118 | static inline void reset_unknown64(struct kvm_vcpu *vcpu, |
119 | const struct coproc_reg *r) | 119 | const struct coproc_reg *r) |
120 | { | 120 | { |
121 | BUG_ON(!r->reg); | 121 | BUG_ON(!r->reg); |
122 | BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); | 122 | BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); |
123 | 123 | ||
124 | vcpu->arch.cp15[r->reg] = 0xdecafbad; | 124 | vcpu_cp15(vcpu, r->reg) = 0xdecafbad; |
125 | vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; | 125 | vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee; |
126 | } | 126 | } |
127 | 127 | ||
128 | static inline int cmp_reg(const struct coproc_reg *i1, | 128 | static inline int cmp_reg(const struct coproc_reg *i1, |
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index dc99159857b4..ee161b1c66da 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c | |||
@@ -266,8 +266,8 @@ void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) | |||
266 | 266 | ||
267 | static u32 exc_vector_base(struct kvm_vcpu *vcpu) | 267 | static u32 exc_vector_base(struct kvm_vcpu *vcpu) |
268 | { | 268 | { |
269 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | 269 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); |
270 | u32 vbar = vcpu->arch.cp15[c12_VBAR]; | 270 | u32 vbar = vcpu_cp15(vcpu, c12_VBAR); |
271 | 271 | ||
272 | if (sctlr & SCTLR_V) | 272 | if (sctlr & SCTLR_V) |
273 | return 0xffff0000; | 273 | return 0xffff0000; |
@@ -282,7 +282,7 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) | |||
282 | static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) | 282 | static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) |
283 | { | 283 | { |
284 | unsigned long cpsr = *vcpu_cpsr(vcpu); | 284 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
285 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | 285 | u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); |
286 | 286 | ||
287 | *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; | 287 | *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; |
288 | 288 | ||
@@ -357,22 +357,22 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) | |||
357 | 357 | ||
358 | if (is_pabt) { | 358 | if (is_pabt) { |
359 | /* Set IFAR and IFSR */ | 359 | /* Set IFAR and IFSR */ |
360 | vcpu->arch.cp15[c6_IFAR] = addr; | 360 | vcpu_cp15(vcpu, c6_IFAR) = addr; |
361 | is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); | 361 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); |
362 | /* Always give debug fault for now - should give guest a clue */ | 362 | /* Always give debug fault for now - should give guest a clue */ |
363 | if (is_lpae) | 363 | if (is_lpae) |
364 | vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; | 364 | vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22; |
365 | else | 365 | else |
366 | vcpu->arch.cp15[c5_IFSR] = 2; | 366 | vcpu_cp15(vcpu, c5_IFSR) = 2; |
367 | } else { /* !iabt */ | 367 | } else { /* !iabt */ |
368 | /* Set DFAR and DFSR */ | 368 | /* Set DFAR and DFSR */ |
369 | vcpu->arch.cp15[c6_DFAR] = addr; | 369 | vcpu_cp15(vcpu, c6_DFAR) = addr; |
370 | is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); | 370 | is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); |
371 | /* Always give debug fault for now - should give guest a clue */ | 371 | /* Always give debug fault for now - should give guest a clue */ |
372 | if (is_lpae) | 372 | if (is_lpae) |
373 | vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; | 373 | vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22; |
374 | else | 374 | else |
375 | vcpu->arch.cp15[c5_DFSR] = 2; | 375 | vcpu_cp15(vcpu, c5_DFSR) = 2; |
376 | } | 376 | } |
377 | 377 | ||
378 | } | 378 | } |
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 51a59504bef4..b9d953158877 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S | |||
@@ -4,7 +4,8 @@ | |||
4 | #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) | 4 | #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) |
5 | #define VCPU_USR_SP (VCPU_USR_REG(13)) | 5 | #define VCPU_USR_SP (VCPU_USR_REG(13)) |
6 | #define VCPU_USR_LR (VCPU_USR_REG(14)) | 6 | #define VCPU_USR_LR (VCPU_USR_REG(14)) |
7 | #define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) | 7 | #define VCPU_CP15_BASE (VCPU_GUEST_CTXT + CPU_CTXT_CP15) |
8 | #define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15_BASE + (_cp15_reg_idx * 4)) | ||
8 | 9 | ||
9 | /* | 10 | /* |
10 | * Many of these macros need to access the VCPU structure, which is always | 11 | * Many of these macros need to access the VCPU structure, which is always |