diff options
author | Alexander Graf <agraf@suse.de> | 2010-04-15 18:11:40 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-05-17 05:18:26 -0400 |
commit | c7f38f46f2a98d232147e47284cb4e7363296a3e (patch) | |
tree | ba1de6a9d8398d31807756789d015983c4610b21 | |
parent | 66bb170655799a0149df0844fb8232f27e54323c (diff) |
KVM: PPC: Improve indirect svcpu accessors
We already have some inline functions we use to access vcpu or svcpu structs,
depending on whether we're on booke or book3s. Since we just put a few more
registers into the svcpu, we also need to make sure the respective callbacks
are available and get used.
So this patch moves direct use of the fields that are now in the svcpu struct to
inline function calls. While at it, it also moves the definition of those
inline function calls to respective header files for booke and book3s,
greatly improving readability.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r-- | arch/powerpc/include/asm/kvm_book3s.h | 98 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_booke.h | 96 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_ppc.h | 79 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s.c | 125 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_host.c | 26 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_emulate.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_paired_singles.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kvm/emulate.c | 7 | ||||
-rw-r--r-- | arch/powerpc/kvm/powerpc.c | 2 |
10 files changed, 290 insertions, 153 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 7670e2a12867..9517b8deafed 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -71,7 +71,7 @@ struct kvmppc_sid_map { | |||
71 | 71 | ||
72 | struct kvmppc_vcpu_book3s { | 72 | struct kvmppc_vcpu_book3s { |
73 | struct kvm_vcpu vcpu; | 73 | struct kvm_vcpu vcpu; |
74 | struct kvmppc_book3s_shadow_vcpu shadow_vcpu; | 74 | struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; |
75 | struct kvmppc_sid_map sid_map[SID_MAP_NUM]; | 75 | struct kvmppc_sid_map sid_map[SID_MAP_NUM]; |
76 | struct kvmppc_slb slb[64]; | 76 | struct kvmppc_slb slb[64]; |
77 | struct { | 77 | struct { |
@@ -147,6 +147,94 @@ static inline ulong dsisr(void) | |||
147 | } | 147 | } |
148 | 148 | ||
149 | extern void kvm_return_point(void); | 149 | extern void kvm_return_point(void); |
150 | static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu); | ||
151 | |||
152 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | ||
153 | { | ||
154 | if ( num < 14 ) { | ||
155 | to_svcpu(vcpu)->gpr[num] = val; | ||
156 | to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; | ||
157 | } else | ||
158 | vcpu->arch.gpr[num] = val; | ||
159 | } | ||
160 | |||
161 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | ||
162 | { | ||
163 | if ( num < 14 ) | ||
164 | return to_svcpu(vcpu)->gpr[num]; | ||
165 | else | ||
166 | return vcpu->arch.gpr[num]; | ||
167 | } | ||
168 | |||
169 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | ||
170 | { | ||
171 | to_svcpu(vcpu)->cr = val; | ||
172 | to_book3s(vcpu)->shadow_vcpu->cr = val; | ||
173 | } | ||
174 | |||
175 | static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | ||
176 | { | ||
177 | return to_svcpu(vcpu)->cr; | ||
178 | } | ||
179 | |||
180 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) | ||
181 | { | ||
182 | to_svcpu(vcpu)->xer = val; | ||
183 | to_book3s(vcpu)->shadow_vcpu->xer = val; | ||
184 | } | ||
185 | |||
186 | static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) | ||
187 | { | ||
188 | return to_svcpu(vcpu)->xer; | ||
189 | } | ||
190 | |||
191 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) | ||
192 | { | ||
193 | to_svcpu(vcpu)->ctr = val; | ||
194 | } | ||
195 | |||
196 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) | ||
197 | { | ||
198 | return to_svcpu(vcpu)->ctr; | ||
199 | } | ||
200 | |||
201 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) | ||
202 | { | ||
203 | to_svcpu(vcpu)->lr = val; | ||
204 | } | ||
205 | |||
206 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) | ||
207 | { | ||
208 | return to_svcpu(vcpu)->lr; | ||
209 | } | ||
210 | |||
211 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) | ||
212 | { | ||
213 | to_svcpu(vcpu)->pc = val; | ||
214 | } | ||
215 | |||
216 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) | ||
217 | { | ||
218 | return to_svcpu(vcpu)->pc; | ||
219 | } | ||
220 | |||
221 | static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) | ||
222 | { | ||
223 | ulong pc = kvmppc_get_pc(vcpu); | ||
224 | struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); | ||
225 | |||
226 | /* Load the instruction manually if it failed to do so in the | ||
227 | * exit path */ | ||
228 | if (svcpu->last_inst == KVM_INST_FETCH_FAILED) | ||
229 | kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); | ||
230 | |||
231 | return svcpu->last_inst; | ||
232 | } | ||
233 | |||
234 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | ||
235 | { | ||
236 | return to_svcpu(vcpu)->fault_dar; | ||
237 | } | ||
150 | 238 | ||
151 | /* Magic register values loaded into r3 and r4 before the 'sc' assembly | 239 | /* Magic register values loaded into r3 and r4 before the 'sc' assembly |
152 | * instruction for the OSI hypercalls */ | 240 | * instruction for the OSI hypercalls */ |
@@ -155,4 +243,12 @@ extern void kvm_return_point(void); | |||
155 | 243 | ||
156 | #define INS_DCBZ 0x7c0007ec | 244 | #define INS_DCBZ 0x7c0007ec |
157 | 245 | ||
246 | /* Also add subarch specific defines */ | ||
247 | |||
248 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
249 | #include <asm/kvm_book3s_32.h> | ||
250 | #else | ||
251 | #include <asm/kvm_book3s_64.h> | ||
252 | #endif | ||
253 | |||
158 | #endif /* __ASM_KVM_BOOK3S_H__ */ | 254 | #endif /* __ASM_KVM_BOOK3S_H__ */ |
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h new file mode 100644 index 000000000000..9c9ba3d59b1b --- /dev/null +++ b/arch/powerpc/include/asm/kvm_booke.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2010 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #ifndef __ASM_KVM_BOOKE_H__ | ||
21 | #define __ASM_KVM_BOOKE_H__ | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | #include <linux/kvm_host.h> | ||
25 | |||
26 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | ||
27 | { | ||
28 | vcpu->arch.gpr[num] = val; | ||
29 | } | ||
30 | |||
31 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | ||
32 | { | ||
33 | return vcpu->arch.gpr[num]; | ||
34 | } | ||
35 | |||
36 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | ||
37 | { | ||
38 | vcpu->arch.cr = val; | ||
39 | } | ||
40 | |||
41 | static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | ||
42 | { | ||
43 | return vcpu->arch.cr; | ||
44 | } | ||
45 | |||
46 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) | ||
47 | { | ||
48 | vcpu->arch.xer = val; | ||
49 | } | ||
50 | |||
51 | static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) | ||
52 | { | ||
53 | return vcpu->arch.xer; | ||
54 | } | ||
55 | |||
56 | static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) | ||
57 | { | ||
58 | return vcpu->arch.last_inst; | ||
59 | } | ||
60 | |||
61 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) | ||
62 | { | ||
63 | vcpu->arch.ctr = val; | ||
64 | } | ||
65 | |||
66 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) | ||
67 | { | ||
68 | return vcpu->arch.ctr; | ||
69 | } | ||
70 | |||
71 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) | ||
72 | { | ||
73 | vcpu->arch.lr = val; | ||
74 | } | ||
75 | |||
76 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) | ||
77 | { | ||
78 | return vcpu->arch.lr; | ||
79 | } | ||
80 | |||
81 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) | ||
82 | { | ||
83 | vcpu->arch.pc = val; | ||
84 | } | ||
85 | |||
86 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) | ||
87 | { | ||
88 | return vcpu->arch.pc; | ||
89 | } | ||
90 | |||
91 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | ||
92 | { | ||
93 | return vcpu->arch.fault_dear; | ||
94 | } | ||
95 | |||
96 | #endif /* __ASM_KVM_BOOKE_H__ */ | ||
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 6a2464e4d6b9..edade847b8f8 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/kvm_host.h> | 30 | #include <linux/kvm_host.h> |
31 | #ifdef CONFIG_PPC_BOOK3S | 31 | #ifdef CONFIG_PPC_BOOK3S |
32 | #include <asm/kvm_book3s.h> | 32 | #include <asm/kvm_book3s.h> |
33 | #else | ||
34 | #include <asm/kvm_booke.h> | ||
33 | #endif | 35 | #endif |
34 | 36 | ||
35 | enum emulation_result { | 37 | enum emulation_result { |
@@ -138,81 +140,4 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value) | |||
138 | return r; | 140 | return r; |
139 | } | 141 | } |
140 | 142 | ||
141 | #ifdef CONFIG_PPC_BOOK3S | ||
142 | |||
143 | /* We assume we're always acting on the current vcpu */ | ||
144 | |||
145 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | ||
146 | { | ||
147 | if ( num < 14 ) { | ||
148 | get_paca()->shadow_vcpu.gpr[num] = val; | ||
149 | to_book3s(vcpu)->shadow_vcpu.gpr[num] = val; | ||
150 | } else | ||
151 | vcpu->arch.gpr[num] = val; | ||
152 | } | ||
153 | |||
154 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | ||
155 | { | ||
156 | if ( num < 14 ) | ||
157 | return get_paca()->shadow_vcpu.gpr[num]; | ||
158 | else | ||
159 | return vcpu->arch.gpr[num]; | ||
160 | } | ||
161 | |||
162 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | ||
163 | { | ||
164 | get_paca()->shadow_vcpu.cr = val; | ||
165 | to_book3s(vcpu)->shadow_vcpu.cr = val; | ||
166 | } | ||
167 | |||
168 | static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | ||
169 | { | ||
170 | return get_paca()->shadow_vcpu.cr; | ||
171 | } | ||
172 | |||
173 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) | ||
174 | { | ||
175 | get_paca()->shadow_vcpu.xer = val; | ||
176 | to_book3s(vcpu)->shadow_vcpu.xer = val; | ||
177 | } | ||
178 | |||
179 | static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) | ||
180 | { | ||
181 | return get_paca()->shadow_vcpu.xer; | ||
182 | } | ||
183 | |||
184 | #else | ||
185 | |||
186 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | ||
187 | { | ||
188 | vcpu->arch.gpr[num] = val; | ||
189 | } | ||
190 | |||
191 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | ||
192 | { | ||
193 | return vcpu->arch.gpr[num]; | ||
194 | } | ||
195 | |||
196 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | ||
197 | { | ||
198 | vcpu->arch.cr = val; | ||
199 | } | ||
200 | |||
201 | static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | ||
202 | { | ||
203 | return vcpu->arch.cr; | ||
204 | } | ||
205 | |||
206 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) | ||
207 | { | ||
208 | vcpu->arch.xer = val; | ||
209 | } | ||
210 | |||
211 | static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) | ||
212 | { | ||
213 | return vcpu->arch.xer; | ||
214 | } | ||
215 | |||
216 | #endif | ||
217 | |||
218 | #endif /* __POWERPC_KVM_PPC_H__ */ | 143 | #endif /* __POWERPC_KVM_PPC_H__ */ |
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 41c23b636f53..7ff80f9f13a8 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -71,18 +71,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | |||
71 | 71 | ||
72 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 72 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
73 | { | 73 | { |
74 | memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb)); | 74 | #ifdef CONFIG_PPC_BOOK3S_64 |
75 | memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu, | 75 | memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb)); |
76 | memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu, | ||
76 | sizeof(get_paca()->shadow_vcpu)); | 77 | sizeof(get_paca()->shadow_vcpu)); |
77 | get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max; | 78 | to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max; |
79 | #endif | ||
80 | |||
81 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
82 | current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; | ||
83 | #endif | ||
78 | } | 84 | } |
79 | 85 | ||
80 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 86 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
81 | { | 87 | { |
82 | memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb)); | 88 | #ifdef CONFIG_PPC_BOOK3S_64 |
83 | memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, | 89 | memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb)); |
90 | memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, | ||
84 | sizeof(get_paca()->shadow_vcpu)); | 91 | sizeof(get_paca()->shadow_vcpu)); |
85 | to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; | 92 | to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max; |
93 | #endif | ||
86 | 94 | ||
87 | kvmppc_giveup_ext(vcpu, MSR_FP); | 95 | kvmppc_giveup_ext(vcpu, MSR_FP); |
88 | kvmppc_giveup_ext(vcpu, MSR_VEC); | 96 | kvmppc_giveup_ext(vcpu, MSR_VEC); |
@@ -144,7 +152,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | |||
144 | VSID_SPLIT_MASK); | 152 | VSID_SPLIT_MASK); |
145 | 153 | ||
146 | kvmppc_mmu_flush_segments(vcpu); | 154 | kvmppc_mmu_flush_segments(vcpu); |
147 | kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); | 155 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
148 | } | 156 | } |
149 | 157 | ||
150 | /* Preload FPU if it's enabled */ | 158 | /* Preload FPU if it's enabled */ |
@@ -154,9 +162,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | |||
154 | 162 | ||
155 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) | 163 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) |
156 | { | 164 | { |
157 | vcpu->arch.srr0 = vcpu->arch.pc; | 165 | vcpu->arch.srr0 = kvmppc_get_pc(vcpu); |
158 | vcpu->arch.srr1 = vcpu->arch.msr | flags; | 166 | vcpu->arch.srr1 = vcpu->arch.msr | flags; |
159 | vcpu->arch.pc = to_book3s(vcpu)->hior + vec; | 167 | kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec); |
160 | vcpu->arch.mmu.reset_msr(vcpu); | 168 | vcpu->arch.mmu.reset_msr(vcpu); |
161 | } | 169 | } |
162 | 170 | ||
@@ -551,20 +559,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
551 | 559 | ||
552 | if (page_found == -ENOENT) { | 560 | if (page_found == -ENOENT) { |
553 | /* Page not found in guest PTE entries */ | 561 | /* Page not found in guest PTE entries */ |
554 | vcpu->arch.dear = vcpu->arch.fault_dear; | 562 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); |
555 | to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; | 563 | to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; |
556 | vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); | 564 | vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); |
557 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 565 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
558 | } else if (page_found == -EPERM) { | 566 | } else if (page_found == -EPERM) { |
559 | /* Storage protection */ | 567 | /* Storage protection */ |
560 | vcpu->arch.dear = vcpu->arch.fault_dear; | 568 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); |
561 | to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; | 569 | to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE; |
562 | to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; | 570 | to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; |
563 | vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); | 571 | vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); |
564 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 572 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
565 | } else if (page_found == -EINVAL) { | 573 | } else if (page_found == -EINVAL) { |
566 | /* Page not found in guest SLB */ | 574 | /* Page not found in guest SLB */ |
567 | vcpu->arch.dear = vcpu->arch.fault_dear; | 575 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); |
568 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); | 576 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); |
569 | } else if (!is_mmio && | 577 | } else if (!is_mmio && |
570 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { | 578 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { |
@@ -646,10 +654,11 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) | |||
646 | 654 | ||
647 | static int kvmppc_read_inst(struct kvm_vcpu *vcpu) | 655 | static int kvmppc_read_inst(struct kvm_vcpu *vcpu) |
648 | { | 656 | { |
649 | ulong srr0 = vcpu->arch.pc; | 657 | ulong srr0 = kvmppc_get_pc(vcpu); |
658 | u32 last_inst = kvmppc_get_last_inst(vcpu); | ||
650 | int ret; | 659 | int ret; |
651 | 660 | ||
652 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &vcpu->arch.last_inst, false); | 661 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); |
653 | if (ret == -ENOENT) { | 662 | if (ret == -ENOENT) { |
654 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1); | 663 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1); |
655 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0); | 664 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0); |
@@ -754,12 +763,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
754 | run->ready_for_interrupt_injection = 1; | 763 | run->ready_for_interrupt_injection = 1; |
755 | #ifdef EXIT_DEBUG | 764 | #ifdef EXIT_DEBUG |
756 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n", | 765 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n", |
757 | exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, | 766 | exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu), |
758 | kvmppc_get_dec(vcpu), vcpu->arch.msr); | 767 | kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1); |
759 | #elif defined (EXIT_DEBUG_SIMPLE) | 768 | #elif defined (EXIT_DEBUG_SIMPLE) |
760 | if ((exit_nr != 0x900) && (exit_nr != 0x500)) | 769 | if ((exit_nr != 0x900) && (exit_nr != 0x500)) |
761 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n", | 770 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n", |
762 | exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, | 771 | exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu), |
763 | vcpu->arch.msr); | 772 | vcpu->arch.msr); |
764 | #endif | 773 | #endif |
765 | kvm_resched(vcpu); | 774 | kvm_resched(vcpu); |
@@ -767,8 +776,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
767 | case BOOK3S_INTERRUPT_INST_STORAGE: | 776 | case BOOK3S_INTERRUPT_INST_STORAGE: |
768 | vcpu->stat.pf_instruc++; | 777 | vcpu->stat.pf_instruc++; |
769 | /* only care about PTEG not found errors, but leave NX alone */ | 778 | /* only care about PTEG not found errors, but leave NX alone */ |
770 | if (vcpu->arch.shadow_srr1 & 0x40000000) { | 779 | if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) { |
771 | r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr); | 780 | r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); |
772 | vcpu->stat.sp_instruc++; | 781 | vcpu->stat.sp_instruc++; |
773 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && | 782 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && |
774 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { | 783 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { |
@@ -777,38 +786,41 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
777 | * so we can't use the NX bit inside the guest. Let's cross our fingers, | 786 | * so we can't use the NX bit inside the guest. Let's cross our fingers, |
778 | * that no guest that needs the dcbz hack does NX. | 787 | * that no guest that needs the dcbz hack does NX. |
779 | */ | 788 | */ |
780 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); | 789 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL); |
781 | r = RESUME_GUEST; | 790 | r = RESUME_GUEST; |
782 | } else { | 791 | } else { |
783 | vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000; | 792 | vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000; |
784 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 793 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
785 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); | 794 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL); |
786 | r = RESUME_GUEST; | 795 | r = RESUME_GUEST; |
787 | } | 796 | } |
788 | break; | 797 | break; |
789 | case BOOK3S_INTERRUPT_DATA_STORAGE: | 798 | case BOOK3S_INTERRUPT_DATA_STORAGE: |
799 | { | ||
800 | ulong dar = kvmppc_get_fault_dar(vcpu); | ||
790 | vcpu->stat.pf_storage++; | 801 | vcpu->stat.pf_storage++; |
791 | /* The only case we need to handle is missing shadow PTEs */ | 802 | /* The only case we need to handle is missing shadow PTEs */ |
792 | if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) { | 803 | if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) { |
793 | r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr); | 804 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
794 | } else { | 805 | } else { |
795 | vcpu->arch.dear = vcpu->arch.fault_dear; | 806 | vcpu->arch.dear = dar; |
796 | to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; | 807 | to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; |
797 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 808 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
798 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL); | 809 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL); |
799 | r = RESUME_GUEST; | 810 | r = RESUME_GUEST; |
800 | } | 811 | } |
801 | break; | 812 | break; |
813 | } | ||
802 | case BOOK3S_INTERRUPT_DATA_SEGMENT: | 814 | case BOOK3S_INTERRUPT_DATA_SEGMENT: |
803 | if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) { | 815 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { |
804 | vcpu->arch.dear = vcpu->arch.fault_dear; | 816 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); |
805 | kvmppc_book3s_queue_irqprio(vcpu, | 817 | kvmppc_book3s_queue_irqprio(vcpu, |
806 | BOOK3S_INTERRUPT_DATA_SEGMENT); | 818 | BOOK3S_INTERRUPT_DATA_SEGMENT); |
807 | } | 819 | } |
808 | r = RESUME_GUEST; | 820 | r = RESUME_GUEST; |
809 | break; | 821 | break; |
810 | case BOOK3S_INTERRUPT_INST_SEGMENT: | 822 | case BOOK3S_INTERRUPT_INST_SEGMENT: |
811 | if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) { | 823 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { |
812 | kvmppc_book3s_queue_irqprio(vcpu, | 824 | kvmppc_book3s_queue_irqprio(vcpu, |
813 | BOOK3S_INTERRUPT_INST_SEGMENT); | 825 | BOOK3S_INTERRUPT_INST_SEGMENT); |
814 | } | 826 | } |
@@ -829,13 +841,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
829 | ulong flags; | 841 | ulong flags; |
830 | 842 | ||
831 | program_interrupt: | 843 | program_interrupt: |
832 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; | 844 | flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull; |
833 | 845 | ||
834 | if (vcpu->arch.msr & MSR_PR) { | 846 | if (vcpu->arch.msr & MSR_PR) { |
835 | #ifdef EXIT_DEBUG | 847 | #ifdef EXIT_DEBUG |
836 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst); | 848 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); |
837 | #endif | 849 | #endif |
838 | if ((vcpu->arch.last_inst & 0xff0007ff) != | 850 | if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) != |
839 | (INS_DCBZ & 0xfffffff7)) { | 851 | (INS_DCBZ & 0xfffffff7)) { |
840 | kvmppc_core_queue_program(vcpu, flags); | 852 | kvmppc_core_queue_program(vcpu, flags); |
841 | r = RESUME_GUEST; | 853 | r = RESUME_GUEST; |
@@ -854,7 +866,7 @@ program_interrupt: | |||
854 | break; | 866 | break; |
855 | case EMULATE_FAIL: | 867 | case EMULATE_FAIL: |
856 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | 868 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
857 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | 869 | __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); |
858 | kvmppc_core_queue_program(vcpu, flags); | 870 | kvmppc_core_queue_program(vcpu, flags); |
859 | r = RESUME_GUEST; | 871 | r = RESUME_GUEST; |
860 | break; | 872 | break; |
@@ -917,9 +929,9 @@ program_interrupt: | |||
917 | case BOOK3S_INTERRUPT_ALIGNMENT: | 929 | case BOOK3S_INTERRUPT_ALIGNMENT: |
918 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { | 930 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { |
919 | to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, | 931 | to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, |
920 | vcpu->arch.last_inst); | 932 | kvmppc_get_last_inst(vcpu)); |
921 | vcpu->arch.dear = kvmppc_alignment_dar(vcpu, | 933 | vcpu->arch.dear = kvmppc_alignment_dar(vcpu, |
922 | vcpu->arch.last_inst); | 934 | kvmppc_get_last_inst(vcpu)); |
923 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 935 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
924 | } | 936 | } |
925 | r = RESUME_GUEST; | 937 | r = RESUME_GUEST; |
@@ -932,7 +944,7 @@ program_interrupt: | |||
932 | default: | 944 | default: |
933 | /* Ugh - bork here! What did we get? */ | 945 | /* Ugh - bork here! What did we get? */ |
934 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", | 946 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", |
935 | exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1); | 947 | exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1); |
936 | r = RESUME_HOST; | 948 | r = RESUME_HOST; |
937 | BUG(); | 949 | BUG(); |
938 | break; | 950 | break; |
@@ -959,7 +971,7 @@ program_interrupt: | |||
959 | } | 971 | } |
960 | 972 | ||
961 | #ifdef EXIT_DEBUG | 973 | #ifdef EXIT_DEBUG |
962 | printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r); | 974 | printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r); |
963 | #endif | 975 | #endif |
964 | 976 | ||
965 | return r; | 977 | return r; |
@@ -976,10 +988,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
976 | 988 | ||
977 | vcpu_load(vcpu); | 989 | vcpu_load(vcpu); |
978 | 990 | ||
979 | regs->pc = vcpu->arch.pc; | 991 | regs->pc = kvmppc_get_pc(vcpu); |
980 | regs->cr = kvmppc_get_cr(vcpu); | 992 | regs->cr = kvmppc_get_cr(vcpu); |
981 | regs->ctr = vcpu->arch.ctr; | 993 | regs->ctr = kvmppc_get_ctr(vcpu); |
982 | regs->lr = vcpu->arch.lr; | 994 | regs->lr = kvmppc_get_lr(vcpu); |
983 | regs->xer = kvmppc_get_xer(vcpu); | 995 | regs->xer = kvmppc_get_xer(vcpu); |
984 | regs->msr = vcpu->arch.msr; | 996 | regs->msr = vcpu->arch.msr; |
985 | regs->srr0 = vcpu->arch.srr0; | 997 | regs->srr0 = vcpu->arch.srr0; |
@@ -1007,10 +1019,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1007 | 1019 | ||
1008 | vcpu_load(vcpu); | 1020 | vcpu_load(vcpu); |
1009 | 1021 | ||
1010 | vcpu->arch.pc = regs->pc; | 1022 | kvmppc_set_pc(vcpu, regs->pc); |
1011 | kvmppc_set_cr(vcpu, regs->cr); | 1023 | kvmppc_set_cr(vcpu, regs->cr); |
1012 | vcpu->arch.ctr = regs->ctr; | 1024 | kvmppc_set_ctr(vcpu, regs->ctr); |
1013 | vcpu->arch.lr = regs->lr; | 1025 | kvmppc_set_lr(vcpu, regs->lr); |
1014 | kvmppc_set_xer(vcpu, regs->xer); | 1026 | kvmppc_set_xer(vcpu, regs->xer); |
1015 | kvmppc_set_msr(vcpu, regs->msr); | 1027 | kvmppc_set_msr(vcpu, regs->msr); |
1016 | vcpu->arch.srr0 = regs->srr0; | 1028 | vcpu->arch.srr0 = regs->srr0; |
@@ -1157,19 +1169,23 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1157 | { | 1169 | { |
1158 | struct kvmppc_vcpu_book3s *vcpu_book3s; | 1170 | struct kvmppc_vcpu_book3s *vcpu_book3s; |
1159 | struct kvm_vcpu *vcpu; | 1171 | struct kvm_vcpu *vcpu; |
1160 | int err; | 1172 | int err = -ENOMEM; |
1161 | 1173 | ||
1162 | vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s)); | 1174 | vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s)); |
1163 | if (!vcpu_book3s) { | 1175 | if (!vcpu_book3s) |
1164 | err = -ENOMEM; | ||
1165 | goto out; | 1176 | goto out; |
1166 | } | 1177 | |
1167 | memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s)); | 1178 | memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s)); |
1168 | 1179 | ||
1180 | vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) | ||
1181 | kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); | ||
1182 | if (!vcpu_book3s->shadow_vcpu) | ||
1183 | goto free_vcpu; | ||
1184 | |||
1169 | vcpu = &vcpu_book3s->vcpu; | 1185 | vcpu = &vcpu_book3s->vcpu; |
1170 | err = kvm_vcpu_init(vcpu, kvm, id); | 1186 | err = kvm_vcpu_init(vcpu, kvm, id); |
1171 | if (err) | 1187 | if (err) |
1172 | goto free_vcpu; | 1188 | goto free_shadow_vcpu; |
1173 | 1189 | ||
1174 | vcpu->arch.host_retip = kvm_return_point; | 1190 | vcpu->arch.host_retip = kvm_return_point; |
1175 | vcpu->arch.host_msr = mfmsr(); | 1191 | vcpu->arch.host_msr = mfmsr(); |
@@ -1188,7 +1204,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1188 | 1204 | ||
1189 | err = __init_new_context(); | 1205 | err = __init_new_context(); |
1190 | if (err < 0) | 1206 | if (err < 0) |
1191 | goto free_vcpu; | 1207 | goto free_shadow_vcpu; |
1192 | vcpu_book3s->context_id = err; | 1208 | vcpu_book3s->context_id = err; |
1193 | 1209 | ||
1194 | vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1; | 1210 | vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1; |
@@ -1197,6 +1213,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1197 | 1213 | ||
1198 | return vcpu; | 1214 | return vcpu; |
1199 | 1215 | ||
1216 | free_shadow_vcpu: | ||
1217 | kfree(vcpu_book3s->shadow_vcpu); | ||
1200 | free_vcpu: | 1218 | free_vcpu: |
1201 | vfree(vcpu_book3s); | 1219 | vfree(vcpu_book3s); |
1202 | out: | 1220 | out: |
@@ -1209,6 +1227,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
1209 | 1227 | ||
1210 | __destroy_context(vcpu_book3s->context_id); | 1228 | __destroy_context(vcpu_book3s->context_id); |
1211 | kvm_vcpu_uninit(vcpu); | 1229 | kvm_vcpu_uninit(vcpu); |
1230 | kfree(vcpu_book3s->shadow_vcpu); | ||
1212 | vfree(vcpu_book3s); | 1231 | vfree(vcpu_book3s); |
1213 | } | 1232 | } |
1214 | 1233 | ||
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 512dcff77554..12e4c975a376 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
@@ -383,7 +383,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) | |||
383 | 383 | ||
384 | if (vcpu->arch.msr & MSR_IR) { | 384 | if (vcpu->arch.msr & MSR_IR) { |
385 | kvmppc_mmu_flush_segments(vcpu); | 385 | kvmppc_mmu_flush_segments(vcpu); |
386 | kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); | 386 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
387 | } | 387 | } |
388 | } | 388 | } |
389 | 389 | ||
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index a01e9c5a3fc7..b0f5b4edaec2 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -331,14 +331,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) | |||
331 | int found_inval = -1; | 331 | int found_inval = -1; |
332 | int r; | 332 | int r; |
333 | 333 | ||
334 | if (!get_paca()->kvm_slb_max) | 334 | if (!to_svcpu(vcpu)->slb_max) |
335 | get_paca()->kvm_slb_max = 1; | 335 | to_svcpu(vcpu)->slb_max = 1; |
336 | 336 | ||
337 | /* Are we overwriting? */ | 337 | /* Are we overwriting? */ |
338 | for (i = 1; i < get_paca()->kvm_slb_max; i++) { | 338 | for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) { |
339 | if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V)) | 339 | if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V)) |
340 | found_inval = i; | 340 | found_inval = i; |
341 | else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid) | 341 | else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid) |
342 | return i; | 342 | return i; |
343 | } | 343 | } |
344 | 344 | ||
@@ -352,11 +352,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) | |||
352 | max_slb_size = mmu_slb_size; | 352 | max_slb_size = mmu_slb_size; |
353 | 353 | ||
354 | /* Overflowing -> purge */ | 354 | /* Overflowing -> purge */ |
355 | if ((get_paca()->kvm_slb_max) == max_slb_size) | 355 | if ((to_svcpu(vcpu)->slb_max) == max_slb_size) |
356 | kvmppc_mmu_flush_segments(vcpu); | 356 | kvmppc_mmu_flush_segments(vcpu); |
357 | 357 | ||
358 | r = get_paca()->kvm_slb_max; | 358 | r = to_svcpu(vcpu)->slb_max; |
359 | get_paca()->kvm_slb_max++; | 359 | to_svcpu(vcpu)->slb_max++; |
360 | 360 | ||
361 | return r; | 361 | return r; |
362 | } | 362 | } |
@@ -374,7 +374,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) | |||
374 | 374 | ||
375 | if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { | 375 | if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { |
376 | /* Invalidate an entry */ | 376 | /* Invalidate an entry */ |
377 | get_paca()->kvm_slb[slb_index].esid = 0; | 377 | to_svcpu(vcpu)->slb[slb_index].esid = 0; |
378 | return -ENOENT; | 378 | return -ENOENT; |
379 | } | 379 | } |
380 | 380 | ||
@@ -388,8 +388,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) | |||
388 | slb_vsid &= ~SLB_VSID_KP; | 388 | slb_vsid &= ~SLB_VSID_KP; |
389 | slb_esid |= slb_index; | 389 | slb_esid |= slb_index; |
390 | 390 | ||
391 | get_paca()->kvm_slb[slb_index].esid = slb_esid; | 391 | to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; |
392 | get_paca()->kvm_slb[slb_index].vsid = slb_vsid; | 392 | to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; |
393 | 393 | ||
394 | dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); | 394 | dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); |
395 | 395 | ||
@@ -398,8 +398,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) | |||
398 | 398 | ||
399 | void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | 399 | void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) |
400 | { | 400 | { |
401 | get_paca()->kvm_slb_max = 1; | 401 | to_svcpu(vcpu)->slb_max = 1; |
402 | get_paca()->kvm_slb[0].esid = 0; | 402 | to_svcpu(vcpu)->slb[0].esid = 0; |
403 | } | 403 | } |
404 | 404 | ||
405 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 405 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 8f50776a9a1d..daa829b8f1f1 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
@@ -69,7 +69,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
69 | switch (get_xop(inst)) { | 69 | switch (get_xop(inst)) { |
70 | case OP_19_XOP_RFID: | 70 | case OP_19_XOP_RFID: |
71 | case OP_19_XOP_RFI: | 71 | case OP_19_XOP_RFI: |
72 | vcpu->arch.pc = vcpu->arch.srr0; | 72 | kvmppc_set_pc(vcpu, vcpu->arch.srr0); |
73 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | 73 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); |
74 | *advance = 0; | 74 | *advance = 0; |
75 | break; | 75 | break; |
@@ -208,7 +208,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
208 | if ((r == -ENOENT) || (r == -EPERM)) { | 208 | if ((r == -ENOENT) || (r == -EPERM)) { |
209 | *advance = 0; | 209 | *advance = 0; |
210 | vcpu->arch.dear = vaddr; | 210 | vcpu->arch.dear = vaddr; |
211 | vcpu->arch.fault_dear = vaddr; | 211 | to_svcpu(vcpu)->fault_dar = vaddr; |
212 | 212 | ||
213 | dsisr = DSISR_ISSTORE; | 213 | dsisr = DSISR_ISSTORE; |
214 | if (r == -ENOENT) | 214 | if (r == -ENOENT) |
@@ -217,7 +217,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
217 | dsisr |= DSISR_PROTFAULT; | 217 | dsisr |= DSISR_PROTFAULT; |
218 | 218 | ||
219 | to_book3s(vcpu)->dsisr = dsisr; | 219 | to_book3s(vcpu)->dsisr = dsisr; |
220 | vcpu->arch.fault_dsisr = dsisr; | 220 | to_svcpu(vcpu)->fault_dsisr = dsisr; |
221 | 221 | ||
222 | kvmppc_book3s_queue_irqprio(vcpu, | 222 | kvmppc_book3s_queue_irqprio(vcpu, |
223 | BOOK3S_INTERRUPT_DATA_STORAGE); | 223 | BOOK3S_INTERRUPT_DATA_STORAGE); |
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index 7a27bac8c44a..a9f66abafcb3 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c | |||
@@ -656,7 +656,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, | |||
656 | 656 | ||
657 | int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | 657 | int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) |
658 | { | 658 | { |
659 | u32 inst = vcpu->arch.last_inst; | 659 | u32 inst = kvmppc_get_last_inst(vcpu); |
660 | enum emulation_result emulated = EMULATE_DONE; | 660 | enum emulation_result emulated = EMULATE_DONE; |
661 | 661 | ||
662 | int ax_rd = inst_get_field(inst, 6, 10); | 662 | int ax_rd = inst_get_field(inst, 6, 10); |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index dbb5d6842a51..c6db28cdc594 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -132,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
132 | * from opcode tables in the future. */ | 132 | * from opcode tables in the future. */ |
133 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 133 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
134 | { | 134 | { |
135 | u32 inst = vcpu->arch.last_inst; | 135 | u32 inst = kvmppc_get_last_inst(vcpu); |
136 | u32 ea; | 136 | u32 ea; |
137 | int ra; | 137 | int ra; |
138 | int rb; | 138 | int rb; |
@@ -516,10 +516,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
516 | } | 516 | } |
517 | } | 517 | } |
518 | 518 | ||
519 | trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated); | 519 | trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); |
520 | 520 | ||
521 | /* Advance past emulated instruction. */ | ||
521 | if (advance) | 522 | if (advance) |
522 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ | 523 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); |
523 | 524 | ||
524 | return emulated; | 525 | return emulated; |
525 | } | 526 | } |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index ffbe4cac5b15..9b8683f39e05 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
70 | case EMULATE_FAIL: | 70 | case EMULATE_FAIL: |
71 | /* XXX Deliver Program interrupt to guest. */ | 71 | /* XXX Deliver Program interrupt to guest. */ |
72 | printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, | 72 | printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, |
73 | vcpu->arch.last_inst); | 73 | kvmppc_get_last_inst(vcpu)); |
74 | r = RESUME_HOST; | 74 | r = RESUME_HOST; |
75 | break; | 75 | break; |
76 | default: | 76 | default: |