author	Alexander Graf <agraf@suse.de>	2011-12-09 08:44:13 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:52:30 -0500
commit	468a12c2b53776721ff83517d4a195b85c5fce54
tree	ba417210997c2e3119525641764303d97db32815
parent	d33ad328c0025c45f4688a769aeebddc342222c1
KVM: PPC: Use get/set for to_svcpu to help preemption
When running the 64-bit Book3s PR code without CONFIG_PREEMPT_NONE, we were doing a few things wrong, most notably access to PACA fields without making sure that the pointers stay stable across the access (preempt_disable()).

This patch moves to_svcpu towards a get/put model which allows us to disable preemption while accessing the shadow vcpu fields in the PACA. That way we can run preemptible and everyone's happy!

Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
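[Editor's note: a minimal sketch of the access pattern this patch establishes, not code from the patch itself. On 64-bit Book3s PR the get/put pair maps to preempt_disable()/preempt_enable(), as defined in the header hunks below.]

	/* Every shadow vcpu access is now bracketed by get/put, so the
	 * PACA pointer cannot change underneath us on a preemptible kernel. */
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->gpr[3] = val;	/* any shadow vcpu field access goes here */
	svcpu_put(svcpu);	/* re-enables preemption on 64-bit PR */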
Diffstat (limited to 'arch')
 arch/powerpc/include/asm/kvm_book3s.h    | 76
 arch/powerpc/include/asm/kvm_book3s_32.h |  6
 arch/powerpc/include/asm/kvm_book3s_64.h |  8
 arch/powerpc/kvm/book3s_32_mmu_host.c    | 21
 arch/powerpc/kvm/book3s_64_mmu_host.c    | 66
 arch/powerpc/kvm/book3s_emulate.c        |  8
 arch/powerpc/kvm/book3s_pr.c             | 62
 arch/powerpc/kvm/trace.h                 |  5
 8 files changed, 181 insertions(+), 71 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 69c7377d2071..c941c21a1893 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -183,7 +183,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	if ( num < 14 ) {
-		to_svcpu(vcpu)->gpr[num] = val;
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		svcpu->gpr[num] = val;
+		svcpu_put(svcpu);
 		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
 	} else
 		vcpu->arch.gpr[num] = val;
@@ -191,80 +193,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	if ( num < 14 )
-		return to_svcpu(vcpu)->gpr[num];
-	else
+	if ( num < 14 ) {
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong r = svcpu->gpr[num];
+		svcpu_put(svcpu);
+		return r;
+	} else
 		return vcpu->arch.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	to_svcpu(vcpu)->cr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->cr = val;
+	svcpu_put(svcpu);
 	to_book3s(vcpu)->shadow_vcpu->cr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->cr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->cr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 {
-	to_svcpu(vcpu)->xer = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->xer = val;
 	to_book3s(vcpu)->shadow_vcpu->xer = val;
+	svcpu_put(svcpu);
 }
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->xer;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->xer;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->ctr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->ctr = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->ctr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->ctr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->lr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->lr = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->lr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->lr;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-	to_svcpu(vcpu)->pc = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->pc = val;
+	svcpu_put(svcpu);
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->pc;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->pc;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
 	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
 
 	/* Load the instruction manually if it failed to do so in the
 	 * exit path */
 	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
 		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
 
-	return svcpu->last_inst;
+	r = svcpu->last_inst;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
-	return to_svcpu(vcpu)->fault_dar;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->fault_dar;
+	svcpu_put(svcpu);
+	return r;
 }
 
 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index de604db135f5..38040ff82063 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -20,11 +20,15 @@
 #ifndef __ASM_KVM_BOOK3S_32_H__
 #define __ASM_KVM_BOOK3S_32_H__
 
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	return to_book3s(vcpu)->shadow_vcpu;
 }
 
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+}
+
 #define PTE_SIZE	12
 #define VSID_ALL	0
 #define SR_INVALID	0x00000001	/* VSID 1 should always be unused */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d0ac94f98f9e..2054e4726ba2 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -21,10 +21,16 @@
 #define __ASM_KVM_BOOK3S_64_H__
 
 #ifdef CONFIG_KVM_BOOK3S_PR
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
+	preempt_disable();
 	return &get_paca()->shadow_vcpu;
 }
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+	preempt_enable();
+}
 #endif
 
 #define SPAPR_TCE_SHIFT		12
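
[Editor's note: because the 64-bit svcpu_get() above disables preemption, everything between a get and its matching put runs in a preemption-off critical section, so it must stay short and must not sleep. A hedged sketch of what a bracketed access expands to on 64-bit:]

	preempt_disable();			/* from svcpu_get() */
	svcpu = &get_paca()->shadow_vcpu;	/* PACA pointer now stable */
	/* ... touch shadow vcpu fields ... */
	preempt_enable();			/* from svcpu_put() */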
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 9fecbfbce773..f922c29bb234 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -151,13 +151,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	bool primary = false;
 	bool evict = false;
 	struct hpte_cache *pte;
+	int r = 0;
 
 	/* Get host physical address for gpa */
 	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (is_error_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 				 orig_pte->eaddr);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
 	}
 	hpaddr <<= PAGE_SHIFT;
 
@@ -249,7 +251,8 @@ next_pteg:
 
 	kvmppc_mmu_hpte_cache_map(vcpu, pte);
 
-	return 0;
+out:
+	return r;
 }
 
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -297,12 +300,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 	u64 gvsid;
 	u32 sr;
 	struct kvmppc_sid_map *map;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	int r = 0;
 
 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
 		/* Invalidate an entry */
 		svcpu->sr[esid] = SR_INVALID;
-		return -ENOENT;
+		r = -ENOENT;
+		goto out;
 	}
 
 	map = find_sid_vsid(vcpu, gvsid);
@@ -315,17 +320,21 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 
 	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
 
-	return 0;
+out:
+	svcpu_put(svcpu);
+	return r;
 }
 
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
 	int i;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
 	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
 	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
 		svcpu->sr[i] = SR_INVALID;
+
+	svcpu_put(svcpu);
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index fa2f08434ba5..6f87f39a1ac2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -88,12 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	int vflags = 0;
 	int attempt = 0;
 	struct kvmppc_sid_map *map;
+	int r = 0;
 
 	/* Get host physical address for gpa */
 	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (is_error_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
 	}
 	hpaddr <<= PAGE_SHIFT;
 	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
@@ -110,7 +112,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
 				vsid, orig_pte->eaddr);
 		WARN_ON(true);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
 	}
 
 	vsid = map->host_vsid;
@@ -131,8 +134,10 @@ map_again:
 
 	/* In case we tried normal mapping already, let's nuke old entries */
 	if (attempt > 1)
-		if (ppc_md.hpte_remove(hpteg) < 0)
-			return -1;
+		if (ppc_md.hpte_remove(hpteg) < 0) {
+			r = -1;
+			goto out;
+		}
 
 	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
 
@@ -162,7 +167,8 @@ map_again:
 		kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}
 
-	return 0;
+out:
+	return r;
 }
 
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -207,25 +213,30 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 
 static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 {
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	int i;
 	int max_slb_size = 64;
 	int found_inval = -1;
 	int r;
 
-	if (!to_svcpu(vcpu)->slb_max)
-		to_svcpu(vcpu)->slb_max = 1;
+	if (!svcpu->slb_max)
+		svcpu->slb_max = 1;
 
 	/* Are we overwriting? */
-	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
-		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
+	for (i = 1; i < svcpu->slb_max; i++) {
+		if (!(svcpu->slb[i].esid & SLB_ESID_V))
 			found_inval = i;
-		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
-			return i;
+		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
+			r = i;
+			goto out;
+		}
 	}
 
 	/* Found a spare entry that was invalidated before */
-	if (found_inval > 0)
-		return found_inval;
+	if (found_inval > 0) {
+		r = found_inval;
+		goto out;
+	}
 
 	/* No spare invalid entry, so create one */
 
@@ -233,30 +244,35 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 		max_slb_size = mmu_slb_size;
 
 	/* Overflowing -> purge */
-	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
+	if ((svcpu->slb_max) == max_slb_size)
 		kvmppc_mmu_flush_segments(vcpu);
 
-	r = to_svcpu(vcpu)->slb_max;
-	to_svcpu(vcpu)->slb_max++;
+	r = svcpu->slb_max;
+	svcpu->slb_max++;
 
+out:
+	svcpu_put(svcpu);
 	return r;
 }
 
 int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 {
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	u64 esid = eaddr >> SID_SHIFT;
 	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
 	u64 slb_vsid = SLB_VSID_USER;
 	u64 gvsid;
 	int slb_index;
 	struct kvmppc_sid_map *map;
+	int r = 0;
 
 	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
 
 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
 		/* Invalidate an entry */
-		to_svcpu(vcpu)->slb[slb_index].esid = 0;
-		return -ENOENT;
+		svcpu->slb[slb_index].esid = 0;
+		r = -ENOENT;
+		goto out;
 	}
 
 	map = find_sid_vsid(vcpu, gvsid);
@@ -269,18 +285,22 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 	slb_vsid &= ~SLB_VSID_KP;
 	slb_esid |= slb_index;
 
-	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
-	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
+	svcpu->slb[slb_index].esid = slb_esid;
+	svcpu->slb[slb_index].vsid = slb_vsid;
 
 	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
 
-	return 0;
+out:
+	svcpu_put(svcpu);
+	return r;
 }
 
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
-	to_svcpu(vcpu)->slb_max = 1;
-	to_svcpu(vcpu)->slb[0].esid = 0;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->slb_max = 1;
+	svcpu->slb[0].esid = 0;
+	svcpu_put(svcpu);
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 0c9dc62532d0..f1950d131827 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -230,9 +230,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		r = kvmppc_st(vcpu, &addr, 32, zeros, true);
 		if ((r == -ENOENT) || (r == -EPERM)) {
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+
+			svcpu = svcpu_get(vcpu);
 			*advance = 0;
 			vcpu->arch.shared->dar = vaddr;
-			to_svcpu(vcpu)->fault_dar = vaddr;
+			svcpu->fault_dar = vaddr;
 
 			dsisr = DSISR_ISSTORE;
 			if (r == -ENOENT)
@@ -241,7 +244,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				dsisr |= DSISR_PROTFAULT;
 
 			vcpu->arch.shared->dsisr = dsisr;
-			to_svcpu(vcpu)->fault_dsisr = dsisr;
+			svcpu->fault_dsisr = dsisr;
+			svcpu_put(svcpu);
 
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_DATA_STORAGE);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 857ecde0cfdf..0c31507be908 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -56,10 +56,12 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu_put(svcpu);
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -70,10 +72,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
+	svcpu_put(svcpu);
 #endif
 
 	kvmppc_giveup_ext(vcpu, MSR_FP);
@@ -308,19 +312,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr =
-			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
 		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -521,21 +528,25 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
 		vcpu->stat.pf_instruc++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
-				== SR_INVALID) {
+		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+		if (shadow_srr1 & 0x40000000) {
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -548,33 +559,37 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.shared->msr |=
-				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		u32 fault_dsisr = svcpu->fault_dsisr;
 		vcpu->stat.pf_storage++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, dar);
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* The only case we need to handle is missing shadow PTEs */
-		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+		if (fault_dsisr & DSISR_NOHPTE) {
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 		} else {
 			vcpu->arch.shared->dar = dar;
-			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+			vcpu->arch.shared->dsisr = fault_dsisr;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -610,10 +625,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		enum emulation_result er;
+		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		ulong flags;
 
program_interrupt:
-		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+		svcpu = svcpu_get(vcpu);
+		flags = svcpu->shadow_srr1 & 0x1f0000ull;
+		svcpu_put(svcpu);
 
 		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -741,14 +759,18 @@ program_interrupt:
 		r = RESUME_GUEST;
 		break;
 	default:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
+		svcpu_put(svcpu);
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
 	}
+	}
 
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index b135d3d397db..609d8bfb54e3 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -118,11 +118,14 @@ TRACE_EVENT(kvm_book3s_exit,
 	),
 
 	TP_fast_assign(
+		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		__entry->exit_nr	= exit_nr;
 		__entry->pc		= kvmppc_get_pc(vcpu);
 		__entry->dar		= kvmppc_get_fault_dar(vcpu);
 		__entry->msr		= vcpu->arch.shared->msr;
-		__entry->srr1		= to_svcpu(vcpu)->shadow_srr1;
+		svcpu = svcpu_get(vcpu);
+		__entry->srr1		= svcpu->shadow_srr1;
+		svcpu_put(svcpu);
 	),
 
 	TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx",