author		Andreas Schwab <schwab@linux-m68k.org>	2010-05-31 15:59:13 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:39:24 -0400
commit		49f6be8ea1bd74713c1a48e42db06a3808dfa2cd (patch)
tree		5be35ebab7a049e9358a60b308df981b02f70f21 /arch/powerpc/kvm
parent		5120702e732ed72c7055f511f8dd01de36424569 (diff)
KVM: PPC: elide struct thread_struct instances from stack
Instead of instantiating a whole thread_struct on the stack, use only the required parts of it.

Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
Tested-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
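In practice the KVM entry path now keeps only the live FPU/Altivec/VSX fields as locals instead of a full thread_struct. A condensed sketch of the pattern, drawn from the book3s.c hunks below (FPU part only):

	/* before: a full thread_struct backup lived on the kernel stack */
	struct thread_struct ext_bkp;

	/* after: only the fields that are actually saved and restored */
	double fpr[32][TS_FPRWIDTH];	/* FP register file */
	unsigned int fpscr;		/* FP status and control */
	int fpexc_mode;			/* FP exception mode */

	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;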
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s.c			49
-rw-r--r--	arch/powerpc/kvm/book3s_paired_singles.c	94
-rw-r--r--	arch/powerpc/kvm/fpu.S				18
3 files changed, 82 insertions(+), 79 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index f6eac2f337d..801d9f3c70a 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1293,12 +1293,17 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	struct thread_struct ext_bkp;
+	double fpr[32][TS_FPRWIDTH];
+	unsigned int fpscr;
+	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	bool save_vec = current->thread.used_vr;
+	vector128 vr[32];
+	vector128 vscr;
+	unsigned long uninitialized_var(vrsave);
+	int used_vr;
 #endif
 #ifdef CONFIG_VSX
-	bool save_vsx = current->thread.used_vsr;
+	int used_vsr;
 #endif
 	ulong ext_msr;
 
@@ -1311,27 +1316,27 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Save FPU state in stack */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
-	ext_bkp.fpscr = current->thread.fpscr;
-	ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+	fpscr = current->thread.fpscr.val;
+	fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
 	/* Save Altivec state in stack */
-	if (save_vec) {
+	used_vr = current->thread.used_vr;
+	if (used_vr) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
-		ext_bkp.vscr = current->thread.vscr;
-		ext_bkp.vrsave = current->thread.vrsave;
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		vrsave = current->thread.vrsave;
 	}
-	ext_bkp.used_vr = current->thread.used_vr;
 #endif
 
 #ifdef CONFIG_VSX
 	/* Save VSX state in stack */
-	if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+	used_vsr = current->thread.used_vsr;
+	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
 		__giveup_vsx(current);
-	ext_bkp.used_vsr = current->thread.used_vsr;
 #endif
 
 	/* Remember the MSR with disabled extensions */
@@ -1356,22 +1361,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
 
 	/* Restore FPU state from stack */
-	memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
-	current->thread.fpscr = ext_bkp.fpscr;
-	current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+	current->thread.fpscr.val = fpscr;
+	current->thread.fpexc_mode = fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
 	/* Restore Altivec state from stack */
-	if (save_vec && current->thread.used_vr) {
-		memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
-		current->thread.vscr = ext_bkp.vscr;
-		current->thread.vrsave= ext_bkp.vrsave;
+	if (used_vr && current->thread.used_vr) {
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.vrsave = vrsave;
 	}
-	current->thread.used_vr = ext_bkp.used_vr;
+	current->thread.used_vr = used_vr;
 #endif
 
 #ifdef CONFIG_VSX
-	current->thread.used_vsr = ext_bkp.used_vsr;
+	current->thread.used_vsr = used_vsr;
 #endif
 
 	return ret;
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a9f66abafcb..474f2e24050 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -159,10 +159,7 @@
 
 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 {
-	struct thread_struct t;
-
-	t.fpscr.val = vcpu->arch.fpscr;
-	cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
+	kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr);
 }
 
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@@ -183,7 +180,6 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				      int rs, ulong addr, int ls_type)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	char tmp[8];
 	int len = sizeof(u32);
@@ -191,8 +187,6 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if (ls_type == FPU_LS_DOUBLE)
 		len = sizeof(u64);
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	/* read from memory */
 	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
 	vcpu->arch.paddr_accessed = addr;
@@ -210,7 +204,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* put in registers */
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
+		kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 		vcpu->arch.qpr[rs] = *((u32*)tmp);
 		break;
 	case FPU_LS_DOUBLE:
@@ -229,17 +223,14 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				       int rs, ulong addr, int ls_type)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	char tmp[8];
 	u64 val;
 	int len;
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
+		kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr);
 		val = *((u32*)tmp);
 		len = sizeof(u32);
 		break;
@@ -278,13 +269,10 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				      int rs, ulong addr, bool w, int i)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	float one = 1.0;
 	u32 tmp[2];
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	/* read from memory */
 	if (w) {
 		r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
@@ -308,7 +296,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	emulated = EMULATE_DONE;
 
 	/* put in registers */
-	cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
+	kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 	vcpu->arch.qpr[rs] = tmp[1];
 
 	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -322,14 +310,11 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				       int rs, ulong addr, bool w, int i)
 {
 	int emulated = EMULATE_FAIL;
-	struct thread_struct t;
 	int r;
 	u32 tmp[2];
 	int len = w ? sizeof(u32) : sizeof(u64);
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
-	cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
+	kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr);
 	tmp[1] = vcpu->arch.qpr[rs];
 
 	r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -517,7 +502,7 @@ static int get_d_signext(u32 inst)
 static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 				   int reg_out, int reg_in1, int reg_in2,
 				   int reg_in3, int scalar,
-				   void (*func)(struct thread_struct *t,
+				   void (*func)(u64 *fpscr,
 						u32 *dst, u32 *src1,
 						u32 *src2, u32 *src3))
 {
@@ -526,27 +511,25 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 	u32 ps0_out;
 	u32 ps0_in1, ps0_in2, ps0_in3;
 	u32 ps1_in1, ps1_in2, ps1_in3;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;
 
 	/* RC */
 	WARN_ON(rc);
 
 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
-	cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
-	cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);
+	kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
+	kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
+	kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 
-	func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
 
 	dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps0_in1, ps0_in2, ps0_in3, ps0_out);
 
 	if (!(scalar & SCALAR_NO_PS0))
-		cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+		kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 
 	/* PS1 */
 	ps1_in1 = qpr[reg_in1];
@@ -557,7 +540,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 		ps1_in2 = ps0_in2;
 
 	if (!(scalar & SCALAR_NO_PS1))
-		func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+		func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
 
 	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
@@ -568,7 +551,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 				 int reg_out, int reg_in1, int reg_in2,
 				 int scalar,
-				 void (*func)(struct thread_struct *t,
+				 void (*func)(u64 *fpscr,
 					      u32 *dst, u32 *src1,
 					      u32 *src2))
 {
@@ -578,27 +561,25 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 	u32 ps0_in1, ps0_in2;
 	u32 ps1_out;
 	u32 ps1_in1, ps1_in2;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;
 
 	/* RC */
 	WARN_ON(rc);
 
 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
+	kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 	else
-		cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
+		kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
 
-	func(&t, &ps0_out, &ps0_in1, &ps0_in2);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
 
 	if (!(scalar & SCALAR_NO_PS0)) {
 		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
 				  ps0_in1, ps0_in2, ps0_out);
 
-		cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+		kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 	}
 
 	/* PS1 */
@@ -608,7 +589,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 	if (scalar & SCALAR_HIGH)
 		ps1_in2 = ps0_in2;
 
-	func(&t, &ps1_out, &ps1_in1, &ps1_in2);
+	func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
 
 	if (!(scalar & SCALAR_NO_PS1)) {
 		qpr[reg_out] = ps1_out;
@@ -622,31 +603,29 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 
 static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 				 int reg_out, int reg_in,
-				 void (*func)(struct thread_struct *t,
+				 void (*func)(u64 *t,
 					      u32 *dst, u32 *src1))
 {
 	u32 *qpr = vcpu->arch.qpr;
 	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out, ps0_in;
 	u32 ps1_in;
-	struct thread_struct t;
-	t.fpscr.val = vcpu->arch.fpscr;
 
 	/* RC */
 	WARN_ON(rc);
 
 	/* PS0 */
-	cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t);
-	func(&t, &ps0_out, &ps0_in);
+	kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr);
+	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
 
 	dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
 			  ps0_in, ps0_out);
 
-	cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+	kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 
 	/* PS1 */
 	ps1_in = qpr[reg_in];
-	func(&t, &qpr[reg_out], &ps1_in);
+	func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);
 
 	dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
 			  ps1_in, qpr[reg_out]);
@@ -672,13 +651,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	bool rcomp = (inst & 1) ? true : false;
 	u32 cr = kvmppc_get_cr(vcpu);
-	struct thread_struct t;
 #ifdef DEBUG
 	int i;
 #endif
 
-	t.fpscr.val = vcpu->arch.fpscr;
-
 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
 		return EMULATE_FAIL;
 
@@ -695,7 +671,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 #ifdef DEBUG
 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
 		u32 f;
-		cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+		kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
 			i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
 	}
@@ -819,8 +795,9 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		WARN_ON(rcomp);
 		vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
 		/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-		cvt_df((double*)&vcpu->arch.fpr[ax_rb],
-		       (float*)&vcpu->arch.qpr[ax_rd], &t);
+		kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+			   &vcpu->arch.qpr[ax_rd],
+			   &vcpu->arch.fpscr);
 		break;
 	case OP_4X_PS_MERGE01:
 		WARN_ON(rcomp);
@@ -830,17 +807,20 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case OP_4X_PS_MERGE10:
 		WARN_ON(rcomp);
 		/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
-		cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
-		       (double*)&vcpu->arch.fpr[ax_rd], &t);
+		kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
+			   &vcpu->arch.fpr[ax_rd],
+			   &vcpu->arch.fpscr);
 		/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-		cvt_df((double*)&vcpu->arch.fpr[ax_rb],
-		       (float*)&vcpu->arch.qpr[ax_rd], &t);
+		kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+			   &vcpu->arch.qpr[ax_rd],
+			   &vcpu->arch.fpscr);
 		break;
 	case OP_4X_PS_MERGE11:
 		WARN_ON(rcomp);
 		/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
-		cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
-		       (double*)&vcpu->arch.fpr[ax_rd], &t);
+		kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
+			   &vcpu->arch.fpr[ax_rd],
+			   &vcpu->arch.fpscr);
 		vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 		break;
 	}
@@ -1275,7 +1255,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 #ifdef DEBUG
 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
 		u32 f;
-		cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+		kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
 	}
 #endif
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index 2b340a3eee9..cb34bbe1611 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -271,3 +271,21 @@ FPD_THREE_IN(fmsub)
 FPD_THREE_IN(fmadd)
 FPD_THREE_IN(fnmsub)
 FPD_THREE_IN(fnmadd)
+
+_GLOBAL(kvm_cvt_fd)
+	lfd	0,0(r5)			/* load up fpscr value */
+	MTFSF_L(0)
+	lfs	0,0(r3)
+	stfd	0,0(r4)
+	mffs	0
+	stfd	0,0(r5)			/* save new fpscr value */
+	blr
+
+_GLOBAL(kvm_cvt_df)
+	lfd	0,0(r5)			/* load up fpscr value */
+	MTFSF_L(0)
+	lfd	0,0(r3)
+	stfs	0,0(r4)
+	mffs	0
+	stfd	0,0(r5)			/* save new fpscr value */
+	blr
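The two helpers added above take all three arguments by reference, following the powerpc calling convention (r3, r4 and r5 carry the first pointer arguments). Their C declarations are not part of this diff; judging from the call sites in book3s_paired_singles.c they are presumably along these lines (assumed, not shown in the patch):

	/* assumed prototypes; the declaring header is outside arch/powerpc/kvm */
	extern void kvm_cvt_fd(u32 *from, u64 *to, u64 *fpscr);	/* single -> double: r3=from, r4=to, r5=fpscr */
	extern void kvm_cvt_df(u64 *from, u32 *to, u64 *fpscr);	/* double -> single: r3=from, r4=to, r5=fpscr */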