Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
-rw-r--r--  arch/powerpc/kvm/book3s.c | 79
1 file changed, 39 insertions, 40 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b998abf1a63d..a3cef30d1d42 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1047,8 +1047,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
         int i;
 
-        vcpu_load(vcpu);
-
         regs->pc = kvmppc_get_pc(vcpu);
         regs->cr = kvmppc_get_cr(vcpu);
         regs->ctr = kvmppc_get_ctr(vcpu);
@@ -1069,8 +1067,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
-        vcpu_put(vcpu);
-
         return 0;
 }
 
@@ -1078,8 +1074,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
         int i;
 
-        vcpu_load(vcpu);
-
         kvmppc_set_pc(vcpu, regs->pc);
         kvmppc_set_cr(vcpu, regs->cr);
         kvmppc_set_ctr(vcpu, regs->ctr);
@@ -1099,8 +1093,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
-        vcpu_put(vcpu);
-
         return 0;
 }
 
@@ -1110,8 +1102,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
         int i;
 
-        vcpu_load(vcpu);
-
         sregs->pvr = vcpu->arch.pvr;
 
         sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -1131,8 +1121,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                 }
         }
 
-        vcpu_put(vcpu);
-
         return 0;
 }
 
@@ -1142,8 +1130,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
         int i;
 
-        vcpu_load(vcpu);
-
         kvmppc_set_pvr(vcpu, sregs->pvr);
 
         vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -1171,8 +1157,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         /* Flush the MMU after messing with the segments */
         kvmppc_mmu_pte_flush(vcpu, 0, 0);
 
-        vcpu_put(vcpu);
-
         return 0;
 }
 
@@ -1309,12 +1293,17 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
         int ret;
-        struct thread_struct ext_bkp;
+        double fpr[32][TS_FPRWIDTH];
+        unsigned int fpscr;
+        int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-        bool save_vec = current->thread.used_vr;
+        vector128 vr[32];
+        vector128 vscr;
+        unsigned long uninitialized_var(vrsave);
+        int used_vr;
 #endif
 #ifdef CONFIG_VSX
-        bool save_vsx = current->thread.used_vsr;
+        int used_vsr;
 #endif
         ulong ext_msr;
 
@@ -1327,27 +1316,27 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         /* Save FPU state in stack */
         if (current->thread.regs->msr & MSR_FP)
                 giveup_fpu(current);
-        memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
-        ext_bkp.fpscr = current->thread.fpscr;
-        ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+        fpscr = current->thread.fpscr.val;
+        fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
         /* Save Altivec state in stack */
-        if (save_vec) {
+        used_vr = current->thread.used_vr;
+        if (used_vr) {
                 if (current->thread.regs->msr & MSR_VEC)
                         giveup_altivec(current);
-                memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
-                ext_bkp.vscr = current->thread.vscr;
-                ext_bkp.vrsave = current->thread.vrsave;
+                memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+                vscr = current->thread.vscr;
+                vrsave = current->thread.vrsave;
         }
-        ext_bkp.used_vr = current->thread.used_vr;
 #endif
 
 #ifdef CONFIG_VSX
         /* Save VSX state in stack */
-        if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+        used_vsr = current->thread.used_vsr;
+        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                 __giveup_vsx(current);
-        ext_bkp.used_vsr = current->thread.used_vsr;
 #endif
 
         /* Remember the MSR with disabled extensions */
@@ -1372,22 +1361,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         kvmppc_giveup_ext(vcpu, MSR_VSX);
 
         /* Restore FPU state from stack */
-        memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
-        current->thread.fpscr = ext_bkp.fpscr;
-        current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+        current->thread.fpscr.val = fpscr;
+        current->thread.fpexc_mode = fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
         /* Restore Altivec state from stack */
-        if (save_vec && current->thread.used_vr) {
-                memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
-                current->thread.vscr = ext_bkp.vscr;
-                current->thread.vrsave= ext_bkp.vrsave;
+        if (used_vr && current->thread.used_vr) {
+                memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+                current->thread.vscr = vscr;
+                current->thread.vrsave = vrsave;
         }
-        current->thread.used_vr = ext_bkp.used_vr;
+        current->thread.used_vr = used_vr;
 #endif
 
 #ifdef CONFIG_VSX
-        current->thread.used_vsr = ext_bkp.used_vsr;
+        current->thread.used_vsr = used_vsr;
 #endif
 
         return ret;
@@ -1395,12 +1384,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 static int kvmppc_book3s_init(void)
 {
-        return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-                        THIS_MODULE);
+        int r;
+
+        r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+                     THIS_MODULE);
+
+        if (r)
+                return r;
+
+        r = kvmppc_mmu_hpte_sysinit();
+
+        return r;
 }
 
 static void kvmppc_book3s_exit(void)
 {
+        kvmppc_mmu_hpte_sysexit();
         kvm_exit();
 }
 
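
Note on the largest hunk: __kvmppc_vcpu_run() stops keeping a whole struct thread_struct backup (ext_bkp) on the stack and instead saves only the fields it actually restores later (FPU, AltiVec and VSX state) as discrete locals, which keeps the stack frame small. The following stand-alone C sketch only illustrates that pattern; thread_state, current_thread, the field sizes and the guest_entry_* helpers are simplified placeholders, not the kernel's real definitions.

/* Illustrative sketch only: simplified stand-ins for the kernel types. */
#include <stdio.h>
#include <string.h>

typedef unsigned long vector128[2];      /* placeholder for the real vector128 */

struct thread_state {                    /* stand-in for struct thread_struct */
        double fpr[32][2];
        unsigned int fpscr;
        int fpexc_mode;
        vector128 vr[32];
        vector128 vscr;
        unsigned long vrsave;
        int used_vr, used_vsr;
        char other_state[1024];          /* everything else the real struct carries */
};

static struct thread_state current_thread;   /* stands in for current->thread */

static void guest_entry_old_pattern(void)
{
        /* Old pattern: back up the entire structure on the stack. */
        struct thread_state ext_bkp = current_thread;
        (void)ext_bkp;
}

static void guest_entry_new_pattern(void)
{
        /* New pattern: save only the pieces that must survive the guest run. */
        double fpr[32][2];
        unsigned int fpscr;

        memcpy(fpr, current_thread.fpr, sizeof(current_thread.fpr));
        fpscr = current_thread.fpscr;

        /* ... run the guest ... */

        memcpy(current_thread.fpr, fpr, sizeof(current_thread.fpr));
        current_thread.fpscr = fpscr;
}

int main(void)
{
        printf("whole-struct backup:   %zu bytes on the stack\n",
               sizeof(struct thread_state));
        printf("field-by-field backup: %zu bytes on the stack\n",
               sizeof(double[32][2]) + sizeof(unsigned int));
        guest_entry_old_pattern();
        guest_entry_new_pattern();
        return 0;
}

In the real function the same save/restore is done for the AltiVec and VSX state under CONFIG_ALTIVEC and CONFIG_VSX, as the diff above shows.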