author	Radim Krčmář <rkrcmar@redhat.com>	2018-02-09 15:36:57 -0500
committer	Radim Krčmář <rkrcmar@redhat.com>	2018-02-09 16:03:06 -0500
commit	1ab03c072feb579c9fd116de25be2b211e6bff6a (patch)
tree	c207a69c7943a464a83c97c9cc02a45c30c42bfa
parent	80132f4c0cf9e9966216ef4368bce530055dbb1d (diff)
parent	09f984961c137c4b252c368adab7e1c9f035fa59 (diff)
Merge tag 'kvm-ppc-next-4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
Second PPC KVM update for 4.16

Seven fixes that are either trivial or that address bugs that people are
actually hitting. The main ones are:

- Drop spinlocks before reading guest memory

- Fix a bug causing corruption of VCPU state in PR KVM with preemption
  enabled

- Make HPT resizing work on POWER9

- Add MMIO emulation for vector loads and stores, because guests now use
  these instructions in memcpy and similar routines.
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	6
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	2
-rw-r--r--	arch/powerpc/include/asm/kvm_ppc.h	4
-rw-r--r--	arch/powerpc/include/asm/ppc-opcode.h	6
-rw-r--r--	arch/powerpc/kvm/Kconfig	2
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_hv.c	38
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	16
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	3
-rw-r--r--	arch/powerpc/kvm/book3s_interrupts.S	4
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	20
-rw-r--r--	arch/powerpc/kvm/emulate_loadstore.c	36
-rw-r--r--	arch/powerpc/kvm/powerpc.c	153
12 files changed, 251 insertions, 39 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9a667007bff8..376ae803b69c 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
 extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
-extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-				 struct kvm_vcpu *vcpu);
-extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-				   struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fef8133becc8..1f53b562726f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -690,6 +690,7 @@ struct kvm_vcpu_arch {
 	u8 mmio_vsx_offset;
 	u8 mmio_vsx_copy_type;
 	u8 mmio_vsx_tx_sx_enabled;
+	u8 mmio_vmx_copy_nums;
 	u8 osi_needed;
 	u8 osi_enabled;
 	u8 papr_enabled;
@@ -804,6 +805,7 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_QPR	0x0040
 #define KVM_MMIO_REG_FQPR	0x0060
 #define KVM_MMIO_REG_VSX	0x0080
+#define KVM_MMIO_REG_VMX	0x00c0
 
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 941c2a3f231b..28c203003519 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				unsigned int rt, unsigned int bytes,
 			int is_default_endian, int mmio_sign_extend);
+extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
+		struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
+extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
+		struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			       u64 val, unsigned int bytes,
 			       int is_default_endian);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ce0930d68857..a51febca08c5 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -156,6 +156,12 @@
 #define OP_31_XOP_LFDX          599
 #define OP_31_XOP_LFDUX		631
 
+/* VMX Vector Load Instructions */
+#define OP_31_XOP_LVX           103
+
+/* VMX Vector Store Instructions */
+#define OP_31_XOP_STVX          231
+
 #define OP_LWZ  32
 #define OP_STFS 52
 #define OP_STFSU 53
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f884a0529dfe..68a0e9d5b440 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -69,7 +69,7 @@ config KVM_BOOK3S_64
 	select KVM_BOOK3S_64_HANDLER
 	select KVM
 	select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
-	select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
+	select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
 	---help---
 	  Support running unmodified book3s_64 and book3s_32 guest kernels
 	  in virtual machines on book3s_64 host processors.
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index b73dbc9e797d..ef243fed2f2b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1269,6 +1269,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		/* Nothing to do */
 		goto out;
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		rpte = be64_to_cpu(hptep[1]);
+		vpte = hpte_new_to_old_v(vpte, rpte);
+	}
+
 	/* Unmap */
 	rev = &old->rev[idx];
 	guest_rpte = rev->guest_rpte;
@@ -1298,7 +1303,6 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 
 	/* Reload PTE after unmap */
 	vpte = be64_to_cpu(hptep[0]);
-
 	BUG_ON(vpte & HPTE_V_VALID);
 	BUG_ON(!(vpte & HPTE_V_ABSENT));
 
@@ -1307,6 +1311,12 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		goto out;
 
 	rpte = be64_to_cpu(hptep[1]);
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		vpte = hpte_new_to_old_v(vpte, rpte);
+		rpte = hpte_new_to_old_r(rpte);
+	}
+
 	pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
 	avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
 	pteg = idx / HPTES_PER_GROUP;
@@ -1337,17 +1347,17 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 	}
 
 	new_pteg = hash & new_hash_mask;
-	if (vpte & HPTE_V_SECONDARY) {
-		BUG_ON(~pteg != (hash & old_hash_mask));
-		new_pteg = ~new_pteg;
-	} else {
-		BUG_ON(pteg != (hash & old_hash_mask));
-	}
+	if (vpte & HPTE_V_SECONDARY)
+		new_pteg = ~hash & new_hash_mask;
 
 	new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
 	new_hptep = (__be64 *)(new->virt + (new_idx << 4));
 
 	replace_vpte = be64_to_cpu(new_hptep[0]);
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
+		replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
+	}
 
 	if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
 		BUG_ON(new->order >= old->order);
@@ -1363,6 +1373,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		/* Discard the previous HPTE */
 	}
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		rpte = hpte_old_to_new_r(vpte, rpte);
+		vpte = hpte_old_to_new_v(vpte);
+	}
+
 	new_hptep[1] = cpu_to_be64(rpte);
 	new->rev[new_idx].guest_rpte = guest_rpte;
 	/* No need for a barrier, since new HPT isn't active */
@@ -1380,12 +1395,6 @@ static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
 	unsigned long i;
 	int rc;
 
-	/*
-	 * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
-	 * that POWER9 uses, and could well hit a BUG_ON on POWER9.
-	 */
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		return -EIO;
 	for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
 		rc = resize_hpt_rehash_hpte(resize, i);
 		if (rc != 0)
@@ -1416,6 +1425,9 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 	synchronize_srcu_expedited(&kvm->srcu);
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		kvmppc_setup_partition_table(kvm);
+
 	resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e5f81fc108e0..aa6130b56b5e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1008,8 +1008,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tvcpu;
 
-	if (!cpu_has_feature(CPU_FTR_ARCH_300))
-		return EMULATE_FAIL;
 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 		return RESUME_GUEST;
 	if (get_op(inst) != 31)
@@ -1059,6 +1057,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+/* Called with vcpu->arch.vcore->lock held */
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -1179,7 +1178,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			swab32(vcpu->arch.emul_inst) :
 			vcpu->arch.emul_inst;
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+			/* Need vcore unlocked to call kvmppc_get_last_inst */
+			spin_unlock(&vcpu->arch.vcore->lock);
 			r = kvmppc_emulate_debug_inst(run, vcpu);
+			spin_lock(&vcpu->arch.vcore->lock);
 		} else {
 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 			r = RESUME_GUEST;
@@ -1194,8 +1196,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
 		r = EMULATE_FAIL;
-		if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
+		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
+		    cpu_has_feature(CPU_FTR_ARCH_300)) {
+			/* Need vcore unlocked to call kvmppc_get_last_inst */
+			spin_unlock(&vcpu->arch.vcore->lock);
 			r = kvmppc_emulate_doorbell_instr(vcpu);
+			spin_lock(&vcpu->arch.vcore->lock);
+		}
 		if (r == EMULATE_FAIL) {
 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 			r = RESUME_GUEST;
@@ -2946,13 +2953,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	/* make sure updates to secondary vcpu structs are visible now */
 	smp_mb();
 
+	preempt_enable();
+
 	for (sub = 0; sub < core_info.n_subcores; ++sub) {
 		pvc = core_info.vc[sub];
 		post_guest_process(pvc, pvc == vc);
 	}
 
 	spin_lock(&vc->lock);
-	preempt_enable();
 
  out:
 	vc->vcore_state = VCORE_INACTIVE;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b64f10a5f5e7..875195369354 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -413,10 +413,11 @@ FTR_SECTION_ELSE
 	/* On P9 we use the split_info for coordinating LPCR changes */
 	lwz	r4, KVM_SPLIT_DO_SET(r6)
 	cmpwi	r4, 0
-	beq	63f
+	beq	1f
 	mr	r3, r6
 	bl	kvmhv_p9_set_lpcr
 	nop
+1:
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 63:
 	/* Order load of vcpu after load of vcore */
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 901e6fe00c39..c18e845019ec 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -96,7 +96,7 @@ kvm_start_entry:
 
 kvm_start_lightweight:
 	/* Copy registers into shadow vcpu so we can access them in real mode */
-	GET_SHADOW_VCPU(r3)
+	mr	r3, r4
 	bl	FUNC(kvmppc_copy_to_svcpu)
 	nop
 	REST_GPR(4, r1)
@@ -165,9 +165,7 @@ after_sprg3_load:
 	stw	r12, VCPU_TRAP(r3)
 
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
-	/* On 64-bit, interrupts are still off at this point */
 
-	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7deaeeb14b93..3ae752314b34 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -121,7 +121,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	if (svcpu->in_use) {
-		kvmppc_copy_from_svcpu(vcpu, svcpu);
+		kvmppc_copy_from_svcpu(vcpu);
 	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
@@ -143,9 +143,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 }
 
 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-			  struct kvm_vcpu *vcpu)
+void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
 	svcpu->gpr[0] = vcpu->arch.gpr[0];
 	svcpu->gpr[1] = vcpu->arch.gpr[1];
 	svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -177,17 +178,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 		vcpu->arch.entry_ic = mfspr(SPRN_IC);
 	svcpu->in_use = true;
+
+	svcpu_put(svcpu);
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-			    struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * vcpu_put would just call us again because in_use hasn't
-	 * been updated yet.
-	 */
-	preempt_disable();
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
 	/*
 	 * Maybe we were already preempted and synced the svcpu from
@@ -233,7 +231,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	svcpu->in_use = false;
 
 out:
-	preempt_enable();
+	svcpu_put(svcpu);
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index af833531af31..a382e15135e6 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
+{
+	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
+		kvmppc_core_queue_vec_unavail(vcpu);
+		return true;
+	}
+
+	return false;
+}
+#endif /* CONFIG_ALTIVEC */
+
 /*
  * XXX to do:
  * lfiwax, lfiwzx
@@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
 	vcpu->arch.mmio_sp64_extend = 0;
 	vcpu->arch.mmio_sign_extend = 0;
+	vcpu->arch.mmio_vmx_copy_nums = 0;
 
 	switch (get_op(inst)) {
 	case 31:
@@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 						      rs, 4, 1);
 			break;
 #endif /* CONFIG_VSX */
+
+#ifdef CONFIG_ALTIVEC
+		case OP_31_XOP_LVX:
+			if (kvmppc_check_altivec_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.vaddr_accessed &= ~0xFULL;
+			vcpu->arch.paddr_accessed &= ~0xFULL;
+			vcpu->arch.mmio_vmx_copy_nums = 2;
+			emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+					KVM_MMIO_REG_VMX|rt, 1);
+			break;
+
+		case OP_31_XOP_STVX:
+			if (kvmppc_check_altivec_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.vaddr_accessed &= ~0xFULL;
+			vcpu->arch.paddr_accessed &= ~0xFULL;
+			vcpu->arch.mmio_vmx_copy_nums = 2;
+			emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+					rs, 1);
+			break;
+#endif /* CONFIG_ALTIVEC */
+
 		default:
 			emulated = EMULATE_FAIL;
 			break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 748562ec9a04..403e642c78f5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -638,8 +638,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = 1;
 		break;
 	case KVM_CAP_SPAPR_RESIZE_HPT:
-		/* Disable this on POWER9 until code handles new HPTE format */
-		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
+		r = !!hv_enabled;
 		break;
 #endif
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -930,6 +929,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
+		u64 gpr)
+{
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+	u32 hi, lo;
+	u32 di;
+
+#ifdef __BIG_ENDIAN
+	hi = gpr >> 32;
+	lo = gpr & 0xffffffff;
+#else
+	lo = gpr >> 32;
+	hi = gpr & 0xffffffff;
+#endif
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
+	if (di > 1)
+		return;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
+	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+}
+#endif /* CONFIG_ALTIVEC */
+
 #ifdef CONFIG_PPC_FPU
 static inline u64 sp_to_dp(u32 fprs)
 {
@@ -1033,6 +1060,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		kvmppc_set_vsr_dword_dump(vcpu, gpr);
 		break;
 #endif
+#ifdef CONFIG_ALTIVEC
+	case KVM_MMIO_REG_VMX:
+		kvmppc_set_vmx_dword(vcpu, gpr);
+		break;
+#endif
 	default:
 		BUG();
 	}
@@ -1308,6 +1340,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+/* handle quadword load access in two halves */
+int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rt, int is_default_endian)
+{
+	enum emulation_result emulated;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+				is_default_endian, 0);
+
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
+	u32 di;
+	u64 w0, w1;
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
+	if (di > 1)
+		return -1;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	w0 = vrs.u[di * 2];
+	w1 = vrs.u[di * 2 + 1];
+
+#ifdef __BIG_ENDIAN
+	*val = (w0 << 32) | w1;
+#else
+	*val = (w1 << 32) | w0;
+#endif
+	return 0;
+}
+
+/* handle quadword store in two halves */
+int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rs, int is_default_endian)
+{
+	u64 val = 0;
+	enum emulation_result emulated = EMULATE_DONE;
+
+	vcpu->arch.io_gpr = rs;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+			return EMULATE_FAIL;
+
+		emulated = kvmppc_handle_store(run, vcpu, val, 8,
+				is_default_endian);
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
+		struct kvm_run *run)
+{
+	enum emulation_result emulated = EMULATE_FAIL;
+	int r;
+
+	vcpu->arch.paddr_accessed += run->mmio.len;
+
+	if (!vcpu->mmio_is_write) {
+		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	} else {
+		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	}
+
+	switch (emulated) {
+	case EMULATE_DO_MMIO:
+		run->exit_reason = KVM_EXIT_MMIO;
+		r = RESUME_HOST;
+		break;
+	case EMULATE_FAIL:
+		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		r = RESUME_HOST;
+		break;
+	default:
+		r = RESUME_GUEST;
+		break;
+	}
+	return r;
+}
+#endif /* CONFIG_ALTIVEC */
+
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
 	int r = 0;
@@ -1429,6 +1566,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			}
 		}
 #endif
+#ifdef CONFIG_ALTIVEC
+		if (vcpu->arch.mmio_vmx_copy_nums > 0)
+			vcpu->arch.mmio_vmx_copy_nums--;
+
+		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
+			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+			if (r == RESUME_HOST) {
+				vcpu->mmio_needed = 1;
+				goto out;
+			}
+		}
+#endif
 	} else if (vcpu->arch.osi_needed) {
 		u64 *gprs = run->osi.gprs;
 		int i;