author    Linus Torvalds <torvalds@linux-foundation.org>  2012-05-16 17:30:51 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-16 17:30:51 -0400
commit    0e93b4b304ae052ba1bc73f6d34a68556fe93429 (patch)
tree      93f7d72e1833cefc62742624402b3ea415e7f22f
parent    b724cc199bc8e889569e85301e6e56b5be25b986 (diff)
parent    51bfd2998113e1f8ce8dcf853407b76a04b5f2a0 (diff)
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm powerpc fixes from Marcelo Tosatti:
 "Urgent KVM PPC updates, quoting Alexander Graf:

    There are a few bugs in 3.4 that really should be fixed before
    people can be all happy and fuzzy about KVM on PowerPC.  These
    fixes are:

     * fix POWER7 bare metal with PR=y
     * fix deadlock on HV=y book3s_64 mode in low memory cases
     * fix invalid MMU scope of PR=y mode on book3s_64, possibly
       leading to memory corruption"

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Fix bug leading to deadlock in guest HPT updates
  powerpc/kvm: Fix VSID usage in 64-bit "PR" KVM
  KVM: PPC: Book3S: PR: Fix hsrr code
  KVM: PPC: Fix PR KVM on POWER7 bare metal
  KVM: PPC: Book3S: PR: Handle EMUL_ASSIST
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h |  7
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 13
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c   |  1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S     | 42
4 files changed, 40 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index aa795ccef29..fd07f43d662 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
-	u64 vsid_next;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
+	u32 vsid_next;
 #else
-	u64 vsid_first;
-	u64 vsid_max;
+	u64 proto_vsid_first;
+	u64 proto_vsid_max;
+	u64 proto_vsid_next;
 #endif
 	int context_id[SID_CONTEXTS];
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6f87f39a1ac..10fc8ec9d2a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	backwards_map = !backwards_map;
 
 	/* Uh-oh ... out of mappings. Let's flush! */
-	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
-		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
+		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
 		memset(vcpu_book3s->sid_map, 0,
 		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		kvmppc_mmu_flush_segments(vcpu);
 	}
-	map->host_vsid = vcpu_book3s->vsid_next++;
+	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
 
 	map->guest_vsid = gvsid;
 	map->valid = true;
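
A note on the new line above: the `256M` argument is a preprocessor token, not a numeric literal. `vsid_scramble()` pastes it onto `VSID_MULTIPLIER_`/`VSID_MODULUS_` names to select the 256MB-segment constants. A minimal sketch of the idea, assuming the straightforward multiplicative form (the in-tree macro in arch/powerpc/include/asm/mmu-hash64.h may use an optimized modulus sequence):

/*
 * Simplified sketch of the proto-VSID -> VSID scramble; illustration
 * only, not the kernel's exact macro.  Multiplying by a large odd
 * constant and reducing modulo VSID_MODULUS spreads the linearly
 * allocated proto-VSIDs across the hashed page table, and keeps KVM's
 * host-side VSIDs in the same scrambled space the rest of the host
 * kernel uses.
 */
#define vsid_scramble(protovsid, size) \
	(((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size)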
@@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 		return -1;
 	vcpu3s->context_id[0] = err;
 
-	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
-	vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
-	vcpu3s->vsid_next = vcpu3s->vsid_first;
+	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+				  << USER_ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
 
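
To make the window arithmetic concrete, here is a small standalone illustration (not kernel code; the context id of 5 is made up, and USER_ESID_BITS is taken as 16, the value kernels of this era used):

#include <stdio.h>

#define USER_ESID_BITS 16	/* illustrative; matches kernels of this era */

int main(void)
{
	unsigned long context_id = 5;	/* hypothetical id from the allocator */
	unsigned long first = context_id << USER_ESID_BITS;
	unsigned long max = ((context_id + 1) << USER_ESID_BITS) - 1;

	/* prints: proto-VSIDs 0x50000 .. 0x5ffff */
	printf("proto-VSIDs 0x%lx .. 0x%lx\n", first, max);
	return 0;
}

Each value handed out of that window is now scrambled before becoming a host VSID, which is the substance of the fix: the old code used the raw window values directly, outside the scrambled VSID space the host computes, risking collisions and the memory corruption mentioned in the merge message.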
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index def880aea63..cec4daddbf3 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			/* insert R and C bits from PTE */
 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 			args[j] |= rcbits << (56 - 5);
+			hp[0] = 0;
 			continue;
 		}
 
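
The one-line book3s_hv_rm_mmu.c change is the deadlock fix: HPTEs processed by H_BULK_REMOVE are held locked via a lock bit in word 0 of the entry, and this early `continue` path previously skipped the store that releases it. A rough sketch of the convention, with a hypothetical helper name (the real locking in the same file uses larx/stcx. atomics):

/*
 * Rough illustration of the HPTE lock convention (hypothetical helper,
 * simplified from the real-mode MMU code).  A set lock bit in hpte[0]
 * means some CPU owns the entry; every path out of the critical
 * section must store a value with the bit clear.  Storing 0 both
 * unlocks the entry and marks it invalid, which is what the added
 * hp[0] = 0 does -- without it the entry stayed locked and the next
 * CPU trying to take it spun forever.
 */
static int hpte_try_lock_sketch(unsigned long *hpte, unsigned long lockbit)
{
	if (*hpte & lockbit)
		return 0;	/* held elsewhere: caller must retry */
	*hpte |= lockbit;	/* the real code does this atomically */
	return 1;
}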
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0676ae249b9..6e6e9cef34a 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -197,7 +197,8 @@ kvmppc_interrupt:
 	/* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
-	andi.	r0,r12,0x2
+	andi.	r0, r12, 0x2
+	cmpwi	cr1, r0, 0
 	beq	1f
 	mfspr	r3,SPRN_HSRR0
 	mfspr	r4,SPRN_HSRR1
@@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	beq	ld_last_prev_inst
 	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
 	beq-	ld_last_inst
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
+	beq-	ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
 
 	b	no_ld_last_inst
 
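
This hunk implements the "Handle EMUL_ASSIST" patch: on HV-capable hardware (POWER7 bare metal), a guest emulation trap arrives as a hypervisor emulation assist interrupt rather than a program interrupt, so the last-instruction fetch must fire for it too. A minimal C-level sketch of the dispatch, using the visible cases only (helper and constants mirror the kernel's ids; 0x600 and 0xe40 are the architected vectors):

/* Sketch (hypothetical helper): which exits need the guest's last
 * instruction fetched before the C-level handler runs. */
#define BOOK3S_INTERRUPT_ALIGNMENT	0x600
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40

static int needs_last_inst_sketch(unsigned long exit_id, int cpu_has_hv)
{
	if (exit_id == BOOK3S_INTERRUPT_ALIGNMENT)
		return 1;
	/* new: emulation traps show up as H_EMUL_ASSIST under HV */
	if (cpu_has_hv && exit_id == BOOK3S_INTERRUPT_H_EMUL_ASSIST)
		return 1;
	return 0;
}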
@@ -316,23 +323,17 @@ no_dcbz32_off:
 	 * Having set up SRR0/1 with the address where we want
 	 * to continue with relocation on (potentially in module
 	 * space), we either just go straight there with rfi[d],
-	 * or we jump to an interrupt handler with bctr if there
-	 * is an interrupt to be handled first.  In the latter
-	 * case, the rfi[d] at the end of the interrupt handler
-	 * will get us back to where we want to continue.
+	 * or we jump to an interrupt handler if there is an
+	 * interrupt to be handled first.  In the latter case,
+	 * the rfi[d] at the end of the interrupt handler will
+	 * get us back to where we want to continue.
 	 */
 
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
-1:	mtctr	r12
-
 	/* Register usage at this point:
 	 *
 	 *   R1       = host R1
 	 *   R2       = host R2
+	 *   R10      = raw exit handler id
 	 *   R12      = exit handler id
 	 *   R13      = shadow vcpu (32-bit) or PACA (64-bit)
 	 *   SVCPU.*  = guest *
@@ -342,12 +343,25 @@ no_dcbz32_off:
 	PPC_LL	r6, HSTATE_HOST_MSR(r13)
 	PPC_LL	r8, HSTATE_VMHANDLER(r13)
 
-	/* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	beq	cr1, 1f
+	mtspr	SPRN_HSRR1, r6
+	mtspr	SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1:	/* Restore host msr -> SRR1 */
 	mtsrr1	r6
 	/* Load highmem handler address */
 	mtsrr0	r8
 
 	/* RFI into the highmem handler, or jump to interrupt handler */
-	beqctr
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beqa	BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beqa	BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
+	beqa	BOOK3S_INTERRUPT_PERFMON
+
 	RFI
 kvmppc_handler_trampoline_exit_end:
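
Read together with the earlier hunks, the rewritten exit path is easier to follow in C-style pseudocode. Everything below is a sketch with made-up helper names; the constants are the architected vector offsets behind the BOOK3S_INTERRUPT_* ids (0x500 external, 0x900 decrementer, 0xf00 performance monitor):

/* hypothetical helpers standing in for the mtspr/mtsrr/rfi sequences */
extern void write_hsrr0(unsigned long), write_hsrr1(unsigned long);
extern void write_srr0(unsigned long), write_srr1(unsigned long);
extern void branch_absolute(unsigned long), rfi(void);

/*
 * Pseudocode sketch of the trampoline exit.  Exits delivered through
 * HSRR0/1 have bit 0x2 set in the exit id, which the first hunk
 * latched into cr1; for those, kernel code reached from here expects
 * its return state in the hypervisor registers, so host MSR and
 * handler address are staged into HSRR0/1 as well.
 */
void trampoline_exit_sketch(unsigned long exit_id,
			    unsigned long host_msr, unsigned long handler)
{
	if (exit_id & 0x2) {		/* the "beq cr1, 1f" test */
		write_hsrr1(host_msr);	/* mtspr SPRN_HSRR1, r6 */
		write_hsrr0(handler);	/* mtspr SPRN_HSRR0, r8 */
	}
	write_srr1(host_msr);		/* mtsrr1 r6 */
	write_srr0(handler);		/* mtsrr0 r8 */

	/*
	 * beqa branches to the absolute vector address, replacing the
	 * old mtctr/beqctr pair so the count register is left alone.
	 * The rfi[d] at the end of that handler resumes at SRR0.
	 */
	if (exit_id == 0x500 || exit_id == 0x900 || exit_id == 0xf00)
		branch_absolute(exit_id);

	rfi();	/* no interrupt pending: straight to the highmem handler */
}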