author     Alexander Graf <agraf@suse.de>    2011-12-09 08:44:13 -0500
committer  Avi Kivity <avi@redhat.com>       2012-03-05 07:52:30 -0500
commit     468a12c2b53776721ff83517d4a195b85c5fce54 (patch)
tree       ba417210997c2e3119525641764303d97db32815 /arch/powerpc/kvm/book3s_pr.c
parent     d33ad328c0025c45f4688a769aeebddc342222c1 (diff)
KVM: PPC: Use get/set for to_svcpu to help preemption
When running the 64-bit Book3s PR code without CONFIG_PREEMPT_NONE, we were
doing a few things wrong, most notably accessing PACA fields without making
sure that the pointers stay stable across the access (i.e. without
preempt_disable()).

This patch moves to_svcpu to a get/put model, which allows us to disable
preemption while accessing the shadow vcpu fields in the PACA. That way we
can run preemptible and everyone's happy!
Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
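
For context, this patch converts direct to_svcpu(vcpu) dereferences into
svcpu_get()/svcpu_put() pairs that bracket every access to the shadow vcpu.
The accessors themselves are defined outside this file and are not part of the
diff below; what follows is only a minimal sketch of the 64-bit shape described
in the commit message (shadow vcpu held in the PACA, pinned by disabling
preemption), not the committed implementation:

	/* Sketch only: assumes kernel context (get_paca(), preempt_*()).
	 * Disabling preemption keeps the task on its current CPU, so the
	 * PACA pointer stays stable for as long as the caller holds the
	 * shadow vcpu reference.
	 */
	static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
	{
		preempt_disable();
		return &get_paca()->shadow_vcpu;
	}

	static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
	{
		preempt_enable();
	}

With this model every caller must drop the reference on each exit path, which
is why the diff adds extra svcpu_put() calls before the early break statements.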
Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c  62
1 file changed, 42 insertions, 20 deletions
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 857ecde0cfdf..0c31507be908 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -56,10 +56,12 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu_put(svcpu);
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -70,10 +72,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
+	svcpu_put(svcpu);
 #endif
 
 	kvmppc_giveup_ext(vcpu, MSR_FP);
@@ -308,19 +312,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr =
-			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
 		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -521,21 +528,25 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
 		vcpu->stat.pf_instruc++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
-				== SR_INVALID) {
+		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+		if (shadow_srr1 & 0x40000000) {
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -548,33 +559,37 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.shared->msr |=
-				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		u32 fault_dsisr = svcpu->fault_dsisr;
 		vcpu->stat.pf_storage++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, dar);
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* The only case we need to handle is missing shadow PTEs */
-		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+		if (fault_dsisr & DSISR_NOHPTE) {
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 		} else {
 			vcpu->arch.shared->dar = dar;
-			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+			vcpu->arch.shared->dsisr = fault_dsisr;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -610,10 +625,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		enum emulation_result er;
+		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		ulong flags;
 
 program_interrupt:
-		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+		svcpu = svcpu_get(vcpu);
+		flags = svcpu->shadow_srr1 & 0x1f0000ull;
+		svcpu_put(svcpu);
 
 		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -741,14 +759,18 @@ program_interrupt:
 		r = RESUME_GUEST;
 		break;
 	default:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
+		svcpu_put(svcpu);
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-		       exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+		       exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
 	}
-
+	}
 
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if