Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 178
1 file changed, 151 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 220fcdf26978..7340e1090b77 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -51,15 +51,19 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
+#define __hard_irq_disable local_irq_disable
+#define __hard_irq_enable local_irq_enable
 #endif
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu_put(svcpu);
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -70,10 +74,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
-	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
 	       sizeof(get_paca()->shadow_vcpu));
-	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
+	svcpu_put(svcpu);
 #endif
 
 	kvmppc_giveup_ext(vcpu, MSR_FP);
@@ -151,14 +157,16 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 #ifdef CONFIG_PPC_BOOK3S_64
 	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
 		kvmppc_mmu_book3s_64_init(vcpu);
-		to_book3s(vcpu)->hior = 0xfff00000;
+		if (!to_book3s(vcpu)->hior_explicit)
+			to_book3s(vcpu)->hior = 0xfff00000;
 		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
 		vcpu->arch.cpu_type = KVM_CPU_3S_64;
 	} else
 #endif
 	{
 		kvmppc_mmu_book3s_32_init(vcpu);
-		to_book3s(vcpu)->hior = 0;
+		if (!to_book3s(vcpu)->hior_explicit)
+			to_book3s(vcpu)->hior = 0;
 		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
 		vcpu->arch.cpu_type = KVM_CPU_3S_32;
 	}
@@ -308,19 +316,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr =
-			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
 		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
 		vcpu->arch.shared->msr |=
-			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
+		svcpu_put(svcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -517,24 +528,29 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	run->ready_for_interrupt_injection = 1;
 
 	trace_kvm_book3s_exit(exit_nr, vcpu);
+	preempt_enable();
 	kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
 		vcpu->stat.pf_instruc++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
-				== SR_INVALID) {
+		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+		if (shadow_srr1 & 0x40000000) {
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -547,33 +563,37 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.shared->msr |=
-				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		u32 fault_dsisr = svcpu->fault_dsisr;
 		vcpu->stat.pf_storage++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
 			kvmppc_mmu_map_segment(vcpu, dar);
 			r = RESUME_GUEST;
+			svcpu_put(svcpu);
 			break;
 		}
 #endif
+		svcpu_put(svcpu);
 
 		/* The only case we need to handle is missing shadow PTEs */
-		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+		if (fault_dsisr & DSISR_NOHPTE) {
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 		} else {
 			vcpu->arch.shared->dar = dar;
-			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+			vcpu->arch.shared->dsisr = fault_dsisr;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -609,10 +629,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		enum emulation_result er;
+		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		ulong flags;
 
 program_interrupt:
-		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+		svcpu = svcpu_get(vcpu);
+		flags = svcpu->shadow_srr1 & 0x1f0000ull;
+		svcpu_put(svcpu);
 
 		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -740,20 +763,33 @@ program_interrupt:
 		r = RESUME_GUEST;
 		break;
 	default:
+	{
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong shadow_srr1 = svcpu->shadow_srr1;
+		svcpu_put(svcpu);
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
-			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
 	}
-
+	}
 
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
 		 * reason. */
+
+		/*
+		 * Interrupts could be timers for the guest which we have to
+		 * inject again, so let's postpone them until we're in the guest
+		 * and if we really did time things so badly, then we just exit
+		 * again due to a host external interrupt.
+		 */
+		__hard_irq_disable();
 		if (signal_pending(current)) {
+			__hard_irq_enable();
 #ifdef EXIT_DEBUG
 			printk(KERN_EMERG "KVM: Going back to host\n");
 #endif
@@ -761,10 +797,12 @@ program_interrupt:
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
+			preempt_disable();
+
 			/* In case an interrupt came in that was triggered
 			 * from userspace (like DEC), we need to check what
 			 * to inject now! */
-			kvmppc_core_deliver_interrupts(vcpu);
+			kvmppc_core_prepare_to_enter(vcpu);
 		}
 	}
 
@@ -836,6 +874,38 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+	int r = -EINVAL;
+
+	switch (reg->id) {
+	case KVM_REG_PPC_HIOR:
+		r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		break;
+	default:
+		break;
+	}
+
+	return r;
+}
+
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+	int r = -EINVAL;
+
+	switch (reg->id) {
+	case KVM_REG_PPC_HIOR:
+		r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		if (!r)
+			to_book3s(vcpu)->hior_explicit = true;
+		break;
+	default:
+		break;
+	}
+
+	return r;
+}
+
 int kvmppc_core_check_processor_compat(void)
 {
 	return 0;
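For context, the two handlers added above are what the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls end up calling. The following userspace sketch is illustrative only and not part of the patch: the helper name set_hior and the vcpu_fd parameter are assumptions, and it presumes a <linux/kvm.h> that exposes struct kvm_one_reg and KVM_REG_PPC_HIOR.

/*
 * Minimal userspace sketch (assumed helper, not from this patch):
 * set the guest HIOR through the ONE_REG interface backed by
 * kvm_vcpu_ioctl_set_one_reg() above.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hior(int vcpu_fd, uint64_t hior)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_HIOR,
		.addr = (uintptr_t)&hior,	/* kernel get_user()s the value from here */
	};

	/* On success the kernel also sets hior_explicit, so kvmppc_set_pvr()
	 * will no longer overwrite the value with its defaults. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}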
@@ -923,16 +993,31 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 	ulong ext_msr;
 
+	preempt_disable();
+
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
+	kvmppc_core_prepare_to_enter(vcpu);
+
+	/*
+	 * Interrupts could be timers for the guest which we have to inject
+	 * again, so let's postpone them until we're in the guest and if we
+	 * really did time things so badly, then we just exit again due to
+	 * a host external interrupt.
+	 */
+	__hard_irq_disable();
+
 	/* No need to go into the guest when all we do is going out */
 	if (signal_pending(current)) {
+		__hard_irq_enable();
 		kvm_run->exit_reason = KVM_EXIT_INTR;
-		return -EINTR;
+		ret = -EINTR;
+		goto out;
 	}
 
 	/* Save FPU state in stack */
@@ -974,8 +1059,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	kvm_guest_exit();
 
-	local_irq_disable();
-
 	current->thread.regs->msr = ext_msr;
 
 	/* Make sure we save the guest FPU/Altivec/VSX state */
@@ -1002,9 +1085,50 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.used_vsr = used_vsr;
 #endif
 
+out:
+	preempt_enable();
 	return ret;
 }
 
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+			       struct kvm_dirty_log *log)
+{
+	struct kvm_memory_slot *memslot;
+	struct kvm_vcpu *vcpu;
+	ulong ga, ga_end;
+	int is_dirty = 0;
+	int r;
+	unsigned long n;
+
+	mutex_lock(&kvm->slots_lock);
+
+	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	if (r)
+		goto out;
+
+	/* If nothing is dirty, don't bother messing with page tables. */
+	if (is_dirty) {
+		memslot = id_to_memslot(kvm->memslots, log->slot);
+
+		ga = memslot->base_gfn << PAGE_SHIFT;
+		ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+		kvm_for_each_vcpu(n, vcpu, kvm)
+			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
+
+		n = kvm_dirty_bitmap_bytes(memslot);
+		memset(memslot->dirty_bitmap, 0, n);
+	}
+
+	r = 0;
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return r;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				      struct kvm_userspace_memory_region *mem)
 {
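The new kvm_vm_ioctl_get_dirty_log() above backs the KVM_GET_DIRTY_LOG vm ioctl. A hedged userspace sketch follows; it is illustrative and not part of the patch. The helper name fetch_dirty_log, the vm_fd parameter and the caller-allocated bitmap are assumptions; the slot must have been registered with KVM_MEM_LOG_DIRTY_PAGES and the bitmap must provide one bit per page of the slot, rounded up to a multiple of 8 bytes.

/*
 * Minimal userspace sketch (assumed helper, not from this patch):
 * fetch the dirty bitmap for one memory slot via KVM_GET_DIRTY_LOG.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_dirty_log(int vm_fd, uint32_t slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,	/* kernel copies dirty bits here and resets its own bitmap */
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}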