author     Alexander Graf <agraf@suse.de>  2012-08-12 19:04:19 -0400
committer  Alexander Graf <agraf@suse.de>  2012-10-05 17:38:45 -0400
commit     bd2be6836ee493d41fe42367a2b129aa771185c1 (patch)
tree       6c3f609ad1d1b74c0e08eb616eec0715c6f7679b
parent     24afa37b9c8f035d2fe2028e4824bc4e49bafe73 (diff)
KVM: PPC: Book3S: PR: Rework irq disabling
Today, we disable preemption while inside guest context, because we need
to expose to the world that we are not in a preemptible context. However,
during that time we already have interrupts disabled, which would indicate
that we are in a non-preemptible context.

The reason the checks for irqs_disabled() fail for us though is that we
manually control hard IRQs and ignore all the lazy EE framework. Let's
stop doing that. Instead, let's always use lazy EE to indicate when we
want to disable IRQs, but do a special final switch that gets us into EE
disabled, but soft enabled state. That way when we get back out of guest
state, we are immediately ready to process interrupts.

This simplifies the code drastically and reduces the time that we appear
as preempt disabled.

Signed-off-by: Alexander Graf <agraf@suse.de>
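For orientation, the guest-entry flow that the hunks below converge on looks roughly as follows. This is a condensed C sketch rather than code from the patch; kvmppc_vcpu_run_flow() is a made-up name, and the real kvmppc_vcpu_run() implementations in book3s_pr.c and booke.c interleave these calls with sanity checks and register switching.

/*
 * Condensed sketch of the entry path after this patch (illustrative only;
 * the function name is hypothetical). All callees are real; their call
 * sites appear in the hunks below.
 */
static int kvmppc_vcpu_run_flow(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	/* Soft-disable interrupts through the normal lazy EE path. */
	local_irq_disable();

	if (kvmppc_prepare_to_enter(vcpu)) {
		/* A signal or request is pending; bail out to userspace. */
		local_irq_enable();
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/*
	 * prepare_to_enter() hard-disabled EE on its way out. Flip the
	 * lazy EE bookkeeping back to "soft enabled" without touching
	 * MSR.EE, so that the moment the entry/exit code hard-enables EE
	 * the host can take interrupts immediately.
	 */
	kvm_guest_enter();
	kvmppc_lazy_ee_enable();

	return __kvmppc_vcpu_run(run, vcpu);
}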
 arch/powerpc/include/asm/kvm_ppc.h   | 10 ++++++++++
 arch/powerpc/kvm/book3s_pr.c         | 21 +++++++--------------
 arch/powerpc/kvm/book3s_rmhandlers.S | 15 ++++++++-------
 arch/powerpc/kvm/booke.c             |  2 ++
 arch/powerpc/kvm/powerpc.c           | 14 ++++++++++++++
 5 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 59b7c87e47f7..545936428bf6 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -234,5 +234,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 	}
 }
 
+/* Please call after prepare_to_enter. This function puts the lazy ee state
+   back to normal mode, without actually enabling interrupts. */
+static inline void kvmppc_lazy_ee_enable(void)
+{
+#ifdef CONFIG_PPC64
+	/* Only need to enable IRQs by hard enabling them after this */
+	local_paca->irq_happened = 0;
+	local_paca->soft_enabled = 1;
+#endif
+}
 
 #endif /* __POWERPC_KVM_PPC_H__ */
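The state this helper leaves behind is one the lazy EE framework otherwise never stays in: MSR.EE is still hard-disabled, yet the paca claims interrupts are soft-enabled and no replay is pending (irq_happened = 0). A probe for that window could look like the sketch below; kvmppc_in_lazy_ee_window() is hypothetical and not part of the patch, though local_paca, mfmsr() and MSR_EE are the real kernel symbols.

/*
 * Hypothetical probe, not in the patch: true while we sit in the special
 * window kvmppc_lazy_ee_enable() creates, where the lazy EE state says
 * "soft enabled" but MSR.EE has not been hard-enabled yet.
 */
static inline bool kvmppc_in_lazy_ee_window(void)
{
#ifdef CONFIG_PPC64
	return local_paca->soft_enabled && !(mfmsr() & MSR_EE);
#else
	return false;
#endif
}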
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 3dec346c4b93..e737db8a5ca7 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -52,8 +52,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
-#define __hard_irq_disable local_irq_disable
-#define __hard_irq_enable local_irq_enable
 #endif
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -597,12 +595,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 
-	/* We get here with MSR.EE=0, so enable it to be a nice citizen */
-	__hard_irq_enable();
+	/* We get here with MSR.EE=1 */
 
 	trace_kvm_exit(exit_nr, vcpu);
 	kvm_guest_exit();
-	preempt_enable();
 
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -854,7 +850,6 @@ program_interrupt:
 		}
 	}
 
-	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -866,14 +861,15 @@ program_interrupt:
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		__hard_irq_disable();
+		local_irq_disable();
 		if (kvmppc_prepare_to_enter(vcpu)) {
-			/* local_irq_enable(); */
+			local_irq_enable();
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
 			/* Going back to guest */
 			kvm_guest_enter();
+			kvmppc_lazy_ee_enable();
 		}
 	}
 
@@ -1066,8 +1062,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 	ulong ext_msr;
 
-	preempt_disable();
-
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -1081,9 +1075,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	__hard_irq_disable();
+	local_irq_disable();
 	if (kvmppc_prepare_to_enter(vcpu)) {
-		__hard_irq_enable();
+		local_irq_enable();
 		kvm_run->exit_reason = KVM_EXIT_INTR;
 		ret = -EINTR;
 		goto out;
@@ -1122,7 +1116,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-	kvm_guest_enter();
+	kvmppc_lazy_ee_enable();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
@@ -1157,7 +1151,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
-	preempt_enable();
 	return ret;
 }
 
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9ecf6e35cd8d..b2f8258b545a 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -170,20 +170,21 @@ kvmppc_handler_skip_ins:
  * Call kvmppc_handler_trampoline_enter in real mode
  *
  * On entry, r4 contains the guest shadow MSR
+ * MSR.EE has to be 0 when calling this function
  */
 _GLOBAL(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
 
-	li	r9, MSR_RI
-	ori	r9, r9, MSR_EE
-	andc	r9, r5, r9	/* Clear EE and RI in MSR value */
 	li	r6, MSR_IR | MSR_DR
-	ori	r6, r6, MSR_EE
-	andc	r6, r5, r6	/* Clear EE, DR and IR in MSR value */
-	MTMSR_EERI(r9)		/* Clear EE and RI in MSR */
-	mtsrr0	r7		/* before we set srr0/1 */
+	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
+	/*
+	 * Set EE in HOST_MSR so that it's enabled when we get into our
+	 * C exit handler function
+	 */
+	ori	r5, r5, MSR_EE
+	mtsrr0	r7
 	mtsrr1	r6
 	RFI
 
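In C terms, the reworked trampoline derives two MSR images from the current one: a real-mode MSR (IR and DR cleared, EE still 0 thanks to the new precondition) used to RFI into the trampoline itself, and a host MSR with EE set so the C exit handler starts with interrupts enabled. A rough rendering with hypothetical names; the assembly above is authoritative.

/*
 * Illustrative C rendering of kvmppc_entry_trampoline's MSR setup. The
 * function and parameter names are hypothetical.
 */
static inline void kvmppc_entry_msrs(unsigned long *rm_msr,
				     unsigned long *host_msr)
{
	unsigned long msr = mfmsr();		/* mfmsr	r5 */

	/* Real mode for the trampoline; EE stays 0 (caller guarantees). */
	*rm_msr = msr & ~(MSR_IR | MSR_DR);	/* li/andc on r6 */

	/* Saved host MSR: EE is on by the time the C exit handler runs. */
	*host_msr = msr | MSR_EE;		/* ori	r5, r5, MSR_EE */
}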
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index aae535f6d9de..2bd190c488ef 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -486,6 +486,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		ret = -EINTR;
 		goto out;
 	}
+	kvmppc_lazy_ee_enable();
 
 	kvm_guest_enter();
 
@@ -955,6 +956,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		} else {
 			/* Going back to guest */
 			kvm_guest_enter();
+			kvmppc_lazy_ee_enable();
 		}
 	}
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 153a26abc915..266549979e9f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/tlbflush.h>
 #include <asm/cputhreads.h>
+#include <asm/irqflags.h>
 #include "timing.h"
 #include "../mm/mmu_decl.h"
 
@@ -93,6 +94,19 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			break;
 		}
 
+#ifdef CONFIG_PPC64
+		/* lazy EE magic */
+		hard_irq_disable();
+		if (lazy_irq_pending()) {
+			/* Got an interrupt in between, try again */
+			local_irq_enable();
+			local_irq_disable();
+			continue;
+		}
+
+		trace_hardirqs_on();
+#endif
+
 		/* Going into guest context! Yay! */
 		vcpu->mode = IN_GUEST_MODE;
 		smp_wmb();
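The subtle part of this hunk is the window between the last request check and the actual guest entry: hard_irq_disable() turns EE off for real while the lazy EE state stays enabled, and lazy_irq_pending() then reveals whether an interrupt already slipped in and was deferred; if so, a brief enable/disable replays it and the loop goes around again. Condensed into a sketch (illustrative; the real loop in kvmppc_prepare_to_enter() also rechecks resched, signals and vcpu->requests on every pass):

/* Sketch of the retry core this hunk adds; not a literal copy. */
static void kvmppc_lazy_ee_retry_sketch(void)
{
	while (true) {
		/* ... resched/signal/request checks elided ... */
#ifdef CONFIG_PPC64
		hard_irq_disable();	/* EE really off; soft state stays on */
		if (lazy_irq_pending()) {
			/* An IRQ was latched while soft-disabled: replay
			 * it by briefly enabling, then try again. */
			local_irq_enable();
			local_irq_disable();
			continue;
		}
		trace_hardirqs_on();	/* lockdep-wise we count as IRQs-on */
#endif
		break;	/* clear to set IN_GUEST_MODE and enter the guest */
	}
}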