author     Scott Wood <scottwood@freescale.com>   2014-01-09 20:18:40 -0500
committer  Alexander Graf <agraf@suse.de>         2014-01-27 10:00:55 -0500
commit     6c85f52b10fd60e45c6e30c5b85d116406bd3c9b
tree       e1fc869540a04f9003c26bd6d6598a8a848b8cd0
parent     70713fe315ed14cd1bb07d1a7f33e973d136ae3d
kvm/ppc: IRQ disabling cleanup
Simplify the handling of lazy EE by going directly from fully-enabled
to hard-disabled. This replaces the lazy_irq_pending() check
(including its misplaced kvm_guest_exit() call).
As suggested by Tiejun Chen, move the interrupt disabling into
kvmppc_prepare_to_enter() rather than have each caller do it. Also
move the IRQ enabling on heavyweight exit into
kvmppc_prepare_to_enter().
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
 arch/powerpc/include/asm/kvm_ppc.h |  6
 arch/powerpc/kvm/book3s_pr.c       | 14
 arch/powerpc/kvm/booke.c           | 12
 arch/powerpc/kvm/powerpc.c         | 26
4 files changed, 26 insertions, 32 deletions
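For illustration, here is a small user-space mock (not kernel source; the names only echo the patch) of the calling convention this change establishes: callers stop bracketing kvmppc_prepare_to_enter() with local_irq_disable()/local_irq_enable(), and the helper hard-disables interrupts itself, re-enabling them only on the heavyweight-exit (r <= 0) path.

/* Stand-alone mock of the new calling convention; "IRQ state" is just a bool. */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_on = true;

static void hard_irq_disable(void) { irqs_on = false; }	/* mock */
static void local_irq_enable(void) { irqs_on = true; }	/* mock */

/* Mock of kvmppc_prepare_to_enter() after the patch. */
static int prepare_to_enter(bool heavyweight_exit)
{
	hard_irq_disable();		/* disabling now lives in the callee */

	if (heavyweight_exit) {
		local_irq_enable();	/* "return to host" path re-enables */
		return -1;
	}
	return 1;			/* enter guest with IRQs hard-disabled */
}

int main(void)
{
	int r;

	/* Callers no longer touch the IRQ state around the call. */
	r = prepare_to_enter(false);
	printf("enter guest:    r=%d, irqs_on=%d\n", r, irqs_on);

	local_irq_enable();		/* pretend we came back from the guest */
	r = prepare_to_enter(true);
	printf("return to host: r=%d, irqs_on=%d\n", r, irqs_on);
	return 0;
}

Compiled with any C compiler, this prints r=1 with "interrupts" left disabled on the guest-entry path and r=-1 with them restored on the host-exit path, which is the contract the callers below now rely on.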
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 629277df4798..fcd53f0d34ba 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -456,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
 	trace_hardirqs_on();
 
 #ifdef CONFIG_PPC64
+	/*
+	 * To avoid races, the caller must have gone directly from having
+	 * interrupts fully-enabled to hard-disabled.
+	 */
+	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
 	/* Only need to enable IRQs by hard enabling them after this */
 	local_paca->irq_happened = 0;
 	local_paca->soft_enabled = 1;
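With this hunk applied, and assuming nothing outside the shown context changes, the helper reads approximately:

static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}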
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index aedba681bb94..e82fafdaf880 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -999,14 +999,14 @@ program_interrupt:
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = s;
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
+
 		kvmppc_handle_lost_ext(vcpu);
 	}
 
@@ -1219,12 +1219,10 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	local_irq_disable();
 	ret = kvmppc_prepare_to_enter(vcpu);
-	if (ret <= 0) {
-		local_irq_enable();
+	if (ret <= 0)
 		goto out;
-	}
+	/* interrupts now hard-disabled */
 
 	/* Save FPU state in thread_struct */
 	if (current->thread.regs->msr & MSR_FP)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6a8c32ec4173..07b89c711898 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		local_irq_disable();
+		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 		r = 1;
@@ -688,13 +688,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	local_irq_disable();
 	s = kvmppc_prepare_to_enter(vcpu);
 	if (s <= 0) {
-		local_irq_enable();
 		ret = s;
 		goto out;
 	}
+	/* interrupts now hard-disabled */
 
 #ifdef CONFIG_PPC_FPU
 	/* Save userspace FPU state in stack */
@@ -1187,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * aren't already exiting to userspace for some other reason.
 	 */
 	if (!(r & RESUME_HOST)) {
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
 	}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 026dfaaa4772..3cf541a53e2a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  */
 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
-	int r = 1;
+	int r;
+
+	WARN_ON(irqs_disabled());
+	hard_irq_disable();
 
-	WARN_ON_ONCE(!irqs_disabled());
 	while (true) {
 		if (need_resched()) {
 			local_irq_enable();
 			cond_resched();
-			local_irq_disable();
+			hard_irq_disable();
 			continue;
 		}
 
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			local_irq_enable();
 			trace_kvm_check_requests(vcpu);
 			r = kvmppc_core_check_requests(vcpu);
-			local_irq_disable();
+			hard_irq_disable();
 			if (r > 0)
 				continue;
 			break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			continue;
 		}
 
-#ifdef CONFIG_PPC64
-		/* lazy EE magic */
-		hard_irq_disable();
-		if (lazy_irq_pending()) {
-			/* Got an interrupt in between, try again */
-			local_irq_enable();
-			local_irq_disable();
-			kvm_guest_exit();
-			continue;
-		}
-#endif
-
 		kvm_guest_enter();
-		break;
+		return 1;
 	}
 
+	/* return to host */
+	local_irq_enable();
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
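Condensed from the hunks above, the control flow of kvmppc_prepare_to_enter() after this patch looks roughly like this; the further checks in the middle of the loop (which may set r and break) are unchanged and elided here.

int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		/* ... further checks (elided) that may set r and break ... */

		kvm_guest_enter();
		return 1;	/* enter the guest with interrupts hard-disabled */
	}

	/* return to host */
	local_irq_enable();
	return r;
}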