diff options
author | Liu Yu <yu.liu@freescale.com> | 2010-02-02 06:44:35 -0500 |
---|---|---|
committer | Marcelo Tosatti <mtosatti@redhat.com> | 2010-03-01 10:36:10 -0500 |
commit | daf5e27109c8c16c987e955cc6abbbc0af050edd (patch) | |
tree | 70456cce995c63895d50d921e6e0c36a9678de19 /arch/powerpc/kvm | |
parent | 4b7bb9210047fe880bb71e6273c3a4526799dbd7 (diff) |
KVM: ppc/booke: Set ESR and DEAR when inject interrupt to guest
The old method prematurely sets ESR and DEAR.
Move this part until after we decide to inject the interrupt,
which is closer to how the hardware behaves.
Signed-off-by: Liu Yu <yu.liu@freescale.com>
Acked-by: Hollis Blanchard <hollis@penguinppc.org>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r-- | arch/powerpc/kvm/booke.c | 59 | ||||
-rw-r--r-- | arch/powerpc/kvm/emulate.c | 4 |
2 files changed, 46 insertions, 17 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index e283e44e9f16..4d686cc6b260 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -82,9 +82,32 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, | |||
82 | set_bit(priority, &vcpu->arch.pending_exceptions); | 82 | set_bit(priority, &vcpu->arch.pending_exceptions); |
83 | } | 83 | } |
84 | 84 | ||
85 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) | 85 | static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, |
86 | ulong dear_flags, ulong esr_flags) | ||
86 | { | 87 | { |
87 | /* BookE does flags in ESR, so ignore those we get here */ | 88 | vcpu->arch.queued_dear = dear_flags; |
89 | vcpu->arch.queued_esr = esr_flags; | ||
90 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | ||
91 | } | ||
92 | |||
93 | static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, | ||
94 | ulong dear_flags, ulong esr_flags) | ||
95 | { | ||
96 | vcpu->arch.queued_dear = dear_flags; | ||
97 | vcpu->arch.queued_esr = esr_flags; | ||
98 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | ||
99 | } | ||
100 | |||
101 | static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, | ||
102 | ulong esr_flags) | ||
103 | { | ||
104 | vcpu->arch.queued_esr = esr_flags; | ||
105 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | ||
106 | } | ||
107 | |||
108 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) | ||
109 | { | ||
110 | vcpu->arch.queued_esr = esr_flags; | ||
88 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | 111 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
89 | } | 112 | } |
90 | 113 | ||
@@ -115,14 +138,19 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
115 | { | 138 | { |
116 | int allowed = 0; | 139 | int allowed = 0; |
117 | ulong msr_mask; | 140 | ulong msr_mask; |
141 | bool update_esr = false, update_dear = false; | ||
118 | 142 | ||
119 | switch (priority) { | 143 | switch (priority) { |
120 | case BOOKE_IRQPRIO_PROGRAM: | ||
121 | case BOOKE_IRQPRIO_DTLB_MISS: | 144 | case BOOKE_IRQPRIO_DTLB_MISS: |
122 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
123 | case BOOKE_IRQPRIO_SYSCALL: | ||
124 | case BOOKE_IRQPRIO_DATA_STORAGE: | 145 | case BOOKE_IRQPRIO_DATA_STORAGE: |
146 | update_dear = true; | ||
147 | /* fall through */ | ||
125 | case BOOKE_IRQPRIO_INST_STORAGE: | 148 | case BOOKE_IRQPRIO_INST_STORAGE: |
149 | case BOOKE_IRQPRIO_PROGRAM: | ||
150 | update_esr = true; | ||
151 | /* fall through */ | ||
152 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
153 | case BOOKE_IRQPRIO_SYSCALL: | ||
126 | case BOOKE_IRQPRIO_FP_UNAVAIL: | 154 | case BOOKE_IRQPRIO_FP_UNAVAIL: |
127 | case BOOKE_IRQPRIO_SPE_UNAVAIL: | 155 | case BOOKE_IRQPRIO_SPE_UNAVAIL: |
128 | case BOOKE_IRQPRIO_SPE_FP_DATA: | 156 | case BOOKE_IRQPRIO_SPE_FP_DATA: |
@@ -157,6 +185,10 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
157 | vcpu->arch.srr0 = vcpu->arch.pc; | 185 | vcpu->arch.srr0 = vcpu->arch.pc; |
158 | vcpu->arch.srr1 = vcpu->arch.msr; | 186 | vcpu->arch.srr1 = vcpu->arch.msr; |
159 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 187 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
188 | if (update_esr == true) | ||
189 | vcpu->arch.esr = vcpu->arch.queued_esr; | ||
190 | if (update_dear == true) | ||
191 | vcpu->arch.dear = vcpu->arch.queued_dear; | ||
160 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); | 192 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); |
161 | 193 | ||
162 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 194 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
@@ -229,8 +261,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
229 | if (vcpu->arch.msr & MSR_PR) { | 261 | if (vcpu->arch.msr & MSR_PR) { |
230 | /* Program traps generated by user-level software must be handled | 262 | /* Program traps generated by user-level software must be handled |
231 | * by the guest kernel. */ | 263 | * by the guest kernel. */ |
232 | vcpu->arch.esr = vcpu->arch.fault_esr; | 264 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
233 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | ||
234 | r = RESUME_GUEST; | 265 | r = RESUME_GUEST; |
235 | kvmppc_account_exit(vcpu, USR_PR_INST); | 266 | kvmppc_account_exit(vcpu, USR_PR_INST); |
236 | break; | 267 | break; |
@@ -286,16 +317,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
286 | break; | 317 | break; |
287 | 318 | ||
288 | case BOOKE_INTERRUPT_DATA_STORAGE: | 319 | case BOOKE_INTERRUPT_DATA_STORAGE: |
289 | vcpu->arch.dear = vcpu->arch.fault_dear; | 320 | kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, |
290 | vcpu->arch.esr = vcpu->arch.fault_esr; | 321 | vcpu->arch.fault_esr); |
291 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | ||
292 | kvmppc_account_exit(vcpu, DSI_EXITS); | 322 | kvmppc_account_exit(vcpu, DSI_EXITS); |
293 | r = RESUME_GUEST; | 323 | r = RESUME_GUEST; |
294 | break; | 324 | break; |
295 | 325 | ||
296 | case BOOKE_INTERRUPT_INST_STORAGE: | 326 | case BOOKE_INTERRUPT_INST_STORAGE: |
297 | vcpu->arch.esr = vcpu->arch.fault_esr; | 327 | kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); |
298 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | ||
299 | kvmppc_account_exit(vcpu, ISI_EXITS); | 328 | kvmppc_account_exit(vcpu, ISI_EXITS); |
300 | r = RESUME_GUEST; | 329 | r = RESUME_GUEST; |
301 | break; | 330 | break; |
@@ -316,9 +345,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
316 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); | 345 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); |
317 | if (gtlb_index < 0) { | 346 | if (gtlb_index < 0) { |
318 | /* The guest didn't have a mapping for it. */ | 347 | /* The guest didn't have a mapping for it. */ |
319 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | 348 | kvmppc_core_queue_dtlb_miss(vcpu, |
320 | vcpu->arch.dear = vcpu->arch.fault_dear; | 349 | vcpu->arch.fault_dear, |
321 | vcpu->arch.esr = vcpu->arch.fault_esr; | 350 | vcpu->arch.fault_esr); |
322 | kvmppc_mmu_dtlb_miss(vcpu); | 351 | kvmppc_mmu_dtlb_miss(vcpu); |
323 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); | 352 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
324 | r = RESUME_GUEST; | 353 | r = RESUME_GUEST; |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index b905623735bd..cb72a65f4ecc 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -151,10 +151,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
151 | case OP_TRAP: | 151 | case OP_TRAP: |
152 | #ifdef CONFIG_PPC64 | 152 | #ifdef CONFIG_PPC64 |
153 | case OP_TRAP_64: | 153 | case OP_TRAP_64: |
154 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); | ||
154 | #else | 155 | #else |
155 | vcpu->arch.esr |= ESR_PTR; | 156 | kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); |
156 | #endif | 157 | #endif |
157 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); | ||
158 | advance = 0; | 158 | advance = 0; |
159 | break; | 159 | break; |
160 | 160 | ||