Diffstat (limited to 'arch/ia64/kvm/process.c')
-rw-r--r--  arch/ia64/kvm/process.c  71
1 files changed, 49 insertions, 22 deletions
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 230eae482f32..b1dc80952d91 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
 	return (rr1.val);
 }
 
-
 /*
  * Set vIFA & vITIR & vIHA, when vPSR.ic =1
  * Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
 }
 
-
-
 /*
  * Data Nested TLB Fault
  * @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
 }
 
-
 /*
  * Data TLB Fault
  * @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	/* If vPSR.ic, IFA, ITIR, IHA*/
 	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
 	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-
-
 }
 
 /*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
 /*
  * VHPT Data Fault
  * @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }
 
-
-
 /*
  * Deal with:
  * General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
 	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
 }
 
-
 /*
  * Illegal Operation Fault
  * @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
 }
 
-
 void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }
 
-
 /* Deal with
  * Data access rights vector
  */
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
 	inject_guest_interruption(vcpu, vector);
 }
 
+static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
+				unsigned long arg)
+{
+	struct thash_data *data;
+	unsigned long gpa, poff;
+
+	if (!is_physical_mode(vcpu)) {
+		/* Depends on caller to provide the DTR or DTC mapping.*/
+		data = vtlb_lookup(vcpu, arg, D_TLB);
+		if (data)
+			gpa = data->page_flags & _PAGE_PPN_MASK;
+		else {
+			data = vhpt_lookup(arg);
+			if (!data)
+				return 0;
+			gpa = data->gpaddr & _PAGE_PPN_MASK;
+		}
+
+		poff = arg & (PSIZE(data->ps) - 1);
+		arg = PAGEALIGN(gpa, data->ps) | poff;
+	}
+	arg = kvm_gpa_to_mpa(arg << 1 >> 1);
+
+	return (unsigned long)__va(arg);
+}
+
 static void set_pal_call_data(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
+	unsigned long gr28 = vcpu_get_gr(vcpu, 28);
+	unsigned long gr29 = vcpu_get_gr(vcpu, 29);
+	unsigned long gr30 = vcpu_get_gr(vcpu, 30);
 
 	/*FIXME:For static and stacked convention, firmware
 	 * has put the parameters in gr28-gr31 before
 	 * break to vmm !!*/
 
-	p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
-	p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
-	p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	switch (gr28) {
+	case PAL_PERF_MON_INFO:
+	case PAL_HALT_INFO:
+		p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+		break;
+	case PAL_BRAND_INFO:
+		p->u.pal_data.gr29 = gr29;
+		p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
+		break;
+	default:
+		p->u.pal_data.gr29 = gr29;
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	}
+	p->u.pal_data.gr28 = gr28;
 	p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+
 	p->exit_reason = EXIT_REASON_PAL_CALL;
 }
 
-static void set_pal_call_result(struct kvm_vcpu *vcpu)
+static void get_pal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
 
@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu)
 	p->exit_reason = EXIT_REASON_SAL_CALL;
 }
 
-static void set_sal_call_result(struct kvm_vcpu *vcpu)
+static void get_sal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
 
@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
 	if (iim == DOMN_PAL_REQUEST) {
 		set_pal_call_data(v);
 		vmm_transition(v);
-		set_pal_call_result(v);
+		get_pal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	} else if (iim == DOMN_SAL_REQUEST) {
 		set_sal_call_data(v);
 		vmm_transition(v);
-		set_sal_call_result(v);
+		get_sal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	}
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
 	}
 }
 
-
 void leave_hypervisor_tail(void)
 {
 	struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void)
 	}
 }
 
-
 static inline void handle_lds(struct kvm_pt_regs *regs)
 {
 	regs->cr_ipsr |= IA64_PSR_ED;