Diffstat (limited to 'arch/ia64')
 arch/ia64/include/asm/kvm_host.h |   6
 arch/ia64/include/asm/pgtable.h  |   2
 arch/ia64/kernel/irq_ia64.c      |   3
 arch/ia64/kvm/Kconfig            |   2
 arch/ia64/kvm/kvm-ia64.c         | 263
 arch/ia64/kvm/kvm_fw.c           |  28
 arch/ia64/kvm/lapic.h            |   6
 arch/ia64/kvm/optvfault.S        |  30
 arch/ia64/kvm/process.c          |   5
 arch/ia64/kvm/vcpu.c             |  20
 arch/ia64/kvm/vmm.c              |  12
 arch/ia64/kvm/vmm_ivt.S          |  18
 arch/ia64/kvm/vtlb.c             |   3
 13 files changed, 285 insertions(+), 113 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 4542651e6acb..5f43697aed30 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -371,6 +371,7 @@ struct kvm_vcpu_arch {
 	int last_run_cpu;
 	int vmm_tr_slot;
 	int vm_tr_slot;
+	int sn_rtc_tr_slot;
 
 #define KVM_MP_STATE_RUNNABLE          0
 #define KVM_MP_STATE_UNINITIALIZED    1
@@ -465,6 +466,7 @@ struct kvm_arch {
 	unsigned long vmm_init_rr;
 
 	int online_vcpus;
+	int is_sn2;
 
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
@@ -472,6 +474,7 @@ struct kvm_arch {
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
+	int iommu_flags;
 	struct hlist_head irq_ack_notifier_list;
 
 	unsigned long irq_sources_bitmap;
@@ -578,6 +581,8 @@ struct kvm_vmm_info{
 	kvm_vmm_entry *vmm_entry;
 	kvm_tramp_entry *tramp_entry;
 	unsigned long vmm_ivt;
+	unsigned long patch_mov_ar;
+	unsigned long patch_mov_ar_sn2;
 };
 
 int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
@@ -585,7 +590,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
-static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
 #endif /* __ASSEMBLY__*/
 
 #endif
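
The two patch_mov_ar offsets record where the default and SN2 fast-path
handlers live inside the vmm module image; kvm_patch_vmm() in the
kvm-ia64.c hunks below turns them into addresses inside the relocated
VMM. A minimal sketch of that arithmetic (vmm_reloc_addr is a
hypothetical helper, not part of the patch):

	/* Hypothetical helper: translate a vmm.ko symbol address into
	 * its address in the relocated VMM image at kvm_vmm_base. */
	static unsigned long vmm_reloc_addr(unsigned long sym,
					    unsigned long module_base)
	{
		return kvm_vmm_base + (sym - module_base);
	}
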
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7a9bff47564f..0a9cc73d35c7 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -146,6 +146,8 @@
 #define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
 #define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
 #define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
+				 _PAGE_MA_UC)
 
 # ifndef __ASSEMBLY__
 
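
PAGE_KERNEL_UC is PAGE_KERNEL with the uncached memory attribute
(_PAGE_MA_UC) added, as device registers such as the SN2 RTC require.
A minimal usage sketch, following kvm_sn2_setup_mappings() introduced
below:

	/* Pin an uncached mapping of the RTC via a translation register. */
	pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
	slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
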
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index acc4d19ae62a..b448197728be 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -610,6 +610,9 @@ static struct irqaction ipi_irqaction = {
 	.name =		"IPI"
 };
 
+/*
+ * KVM uses this interrupt to force a cpu out of guest mode
+ */
 static struct irqaction resched_irqaction = {
 	.handler =	dummy_handler,
 	.flags =	IRQF_DISABLED,
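
The handler is deliberately a no-op: delivery of the IPI is itself what
pulls the target cpu out of guest mode. In sketch, the two halves of the
kick protocol this supports (both added in the kvm-ia64.c hunks below):

	/* vcpu side, just before entering the guest (__vcpu_run): */
	clear_bit(KVM_REQ_KICK, &vcpu->requests);

	/* kicker side (kvm_vcpu_kick): the first kick wins, and the
	 * reschedule IPI forces the remote cpu out of the guest. */
	if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
		smp_send_reschedule(vcpu->cpu);
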
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 0a2d6b86075a..64d520937874 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -23,7 +23,7 @@ if VIRTUALIZATION
 
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
-	depends on HAVE_KVM && EXPERIMENTAL
+	depends on HAVE_KVM && MODULES && EXPERIMENTAL
 	# for device assignment:
 	depends on PCI
 	select PREEMPT_NOTIFIERS
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d20a5db4c4dd..80c57b0a21c4 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -41,6 +41,9 @@
 #include <asm/div64.h>
 #include <asm/tlb.h>
 #include <asm/elf.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
 
 #include "misc.h"
 #include "vti.h"
@@ -65,6 +68,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
+static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+	if (vcpu->kvm->arch.is_sn2)
+		return rtc_time();
+	else
+#endif
+		return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
 static void kvm_flush_icache(unsigned long start, unsigned long len)
 {
 	int l;
@@ -119,8 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
 	unsigned long saved_psr;
 	int slot;
 
-	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
-				PAGE_KERNEL));
+	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
 	local_irq_save(saved_psr);
 	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
 	local_irq_restore(saved_psr);
@@ -283,6 +295,18 @@ static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 }
 
+static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
+{
+	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+	if (!test_and_set_bit(vector, &vpd->irr[0])) {
+		vcpu->arch.irq_new_pending = 1;
+		kvm_vcpu_kick(vcpu);
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * offset: address offset to IPI space.
  * value: deliver value.
@@ -292,20 +316,20 @@ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
 {
 	switch (dm) {
 	case SAPIC_FIXED:
-		kvm_apic_set_irq(vcpu, vector, 0);
 		break;
 	case SAPIC_NMI:
-		kvm_apic_set_irq(vcpu, 2, 0);
+		vector = 2;
 		break;
 	case SAPIC_EXTINT:
-		kvm_apic_set_irq(vcpu, 0, 0);
+		vector = 0;
 		break;
 	case SAPIC_INIT:
 	case SAPIC_PMI:
 	default:
 		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
-		break;
+		return;
 	}
+	__apic_accept_irq(vcpu, vector);
 }
 
 static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
@@ -413,6 +437,23 @@ static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
+{
+	unsigned long pte, rtc_phys_addr, map_addr;
+	int slot;
+
+	map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
+	rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
+	pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
+	slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
+	vcpu->arch.sn_rtc_tr_slot = slot;
+	if (slot < 0) {
+		printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
+		slot = 0;
+	}
+	return slot;
+}
+
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
 
@@ -426,7 +467,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 
 	if (irqchip_in_kernel(vcpu->kvm)) {
 
-		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
 
 		if (time_after(vcpu_now_itc, vpd->itm)) {
 			vcpu->arch.timer_check = 1;
@@ -447,10 +488,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		hrtimer_cancel(p_ht);
 		vcpu->arch.ht_active = 0;
 
-		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
+				kvm_cpu_has_pending_timer(vcpu))
 			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
-				vcpu->arch.mp_state =
-					KVM_MP_STATE_RUNNABLE;
+				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
@@ -551,22 +592,35 @@ static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
 	if (r < 0)
 		goto out;
 	vcpu->arch.vm_tr_slot = r;
+
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+	if (kvm->arch.is_sn2) {
+		r = kvm_sn2_setup_mappings(vcpu);
+		if (r < 0)
+			goto out;
+	}
+#endif
+
 	r = 0;
 out:
 	return r;
-
 }
 
 static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
 {
-
+	struct kvm *kvm = vcpu->kvm;
 	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
 	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
-
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+	if (kvm->arch.is_sn2)
+		ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
+#endif
 }
 
 static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
 {
+	unsigned long psr;
+	int r;
 	int cpu = smp_processor_id();
 
 	if (vcpu->arch.last_run_cpu != cpu ||
@@ -578,36 +632,27 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
 	vti_set_rr6(vcpu->arch.vmm_rr);
-	return kvm_insert_vmm_mapping(vcpu);
+	local_irq_save(psr);
+	r = kvm_insert_vmm_mapping(vcpu);
+	local_irq_restore(psr);
+	return r;
 }
+
 static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 {
 	kvm_purge_vmm_mapping(vcpu);
 	vti_set_rr6(vcpu->arch.host_rr6);
 }
 
-static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	union context *host_ctx, *guest_ctx;
 	int r;
 
-	/*Get host and guest context with guest address space.*/
-	host_ctx = kvm_get_host_context(vcpu);
-	guest_ctx = kvm_get_guest_context(vcpu);
-
-	r = kvm_vcpu_pre_transition(vcpu);
-	if (r < 0)
-		goto out;
-	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
-	kvm_vcpu_post_transition(vcpu);
-	r = 0;
-out:
-	return r;
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	int r;
+	/*
+	 * down_read() may sleep and return with interrupts enabled
+	 */
+	down_read(&vcpu->kvm->slots_lock);
 
 again:
 	if (signal_pending(current)) {
@@ -616,26 +661,31 @@ again:
 		goto out;
 	}
 
-	/*
-	 * down_read() may sleep and return with interrupts enabled
-	 */
-	down_read(&vcpu->kvm->slots_lock);
-
 	preempt_disable();
 	local_irq_disable();
 
-	vcpu->guest_mode = 1;
+	/*Get host and guest context with guest address space.*/
+	host_ctx = kvm_get_host_context(vcpu);
+	guest_ctx = kvm_get_guest_context(vcpu);
+
+	clear_bit(KVM_REQ_KICK, &vcpu->requests);
+
+	r = kvm_vcpu_pre_transition(vcpu);
+	if (r < 0)
+		goto vcpu_run_fail;
+
+	up_read(&vcpu->kvm->slots_lock);
 	kvm_guest_enter();
-	r = vti_vcpu_run(vcpu, kvm_run);
-	if (r < 0) {
-		local_irq_enable();
-		preempt_enable();
-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-		goto out;
-	}
+
+	/*
+	 * Transition to the guest
+	 */
+	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+
+	kvm_vcpu_post_transition(vcpu);
 
 	vcpu->arch.launched = 1;
-	vcpu->guest_mode = 0;
+	set_bit(KVM_REQ_KICK, &vcpu->requests);
 	local_irq_enable();
 
 	/*
@@ -646,9 +696,10 @@ again:
 	 */
 	barrier();
 	kvm_guest_exit();
-	up_read(&vcpu->kvm->slots_lock);
 	preempt_enable();
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	r = kvm_handle_exit(kvm_run, vcpu);
 
 	if (r > 0) {
@@ -657,12 +708,20 @@ again:
 
 
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	if (r > 0) {
 		kvm_resched(vcpu);
+		down_read(&vcpu->kvm->slots_lock);
 		goto again;
 	}
 
 	return r;
+
+vcpu_run_fail:
+	local_irq_enable();
+	preempt_enable();
+	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+	goto out;
 }
 
 static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
@@ -788,6 +847,9 @@ struct kvm *kvm_arch_create_vm(void)
 
 	if (IS_ERR(kvm))
 		return ERR_PTR(-ENOMEM);
+
+	kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
 	kvm_init_vm(kvm);
 
 	kvm->arch.online_vcpus = 0;
@@ -884,7 +946,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	RESTORE_REGS(saved_gp);
 
 	vcpu->arch.irq_new_pending = 1;
-	vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
+	vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
 	set_bit(KVM_REQ_RESUME, &vcpu->requests);
 
 	vcpu_put(vcpu);
@@ -1043,10 +1105,6 @@ static void kvm_free_vmm_area(void)
 	}
 }
 
-static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
 static int vti_init_vpd(struct kvm_vcpu *vcpu)
 {
 	int i;
@@ -1165,7 +1223,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	regs->cr_iip = PALE_RESET_ENTRY;
 
 	/*Initialize itc offset for vcpus*/
-	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
+	itc_offset = 0UL - kvm_get_itc(vcpu);
 	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		v = (struct kvm_vcpu *)((char *)vcpu +
 				sizeof(struct kvm_vcpu_data) * i);
@@ -1237,6 +1295,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
 
 	local_irq_save(psr);
 	r = kvm_insert_vmm_mapping(vcpu);
+	local_irq_restore(psr);
 	if (r)
 		goto fail;
 	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
@@ -1254,13 +1313,11 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
 		goto uninit;
 
 	kvm_purge_vmm_mapping(vcpu);
-	local_irq_restore(psr);
 
 	return 0;
 uninit:
 	kvm_vcpu_uninit(vcpu);
 fail:
-	local_irq_restore(psr);
 	return r;
 }
 
@@ -1291,7 +1348,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->kvm = kvm;
 
 	cpu = get_cpu();
-	vti_vcpu_load(vcpu, cpu);
 	r = vti_vcpu_setup(vcpu, id);
 	put_cpu();
 
@@ -1427,7 +1483,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	}
 	for (i = 0; i < 4; i++)
 		regs->insvc[i] = vcpu->arch.insvc[i];
-	regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
+	regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
 	SAVE_REGS(xtp);
 	SAVE_REGS(metaphysical_rr0);
 	SAVE_REGS(metaphysical_rr4);
@@ -1574,6 +1630,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
+	kvm_flush_remote_tlbs(kvm);
 }
 
 long kvm_arch_dev_ioctl(struct file *filp,
@@ -1616,8 +1673,37 @@ out:
 	return 0;
 }
 
+
+/*
+ * On SN2, the ITC isn't stable, so copy in fast path code to use the
+ * SN2 RTC, replacing the ITC based default version.
+ */
+static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
+			  struct module *module)
+{
+	unsigned long new_ar, new_ar_sn2;
+	unsigned long module_base;
+
+	if (!ia64_platform_is("sn2"))
+		return;
+
+	module_base = (unsigned long)module->module_core;
+
+	new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
+	new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
+
+	printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
+	       "as source\n");
+
+	/*
+	 * Copy the SN2 version of mov_ar into place. They are both
+	 * the same size, so 6 bundles is sufficient (6 * 0x10).
+	 */
+	memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
+}
+
 static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
 			struct module *module)
 {
 	unsigned long module_base;
 	unsigned long vmm_size;
@@ -1639,6 +1725,7 @@ static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
 		return -EFAULT;
 
 	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
+	kvm_patch_vmm(vmm_info, module);
 	kvm_flush_icache(kvm_vmm_base, vmm_size);
 
 	/*Recalculate kvm_vmm_info based on new VMM*/
@@ -1792,38 +1879,24 @@ void kvm_arch_hardware_unsetup(void)
 {
 }
 
-static void vcpu_kick_intr(void *info)
-{
-#ifdef DEBUG
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
-	printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
-#endif
-}
-
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-	int ipi_pcpu = vcpu->cpu;
-	int cpu = get_cpu();
+	int me;
+	int cpu = vcpu->cpu;
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 
-	if (vcpu->guest_mode && cpu != ipi_pcpu)
-		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+	me = get_cpu();
+	if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
+		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
+			smp_send_reschedule(cpu);
 	put_cpu();
 }
 
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
-
-	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
-	if (!test_and_set_bit(vec, &vpd->irr[0])) {
-		vcpu->arch.irq_new_pending = 1;
-		kvm_vcpu_kick(vcpu);
-		return 1;
-	}
-	return 0;
+	return __apic_accept_irq(vcpu, irq->vector);
 }
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
@@ -1836,20 +1909,18 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
 	return 0;
 }
 
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-		unsigned long bitmap)
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 {
-	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
-	int i;
-
-	for (i = 1; i < kvm->arch.online_vcpus; i++) {
-		if (!kvm->vcpus[i])
-			continue;
-		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
-			lvcpu = kvm->vcpus[i];
-	}
+	return vcpu1->arch.xtp - vcpu2->arch.xtp;
+}
 
-	return lvcpu;
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+		int short_hand, int dest, int dest_mode)
+{
+	struct kvm_lapic *target = vcpu->arch.apic;
+	return (dest_mode == 0) ?
+		kvm_apic_match_physical_addr(target, dest) :
+		kvm_apic_match_logical_addr(target, dest);
 }
 
 static int find_highest_bits(int *dat)
@@ -1888,6 +1959,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+	/* do real check here */
+	return 1;
+}
+
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.timer_fired;
@@ -1918,6 +1995,7 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
 	long psr;
 	local_irq_save(psr);
 	r = kvm_insert_vmm_mapping(vcpu);
+	local_irq_restore(psr);
 	if (r)
 		goto fail;
 
@@ -1930,7 +2008,6 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_purge_vmm_mapping(vcpu);
 	r = 0;
 fail:
-	local_irq_restore(psr);
 	return r;
 }
 
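
Taken together, the __vcpu_run() changes above rearrange the locking so
that slots_lock, whose down_read() may sleep, is never held across the
guest transition nor acquired with interrupts disabled. Reduced to its
lock and irq operations, the resulting skeleton looks roughly like this
(a sketch, not the full function):

	down_read(&vcpu->kvm->slots_lock);	/* may sleep: irqs still on */
again:
	preempt_disable();
	local_irq_disable();
	/* ... pre-transition setup ... */
	up_read(&vcpu->kvm->slots_lock);	/* dropped across the guest */
	/* ... tramp_entry() runs the guest ... */
	local_irq_enable();
	preempt_enable();
	down_read(&vcpu->kvm->slots_lock);	/* back for kvm_handle_exit() */
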
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index a8ae52ed5635..e4b82319881d 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -21,6 +21,9 @@
 
 #include <linux/kvm_host.h>
 #include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
 
 #include "vti.h"
 #include "misc.h"
@@ -188,12 +191,35 @@ static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
 	return result;
 }
 
-static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+/*
+ * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
+ * RTC is used instead. This function patches the ratios from SAL
+ * to match the RTC before providing them to the guest.
+ */
+static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
 {
+	struct pal_freq_ratio *ratio;
+	unsigned long sal_freq, sal_drift, factor;
+
+	result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+					    &sal_freq, &sal_drift);
+	ratio = (struct pal_freq_ratio *)&result->v2;
+	factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
+		sn_rtc_cycles_per_second;
+
+	ratio->num = 3;
+	ratio->den = factor;
+}
 
+static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+{
 	struct ia64_pal_retval result;
 
 	PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
+
+	if (vcpu->kvm->arch.is_sn2)
+		sn2_patch_itc_freq_ratios(&result);
+
 	return result;
 }
 
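
The factor computed above rounds (3 * sal_freq) / sn_rtc_cycles_per_second
to the nearest integer, so PAL reports an ITC-to-platform ratio of
3/factor that matches the RTC rate the patched fast path now returns.
A worked example with purely illustrative numbers (not values taken from
the patch):

	unsigned long sal_freq = 200000000UL;	/* hypothetical 200 MHz base */
	unsigned long rtc_hz   = 50000000UL;	/* hypothetical RTC rate */
	unsigned long factor   = (sal_freq * 3 + rtc_hz / 2) / rtc_hz;	/* 12 */
	/* guest sees ITC = 200 MHz * 3 / 12 = 50 MHz, i.e. the RTC rate */
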
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index 6d6cbcb14893..ee541cebcd78 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -20,6 +20,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+		int short_hand, int dest, int dest_mode);
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+#define kvm_apic_present(x) (true)
 
 #endif
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index 32254ce9a1bd..f793be3effff 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -11,6 +11,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
+#include <asm/kvm_host.h>
 
 #include "vti.h"
 #include "asm-offsets.h"
@@ -140,6 +141,35 @@ GLOBAL_ENTRY(kvm_asm_mov_from_ar)
 	;;
 END(kvm_asm_mov_from_ar)
 
+/*
+ * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
+ * clock as its source for emulating the ITC. This version will be
+ * copied on top of the original version if the host is determined to
+ * be an SN2.
+ */
+GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
+	add	r18=VMM_VCPU_ITC_OFS_OFFSET, r21
+	movl	r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
+
+	add	r16=VMM_VCPU_LAST_ITC_OFFSET,r21
+	extr.u	r17=r25,6,7
+	mov	r24=b0
+	;;
+	ld8	r18=[r18]
+	ld8	r19=[r19]
+	addl	r20=@gprel(asm_mov_to_reg),gp
+	;;
+	add	r19=r19,r18
+	shladd	r17=r17,4,r20
+	;;
+	adds	r30=kvm_resume_to_guest-asm_mov_to_reg,r20
+	st8	[r16] = r19
+	mov	b0=r17
+	br.sptk.few b0
+	;;
+END(kvm_asm_mov_from_ar_sn2)
+
+
 
 // mov r1=rr[r3]
 GLOBAL_ENTRY(kvm_asm_mov_from_rr)
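
In C terms, the SN2 variant computes the same result the default handler
derives from ar.itc, but sources it from the pinned RTC mapping. A rough
equivalent of the bundle sequence above (structure access simplified;
names as in this diff):

	/* r19 = *(RTC mapping), r18 = itc_offset; the sum is the guest ITC,
	 * stored to last_itc and then handed to asm_mov_to_reg. */
	u64 rtc = *(u64 *)(KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT));
	u64 guest_itc = rtc + vcpu->arch.itc_offset;
	vcpu->arch.last_itc = guest_itc;
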
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index b1dc80952d91..a8f84da04b49 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -652,20 +652,25 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
 		unsigned long isr, unsigned long iim)
 {
 	struct kvm_vcpu *v = current_vcpu;
+	long psr;
 
 	if (ia64_psr(regs)->cpl == 0) {
 		/* Allow hypercalls only when cpl = 0.  */
 		if (iim == DOMN_PAL_REQUEST) {
+			local_irq_save(psr);
 			set_pal_call_data(v);
 			vmm_transition(v);
 			get_pal_call_result(v);
 			vcpu_increment_iip(v);
+			local_irq_restore(psr);
 			return;
 		} else if (iim == DOMN_SAL_REQUEST) {
+			local_irq_save(psr);
 			set_sal_call_data(v);
 			vmm_transition(v);
 			get_sal_call_result(v);
 			vcpu_increment_iip(v);
+			local_irq_restore(psr);
 			return;
 		}
 	}
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a18ee17b9192..a2c6c15e4761 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -788,13 +788,29 @@ void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
 	setfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
 }
 
+/*
+ * The Altix RTC is mapped specially here for the vmm module
+ */
+#define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
+static long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
+
+	if (kvm->arch.is_sn2)
+		return (*SN_RTC_BASE);
+	else
+#endif
+		return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
 /************************************************************************
  * lsapic timer
  ***********************************************************************/
 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
 {
 	unsigned long guest_itc;
-	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+	guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
 
 	if (guest_itc >= VMX(vcpu, last_itc)) {
 		VMX(vcpu, last_itc) = guest_itc;
@@ -809,7 +825,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_vcpu *v;
 	struct kvm *kvm;
 	int i;
-	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+	long itc_offset = val - kvm_get_itc(vcpu);
 	unsigned long vitv = VCPU(vcpu, itv);
 
 	kvm = (struct kvm *)KVM_VM_BASE;
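
Note that SN_RTC_BASE must match the virtual address pinned on the host
side by kvm_sn2_setup_mappings(); both sides build it from the same
constants:

	/* host (kvm-ia64.c): map_addr    = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT) */
	/* vmm  (vcpu.c):     SN_RTC_BASE = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT) */
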
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index 9eee5c04bacc..f4b4c899bb6c 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -30,15 +30,19 @@ MODULE_AUTHOR("Intel");
 MODULE_LICENSE("GPL");
 
 extern char kvm_ia64_ivt;
+extern char kvm_asm_mov_from_ar;
+extern char kvm_asm_mov_from_ar_sn2;
 extern fpswa_interface_t *vmm_fpswa_interface;
 
 long vmm_sanity = 1;
 
 struct kvm_vmm_info vmm_info = {
 	.module			= THIS_MODULE,
 	.vmm_entry		= vmm_entry,
 	.tramp_entry		= vmm_trampoline,
 	.vmm_ivt		= (unsigned long)&kvm_ia64_ivt,
+	.patch_mov_ar		= (unsigned long)&kvm_asm_mov_from_ar,
+	.patch_mov_ar_sn2	= (unsigned long)&kvm_asm_mov_from_ar_sn2,
 };
 
 static int __init kvm_vmm_init(void)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ef1a017a318..40920c630649 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -95,7 +95,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
 	;;
 	srlz.i    // guarantee that interruption collection is on
 	;;
-	//(p15) ssm psr.i               // restore psr.i
+	(p15) ssm psr.i               // restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
@@ -249,7 +249,7 @@ ENTRY(kvm_break_fault)
 	;;
 	srlz.i         // guarantee that interruption collection is on
 	;;
-	//(p15)ssm psr.i               // restore psr.i
+	(p15)ssm psr.i               // restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
@@ -439,7 +439,7 @@ kvm_dispatch_vexirq:
 	;;
 	srlz.i // guarantee that interruption collection is on
 	;;
-	//(p15) ssm psr.i // restore psr.i
+	(p15) ssm psr.i // restore psr.i
 	adds r3=8,r2 // set up second base pointer
 	;;
 	KVM_SAVE_REST
@@ -819,7 +819,7 @@ ENTRY(kvm_dtlb_miss_dispatch)
 	;;
 	srlz.i     // guarantee that interruption collection is on
 	;;
-	//(p15) ssm psr.i // restore psr.i
+	(p15) ssm psr.i // restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
 	;;
 	KVM_SAVE_REST
@@ -842,7 +842,7 @@ ENTRY(kvm_itlb_miss_dispatch)
 	;;
 	srlz.i   // guarantee that interruption collection is on
 	;;
-	//(p15) ssm psr.i // restore psr.i
+	(p15) ssm psr.i // restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
@@ -871,7 +871,7 @@ ENTRY(kvm_dispatch_reflection)
 	;;
 	srlz.i   // guarantee that interruption collection is on
 	;;
-	//(p15) ssm psr.i // restore psr.i
+	(p15) ssm psr.i // restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
@@ -898,7 +898,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
 	;;
 	srlz.i    // guarantee that interruption collection is on
 	;;
-	//(p15) ssm psr.i // restore psr.i
+	(p15) ssm psr.i // restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
 	;;
 	KVM_SAVE_REST
@@ -920,7 +920,7 @@ ENTRY(kvm_dispatch_interrupt)
 	;;
 	srlz.i
 	;;
-	//(p15) ssm psr.i
+	(p15) ssm psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
@@ -1333,7 +1333,7 @@ hostret = r24
 	;;
 (p7)	srlz.i
 	;;
-//(p6)	ssm	psr.i
+(p6)	ssm	psr.i
 	;;
 	mov	rp=rpsave
 	mov	ar.pfs=pfssave
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 2c2501f13159..4290a429bf7c 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -254,7 +254,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
 			"(p7) st8 [%2]=r9;;"
 			"ssm psr.ic;;"
 			"srlz.d;;"
-			/* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
+			"ssm psr.i;;"
+			"srlz.d;;"
 			: "=r"(ret) : "r"(iha), "r"(pte):"memory");
 
 	return ret;