author		Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:41:11 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:41:11 -0500
commit		597b0d21626da4e6f09f132442caf0cc2b0eb47c (patch)
tree		13c0074bb20f7b05a471e78d4ff52c665a10266a /arch/ia64
parent		2640c9a90fa596871e142f42052608864335f102 (diff)
parent		87917239204d67a316cb89751750f86c9ed3640b (diff)
Merge branch 'kvm-updates/2.6.29' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.29' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (140 commits)
  KVM: MMU: handle large host sptes on invlpg/resync
  KVM: Add locking to virtual i8259 interrupt controller
  KVM: MMU: Don't treat a global pte as such if cr4.pge is cleared
  MAINTAINERS: Maintainership changes for kvm/ia64
  KVM: ia64: Fix kvm_arch_vcpu_ioctl_[gs]et_regs()
  KVM: x86: Rework user space NMI injection as KVM_CAP_USER_NMI
  KVM: VMX: Fix pending NMI-vs.-IRQ race for user space irqchip
  KVM: fix handling of ACK from shared guest IRQ
  KVM: MMU: check for present pdptr shadow page in walk_shadow
  KVM: Consolidate userspace memory capability reporting into common code
  KVM: Advertise the bug in memory region destruction as fixed
  KVM: use cpumask_var_t for cpus_hardware_enabled
  KVM: use modern cpumask primitives, no cpumask_t on stack
  KVM: Extract core of kvm_flush_remote_tlbs/kvm_reload_remote_mmus
  KVM: set owner of cpu and vm file operations
  anon_inodes: use fops->owner for module refcount
  x86: KVM guest: kvm_get_tsc_khz: return khz, not lpj
  KVM: MMU: prepopulate the shadow on invlpg
  KVM: MMU: skip global pgtables on sync due to cr3 switch
  KVM: MMU: collapse remote TLB flushes on root sync
  ...
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/kvm.h          6
-rw-r--r--  arch/ia64/include/asm/kvm_host.h   196
-rw-r--r--  arch/ia64/kvm/Makefile               2
-rw-r--r--  arch/ia64/kvm/asm-offsets.c         11
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c           107
-rw-r--r--  arch/ia64/kvm/kvm_lib.c             15
-rw-r--r--  arch/ia64/kvm/kvm_minstate.h         4
-rw-r--r--  arch/ia64/kvm/misc.h                 3
-rw-r--r--  arch/ia64/kvm/mmio.c                38
-rw-r--r--  arch/ia64/kvm/process.c             29
-rw-r--r--  arch/ia64/kvm/vcpu.c                76
-rw-r--r--  arch/ia64/kvm/vcpu.h                 5
-rw-r--r--  arch/ia64/kvm/vmm.c                 29
-rw-r--r--  arch/ia64/kvm/vmm_ivt.S           1469
-rw-r--r--  arch/ia64/kvm/vtlb.c                 4
15 files changed, 1073 insertions, 921 deletions
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index f38472ac2267..68aa6da807c1 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -166,8 +166,6 @@ struct saved_vpd {
166}; 166};
167 167
168struct kvm_regs { 168struct kvm_regs {
169 char *saved_guest;
170 char *saved_stack;
171 struct saved_vpd vpd; 169 struct saved_vpd vpd;
172 /*Arch-regs*/ 170 /*Arch-regs*/
173 int mp_state; 171 int mp_state;
@@ -200,6 +198,10 @@ struct kvm_regs {
200 unsigned long fp_psr; /*used for lazy float register */ 198 unsigned long fp_psr; /*used for lazy float register */
201 unsigned long saved_gp; 199 unsigned long saved_gp;
202 /*for phycial emulation */ 200 /*for phycial emulation */
201
202 union context saved_guest;
203
204 unsigned long reserved[64]; /* for future use */
203}; 205};
204 206
205struct kvm_sregs { 207struct kvm_sregs {
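
The kvm_regs change above swaps two user-space pointers (saved_guest, saved_stack) for an embedded union context plus a reserved tail, so KVM_GET_REGS/KVM_SET_REGS carry the guest context inside the one struct the ioctl already copies. A minimal sketch of the resulting arch-side flow; kvm_regs_sketch, context_sketch and apply_guest_context are illustrative names, not part of the patch:

/* Sketch only: mirrors the new layout in miniature. */
#include <string.h>

union context_sketch { unsigned long gr[32]; };

struct kvm_regs_sketch {
	int mp_state;
	unsigned long saved_gp;
	union context_sketch saved_guest;	/* was: char *saved_guest */
	unsigned long reserved[64];		/* spare room for future ABI growth */
};

/* The whole struct already arrived via one copy_from_user() in common
 * code, so the arch handler can use a plain memcpy() and the old
 * partial copy_from_user()/-EFAULT error paths disappear (see the
 * set_regs/get_regs hunks in kvm-ia64.c below). */
static void apply_guest_context(union context_sketch *dst,
				const struct kvm_regs_sketch *regs)
{
	memcpy(dst, &regs->saved_guest, sizeof(*dst));
}
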
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index c60d324da540..0560f3fae538 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -23,17 +23,6 @@
23#ifndef __ASM_KVM_HOST_H 23#ifndef __ASM_KVM_HOST_H
24#define __ASM_KVM_HOST_H 24#define __ASM_KVM_HOST_H
25 25
26
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/kvm.h>
30#include <linux/kvm_para.h>
31#include <linux/kvm_types.h>
32
33#include <asm/pal.h>
34#include <asm/sal.h>
35
36#define KVM_MAX_VCPUS 4
37#define KVM_MEMORY_SLOTS 32 26#define KVM_MEMORY_SLOTS 32
38/* memory slots that does not exposed to userspace */ 27/* memory slots that does not exposed to userspace */
39#define KVM_PRIVATE_MEM_SLOTS 4 28#define KVM_PRIVATE_MEM_SLOTS 4
@@ -50,70 +39,132 @@
 #define EXIT_REASON_EXTERNAL_INTERRUPT	6
 #define EXIT_REASON_IPI			7
 #define EXIT_REASON_PTC_G		8
+#define EXIT_REASON_DEBUG		20
 
 /*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
 #define KVM_VMM_SHIFT 24
-#define KVM_VMM_BASE 0xD000000000000000UL
-#define VMM_SIZE (8UL<<20)
+#define KVM_VMM_BASE 0xD000000000000000
+#define VMM_SIZE (__IA64_UL_CONST(8)<<20)
 
 /*
  * Define vm_buffer, used by PAL Services, base address.
- * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
+ * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
  */
 #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE (8UL<<20)
-
-/*Define Virtual machine data layout.*/
-#define KVM_VM_DATA_SHIFT	24
-#define KVM_VM_DATA_SIZE	(1UL << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VMM_SIZE)
-
-
-#define KVM_P2M_BASE		KVM_VM_DATA_BASE
-#define KVM_P2M_OFS		0
-#define KVM_P2M_SIZE		(8UL << 20)
-
-#define KVM_VHPT_BASE		(KVM_P2M_BASE + KVM_P2M_SIZE)
-#define KVM_VHPT_OFS		KVM_P2M_SIZE
-#define KVM_VHPT_BLOCK_SIZE	(2UL << 20)
-#define VHPT_SHIFT		18
-#define VHPT_SIZE		(1UL << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES	(1<<(VHPT_SHIFT-5))
-
-#define KVM_VTLB_BASE		(KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_OFS		(KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_BLOCK_SIZE	(1UL<<20)
-#define VTLB_SHIFT		17
-#define VTLB_SIZE		(1UL<<VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES	(1<<(VTLB_SHIFT-5))
-
-#define KVM_VPD_BASE		(KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_OFS		(KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_BLOCK_SIZE	(2UL<<20)
-#define VPD_SHIFT		16
-#define VPD_SIZE		(1UL<<VPD_SHIFT)
-
-#define KVM_VCPU_BASE		(KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_OFS		(KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_BLOCK_SIZE	(2UL<<20)
-#define VCPU_SHIFT		18
-#define VCPU_SIZE		(1UL<<VCPU_SHIFT)
-#define MAX_VCPU_NUM		KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
-
-#define KVM_VM_BASE		(KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_OFS		(KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_BLOCK_SIZE	(1UL<<19)
-
-#define KVM_MEM_DIRTY_LOG_BASE	(KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_OFS	(KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_SIZE	(1UL<<19)
-
-/* Get vpd, vhpt, tlb, vcpu, base*/
-#define VPD_ADDR(n)	(KVM_VPD_BASE+n*VPD_SIZE)
-#define VHPT_ADDR(n)	(KVM_VHPT_BASE+n*VHPT_SIZE)
-#define VTLB_ADDR(n)	(KVM_VTLB_BASE+n*VTLB_SIZE)
-#define VCPU_ADDR(n)	(KVM_VCPU_BASE+n*VCPU_SIZE)
+#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)
+
+/*
+ * kvm guest's data area looks as follow:
+ *
+ *            +----------------------+  -------  KVM_VM_DATA_SIZE
+ *            |     vcpu[n]'s data   |   |     ___________________KVM_STK_OFFSET
+ *            |                      |   |    /                   |
+ *            |        ..........    |   |   /vcpu's struct&stack |
+ *            |        ..........    |   |  /---------------------|---- 0
+ *            |     vcpu[5]'s data   |   | /          vpd         |
+ *            |     vcpu[4]'s data   |   |/-----------------------|
+ *            |     vcpu[3]'s data   |   /          vtlb          |
+ *            |     vcpu[2]'s data   |  /|------------------------|
+ *            |     vcpu[1]'s data   | /  |         vhpt          |
+ *            |     vcpu[0]'s data   |____________________________|
+ *            +----------------------+   |
+ *            |    memory dirty log  |   |
+ *            +----------------------+   |
+ *            |    vm's data struct  |   |
+ *            +----------------------+   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |     vm's p2m table   |   |
+ *            |                      |   |
+ *            |                      |   |
+ *            |                      |   |   |
+ * vm's data->|                      |   |   |
+ *            +----------------------+  ------- 0
+ * To support large memory, needs to increase the size of p2m.
+ * To support more vcpus, needs to ensure it has enough space to
+ * hold vcpus' data.
+ */
+
+#define KVM_VM_DATA_SHIFT	26
+#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VM_DATA_SIZE)
+
+#define KVM_P2M_BASE		KVM_VM_DATA_BASE
+#define KVM_P2M_SIZE		(__IA64_UL_CONST(24) << 20)
+
+#define VHPT_SHIFT		16
+#define VHPT_SIZE		(__IA64_UL_CONST(1) << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES	(__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
+
+#define VTLB_SHIFT		16
+#define VTLB_SIZE		(__IA64_UL_CONST(1) << VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES	(1UL << (VHPT_SHIFT-5))
+
+#define VPD_SHIFT		16
+#define VPD_SIZE		(__IA64_UL_CONST(1) << VPD_SHIFT)
+
+#define VCPU_STRUCT_SHIFT	16
+#define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
+
+#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+
+#define KVM_VM_STRUCT_SHIFT	19
+#define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
+
+#define KVM_MEM_DIRY_LOG_SHIFT	19
+#define KVM_MEM_DIRTY_LOG_SIZE	(__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+/*Define the max vcpus and memory for Guests.*/
+#define KVM_MAX_VCPUS	(KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
+			KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
+#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
+
+#define VMM_LOG_LEN 256
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/page.h>
+
+struct kvm_vcpu_data {
+	char vcpu_vhpt[VHPT_SIZE];
+	char vcpu_vtlb[VTLB_SIZE];
+	char vcpu_vpd[VPD_SIZE];
+	char vcpu_struct[VCPU_STRUCT_SIZE];
+};
+
+struct kvm_vm_data {
+	char kvm_p2m[KVM_P2M_SIZE];
+	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
+	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
+	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
+};
+
+#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, vcpu_data[n])
+#define VM_BASE		KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
+
+#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
+#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
+#define VPD_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
+#define VCPU_STRUCT_BASE(n)	(VCPU_BASE(n) + \
+				offsetof(struct kvm_vcpu_data, vcpu_struct))
 
 /*IO section definitions*/
 #define IOREQ_READ	1
@@ -389,6 +440,7 @@ struct kvm_vcpu_arch {
 
 	unsigned long opcode;
 	unsigned long cause;
+	char log_buf[VMM_LOG_LEN];
 	union context host;
 	union context guest;
 };
@@ -403,14 +455,13 @@ struct kvm_sal_data {
 };
 
 struct kvm_arch {
+	spinlock_t dirty_log_lock;
+
 	unsigned long	vm_base;
 	unsigned long	metaphysical_rr0;
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;
-	unsigned long	vhpt_base;
-	unsigned long	vtlb_base;
-	unsigned long	vpd_base;
-	spinlock_t dirty_log_lock;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
@@ -512,7 +563,7 @@ struct kvm_pt_regs {
 
 static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
 {
-	return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+	return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
 }
 
 typedef int kvm_vmm_entry(void);
@@ -531,5 +582,6 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
 static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
+#endif /* __ASSEMBLY__*/
 
 #endif
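
The rewritten header replaces a table of hand-maintained KVM_*_OFS/KVM_*_ADDR constants with two structs, so every base address is derived by the compiler and stays consistent when a block grows. A stand-alone sketch of the same addressing scheme, with block sizes shrunk to 1 KiB so it stays readable (the real values are the defines above):

/* Sketch: how VHPT_BASE(n) resolves an address in the flat VM data block. */
#include <stddef.h>
#include <stdio.h>

#define SZ 1024			/* stand-in for the 64 KiB blocks above */

struct kvm_vcpu_data {
	char vcpu_vhpt[SZ];
	char vcpu_vtlb[SZ];
	char vcpu_vpd[SZ];
	char vcpu_struct[SZ];
};

struct kvm_vm_data {
	char kvm_p2m[4 * SZ];
	char kvm_vm_struct[SZ];
	char kvm_mem_dirty_log[SZ];
	struct kvm_vcpu_data vcpu_data[4];
};

int main(void)
{
	unsigned long base = 0x10000000UL;	/* stand-in for KVM_VM_DATA_BASE */
	unsigned long n = 2;

	/* equivalent to VHPT_BASE(n): vm-data base + vcpu_data[n] + vhpt */
	unsigned long vhpt = base
		+ offsetof(struct kvm_vm_data, vcpu_data)
		+ n * sizeof(struct kvm_vcpu_data)
		+ offsetof(struct kvm_vcpu_data, vcpu_vhpt);
	printf("vcpu%lu vhpt at %#lx\n", n, vhpt);
	return 0;
}
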
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 92cef66ca268..76464dc312e6 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -60,7 +60,7 @@ obj-$(CONFIG_KVM) += kvm.o
 
 CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
-	vtlb.o process.o
+	vtlb.o process.o kvm_lib.o
 #Add link memcpy and memset to avoid possible structure assignment error
 kvm-intel-objs += memcpy.o memset.o
 obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
index 4e3dc13a619c..0c3564a7a033 100644
--- a/arch/ia64/kvm/asm-offsets.c
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -24,19 +24,10 @@
 
 #include <linux/autoconf.h>
 #include <linux/kvm_host.h>
+#include <linux/kbuild.h>
 
 #include "vcpu.h"
 
-#define task_struct kvm_vcpu
-
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : :)
-
-#define OFFSET(_sym, _str, _mem) \
-	DEFINE(_sym, offsetof(_str, _mem));
-
 void foo(void)
 {
 	DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
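
The removed task_struct/DEFINE/BLANK/OFFSET macros were private copies of what <linux/kbuild.h> provides, so including it is the whole fix. For reference, the generation mechanism in reduced stand-alone form; DEFINE is copied from the macro deleted above, and the sed step is how kbuild consumes the markers:

/* Compile with -S: the assembly output contains marker lines such as
 *     ->VMM_TASK_SIZE (512) sizeof(struct kvm_vcpu_sketch)
 * which the kbuild sed script rewrites into
 *     #define VMM_TASK_SIZE 512
 * in the generated asm-offsets.h consumed by assembly sources. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " (%0) " #val : : "i" (val))

struct kvm_vcpu_sketch { long pad[64]; };	/* stand-in for struct kvm_vcpu */

void foo(void)
{
	DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu_sketch));
	DEFINE(VMM_PAD8_OFFSET, offsetof(struct kvm_vcpu_sketch, pad[8]));
}
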
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index af1464f7a6ad..0f5ebd948437 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -180,7 +180,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
-	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_MP_STATE:
 
 		r = 1;
@@ -439,7 +438,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		expires = div64_u64(itc_diff, cyc_per_usec);
 		kt = ktime_set(0, 1000 * expires);
 
-		down_read(&vcpu->kvm->slots_lock);
 		vcpu->arch.ht_active = 1;
 		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
@@ -452,7 +450,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
 			vcpu->arch.mp_state =
 				KVM_MP_STATE_RUNNABLE;
-		up_read(&vcpu->kvm->slots_lock);
 
 		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
@@ -476,6 +473,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
+static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
+				struct kvm_run *kvm_run)
+{
+	printk("VMM: %s", vcpu->arch.log_buf);
+	return 1;
+}
+
 static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 		struct kvm_run *kvm_run) = {
 	[EXIT_REASON_VM_PANIC]		= handle_vm_error,
@@ -487,6 +491,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
 	[EXIT_REASON_IPI]		= handle_ipi,
 	[EXIT_REASON_PTC_G]		= handle_global_purge,
+	[EXIT_REASON_DEBUG]		= handle_vcpu_debug,
 
 };
 
@@ -698,27 +703,24 @@ out:
 	return r;
 }
 
-/*
- * Allocate 16M memory for every vm to hold its specific data.
- * Its memory map is defined in kvm_host.h.
- */
 static struct kvm *kvm_alloc_kvm(void)
 {
 
 	struct kvm *kvm;
 	uint64_t vm_base;
 
+	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
+
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
 		return ERR_PTR(-ENOMEM);
-	printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
 
-	/* Zero all pages before use! */
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-
-	kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+	kvm = (struct kvm *)(vm_base +
+			offsetof(struct kvm_vm_data, kvm_vm_struct));
 	kvm->arch.vm_base = vm_base;
+	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
 
 	return kvm;
 }
@@ -760,21 +762,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 
 static void kvm_init_vm(struct kvm *kvm)
 {
-	long vm_base;
-
 	BUG_ON(!kvm);
 
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
 
-	vm_base = kvm->arch.vm_base;
-	if (vm_base) {
-		kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
-		kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
-		kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
-	}
-
 	/*
 	 *Fill P2M entries for MMIO/IO ranges
 	 */
@@ -838,9 +831,8 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	int i;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-	int r;
+	int i;
 
 	vcpu_load(vcpu);
 
@@ -857,18 +849,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	vpd->vpr = regs->vpd.vpr;
 
-	r = -EFAULT;
-	r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
-						sizeof(union context));
-	if (r)
-		goto out;
-	r = copy_from_user(vcpu + 1, regs->saved_stack +
-			sizeof(struct kvm_vcpu),
-			IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
-	if (r)
-		goto out;
-	vcpu->arch.exit_data =
-		((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
+	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
 
 	RESTORE_REGS(mp_state);
 	RESTORE_REGS(vmm_rr);
@@ -902,9 +883,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	set_bit(KVM_REQ_RESUME, &vcpu->requests);
 
 	vcpu_put(vcpu);
-	r = 0;
-out:
-	return r;
+
+	return 0;
 }
 
 long kvm_arch_vm_ioctl(struct file *filp,
@@ -1166,10 +1146,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	/*Set entry address for first run.*/
 	regs->cr_iip = PALE_RESET_ENTRY;
 
-	/*Initilize itc offset for vcpus*/
+	/*Initialize itc offset for vcpus*/
 	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-	for (i = 0; i < MAX_VCPU_NUM; i++) {
-		v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		v = (struct kvm_vcpu *)((char *)vcpu +
+				sizeof(struct kvm_vcpu_data) * i);
 		v->arch.itc_offset = itc_offset;
 		v->arch.last_itc = 0;
 	}
@@ -1183,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.apic->vcpu = vcpu;
 
 	p_ctx->gr[1] = 0;
-	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
 	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
 	p_ctx->psr = 0x1008522000UL;
 	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
@@ -1218,12 +1199,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.hlt_timer.function = hlt_timer_fn;
 
 	vcpu->arch.last_run_cpu = -1;
-	vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
 	vcpu->arch.vsa_base = kvm_vsa_base;
 	vcpu->arch.__gp = kvm_vmm_gp;
 	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
-	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
-	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
+	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
 	init_ptce_info(vcpu);
 
 	r = 0;
@@ -1273,12 +1254,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	int r;
 	int cpu;
 
+	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
+
+	r = -EINVAL;
+	if (id >= KVM_MAX_VCPUS) {
+		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
+				KVM_MAX_VCPUS);
+		goto fail;
+	}
+
 	r = -ENOMEM;
 	if (!vm_base) {
 		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
 		goto fail;
 	}
-	vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
+					vcpu_data[id].vcpu_struct));
 	vcpu->kvm = kvm;
 
 	cpu = get_cpu();
@@ -1374,9 +1365,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	int i;
-	int r;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+	int i;
+
 	vcpu_load(vcpu);
 
 	for (i = 0; i < 16; i++) {
@@ -1391,14 +1382,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->vpd.vpsr = vpd->vpsr;
 	regs->vpd.vpr = vpd->vpr;
 
-	r = -EFAULT;
-	r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
-			sizeof(union context));
-	if (r)
-		goto out;
-	r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
-	if (r)
-		goto out;
+	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
+
 	SAVE_REGS(mp_state);
 	SAVE_REGS(vmm_rr);
 	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
@@ -1426,10 +1411,9 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	SAVE_REGS(metaphysical_saved_rr4);
 	SAVE_REGS(fp_psr);
 	SAVE_REGS(saved_gp);
+
 	vcpu_put(vcpu);
-	r = 0;
-out:
-	return r;
+	return 0;
 }
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
@@ -1457,6 +1441,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
+	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
+		return -ENOMEM;
+
 	for (i = 0; i < npages; i++) {
 		pfn = gfn_to_pfn(kvm, base_gfn + i);
 		if (!kvm_is_mmio_pfn(pfn)) {
@@ -1631,8 +1618,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	long n, base;
-	unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
-			+ KVM_MEM_DIRTY_LOG_OFS);
+	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
 	r = -EINVAL;
 	if (log->slot >= KVM_MEMORY_SLOTS)
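
With the new layout, KVM_MAX_VCPUS is no longer a hard-coded 4: it is whatever fits in the VM data area after the p2m table, the vm struct and the dirty log, and kvm_arch_set_memory_region now rejects slots past what the p2m can map. Using the sizes defined in kvm_host.h above, that comes to 156 vcpus, and to 48 GiB of guest memory if PAGE_SHIFT is 14 (16 KiB pages, an assumption, since ia64 supports several page sizes). A quick arithmetic check:

/* Sanity check of the derived limits, using the constants from above. */
#include <stdio.h>

int main(void)
{
	unsigned long vm_data    = 1UL << 26;		/* KVM_VM_DATA_SIZE */
	unsigned long p2m        = 24UL << 20;		/* KVM_P2M_SIZE */
	unsigned long vm_struct  = 1UL << 19;		/* KVM_VM_STRUCT_SIZE */
	unsigned long dirty_log  = 1UL << 19;		/* KVM_MEM_DIRTY_LOG_SIZE */
	unsigned long vcpu_data  = 4 * (1UL << 16);	/* vhpt+vtlb+vpd+struct */
	unsigned long page_shift = 14;			/* assumed 16 KiB pages */

	printf("KVM_MAX_VCPUS    = %lu\n",
	       (vm_data - p2m - vm_struct - dirty_log) / vcpu_data);	/* 156 */
	printf("KVM_MAX_MEM_SIZE = %lu GiB\n",
	       ((p2m >> 3) << page_shift) >> 30);			/* 48 */
	return 0;
}
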
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
new file mode 100644
index 000000000000..a85cb611ecd7
--- /dev/null
+++ b/arch/ia64/kvm/kvm_lib.c
@@ -0,0 +1,15 @@
+/*
+ * kvm_lib.c: Compile some libraries for kvm-intel module.
+ *
+ * Just include kernel's library, and disable symbols export.
+ * Copyright (C) 2008, Intel Corporation.
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#undef CONFIG_MODULES
+#include "../../../lib/vsprintf.c"
+#include "../../../lib/ctype.c"
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
index 2cc41d17cf99..b2bcaa2787aa 100644
--- a/arch/ia64/kvm/kvm_minstate.h
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -24,6 +24,8 @@
 #include <asm/asmmacro.h>
 #include <asm/types.h>
 #include <asm/kregs.h>
+#include <asm/kvm_host.h>
+
 #include "asm-offsets.h"
 
 #define KVM_MINSTATE_START_SAVE_MIN	\
@@ -33,7 +35,7 @@
 	addl r22 = VMM_RBS_OFFSET,r1;	/* compute base of RBS */	\
 	;;								\
 	lfetch.fault.excl.nt1 [r22];					\
-	addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
+	addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1;			\
 	mov r23 = ar.bspstore;		/* save ar.bspstore */		\
 	;;								\
 	mov ar.bspstore = r22;		/* switch to kernel RBS */	\
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
index e585c4607344..dd979e00b574 100644
--- a/arch/ia64/kvm/misc.h
+++ b/arch/ia64/kvm/misc.h
@@ -27,7 +27,8 @@
  */
 static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
 {
-	return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
+	return (uint64_t *)(kvm->arch.vm_base +
+				offsetof(struct kvm_vm_data, kvm_p2m));
 }
 
 static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 7f1a858bc69f..21f63fffc379 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -66,31 +66,25 @@ void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
 
 	switch (addr) {
 	case PIB_OFST_INTA:
-		/*panic_domain(NULL, "Undefined write on PIB INTA\n");*/
-		panic_vm(v);
+		panic_vm(v, "Undefined write on PIB INTA\n");
 		break;
 	case PIB_OFST_XTP:
 		if (length == 1) {
 			vlsapic_write_xtp(v, val);
 		} else {
-			/*panic_domain(NULL,
-			"Undefined write on PIB XTP\n");*/
-			panic_vm(v);
+			panic_vm(v, "Undefined write on PIB XTP\n");
 		}
 		break;
 	default:
 		if (PIB_LOW_HALF(addr)) {
-			/*lower half */
+			/*Lower half */
 			if (length != 8)
-				/*panic_domain(NULL,
-				"Can't LHF write with size %ld!\n",
-				length);*/
-				panic_vm(v);
+				panic_vm(v, "Can't LHF write with size %ld!\n",
+						length);
 			else
 				vlsapic_write_ipi(v, addr, val);
-		} else {   /* upper half
-				printk("IPI-UHF write %lx\n",addr);*/
-			panic_vm(v);
+		} else {   /*Upper half */
+			panic_vm(v, "IPI-UHF write %lx\n", addr);
 		}
 		break;
 	}
@@ -108,22 +102,18 @@ unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
 		if (length == 1)	/* 1 byte load */
 			;	/* There is no i8259, there is no INTA access*/
 		else
-			/*panic_domain(NULL,"Undefined read on PIB INTA\n"); */
-			panic_vm(v);
+			panic_vm(v, "Undefined read on PIB INTA\n");
 
 		break;
 	case PIB_OFST_XTP:
 		if (length == 1) {
 			result = VLSAPIC_XTP(v);
-			/* printk("read xtp %lx\n", result); */
 		} else {
-			/*panic_domain(NULL,
-			"Undefined read on PIB XTP\n");*/
-			panic_vm(v);
+			panic_vm(v, "Undefined read on PIB XTP\n");
 		}
 		break;
 	default:
-		panic_vm(v);
+		panic_vm(v, "Undefined addr access for lsapic!\n");
 		break;
 	}
 	return result;
@@ -162,7 +152,7 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 		/* it's necessary to ensure zero extending */
 		*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
 	} else
-		panic_vm(vcpu);
+		panic_vm(vcpu, "Unhandled mmio access returned!\n");
 out:
 	local_irq_restore(psr);
 	return ;
@@ -324,7 +314,9 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
 		return;
 	} else {
 		inst_type = -1;
-		panic_vm(vcpu);
+		panic_vm(vcpu, "Unsupported MMIO access instruction! \
+				Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
+				bundle.i64[0], bundle.i64[1]);
 	}
 
 	size = 1 << size;
@@ -335,7 +327,7 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
 		if (inst_type == SL_INTEGER)
 			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
 		else
-			panic_vm(vcpu);
+			panic_vm(vcpu, "Unsupported instruction type!\n");
 
 	}
 	vcpu_increment_iip(vcpu);
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 800817307b7b..552d07724207 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -527,7 +527,8 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
 	vector = vec2off[vec];
 
 	if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
-		panic_vm(vcpu);
+		panic_vm(vcpu, "Interruption with vector :0x%lx occurs "
+						"with psr.ic = 0\n", vector);
 		return;
 	}
 
@@ -586,7 +587,7 @@ static void set_pal_call_result(struct kvm_vcpu *vcpu)
 		vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
 		vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
 	} else
-		panic_vm(vcpu);
+		panic_vm(vcpu, "Mis-set for exit reason!\n");
 }
 
 static void set_sal_call_data(struct kvm_vcpu *vcpu)
@@ -614,7 +615,7 @@ static void set_sal_call_result(struct kvm_vcpu *vcpu)
 		vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
 		vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
 	} else
-		panic_vm(vcpu);
+		panic_vm(vcpu, "Mis-set for exit reason!\n");
 }
 
 void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
@@ -680,7 +681,7 @@ static void generate_exirq(struct kvm_vcpu *vcpu)
 	vpsr = VCPU(vcpu, vpsr);
 	isr = vpsr & IA64_PSR_RI;
 	if (!(vpsr & IA64_PSR_IC))
-		panic_vm(vcpu);
+		panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n");
 	reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
 }
 
@@ -941,8 +942,20 @@ static void vcpu_do_resume(struct kvm_vcpu *vcpu)
 	ia64_set_pta(vcpu->arch.vhpt.pta.val);
 }
 
+static void vmm_sanity_check(struct kvm_vcpu *vcpu)
+{
+	struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+	if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
+		panic_vm(vcpu, "Failed to do vmm sanity check,"
+			"it maybe caused by crashed vmm!!\n\n");
+	}
+}
+
 static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
 {
+	vmm_sanity_check(vcpu); /*Guarantee vcpu runing on healthy vmm!*/
+
 	if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
 		vcpu_do_resume(vcpu);
 		return;
@@ -968,3 +981,11 @@ void vmm_transition(struct kvm_vcpu *vcpu)
 						1, 0, 0, 0, 0, 0);
 	kvm_do_resume_op(vcpu);
 }
+
+void vmm_panic_handler(u64 vec)
+{
+	struct kvm_vcpu *vcpu = current_vcpu;
+	vmm_sanity = 0;
+	panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n",
+			vec2off[vec]);
+}
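
The sanity gate is one-directional: vmm_panic_handler() clears vmm_sanity before reporting, and every later pass through kvm_do_resume_op() then panics the VM instead of re-entering crashed VMM code; only EXIT_REASON_DEBUG is exempt so the final log flush can still reach the host. A condensed model of that logic, assuming the same names (this is not the kernel code itself):

#define EXIT_REASON_DEBUG 20

static long vmm_sanity = 1;	/* cleared once by the panic path, never re-set */

static int resume_allowed(int exit_reason)
{
	/* mirrors vmm_sanity_check(): a crashed VMM may still flush its
	 * log buffer, but must not be allowed to run guest code again */
	return vmm_sanity || exit_reason == EXIT_REASON_DEBUG;
}
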
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index e44027ce5667..ecd526b55323 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -816,8 +816,9 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 	unsigned long vitv = VCPU(vcpu, itv);
 
 	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < MAX_VCPU_NUM; i++) {
-			v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+			v = (struct kvm_vcpu *)((char *)vcpu +
+					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
 			VMX(v, last_itc) = 0;
 		}
@@ -1650,7 +1651,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 	 * Otherwise panic
 	 */
 	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
-		panic_vm(vcpu);
+		panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
+				& vpsr.is=0\n");
 
 	/*
 	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
@@ -2103,7 +2105,7 @@ void kvm_init_all_rr(struct kvm_vcpu *vcpu)
 
 	if (is_physical_mode(vcpu)) {
 		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
-			panic_vm(vcpu);
+			panic_vm(vcpu, "Machine Status conflicts!\n");
 
 		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
 		ia64_dv_serialize_data();
@@ -2152,10 +2154,70 @@ int vmm_entry(void)
 	return 0;
 }
 
-void panic_vm(struct kvm_vcpu *v)
+static void kvm_show_registers(struct kvm_pt_regs *regs)
 {
+	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
+
+	struct kvm_vcpu *vcpu = current_vcpu;
+	if (vcpu != NULL)
+		printk("vcpu 0x%p vcpu %d\n",
+		       vcpu, vcpu->vcpu_id);
+
+	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
+	       regs->cr_ipsr, regs->cr_ifs, ip);
+
+	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
+	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
+	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
+	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
+	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
+	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
+	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
+	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
+	       regs->b6, regs->b7);
+	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
+	       regs->f6.u.bits[1], regs->f6.u.bits[0],
+	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
+	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
+	       regs->f8.u.bits[1], regs->f8.u.bits[0],
+	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
+	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
+	       regs->f10.u.bits[1], regs->f10.u.bits[0],
+	       regs->f11.u.bits[1], regs->f11.u.bits[0]);
+
+	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
+	       regs->r2, regs->r3);
+	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
+	       regs->r9, regs->r10);
+	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
+	       regs->r12, regs->r13);
+	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
+	       regs->r15, regs->r16);
+	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
+	       regs->r18, regs->r19);
+	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
+	       regs->r21, regs->r22);
+	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
+	       regs->r24, regs->r25);
+	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
+	       regs->r27, regs->r28);
+	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
+	       regs->r30, regs->r31);
+
+}
+
+void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
+{
+	va_list args;
+	char buf[256];
+
+	struct kvm_pt_regs *regs = vcpu_regs(v);
 	struct exit_ctl_data *p = &v->arch.exit_data;
-
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	printk(buf);
+	kvm_show_registers(regs);
 	p->exit_reason = EXIT_REASON_VM_PANIC;
 	vmm_transition(v);
 	/*Never to return*/
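
panic_vm() now takes printf-style arguments and dumps the register file before forcing the EXIT_REASON_VM_PANIC transition. The varargs idiom it adopts, shown in isolation; report() is a hypothetical sink standing in for the printk plus kvm_show_registers pair:

#include <stdarg.h>
#include <stdio.h>

static void report(const char *msg)
{
	fputs(msg, stderr);		/* stand-in for printk + register dump */
}

static void panic_like(const char *fmt, ...)
{
	va_list args;
	char buf[256];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);	/* bounded, never overruns buf */
	va_end(args);
	report(buf);
}

int main(void)
{
	panic_like("Unexpected interruption occurs in VMM, vector:0x%lx\n",
		   0x5000UL);
	return 0;
}
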
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index e9b2a4e121c0..b2f12a562bdf 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -737,9 +737,12 @@ void kvm_init_vtlb(struct kvm_vcpu *v);
 void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);
 
-void panic_vm(struct kvm_vcpu *v);
+void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
 
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
 		u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+
+extern long vmm_sanity;
+
 #endif
 #endif	/* __VCPU_H__ */
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index 2275bf4e681a..9eee5c04bacc 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -20,6 +20,7 @@
  */
 
 
+#include<linux/kernel.h>
 #include<linux/module.h>
 #include<asm/fpswa.h>
 
@@ -31,6 +32,8 @@ MODULE_LICENSE("GPL");
 extern char kvm_ia64_ivt;
 extern fpswa_interface_t *vmm_fpswa_interface;
 
+long vmm_sanity = 1;
+
 struct kvm_vmm_info vmm_info = {
 	.module			= THIS_MODULE,
 	.vmm_entry		= vmm_entry,
@@ -62,5 +65,31 @@ void vmm_spin_unlock(spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
+
+static void vcpu_debug_exit(struct kvm_vcpu *vcpu)
+{
+	struct exit_ctl_data *p = &vcpu->arch.exit_data;
+	long psr;
+
+	local_irq_save(psr);
+	p->exit_reason = EXIT_REASON_DEBUG;
+	vmm_transition(vcpu);
+	local_irq_restore(psr);
+}
+
+asmlinkage int printk(const char *fmt, ...)
+{
+	struct kvm_vcpu *vcpu = current_vcpu;
+	va_list args;
+	int r;
+
+	memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN);
+	va_start(args, fmt);
+	r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args);
+	va_end(args);
+	vcpu_debug_exit(vcpu);
+	return r;
+}
+
 module_init(kvm_vmm_init)
 module_exit(kvm_vmm_exit)
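
The VMM is isolated from the host kernel, so this printk is a local override whose vsnprintf comes from the private copy built in via kvm_lib.c: it formats into the per-vcpu log_buf, forces an EXIT_REASON_DEBUG exit, and the host prints the buffer in handle_vcpu_debug() (kvm-ia64.c above). The round trip condensed into plain C, with the vmm_transition() trap replaced by a direct call; vcpu_sketch and vmm_printk are illustrative names:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define VMM_LOG_LEN 256
#define EXIT_REASON_DEBUG 20

struct vcpu_sketch {
	char log_buf[VMM_LOG_LEN];
	int exit_reason;
};

static int handle_vcpu_debug(struct vcpu_sketch *v)	/* host side */
{
	printf("VMM: %s", v->log_buf);
	return 1;					/* keep the vcpu running */
}

static int vmm_printk(struct vcpu_sketch *v, const char *fmt, ...)
{
	va_list args;
	int r;

	memset(v->log_buf, 0, VMM_LOG_LEN);
	va_start(args, fmt);
	r = vsnprintf(v->log_buf, VMM_LOG_LEN, fmt, args);
	va_end(args);

	v->exit_reason = EXIT_REASON_DEBUG;	/* vmm_transition() in the patch */
	handle_vcpu_debug(v);			/* host side picks the buffer up */
	return r;
}

int main(void)
{
	struct vcpu_sketch v;
	vmm_printk(&v, "itc_offset=%#lx\n", 0xdeadUL);
	return 0;
}
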
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index c1d7251a1480..3ef1a017a318 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -1,5 +1,5 @@
 /*
- * /ia64/kvm_ivt.S
+ * arch/ia64/kvm/vmm_ivt.S
  *
  * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
  *	Stephane Eranian <eranian@hpl.hp.com>
@@ -70,32 +70,39 @@
 # define PSR_DEFAULT_BITS   0
 #endif
 
-
 #define KVM_FAULT(n)    \
     kvm_fault_##n:;          \
     mov r19=n;;          \
-    br.sptk.many kvm_fault_##n;         \
+    br.sptk.many kvm_vmm_panic;         \
     ;;                    \
-
 
 #define KVM_REFLECT(n)    \
     mov r31=pr;           \
     mov r19=n;       /* prepare to save predicates */ \
     mov r29=cr.ipsr;      \
     ;;      \
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
-(p7)br.sptk.many kvm_dispatch_reflection;        \
-    br.sptk.many kvm_panic;      \
+(p7) br.sptk.many kvm_dispatch_reflection;        \
+    br.sptk.many kvm_vmm_panic;      \
 
-
-GLOBAL_ENTRY(kvm_panic)
-    br.sptk.many kvm_panic
-    ;;
-END(kvm_panic)
-
-
-
-
+GLOBAL_ENTRY(kvm_vmm_panic)
+    KVM_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,1,0
+    mov out0=r15
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                      // guarantee that interruption collection is on
+    ;;
+    //(p15) ssm psr.i           // restore psr.i
+    addl r14=@gprel(ia64_leave_hypervisor),gp
+    ;;
+    KVM_SAVE_REST
+    mov rp=r14
+    ;;
+    br.call.sptk.many b6=vmm_panic_handler;
+END(kvm_vmm_panic)
 
     .section .text.ivt,"ax"
 
@@ -105,308 +112,307 @@ kvm_ia64_ivt:
 ///////////////////////////////////////////////////////////////
 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
 ENTRY(kvm_vhpt_miss)
     KVM_FAULT(0)
 END(kvm_vhpt_miss)
 
-
     .org kvm_ia64_ivt+0x400
 ////////////////////////////////////////////////////////////////
 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
 ENTRY(kvm_itlb_miss)
     mov r31 = pr
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-    (p6) br.sptk kvm_alt_itlb_miss
+(p6) br.sptk kvm_alt_itlb_miss
     mov r19 = 1
     br.sptk kvm_itlb_miss_dispatch
     KVM_FAULT(1);
 END(kvm_itlb_miss)
 
     .org kvm_ia64_ivt+0x0800
 //////////////////////////////////////////////////////////////////
 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
 ENTRY(kvm_dtlb_miss)
     mov r31 = pr
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk kvm_alt_dtlb_miss
+(p6) br.sptk kvm_alt_dtlb_miss
     br.sptk kvm_dtlb_miss_dispatch
 END(kvm_dtlb_miss)
 
     .org kvm_ia64_ivt+0x0c00
 ////////////////////////////////////////////////////////////////////
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 ENTRY(kvm_alt_itlb_miss)
     mov r16=cr.ifa              // get address that caused the TLB miss
     ;;
     movl r17=PAGE_KERNEL
     mov r24=cr.ipsr
     movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
     ;;
     and r19=r19,r16             // clear ed, reserved bits, and PTE control bits
     ;;
     or r19=r17,r19              // insert PTE control bits into r19
     ;;
     movl r20=IA64_GRANULE_SHIFT<<2
     ;;
     mov cr.itir=r20
     ;;
     itc.i r19                   // insert the TLB entry
     mov pr=r31,-1
     rfi
 END(kvm_alt_itlb_miss)
 
     .org kvm_ia64_ivt+0x1000
 /////////////////////////////////////////////////////////////////////
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 ENTRY(kvm_alt_dtlb_miss)
     mov r16=cr.ifa              // get address that caused the TLB miss
     ;;
     movl r17=PAGE_KERNEL
     movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
     mov r24=cr.ipsr
     ;;
     and r19=r19,r16             // clear ed, reserved bits, and PTE control bits
     ;;
     or r19=r19,r17              // insert PTE control bits into r19
     ;;
     movl r20=IA64_GRANULE_SHIFT<<2
     ;;
     mov cr.itir=r20
     ;;
     itc.d r19                   // insert the TLB entry
     mov pr=r31,-1
     rfi
 END(kvm_alt_dtlb_miss)
 
     .org kvm_ia64_ivt+0x1400
 //////////////////////////////////////////////////////////////////////
 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
 ENTRY(kvm_nested_dtlb_miss)
     KVM_FAULT(5)
 END(kvm_nested_dtlb_miss)
 
     .org kvm_ia64_ivt+0x1800
 /////////////////////////////////////////////////////////////////////
 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
 ENTRY(kvm_ikey_miss)
     KVM_REFLECT(6)
 END(kvm_ikey_miss)
 
     .org kvm_ia64_ivt+0x1c00
 /////////////////////////////////////////////////////////////////////
 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 ENTRY(kvm_dkey_miss)
     KVM_REFLECT(7)
 END(kvm_dkey_miss)
 
     .org kvm_ia64_ivt+0x2000
 ////////////////////////////////////////////////////////////////////
 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
 ENTRY(kvm_dirty_bit)
     KVM_REFLECT(8)
 END(kvm_dirty_bit)
 
     .org kvm_ia64_ivt+0x2400
 ////////////////////////////////////////////////////////////////////
 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
 ENTRY(kvm_iaccess_bit)
     KVM_REFLECT(9)
 END(kvm_iaccess_bit)
 
     .org kvm_ia64_ivt+0x2800
 ///////////////////////////////////////////////////////////////////
 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
 ENTRY(kvm_daccess_bit)
     KVM_REFLECT(10)
 END(kvm_daccess_bit)
 
     .org kvm_ia64_ivt+0x2c00
 /////////////////////////////////////////////////////////////////
 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
 ENTRY(kvm_break_fault)
     mov r31=pr
     mov r19=11
     mov r29=cr.ipsr
     ;;
     KVM_SAVE_MIN_WITH_COVER_R19
     ;;
-    alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
+    alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
     mov out0=cr.ifa
     mov out2=cr.isr     // FIXME: pity to make this slow access twice
     mov out3=cr.iim     // FIXME: pity to make this slow access twice
     adds r3=8,r2                // set up second base pointer
     ;;
     ssm psr.ic
     ;;
     srlz.i                  // guarantee that interruption collection is on
     ;;
     //(p15)ssm psr.i               // restore psr.i
     addl r14=@gprel(ia64_leave_hypervisor),gp
     ;;
     KVM_SAVE_REST
     mov rp=r14
     ;;
     adds out1=16,sp
     br.call.sptk.many b6=kvm_ia64_handle_break
     ;;
 END(kvm_break_fault)
 
     .org kvm_ia64_ivt+0x3000
 /////////////////////////////////////////////////////////////////
 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
 ENTRY(kvm_interrupt)
     mov r31=pr          // prepare to save predicates
     mov r19=12
     mov r29=cr.ipsr
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT
     tbit.z p0,p15=r29,IA64_PSR_I_BIT
     ;;
 (p7) br.sptk kvm_dispatch_interrupt
     ;;
     mov r27=ar.rsc              /* M */
     mov r20=r1                  /* A */
     mov r25=ar.unat             /* M */
     mov r26=ar.pfs              /* I */
     mov r28=cr.iip              /* M */
     cover                       /* B (or nothing) */
     ;;
     mov r1=sp
     ;;
     invala                      /* M */
     mov r30=cr.ifs
     ;;
     addl r1=-VMM_PT_REGS_SIZE,r1
     ;;
     adds r17=2*L1_CACHE_BYTES,r1        /* really: biggest cache-line size */
     adds r16=PT(CR_IPSR),r1
     ;;
     lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
     st8 [r16]=r29               /* save cr.ipsr */
     ;;
     lfetch.fault.excl.nt1 [r17]
     mov r29=b0
     ;;
     adds r16=PT(R8),r1          /* initialize first base pointer */
     adds r17=PT(R9),r1          /* initialize second base pointer */
     mov r18=r0                  /* make sure r18 isn't NaT */
     ;;
 .mem.offset 0,0; st8.spill [r16]=r8,16
 .mem.offset 8,0; st8.spill [r17]=r9,16
     ;;
 .mem.offset 0,0; st8.spill [r16]=r10,24
 .mem.offset 8,0; st8.spill [r17]=r11,24
     ;;
     st8 [r16]=r28,16            /* save cr.iip */
     st8 [r17]=r30,16            /* save cr.ifs */
     mov r8=ar.fpsr              /* M */
     mov r9=ar.csd
     mov r10=ar.ssd
     movl r11=FPSR_DEFAULT       /* L-unit */
     ;;
     st8 [r16]=r25,16            /* save ar.unat */
     st8 [r17]=r26,16            /* save ar.pfs */
     shl r18=r18,16              /* compute ar.rsc to be used for "loadrs" */
     ;;
     st8 [r16]=r27,16            /* save ar.rsc */
     adds r17=16,r17             /* skip over ar_rnat field */
     ;;
     st8 [r17]=r31,16            /* save predicates */
     adds r16=16,r16             /* skip over ar_bspstore field */
     ;;
     st8 [r16]=r29,16            /* save b0 */
     st8 [r17]=r18,16            /* save ar.rsc value for "loadrs" */
     ;;
 .mem.offset 0,0; st8.spill [r16]=r20,16        /* save original r1 */
 .mem.offset 8,0; st8.spill [r17]=r12,16
     adds r12=-16,r1
     /* switch to kernel memory stack (with 16 bytes of scratch) */
     ;;
 .mem.offset 0,0; st8.spill [r16]=r13,16
 .mem.offset 8,0; st8.spill [r17]=r8,16         /* save ar.fpsr */
     ;;
 .mem.offset 0,0; st8.spill [r16]=r15,16
 .mem.offset 8,0; st8.spill [r17]=r14,16
     dep r14=-1,r0,60,4
     ;;
 .mem.offset 0,0; st8.spill [r16]=r2,16
 .mem.offset 8,0; st8.spill [r17]=r3,16
     adds r2=VMM_PT_REGS_R16_OFFSET,r1
     adds r14 = VMM_VCPU_GP_OFFSET,r13
     ;;
     mov r8=ar.ccv
     ld8 r14 = [r14]
     ;;
     mov r1=r14    /* establish kernel global pointer */
     ;;          \
     bsw.1
     ;;
     alloc r14=ar.pfs,0,0,1,0    // must be first in an insn group
     mov out0=r13
     ;;
     ssm psr.ic
     ;;
     srlz.i
     ;;
     //(p15) ssm psr.i
     adds r3=8,r2                // set up second base pointer for SAVE_REST
     srlz.i                      // ensure everybody knows psr.ic is back on
     ;;
 .mem.offset 0,0; st8.spill [r2]=r16,16
 .mem.offset 8,0; st8.spill [r3]=r17,16
     ;;
 .mem.offset 0,0; st8.spill [r2]=r18,16
 .mem.offset 8,0; st8.spill [r3]=r19,16
     ;;
 .mem.offset 0,0; st8.spill [r2]=r20,16
 .mem.offset 8,0; st8.spill [r3]=r21,16
366 mov r18=b6 372 mov r18=b6
367 ;; 373 ;;
368.mem.offset 0,0; st8.spill [r2]=r22,16 374.mem.offset 0,0; st8.spill [r2]=r22,16
369.mem.offset 8,0; st8.spill [r3]=r23,16 375.mem.offset 8,0; st8.spill [r3]=r23,16
370 mov r19=b7 376 mov r19=b7
371 ;; 377 ;;
372.mem.offset 0,0; st8.spill [r2]=r24,16 378.mem.offset 0,0; st8.spill [r2]=r24,16
373.mem.offset 8,0; st8.spill [r3]=r25,16 379.mem.offset 8,0; st8.spill [r3]=r25,16
374 ;; 380 ;;
375.mem.offset 0,0; st8.spill [r2]=r26,16 381.mem.offset 0,0; st8.spill [r2]=r26,16
376.mem.offset 8,0; st8.spill [r3]=r27,16 382.mem.offset 8,0; st8.spill [r3]=r27,16
377 ;; 383 ;;
378.mem.offset 0,0; st8.spill [r2]=r28,16 384.mem.offset 0,0; st8.spill [r2]=r28,16
379.mem.offset 8,0; st8.spill [r3]=r29,16 385.mem.offset 8,0; st8.spill [r3]=r29,16
380 ;; 386 ;;
381.mem.offset 0,0; st8.spill [r2]=r30,16 387.mem.offset 0,0; st8.spill [r2]=r30,16
382.mem.offset 8,0; st8.spill [r3]=r31,32 388.mem.offset 8,0; st8.spill [r3]=r31,32
383 ;; 389 ;;
384 mov ar.fpsr=r11 /* M-unit */ 390 mov ar.fpsr=r11 /* M-unit */
385 st8 [r2]=r8,8 /* ar.ccv */ 391 st8 [r2]=r8,8 /* ar.ccv */
386 adds r24=PT(B6)-PT(F7),r3 392 adds r24=PT(B6)-PT(F7),r3
387 ;; 393 ;;
388 stf.spill [r2]=f6,32 394 stf.spill [r2]=f6,32
389 stf.spill [r3]=f7,32 395 stf.spill [r3]=f7,32
390 ;; 396 ;;
391 stf.spill [r2]=f8,32 397 stf.spill [r2]=f8,32
392 stf.spill [r3]=f9,32 398 stf.spill [r3]=f9,32
393 ;; 399 ;;
394 stf.spill [r2]=f10 400 stf.spill [r2]=f10
395 stf.spill [r3]=f11 401 stf.spill [r3]=f11
396 adds r25=PT(B7)-PT(F11),r3 402 adds r25=PT(B7)-PT(F11),r3
397 ;; 403 ;;
398 st8 [r24]=r18,16 /* b6 */ 404 st8 [r24]=r18,16 /* b6 */
399 st8 [r25]=r19,16 /* b7 */ 405 st8 [r25]=r19,16 /* b7 */
400 ;; 406 ;;
401 st8 [r24]=r9 /* ar.csd */ 407 st8 [r24]=r9 /* ar.csd */
402 st8 [r25]=r10 /* ar.ssd */ 408 st8 [r25]=r10 /* ar.ssd */
403 ;; 409 ;;
404 srlz.d // make sure we see the effect of cr.ivr 410 srlz.d // make sure we see the effect of cr.ivr
405 addl r14=@gprel(ia64_leave_nested),gp 411 addl r14=@gprel(ia64_leave_nested),gp
406 ;; 412 ;;
407 mov rp=r14 413 mov rp=r14
408 br.call.sptk.many b6=kvm_ia64_handle_irq 414 br.call.sptk.many b6=kvm_ia64_handle_irq
409 ;; 415 ;;
410END(kvm_interrupt) 416END(kvm_interrupt)
411 417
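The first bundles of kvm_interrupt above route on the interrupted ipsr: tbit.z sets p7 when IA64_PSR_VM_BIT is set, meaning the guest was running, and that case branches to kvm_dispatch_interrupt; when the bit is clear the interrupt hit the VMM itself, so the frame is saved inline and the handler returns through ia64_leave_nested. A minimal C sketch of the test, assuming the conventional Itanium PSR layout (vm at bit 46); illustrative only:

    #define IA64_PSR_VM_BIT 46  /* assumed bit position, per PSR layout */

    /* mirrors "tbit.z p6,p7=r29,IA64_PSR_VM_BIT": returns the p7 case */
    static int interrupted_in_guest(unsigned long ipsr /* r29 = cr.ipsr */)
    {
        return (ipsr >> IA64_PSR_VM_BIT) & 1;
    }
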
412 .global kvm_dispatch_vexirq 418 .global kvm_dispatch_vexirq
@@ -414,387 +420,385 @@ END(kvm_interrupt)
414////////////////////////////////////////////////////////////////////// 420//////////////////////////////////////////////////////////////////////
415// 0x3400 Entry 13 (size 64 bundles) Reserved 421// 0x3400 Entry 13 (size 64 bundles) Reserved
416ENTRY(kvm_virtual_exirq) 422ENTRY(kvm_virtual_exirq)
417 mov r31=pr 423 mov r31=pr
418 mov r19=13 424 mov r19=13
419 mov r30 =r0 425 mov r30 =r0
420 ;; 426 ;;
421kvm_dispatch_vexirq: 427kvm_dispatch_vexirq:
422 cmp.eq p6,p0 = 1,r30 428 cmp.eq p6,p0 = 1,r30
423 ;; 429 ;;
424(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 430(p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
425 ;; 431 ;;
426(p6)ld8 r1 = [r29] 432(p6) ld8 r1 = [r29]
427 ;; 433 ;;
428 KVM_SAVE_MIN_WITH_COVER_R19 434 KVM_SAVE_MIN_WITH_COVER_R19
429 alloc r14=ar.pfs,0,0,1,0 435 alloc r14=ar.pfs,0,0,1,0
430 mov out0=r13 436 mov out0=r13
431 437
432 ssm psr.ic 438 ssm psr.ic
433 ;; 439 ;;
434 srlz.i // guarantee that interruption collection is on 440 srlz.i // guarantee that interruption collection is on
435 ;; 441 ;;
436 //(p15) ssm psr.i // restore psr.i 442 //(p15) ssm psr.i // restore psr.i
437 adds r3=8,r2 // set up second base pointer 443 adds r3=8,r2 // set up second base pointer
438 ;; 444 ;;
439 KVM_SAVE_REST 445 KVM_SAVE_REST
440 addl r14=@gprel(ia64_leave_hypervisor),gp 446 addl r14=@gprel(ia64_leave_hypervisor),gp
441 ;; 447 ;;
442 mov rp=r14 448 mov rp=r14
443 br.call.sptk.many b6=kvm_vexirq 449 br.call.sptk.many b6=kvm_vexirq
444END(kvm_virtual_exirq) 450END(kvm_virtual_exirq)
445 451
446 .org kvm_ia64_ivt+0x3800 452 .org kvm_ia64_ivt+0x3800
447///////////////////////////////////////////////////////////////////// 453/////////////////////////////////////////////////////////////////////
448// 0x3800 Entry 14 (size 64 bundles) Reserved 454// 0x3800 Entry 14 (size 64 bundles) Reserved
449 KVM_FAULT(14) 455 KVM_FAULT(14)
450 // this code segment is from 2.6.16.13 456 // this code segment is from 2.6.16.13
451
452 457
453 .org kvm_ia64_ivt+0x3c00 458 .org kvm_ia64_ivt+0x3c00
454/////////////////////////////////////////////////////////////////////// 459///////////////////////////////////////////////////////////////////////
455// 0x3c00 Entry 15 (size 64 bundles) Reserved 460// 0x3c00 Entry 15 (size 64 bundles) Reserved
456 KVM_FAULT(15) 461 KVM_FAULT(15)
457
458 462
459 .org kvm_ia64_ivt+0x4000 463 .org kvm_ia64_ivt+0x4000
460/////////////////////////////////////////////////////////////////////// 464///////////////////////////////////////////////////////////////////////
461// 0x4000 Entry 16 (size 64 bundles) Reserved 465// 0x4000 Entry 16 (size 64 bundles) Reserved
462 KVM_FAULT(16) 466 KVM_FAULT(16)
463 467
464 .org kvm_ia64_ivt+0x4400 468 .org kvm_ia64_ivt+0x4400
465////////////////////////////////////////////////////////////////////// 469//////////////////////////////////////////////////////////////////////
466// 0x4400 Entry 17 (size 64 bundles) Reserved 470// 0x4400 Entry 17 (size 64 bundles) Reserved
467 KVM_FAULT(17) 471 KVM_FAULT(17)
468 472
469 .org kvm_ia64_ivt+0x4800 473 .org kvm_ia64_ivt+0x4800
470////////////////////////////////////////////////////////////////////// 474//////////////////////////////////////////////////////////////////////
471// 0x4800 Entry 18 (size 64 bundles) Reserved 475// 0x4800 Entry 18 (size 64 bundles) Reserved
472 KVM_FAULT(18) 476 KVM_FAULT(18)
473 477
474 .org kvm_ia64_ivt+0x4c00 478 .org kvm_ia64_ivt+0x4c00
475////////////////////////////////////////////////////////////////////// 479//////////////////////////////////////////////////////////////////////
476// 0x4c00 Entry 19 (size 64 bundles) Reserved 480// 0x4c00 Entry 19 (size 64 bundles) Reserved
477 KVM_FAULT(19) 481 KVM_FAULT(19)
478 482
479 .org kvm_ia64_ivt+0x5000 483 .org kvm_ia64_ivt+0x5000
480////////////////////////////////////////////////////////////////////// 484//////////////////////////////////////////////////////////////////////
481// 0x5000 Entry 20 (size 16 bundles) Page Not Present 485// 0x5000 Entry 20 (size 16 bundles) Page Not Present
482ENTRY(kvm_page_not_present) 486ENTRY(kvm_page_not_present)
483 KVM_REFLECT(20) 487 KVM_REFLECT(20)
484END(kvm_page_not_present) 488END(kvm_page_not_present)
485 489
486 .org kvm_ia64_ivt+0x5100 490 .org kvm_ia64_ivt+0x5100
487/////////////////////////////////////////////////////////////////////// 491///////////////////////////////////////////////////////////////////////
488// 0x5100 Entry 21 (size 16 bundles) Key Permission vector 492// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
489ENTRY(kvm_key_permission) 493ENTRY(kvm_key_permission)
490 KVM_REFLECT(21) 494 KVM_REFLECT(21)
491END(kvm_key_permission) 495END(kvm_key_permission)
492 496
493 .org kvm_ia64_ivt+0x5200 497 .org kvm_ia64_ivt+0x5200
494////////////////////////////////////////////////////////////////////// 498//////////////////////////////////////////////////////////////////////
495// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 499// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
496ENTRY(kvm_iaccess_rights) 500ENTRY(kvm_iaccess_rights)
497 KVM_REFLECT(22) 501 KVM_REFLECT(22)
498END(kvm_iaccess_rights) 502END(kvm_iaccess_rights)
499 503
500 .org kvm_ia64_ivt+0x5300 504 .org kvm_ia64_ivt+0x5300
501////////////////////////////////////////////////////////////////////// 505//////////////////////////////////////////////////////////////////////
502// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) 506// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
503ENTRY(kvm_daccess_rights) 507ENTRY(kvm_daccess_rights)
504 KVM_REFLECT(23) 508 KVM_REFLECT(23)
505END(kvm_daccess_rights) 509END(kvm_daccess_rights)
506 510
507 .org kvm_ia64_ivt+0x5400 511 .org kvm_ia64_ivt+0x5400
508///////////////////////////////////////////////////////////////////// 512/////////////////////////////////////////////////////////////////////
509// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) 513// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
510ENTRY(kvm_general_exception) 514ENTRY(kvm_general_exception)
511 KVM_REFLECT(24) 515 KVM_REFLECT(24)
512 KVM_FAULT(24) 516 KVM_FAULT(24)
513END(kvm_general_exception) 517END(kvm_general_exception)
514 518
515 .org kvm_ia64_ivt+0x5500 519 .org kvm_ia64_ivt+0x5500
516////////////////////////////////////////////////////////////////////// 520//////////////////////////////////////////////////////////////////////
517// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) 521// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
518ENTRY(kvm_disabled_fp_reg) 522ENTRY(kvm_disabled_fp_reg)
519 KVM_REFLECT(25) 523 KVM_REFLECT(25)
520END(kvm_disabled_fp_reg) 524END(kvm_disabled_fp_reg)
521 525
522 .org kvm_ia64_ivt+0x5600 526 .org kvm_ia64_ivt+0x5600
523//////////////////////////////////////////////////////////////////// 527////////////////////////////////////////////////////////////////////
524// 0x5600 Entry 26 (size 16 bundles) NaT Consumption (11,23,37,50) 528// 0x5600 Entry 26 (size 16 bundles) NaT Consumption (11,23,37,50)
525ENTRY(kvm_nat_consumption) 529ENTRY(kvm_nat_consumption)
526 KVM_REFLECT(26) 530 KVM_REFLECT(26)
527END(kvm_nat_consumption) 531END(kvm_nat_consumption)
528 532
529 .org kvm_ia64_ivt+0x5700 533 .org kvm_ia64_ivt+0x5700
530///////////////////////////////////////////////////////////////////// 534/////////////////////////////////////////////////////////////////////
531// 0x5700 Entry 27 (size 16 bundles) Speculation (40) 535// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
532ENTRY(kvm_speculation_vector) 536ENTRY(kvm_speculation_vector)
533 KVM_REFLECT(27) 537 KVM_REFLECT(27)
534END(kvm_speculation_vector) 538END(kvm_speculation_vector)
535 539
536 .org kvm_ia64_ivt+0x5800 540 .org kvm_ia64_ivt+0x5800
537///////////////////////////////////////////////////////////////////// 541/////////////////////////////////////////////////////////////////////
538// 0x5800 Entry 28 (size 16 bundles) Reserved 542// 0x5800 Entry 28 (size 16 bundles) Reserved
539 KVM_FAULT(28) 543 KVM_FAULT(28)
540 544
541 .org kvm_ia64_ivt+0x5900 545 .org kvm_ia64_ivt+0x5900
542/////////////////////////////////////////////////////////////////// 546///////////////////////////////////////////////////////////////////
543// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) 547// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
544ENTRY(kvm_debug_vector) 548ENTRY(kvm_debug_vector)
545 KVM_FAULT(29) 549 KVM_FAULT(29)
546END(kvm_debug_vector) 550END(kvm_debug_vector)
547 551
548 .org kvm_ia64_ivt+0x5a00 552 .org kvm_ia64_ivt+0x5a00
549/////////////////////////////////////////////////////////////// 553///////////////////////////////////////////////////////////////
550// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) 554// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
551ENTRY(kvm_unaligned_access) 555ENTRY(kvm_unaligned_access)
552 KVM_REFLECT(30) 556 KVM_REFLECT(30)
553END(kvm_unaligned_access) 557END(kvm_unaligned_access)
554 558
555 .org kvm_ia64_ivt+0x5b00 559 .org kvm_ia64_ivt+0x5b00
556////////////////////////////////////////////////////////////////////// 560//////////////////////////////////////////////////////////////////////
557// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) 561// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
558ENTRY(kvm_unsupported_data_reference) 562ENTRY(kvm_unsupported_data_reference)
559 KVM_REFLECT(31) 563 KVM_REFLECT(31)
560END(kvm_unsupported_data_reference) 564END(kvm_unsupported_data_reference)
561 565
562 .org kvm_ia64_ivt+0x5c00 566 .org kvm_ia64_ivt+0x5c00
563//////////////////////////////////////////////////////////////////// 567////////////////////////////////////////////////////////////////////
564// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) 568// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
565ENTRY(kvm_floating_point_fault) 569ENTRY(kvm_floating_point_fault)
566 KVM_REFLECT(32) 570 KVM_REFLECT(32)
567END(kvm_floating_point_fault) 571END(kvm_floating_point_fault)
568 572
569 .org kvm_ia64_ivt+0x5d00 573 .org kvm_ia64_ivt+0x5d00
570///////////////////////////////////////////////////////////////////// 574/////////////////////////////////////////////////////////////////////
571// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) 575// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
572ENTRY(kvm_floating_point_trap) 576ENTRY(kvm_floating_point_trap)
573 KVM_REFLECT(33) 577 KVM_REFLECT(33)
574END(kvm_floating_point_trap) 578END(kvm_floating_point_trap)
575 579
576 .org kvm_ia64_ivt+0x5e00 580 .org kvm_ia64_ivt+0x5e00
577////////////////////////////////////////////////////////////////////// 581//////////////////////////////////////////////////////////////////////
578// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) 582// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
579ENTRY(kvm_lower_privilege_trap) 583ENTRY(kvm_lower_privilege_trap)
580 KVM_REFLECT(34) 584 KVM_REFLECT(34)
581END(kvm_lower_privilege_trap) 585END(kvm_lower_privilege_trap)
582 586
583 .org kvm_ia64_ivt+0x5f00 587 .org kvm_ia64_ivt+0x5f00
584////////////////////////////////////////////////////////////////////// 588//////////////////////////////////////////////////////////////////////
585// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) 589// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
586ENTRY(kvm_taken_branch_trap) 590ENTRY(kvm_taken_branch_trap)
587 KVM_REFLECT(35) 591 KVM_REFLECT(35)
588END(kvm_taken_branch_trap) 592END(kvm_taken_branch_trap)
589 593
590 .org kvm_ia64_ivt+0x6000 594 .org kvm_ia64_ivt+0x6000
591//////////////////////////////////////////////////////////////////// 595////////////////////////////////////////////////////////////////////
592// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) 596// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
593ENTRY(kvm_single_step_trap) 597ENTRY(kvm_single_step_trap)
594 KVM_REFLECT(36) 598 KVM_REFLECT(36)
595END(kvm_single_step_trap) 599END(kvm_single_step_trap)
596 .global kvm_virtualization_fault_back 600 .global kvm_virtualization_fault_back
597 .org kvm_ia64_ivt+0x6100 601 .org kvm_ia64_ivt+0x6100
598///////////////////////////////////////////////////////////////////// 602/////////////////////////////////////////////////////////////////////
599// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault 603// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
600ENTRY(kvm_virtualization_fault) 604ENTRY(kvm_virtualization_fault)
601 mov r31=pr 605 mov r31=pr
602 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 606 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
603 ;; 607 ;;
604 st8 [r16] = r1 608 st8 [r16] = r1
605 adds r17 = VMM_VCPU_GP_OFFSET, r21 609 adds r17 = VMM_VCPU_GP_OFFSET, r21
606 ;; 610 ;;
607 ld8 r1 = [r17] 611 ld8 r1 = [r17]
608 cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 612 cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
609 cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 613 cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
610 cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 614 cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
611 cmp.eq p9,p0=EVENT_RSM,r24 615 cmp.eq p9,p0=EVENT_RSM,r24
612 cmp.eq p10,p0=EVENT_SSM,r24 616 cmp.eq p10,p0=EVENT_SSM,r24
613 cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 617 cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
614 cmp.eq p12,p0=EVENT_THASH,r24 618 cmp.eq p12,p0=EVENT_THASH,r24
615 (p6) br.dptk.many kvm_asm_mov_from_ar 619(p6) br.dptk.many kvm_asm_mov_from_ar
616 (p7) br.dptk.many kvm_asm_mov_from_rr 620(p7) br.dptk.many kvm_asm_mov_from_rr
617 (p8) br.dptk.many kvm_asm_mov_to_rr 621(p8) br.dptk.many kvm_asm_mov_to_rr
618 (p9) br.dptk.many kvm_asm_rsm 622(p9) br.dptk.many kvm_asm_rsm
619 (p10) br.dptk.many kvm_asm_ssm 623(p10) br.dptk.many kvm_asm_ssm
620 (p11) br.dptk.many kvm_asm_mov_to_psr 624(p11) br.dptk.many kvm_asm_mov_to_psr
621 (p12) br.dptk.many kvm_asm_thash 625(p12) br.dptk.many kvm_asm_thash
622 ;; 626 ;;
623kvm_virtualization_fault_back: 627kvm_virtualization_fault_back:
624 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 628 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
625 ;; 629 ;;
626 ld8 r1 = [r16] 630 ld8 r1 = [r16]
627 ;; 631 ;;
628 mov r19=37 632 mov r19=37
629 adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 633 adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
630 adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 634 adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
631 ;; 635 ;;
632 st8 [r16] = r24 636 st8 [r16] = r24
633 st8 [r17] = r25 637 st8 [r17] = r25
634 ;; 638 ;;
635 cmp.ne p6,p0=EVENT_RFI, r24 639 cmp.ne p6,p0=EVENT_RFI, r24
636 (p6) br.sptk kvm_dispatch_virtualization_fault 640(p6) br.sptk kvm_dispatch_virtualization_fault
637 ;; 641 ;;
638 adds r18=VMM_VPD_BASE_OFFSET,r21 642 adds r18=VMM_VPD_BASE_OFFSET,r21
639 ;; 643 ;;
640 ld8 r18=[r18] 644 ld8 r18=[r18]
641 ;; 645 ;;
642 adds r18=VMM_VPD_VIFS_OFFSET,r18 646 adds r18=VMM_VPD_VIFS_OFFSET,r18
643 ;; 647 ;;
644 ld8 r18=[r18] 648 ld8 r18=[r18]
645 ;; 649 ;;
646 tbit.z p6,p0=r18,63 650 tbit.z p6,p0=r18,63
647 (p6) br.sptk kvm_dispatch_virtualization_fault 651(p6) br.sptk kvm_dispatch_virtualization_fault
648 ;; 652 ;;
649 //if vifs.v=1, discard the current register frame 653//if vifs.v=1, discard the current register frame
650 alloc r18=ar.pfs,0,0,0,0 654 alloc r18=ar.pfs,0,0,0,0
651 br.sptk kvm_dispatch_virtualization_fault 655 br.sptk kvm_dispatch_virtualization_fault
652END(kvm_virtualization_fault) 656END(kvm_virtualization_fault)
653 657
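In kvm_virtualization_fault above, the cause code arrives in r24 and is compared against one EVENT_* constant per predicate; each predicate guards a branch to the matching fast-path emulation stub, and any cause matching none of them falls through to kvm_virtualization_fault_back, where everything except EVENT_RFI is handed to the generic dispatcher. Roughly the following C, with placeholder EVENT_* values; the real encodings live in the ia64 KVM headers:

    enum vmm_event {            /* placeholder values, for shape only */
        EVENT_MOV_FROM_AR = 1,
        EVENT_MOV_FROM_RR,
        EVENT_MOV_TO_RR,
        EVENT_RSM,
        EVENT_SSM,
        EVENT_MOV_TO_PSR,
        EVENT_THASH,
    };

    static void dispatch_virtualization_fault(unsigned long cause /* r24 */)
    {
        switch (cause) {
        case EVENT_MOV_FROM_AR:  /* -> kvm_asm_mov_from_ar  */ break;
        case EVENT_MOV_FROM_RR:  /* -> kvm_asm_mov_from_rr  */ break;
        case EVENT_MOV_TO_RR:    /* -> kvm_asm_mov_to_rr    */ break;
        case EVENT_RSM:          /* -> kvm_asm_rsm          */ break;
        case EVENT_SSM:          /* -> kvm_asm_ssm          */ break;
        case EVENT_MOV_TO_PSR:   /* -> kvm_asm_mov_to_psr   */ break;
        case EVENT_THASH:        /* -> kvm_asm_thash        */ break;
        default: /* kvm_virtualization_fault_back, then slow path */ break;
        }
    }
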
654 .org kvm_ia64_ivt+0x6200 658 .org kvm_ia64_ivt+0x6200
655////////////////////////////////////////////////////////////// 659//////////////////////////////////////////////////////////////
656// 0x6200 Entry 38 (size 16 bundles) Reserved 660// 0x6200 Entry 38 (size 16 bundles) Reserved
657 KVM_FAULT(38) 661 KVM_FAULT(38)
658 662
659 .org kvm_ia64_ivt+0x6300 663 .org kvm_ia64_ivt+0x6300
660///////////////////////////////////////////////////////////////// 664/////////////////////////////////////////////////////////////////
661// 0x6300 Entry 39 (size 16 bundles) Reserved 665// 0x6300 Entry 39 (size 16 bundles) Reserved
662 KVM_FAULT(39) 666 KVM_FAULT(39)
663 667
664 .org kvm_ia64_ivt+0x6400 668 .org kvm_ia64_ivt+0x6400
665///////////////////////////////////////////////////////////////// 669/////////////////////////////////////////////////////////////////
666// 0x6400 Entry 40 (size 16 bundles) Reserved 670// 0x6400 Entry 40 (size 16 bundles) Reserved
667 KVM_FAULT(40) 671 KVM_FAULT(40)
668 672
669 .org kvm_ia64_ivt+0x6500 673 .org kvm_ia64_ivt+0x6500
670////////////////////////////////////////////////////////////////// 674//////////////////////////////////////////////////////////////////
671// 0x6500 Entry 41 (size 16 bundles) Reserved 675// 0x6500 Entry 41 (size 16 bundles) Reserved
672 KVM_FAULT(41) 676 KVM_FAULT(41)
673 677
674 .org kvm_ia64_ivt+0x6600 678 .org kvm_ia64_ivt+0x6600
675////////////////////////////////////////////////////////////////// 679//////////////////////////////////////////////////////////////////
676// 0x6600 Entry 42 (size 16 bundles) Reserved 680// 0x6600 Entry 42 (size 16 bundles) Reserved
677 KVM_FAULT(42) 681 KVM_FAULT(42)
678 682
679 .org kvm_ia64_ivt+0x6700 683 .org kvm_ia64_ivt+0x6700
680////////////////////////////////////////////////////////////////// 684//////////////////////////////////////////////////////////////////
681// 0x6700 Entry 43 (size 16 bundles) Reserved 685// 0x6700 Entry 43 (size 16 bundles) Reserved
682 KVM_FAULT(43) 686 KVM_FAULT(43)
683 687
684 .org kvm_ia64_ivt+0x6800 688 .org kvm_ia64_ivt+0x6800
685////////////////////////////////////////////////////////////////// 689//////////////////////////////////////////////////////////////////
686// 0x6800 Entry 44 (size 16 bundles) Reserved 690// 0x6800 Entry 44 (size 16 bundles) Reserved
687 KVM_FAULT(44) 691 KVM_FAULT(44)
688 692
689 .org kvm_ia64_ivt+0x6900 693 .org kvm_ia64_ivt+0x6900
690/////////////////////////////////////////////////////////////////// 694///////////////////////////////////////////////////////////////////
691// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception 695// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
692//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) 696//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
693ENTRY(kvm_ia32_exception) 697ENTRY(kvm_ia32_exception)
694 KVM_FAULT(45) 698 KVM_FAULT(45)
695END(kvm_ia32_exception) 699END(kvm_ia32_exception)
696 700
697 .org kvm_ia64_ivt+0x6a00 701 .org kvm_ia64_ivt+0x6a00
698//////////////////////////////////////////////////////////////////// 702////////////////////////////////////////////////////////////////////
699// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) 703// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
700ENTRY(kvm_ia32_intercept) 704ENTRY(kvm_ia32_intercept)
701 KVM_FAULT(47) 705 KVM_FAULT(47)
702END(kvm_ia32_intercept) 706END(kvm_ia32_intercept)
703 707
704 .org kvm_ia64_ivt+0x6c00 708 .org kvm_ia64_ivt+0x6c00
705///////////////////////////////////////////////////////////////////// 709/////////////////////////////////////////////////////////////////////
706// 0x6c00 Entry 48 (size 16 bundles) Reserved 710// 0x6c00 Entry 48 (size 16 bundles) Reserved
707 KVM_FAULT(48) 711 KVM_FAULT(48)
708 712
709 .org kvm_ia64_ivt+0x6d00 713 .org kvm_ia64_ivt+0x6d00
710////////////////////////////////////////////////////////////////////// 714//////////////////////////////////////////////////////////////////////
711// 0x6d00 Entry 49 (size 16 bundles) Reserved 715// 0x6d00 Entry 49 (size 16 bundles) Reserved
712 KVM_FAULT(49) 716 KVM_FAULT(49)
713 717
714 .org kvm_ia64_ivt+0x6e00 718 .org kvm_ia64_ivt+0x6e00
715////////////////////////////////////////////////////////////////////// 719//////////////////////////////////////////////////////////////////////
716// 0x6e00 Entry 50 (size 16 bundles) Reserved 720// 0x6e00 Entry 50 (size 16 bundles) Reserved
717 KVM_FAULT(50) 721 KVM_FAULT(50)
718 722
719 .org kvm_ia64_ivt+0x6f00 723 .org kvm_ia64_ivt+0x6f00
720///////////////////////////////////////////////////////////////////// 724/////////////////////////////////////////////////////////////////////
721// 0x6f00 Entry 51 (size 16 bundles) Reserved 725// 0x6f00 Entry 51 (size 16 bundles) Reserved
722 KVM_FAULT(52) 726 KVM_FAULT(52)
723 727
724 .org kvm_ia64_ivt+0x7100 728 .org kvm_ia64_ivt+0x7100
725//////////////////////////////////////////////////////////////////// 729////////////////////////////////////////////////////////////////////
726// 0x7100 Entry 53 (size 16 bundles) Reserved 730// 0x7100 Entry 53 (size 16 bundles) Reserved
727 KVM_FAULT(53) 731 KVM_FAULT(53)
728 732
729 .org kvm_ia64_ivt+0x7200 733 .org kvm_ia64_ivt+0x7200
730///////////////////////////////////////////////////////////////////// 734/////////////////////////////////////////////////////////////////////
731// 0x7200 Entry 54 (size 16 bundles) Reserved 735// 0x7200 Entry 54 (size 16 bundles) Reserved
732 KVM_FAULT(54) 736 KVM_FAULT(54)
733 737
734 .org kvm_ia64_ivt+0x7300 738 .org kvm_ia64_ivt+0x7300
735//////////////////////////////////////////////////////////////////// 739////////////////////////////////////////////////////////////////////
736// 0x7300 Entry 55 (size 16 bundles) Reserved 740// 0x7300 Entry 55 (size 16 bundles) Reserved
737 KVM_FAULT(55) 741 KVM_FAULT(55)
738 742
739 .org kvm_ia64_ivt+0x7400 743 .org kvm_ia64_ivt+0x7400
740//////////////////////////////////////////////////////////////////// 744////////////////////////////////////////////////////////////////////
741// 0x7400 Entry 56 (size 16 bundles) Reserved 745// 0x7400 Entry 56 (size 16 bundles) Reserved
742 KVM_FAULT(56) 746 KVM_FAULT(56)
743 747
744 .org kvm_ia64_ivt+0x7500 748 .org kvm_ia64_ivt+0x7500
745///////////////////////////////////////////////////////////////////// 749/////////////////////////////////////////////////////////////////////
746// 0x7500 Entry 57 (size 16 bundles) Reserved 750// 0x7500 Entry 57 (size 16 bundles) Reserved
747 KVM_FAULT(57) 751 KVM_FAULT(57)
748 752
749 .org kvm_ia64_ivt+0x7600 753 .org kvm_ia64_ivt+0x7600
750///////////////////////////////////////////////////////////////////// 754/////////////////////////////////////////////////////////////////////
751// 0x7600 Entry 58 (size 16 bundles) Reserved 755// 0x7600 Entry 58 (size 16 bundles) Reserved
752 KVM_FAULT(58) 756 KVM_FAULT(58)
753 757
754 .org kvm_ia64_ivt+0x7700 758 .org kvm_ia64_ivt+0x7700
755//////////////////////////////////////////////////////////////////// 759////////////////////////////////////////////////////////////////////
756// 0x7700 Entry 59 (size 16 bundles) Reserved 760// 0x7700 Entry 59 (size 16 bundles) Reserved
757 KVM_FAULT(59) 761 KVM_FAULT(59)
758 762
759 .org kvm_ia64_ivt+0x7800 763 .org kvm_ia64_ivt+0x7800
760//////////////////////////////////////////////////////////////////// 764////////////////////////////////////////////////////////////////////
761// 0x7800 Entry 60 (size 16 bundles) Reserved 765// 0x7800 Entry 60 (size 16 bundles) Reserved
762 KVM_FAULT(60) 766 KVM_FAULT(60)
763 767
764 .org kvm_ia64_ivt+0x7900 768 .org kvm_ia64_ivt+0x7900
765///////////////////////////////////////////////////////////////////// 769/////////////////////////////////////////////////////////////////////
766// 0x7900 Entry 61 (size 16 bundles) Reserved 770// 0x7900 Entry 61 (size 16 bundles) Reserved
767 KVM_FAULT(61) 771 KVM_FAULT(61)
768 772
769 .org kvm_ia64_ivt+0x7a00 773 .org kvm_ia64_ivt+0x7a00
770///////////////////////////////////////////////////////////////////// 774/////////////////////////////////////////////////////////////////////
771// 0x7a00 Entry 62 (size 16 bundles) Reserved 775// 0x7a00 Entry 62 (size 16 bundles) Reserved
772 KVM_FAULT(62) 776 KVM_FAULT(62)
773 777
774 .org kvm_ia64_ivt+0x7b00 778 .org kvm_ia64_ivt+0x7b00
775///////////////////////////////////////////////////////////////////// 779/////////////////////////////////////////////////////////////////////
776// 0x7b00 Entry 63 (size 16 bundles) Reserved 780// 0x7b00 Entry 63 (size 16 bundles) Reserved
777 KVM_FAULT(63) 781 KVM_FAULT(63)
778 782
779 .org kvm_ia64_ivt+0x7c00 783 .org kvm_ia64_ivt+0x7c00
780//////////////////////////////////////////////////////////////////// 784////////////////////////////////////////////////////////////////////
781// 0x7c00 Entry 64 (size 16 bundles) Reserved 785// 0x7c00 Entry 64 (size 16 bundles) Reserved
782 KVM_FAULT(64) 786 KVM_FAULT(64)
783 787
784 .org kvm_ia64_ivt+0x7d00 788 .org kvm_ia64_ivt+0x7d00
785///////////////////////////////////////////////////////////////////// 789/////////////////////////////////////////////////////////////////////
786// 0x7d00 Entry 65 (size 16 bundles) Reserved 790// 0x7d00 Entry 65 (size 16 bundles) Reserved
787 KVM_FAULT(65) 791 KVM_FAULT(65)
788 792
789 .org kvm_ia64_ivt+0x7e00 793 .org kvm_ia64_ivt+0x7e00
790///////////////////////////////////////////////////////////////////// 794/////////////////////////////////////////////////////////////////////
791// 0x7e00 Entry 66 (size 16 bundles) Reserved 795// 0x7e00 Entry 66 (size 16 bundles) Reserved
792 KVM_FAULT(66) 796 KVM_FAULT(66)
793 797
794 .org kvm_ia64_ivt+0x7f00 798 .org kvm_ia64_ivt+0x7f00
795//////////////////////////////////////////////////////////////////// 799////////////////////////////////////////////////////////////////////
796// 0x7f00 Entry 67 (size 16 bundles) Reserved 800// 0x7f00 Entry 67 (size 16 bundles) Reserved
797 KVM_FAULT(67) 801 KVM_FAULT(67)
798 802
799 .org kvm_ia64_ivt+0x8000 803 .org kvm_ia64_ivt+0x8000
800// There is no particular reason for this code to be here, other than that 804// There is no particular reason for this code to be here, other than that
@@ -804,132 +808,128 @@ END(kvm_ia32_intercept)
804 808
805 809
806ENTRY(kvm_dtlb_miss_dispatch) 810ENTRY(kvm_dtlb_miss_dispatch)
807 mov r19 = 2 811 mov r19 = 2
808 KVM_SAVE_MIN_WITH_COVER_R19 812 KVM_SAVE_MIN_WITH_COVER_R19
809 alloc r14=ar.pfs,0,0,3,0 813 alloc r14=ar.pfs,0,0,3,0
810 mov out0=cr.ifa 814 mov out0=cr.ifa
811 mov out1=r15 815 mov out1=r15
812 adds r3=8,r2 // set up second base pointer 816 adds r3=8,r2 // set up second base pointer
813 ;; 817 ;;
814 ssm psr.ic 818 ssm psr.ic
815 ;; 819 ;;
816 srlz.i // guarantee that interruption collection is on 820 srlz.i // guarantee that interruption collection is on
817 ;; 821 ;;
818 //(p15) ssm psr.i // restore psr.i 822 //(p15) ssm psr.i // restore psr.i
819 addl r14=@gprel(ia64_leave_hypervisor_prepare),gp 823 addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
820 ;; 824 ;;
821 KVM_SAVE_REST 825 KVM_SAVE_REST
822 KVM_SAVE_EXTRA 826 KVM_SAVE_EXTRA
823 mov rp=r14 827 mov rp=r14
824 ;; 828 ;;
825 adds out2=16,r12 829 adds out2=16,r12
826 br.call.sptk.many b6=kvm_page_fault 830 br.call.sptk.many b6=kvm_page_fault
827END(kvm_dtlb_miss_dispatch) 831END(kvm_dtlb_miss_dispatch)
828 832
829ENTRY(kvm_itlb_miss_dispatch) 833ENTRY(kvm_itlb_miss_dispatch)
830 834
831 KVM_SAVE_MIN_WITH_COVER_R19 835 KVM_SAVE_MIN_WITH_COVER_R19
832 alloc r14=ar.pfs,0,0,3,0 836 alloc r14=ar.pfs,0,0,3,0
833 mov out0=cr.ifa 837 mov out0=cr.ifa
834 mov out1=r15 838 mov out1=r15
835 adds r3=8,r2 // set up second base pointer 839 adds r3=8,r2 // set up second base pointer
836 ;; 840 ;;
837 ssm psr.ic 841 ssm psr.ic
838 ;; 842 ;;
839 srlz.i // guarantee that interruption collection is on 843 srlz.i // guarantee that interruption collection is on
840 ;; 844 ;;
841 //(p15) ssm psr.i // restore psr.i 845 //(p15) ssm psr.i // restore psr.i
842 addl r14=@gprel(ia64_leave_hypervisor),gp 846 addl r14=@gprel(ia64_leave_hypervisor),gp
843 ;; 847 ;;
844 KVM_SAVE_REST 848 KVM_SAVE_REST
845 mov rp=r14 849 mov rp=r14
846 ;; 850 ;;
847 adds out2=16,r12 851 adds out2=16,r12
848 br.call.sptk.many b6=kvm_page_fault 852 br.call.sptk.many b6=kvm_page_fault
849END(kvm_itlb_miss_dispatch) 853END(kvm_itlb_miss_dispatch)
850 854
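Both TLB-miss dispatchers above stage the same three arguments before the final branch: out0 = cr.ifa (the faulting address), out1 = r15, and out2 = sp+16, where the saved register frame begins. In C terms the callee has this shape; the parameter names are assumptions for illustration, not taken from the source:

    struct kvm_pt_regs;  /* frame laid out by KVM_SAVE_MIN/KVM_SAVE_REST */

    void kvm_page_fault(unsigned long vadr,        /* out0 = cr.ifa */
                        unsigned long vec,         /* out1 = r15    */
                        struct kvm_pt_regs *regs); /* out2 = sp+16  */
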
851ENTRY(kvm_dispatch_reflection) 855ENTRY(kvm_dispatch_reflection)
852 /* 856/*
853 * Input: 857 * Input:
854 * psr.ic: off 858 * psr.ic: off
855 * r19: intr type (offset into ivt, see ia64_int.h) 859 * r19: intr type (offset into ivt, see ia64_int.h)
856 * r31: contains saved predicates (pr) 860 * r31: contains saved predicates (pr)
857 */ 861 */
858 KVM_SAVE_MIN_WITH_COVER_R19 862 KVM_SAVE_MIN_WITH_COVER_R19
859 alloc r14=ar.pfs,0,0,5,0 863 alloc r14=ar.pfs,0,0,5,0
860 mov out0=cr.ifa 864 mov out0=cr.ifa
861 mov out1=cr.isr 865 mov out1=cr.isr
862 mov out2=cr.iim 866 mov out2=cr.iim
863 mov out3=r15 867 mov out3=r15
864 adds r3=8,r2 // set up second base pointer 868 adds r3=8,r2 // set up second base pointer
865 ;; 869 ;;
866 ssm psr.ic 870 ssm psr.ic
867 ;; 871 ;;
868 srlz.i // guarantee that interruption collection is on 872 srlz.i // guarantee that interruption collection is on
869 ;; 873 ;;
870 //(p15) ssm psr.i // restore psr.i 874 //(p15) ssm psr.i // restore psr.i
871 addl r14=@gprel(ia64_leave_hypervisor),gp 875 addl r14=@gprel(ia64_leave_hypervisor),gp
872 ;; 876 ;;
873 KVM_SAVE_REST 877 KVM_SAVE_REST
874 mov rp=r14 878 mov rp=r14
875 ;; 879 ;;
876 adds out4=16,r12 880 adds out4=16,r12
877 br.call.sptk.many b6=reflect_interruption 881 br.call.sptk.many b6=reflect_interruption
878END(kvm_dispatch_reflection) 882END(kvm_dispatch_reflection)
879 883
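The Input comment in kvm_dispatch_reflection documents its contract, and the five-output alloc forwards it to C as out0 = cr.ifa, out1 = cr.isr, out2 = cr.iim, out3 = r15, and out4 = the saved-frame pointer. A matching C signature, again with assumed parameter names (r15 presumably carries the interruption type noted for r19):

    struct kvm_pt_regs;

    void reflect_interruption(unsigned long ifa,   /* out0 = cr.ifa */
                              unsigned long isr,   /* out1 = cr.isr */
                              unsigned long iim,   /* out2 = cr.iim */
                              unsigned long vec,   /* out3 = r15    */
                              struct kvm_pt_regs *regs);  /* out4  */
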
880ENTRY(kvm_dispatch_virtualization_fault) 884ENTRY(kvm_dispatch_virtualization_fault)
881 adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 885 adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
882 adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 886 adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
883 ;; 887 ;;
884 st8 [r16] = r24 888 st8 [r16] = r24
885 st8 [r17] = r25 889 st8 [r17] = r25
886 ;; 890 ;;
887 KVM_SAVE_MIN_WITH_COVER_R19 891 KVM_SAVE_MIN_WITH_COVER_R19
888 ;; 892 ;;
889 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) 893 alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
890 mov out0=r13 //vcpu 894 mov out0=r13 //vcpu
891 adds r3=8,r2 // set up second base pointer 895 adds r3=8,r2 // set up second base pointer
892 ;; 896 ;;
893 ssm psr.ic 897 ssm psr.ic
894 ;; 898 ;;
895 srlz.i // guarantee that interruption collection is on 899 srlz.i // guarantee that interruption collection is on
896 ;; 900 ;;
897 //(p15) ssm psr.i // restore psr.i 901 //(p15) ssm psr.i // restore psr.i
898 addl r14=@gprel(ia64_leave_hypervisor_prepare),gp 902 addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
899 ;; 903 ;;
900 KVM_SAVE_REST 904 KVM_SAVE_REST
901 KVM_SAVE_EXTRA 905 KVM_SAVE_EXTRA
902 mov rp=r14 906 mov rp=r14
903 ;; 907 ;;
904 adds out1=16,sp //regs 908 adds out1=16,sp //regs
905 br.call.sptk.many b6=kvm_emulate 909 br.call.sptk.many b6=kvm_emulate
906END(kvm_dispatch_virtualization_fault) 910END(kvm_dispatch_virtualization_fault)
907 911
908 912
909ENTRY(kvm_dispatch_interrupt) 913ENTRY(kvm_dispatch_interrupt)
910 KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 914 KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
911 ;; 915 ;;
912 alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group 916 alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
913 //mov out0=cr.ivr // pass cr.ivr as first arg 917 adds r3=8,r2 // set up second base pointer for SAVE_REST
914 adds r3=8,r2 // set up second base pointer for SAVE_REST 918 ;;
915 ;; 919 ssm psr.ic
916 ssm psr.ic 920 ;;
917 ;; 921 srlz.i
918 srlz.i 922 ;;
919 ;; 923 //(p15) ssm psr.i
920 //(p15) ssm psr.i 924 addl r14=@gprel(ia64_leave_hypervisor),gp
921 addl r14=@gprel(ia64_leave_hypervisor),gp 925 ;;
922 ;; 926 KVM_SAVE_REST
923 KVM_SAVE_REST 927 mov rp=r14
924 mov rp=r14 928 ;;
925 ;; 929 mov out0=r13 // pass pointer to pt_regs as second arg
926 mov out0=r13 // pass pointer to pt_regs as second arg 930 br.call.sptk.many b6=kvm_ia64_handle_irq
927 br.call.sptk.many b6=kvm_ia64_handle_irq
928END(kvm_dispatch_interrupt) 931END(kvm_dispatch_interrupt)
929 932
930
931
932
933GLOBAL_ENTRY(ia64_leave_nested) 933GLOBAL_ENTRY(ia64_leave_nested)
934 rsm psr.i 934 rsm psr.i
935 ;; 935 ;;
@@ -1008,7 +1008,7 @@ GLOBAL_ENTRY(ia64_leave_nested)
1008 ;; 1008 ;;
1009 ldf.fill f11=[r2] 1009 ldf.fill f11=[r2]
1010// mov r18=r13 1010// mov r18=r13
1011// mov r21=r13 1011// mov r21=r13
1012 adds r16=PT(CR_IPSR)+16,r12 1012 adds r16=PT(CR_IPSR)+16,r12
1013 adds r17=PT(CR_IIP)+16,r12 1013 adds r17=PT(CR_IIP)+16,r12
1014 ;; 1014 ;;
@@ -1058,138 +1058,135 @@ GLOBAL_ENTRY(ia64_leave_nested)
1058 rfi 1058 rfi
1059END(ia64_leave_nested) 1059END(ia64_leave_nested)
1060 1060
1061
1062
1063GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) 1061GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
1064 /* 1062/*
1065 * work.need_resched etc. mustn't get changed 1063 * work.need_resched etc. mustn't get changed
1066 * by this CPU before it returns to 1064 * by this CPU before it returns to
1067 ;; 1065 * user- or fsys-mode, hence we disable interrupts early on:
1068 * user- or fsys-mode, hence we disable interrupts early on: 1066 */
1069 */ 1067 adds r2 = PT(R4)+16,r12
1070 adds r2 = PT(R4)+16,r12 1068 adds r3 = PT(R5)+16,r12
1071 adds r3 = PT(R5)+16,r12 1069 adds r8 = PT(EML_UNAT)+16,r12
1072 adds r8 = PT(EML_UNAT)+16,r12 1070 ;;
1073 ;; 1071 ld8 r8 = [r8]
1074 ld8 r8 = [r8] 1072 ;;
1075 ;; 1073 mov ar.unat=r8
1076 mov ar.unat=r8 1074 ;;
1077 ;; 1075 ld8.fill r4=[r2],16 //load r4
1078 ld8.fill r4=[r2],16 //load r4 1076 ld8.fill r5=[r3],16 //load r5
1079 ld8.fill r5=[r3],16 //load r5 1077 ;;
1080 ;; 1078 ld8.fill r6=[r2] //load r6
1081 ld8.fill r6=[r2] //load r6 1079 ld8.fill r7=[r3] //load r7
1082 ld8.fill r7=[r3] //load r7 1080 ;;
1083 ;;
1084END(ia64_leave_hypervisor_prepare) 1081END(ia64_leave_hypervisor_prepare)
1085//fall through 1082//fall through
1086GLOBAL_ENTRY(ia64_leave_hypervisor) 1083GLOBAL_ENTRY(ia64_leave_hypervisor)
1087 rsm psr.i 1084 rsm psr.i
1088 ;; 1085 ;;
1089 br.call.sptk.many b0=leave_hypervisor_tail 1086 br.call.sptk.many b0=leave_hypervisor_tail
1090 ;; 1087 ;;
1091 adds r20=PT(PR)+16,r12 1088 adds r20=PT(PR)+16,r12
1092 adds r8=PT(EML_UNAT)+16,r12 1089 adds r8=PT(EML_UNAT)+16,r12
1093 ;; 1090 ;;
1094 ld8 r8=[r8] 1091 ld8 r8=[r8]
1095 ;; 1092 ;;
1096 mov ar.unat=r8 1093 mov ar.unat=r8
1097 ;; 1094 ;;
1098 lfetch [r20],PT(CR_IPSR)-PT(PR) 1095 lfetch [r20],PT(CR_IPSR)-PT(PR)
1099 adds r2 = PT(B6)+16,r12 1096 adds r2 = PT(B6)+16,r12
1100 adds r3 = PT(B7)+16,r12 1097 adds r3 = PT(B7)+16,r12
1101 ;; 1098 ;;
1102 lfetch [r20] 1099 lfetch [r20]
1103 ;; 1100 ;;
1104 ld8 r24=[r2],16 /* B6 */ 1101 ld8 r24=[r2],16 /* B6 */
1105 ld8 r25=[r3],16 /* B7 */ 1102 ld8 r25=[r3],16 /* B7 */
1106 ;; 1103 ;;
1107 ld8 r26=[r2],16 /* ar_csd */ 1104 ld8 r26=[r2],16 /* ar_csd */
1108 ld8 r27=[r3],16 /* ar_ssd */ 1105 ld8 r27=[r3],16 /* ar_ssd */
1109 mov b6 = r24 1106 mov b6 = r24
1110 ;; 1107 ;;
1111 ld8.fill r8=[r2],16 1108 ld8.fill r8=[r2],16
1112 ld8.fill r9=[r3],16 1109 ld8.fill r9=[r3],16
1113 mov b7 = r25 1110 mov b7 = r25
1114 ;; 1111 ;;
1115 mov ar.csd = r26 1112 mov ar.csd = r26
1116 mov ar.ssd = r27 1113 mov ar.ssd = r27
1117 ;; 1114 ;;
1118 ld8.fill r10=[r2],PT(R15)-PT(R10) 1115 ld8.fill r10=[r2],PT(R15)-PT(R10)
1119 ld8.fill r11=[r3],PT(R14)-PT(R11) 1116 ld8.fill r11=[r3],PT(R14)-PT(R11)
1120 ;; 1117 ;;
1121 ld8.fill r15=[r2],PT(R16)-PT(R15) 1118 ld8.fill r15=[r2],PT(R16)-PT(R15)
1122 ld8.fill r14=[r3],PT(R17)-PT(R14) 1119 ld8.fill r14=[r3],PT(R17)-PT(R14)
1123 ;; 1120 ;;
1124 ld8.fill r16=[r2],16 1121 ld8.fill r16=[r2],16
1125 ld8.fill r17=[r3],16 1122 ld8.fill r17=[r3],16
1126 ;; 1123 ;;
1127 ld8.fill r18=[r2],16 1124 ld8.fill r18=[r2],16
1128 ld8.fill r19=[r3],16 1125 ld8.fill r19=[r3],16
1129 ;; 1126 ;;
1130 ld8.fill r20=[r2],16 1127 ld8.fill r20=[r2],16
1131 ld8.fill r21=[r3],16 1128 ld8.fill r21=[r3],16
1132 ;; 1129 ;;
1133 ld8.fill r22=[r2],16 1130 ld8.fill r22=[r2],16
1134 ld8.fill r23=[r3],16 1131 ld8.fill r23=[r3],16
1135 ;; 1132 ;;
1136 ld8.fill r24=[r2],16 1133 ld8.fill r24=[r2],16
1137 ld8.fill r25=[r3],16 1134 ld8.fill r25=[r3],16
1138 ;; 1135 ;;
1139 ld8.fill r26=[r2],16 1136 ld8.fill r26=[r2],16
1140 ld8.fill r27=[r3],16 1137 ld8.fill r27=[r3],16
1141 ;; 1138 ;;
1142 ld8.fill r28=[r2],16 1139 ld8.fill r28=[r2],16
1143 ld8.fill r29=[r3],16 1140 ld8.fill r29=[r3],16
1144 ;; 1141 ;;
1145 ld8.fill r30=[r2],PT(F6)-PT(R30) 1142 ld8.fill r30=[r2],PT(F6)-PT(R30)
1146 ld8.fill r31=[r3],PT(F7)-PT(R31) 1143 ld8.fill r31=[r3],PT(F7)-PT(R31)
1147 ;; 1144 ;;
1148 rsm psr.i | psr.ic 1145 rsm psr.i | psr.ic
1149 // initiate turning off of interrupt and interruption collection 1146 // initiate turning off of interrupt and interruption collection
1150 invala // invalidate ALAT 1147 invala // invalidate ALAT
1151 ;; 1148 ;;
1152 srlz.i // ensure interruption collection is off 1149 srlz.i // ensure interruption collection is off
1153 ;; 1150 ;;
1154 bsw.0 1151 bsw.0
1155 ;; 1152 ;;
1156 adds r16 = PT(CR_IPSR)+16,r12 1153 adds r16 = PT(CR_IPSR)+16,r12
1157 adds r17 = PT(CR_IIP)+16,r12 1154 adds r17 = PT(CR_IIP)+16,r12
1158 mov r21=r13 // get current 1155 mov r21=r13 // get current
1159 ;; 1156 ;;
1160 ld8 r31=[r16],16 // load cr.ipsr 1157 ld8 r31=[r16],16 // load cr.ipsr
1161 ld8 r30=[r17],16 // load cr.iip 1158 ld8 r30=[r17],16 // load cr.iip
1162 ;; 1159 ;;
1163 ld8 r29=[r16],16 // load cr.ifs 1160 ld8 r29=[r16],16 // load cr.ifs
1164 ld8 r28=[r17],16 // load ar.unat 1161 ld8 r28=[r17],16 // load ar.unat
1165 ;; 1162 ;;
1166 ld8 r27=[r16],16 // load ar.pfs 1163 ld8 r27=[r16],16 // load ar.pfs
1167 ld8 r26=[r17],16 // load ar.rsc 1164 ld8 r26=[r17],16 // load ar.rsc
1168 ;; 1165 ;;
1169 ld8 r25=[r16],16 // load ar.rnat 1166 ld8 r25=[r16],16 // load ar.rnat
1170 ld8 r24=[r17],16 // load ar.bspstore 1167 ld8 r24=[r17],16 // load ar.bspstore
1171 ;; 1168 ;;
1172 ld8 r23=[r16],16 // load predicates 1169 ld8 r23=[r16],16 // load predicates
1173 ld8 r22=[r17],16 // load b0 1170 ld8 r22=[r17],16 // load b0
1174 ;; 1171 ;;
1175 ld8 r20=[r16],16 // load ar.rsc value for "loadrs" 1172 ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
1176 ld8.fill r1=[r17],16 //load r1 1173 ld8.fill r1=[r17],16 //load r1
1177 ;; 1174 ;;
1178 ld8.fill r12=[r16],16 //load r12 1175 ld8.fill r12=[r16],16 //load r12
1179 ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 1176 ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
1180 ;; 1177 ;;
1181 ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr 1178 ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
1182 ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 1179 ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
1183 ;; 1180 ;;
1184 ld8.fill r3=[r16] //load r3 1181 ld8.fill r3=[r16] //load r3
1185 ld8 r18=[r17] //load ar_ccv 1182 ld8 r18=[r17] //load ar_ccv
1186 ;; 1183 ;;
1187 mov ar.fpsr=r19 1184 mov ar.fpsr=r19
1188 mov ar.ccv=r18 1185 mov ar.ccv=r18
1189 shr.u r18=r20,16 1186 shr.u r18=r20,16
1190 ;; 1187 ;;
1191kvm_rbs_switch: 1188kvm_rbs_switch:
1192 mov r19=96 1189 mov r19=96
1193 1190
1194kvm_dont_preserve_current_frame: 1191kvm_dont_preserve_current_frame:
1195/* 1192/*
@@ -1201,76 +1198,76 @@ kvm_dont_preserve_current_frame:
1201# define pReturn p7 1198# define pReturn p7
1202# define Nregs 14 1199# define Nregs 14
1203 1200
1204 alloc loc0=ar.pfs,2,Nregs-2,2,0 1201 alloc loc0=ar.pfs,2,Nregs-2,2,0
1205 shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) 1202 shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
1206 sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize 1203 sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
1207 ;; 1204 ;;
1208 mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" 1205 mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
1209 shladd in0=loc1,3,r19 1206 shladd in0=loc1,3,r19
1210 mov in1=0 1207 mov in1=0
1211 ;; 1208 ;;
1212 TEXT_ALIGN(32) 1209 TEXT_ALIGN(32)
1213kvm_rse_clear_invalid: 1210kvm_rse_clear_invalid:
1214 alloc loc0=ar.pfs,2,Nregs-2,2,0 1211 alloc loc0=ar.pfs,2,Nregs-2,2,0
1215 cmp.lt pRecurse,p0=Nregs*8,in0 1212 cmp.lt pRecurse,p0=Nregs*8,in0
1216 // if more than Nregs regs left to clear, (re)curse 1213 // if more than Nregs regs left to clear, (re)curse
1217 add out0=-Nregs*8,in0 1214 add out0=-Nregs*8,in0
1218 add out1=1,in1 // increment recursion count 1215 add out1=1,in1 // increment recursion count
1219 mov loc1=0 1216 mov loc1=0
1220 mov loc2=0 1217 mov loc2=0
1221 ;; 1218 ;;
1222 mov loc3=0 1219 mov loc3=0
1223 mov loc4=0 1220 mov loc4=0
1224 mov loc5=0 1221 mov loc5=0
1225 mov loc6=0 1222 mov loc6=0
1226 mov loc7=0 1223 mov loc7=0
1227(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid 1224(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
1228 ;; 1225 ;;
1229 mov loc8=0 1226 mov loc8=0
1230 mov loc9=0 1227 mov loc9=0
1231 cmp.ne pReturn,p0=r0,in1 1228 cmp.ne pReturn,p0=r0,in1
1232 // if recursion count != 0, we need to do a br.ret 1229 // if recursion count != 0, we need to do a br.ret
1233 mov loc10=0 1230 mov loc10=0
1234 mov loc11=0 1231 mov loc11=0
1235(pReturn) br.ret.dptk.many b0 1232(pReturn) br.ret.dptk.many b0
1236 1233
1237# undef pRecurse 1234# undef pRecurse
1238# undef pReturn 1235# undef pReturn
1239 1236
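The recursion above clears the invalidated part of the register backing store Nregs (14) stacked registers per level: in0 starts at rnat_slots*8 + (96 - dirtySize), each level allocates a fresh frame, zeroes its locals, and recurses while more than Nregs*8 bytes remain, then unwinds with one br.ret per level. The same arithmetic as a small standalone C program; a sketch whose units simply follow the assembly as shown:

    #include <stdio.h>

    #define NREGS 14  /* stacked registers cleared per recursion level */

    int main(void)
    {
        unsigned long dirty = 48;                      /* r18, from "loadrs" */
        unsigned long rnat_slots = dirty / (64 * 8);   /* loc1 = r18 >> 9    */
        unsigned long in0 = rnat_slots * 8 + (96 - dirty);  /* shladd        */
        unsigned long levels = 0;

        while (in0 > NREGS * 8) {   /* cmp.lt pRecurse,p0=Nregs*8,in0 */
            in0 -= NREGS * 8;       /* add out0=-Nregs*8,in0          */
            levels++;               /* add out1=1,in1                 */
        }
        printf("levels: %lu, residue: %lu bytes\n", levels, in0);
        return 0;
    }
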
1240// loadrs has already been shifted 1237// loadrs has already been shifted
1241 alloc r16=ar.pfs,0,0,0,0 // drop current register frame 1238 alloc r16=ar.pfs,0,0,0,0 // drop current register frame
1242 ;; 1239 ;;
1243 loadrs 1240 loadrs
1244 ;; 1241 ;;
1245 mov ar.bspstore=r24 1242 mov ar.bspstore=r24
1246 ;; 1243 ;;
1247 mov ar.unat=r28 1244 mov ar.unat=r28
1248 mov ar.rnat=r25 1245 mov ar.rnat=r25
1249 mov ar.rsc=r26 1246 mov ar.rsc=r26
1250 ;; 1247 ;;
1251 mov cr.ipsr=r31 1248 mov cr.ipsr=r31
1252 mov cr.iip=r30 1249 mov cr.iip=r30
1253 mov cr.ifs=r29 1250 mov cr.ifs=r29
1254 mov ar.pfs=r27 1251 mov ar.pfs=r27
1255 adds r18=VMM_VPD_BASE_OFFSET,r21 1252 adds r18=VMM_VPD_BASE_OFFSET,r21
1256 ;; 1253 ;;
1257 ld8 r18=[r18] //vpd 1254 ld8 r18=[r18] //vpd
1258 adds r17=VMM_VCPU_ISR_OFFSET,r21 1255 adds r17=VMM_VCPU_ISR_OFFSET,r21
1259 ;; 1256 ;;
1260 ld8 r17=[r17] 1257 ld8 r17=[r17]
1261 adds r19=VMM_VPD_VPSR_OFFSET,r18 1258 adds r19=VMM_VPD_VPSR_OFFSET,r18
1262 ;; 1259 ;;
1263 ld8 r19=[r19] //vpsr 1260 ld8 r19=[r19] //vpsr
1264 mov r25=r18 1261 mov r25=r18
1265 adds r16= VMM_VCPU_GP_OFFSET,r21 1262 adds r16= VMM_VCPU_GP_OFFSET,r21
1266 ;; 1263 ;;
1267 ld8 r16= [r16] // load gp (folded into r24 below) 1264 ld8 r16= [r16] // load gp (folded into r24 below)
1268 movl r24=@gprel(ia64_vmm_entry) // calculate return address 1265 movl r24=@gprel(ia64_vmm_entry) // calculate return address
1269 ;; 1266 ;;
1270 add r24=r24,r16 1267 add r24=r24,r16
1271 ;; 1268 ;;
1272 br.sptk.many kvm_vps_sync_write // call the service 1269 br.sptk.many kvm_vps_sync_write // call the service
1273 ;; 1270 ;;
1274END(ia64_leave_hypervisor) 1271END(ia64_leave_hypervisor)
1275// fall through 1272// fall through
1276GLOBAL_ENTRY(ia64_vmm_entry) 1273GLOBAL_ENTRY(ia64_vmm_entry)
@@ -1283,16 +1280,14 @@ GLOBAL_ENTRY(ia64_vmm_entry)
1283 * r22:b0 1280 * r22:b0
1284 * r23:predicate 1281 * r23:predicate
1285 */ 1282 */
1286 mov r24=r22 1283 mov r24=r22
1287 mov r25=r18 1284 mov r25=r18
1288 tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic 1285 tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
1289 (p1) br.cond.sptk.few kvm_vps_resume_normal 1286(p1) br.cond.sptk.few kvm_vps_resume_normal
1290 (p2) br.cond.sptk.many kvm_vps_resume_handler 1287(p2) br.cond.sptk.many kvm_vps_resume_handler
1291 ;; 1288 ;;
1292END(ia64_vmm_entry) 1289END(ia64_vmm_entry)
1293 1290
1294
1295
1296/* 1291/*
1297 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, 1292 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
1298 * u64 arg3, u64 arg4, u64 arg5, 1293 * u64 arg3, u64 arg4, u64 arg5,
@@ -1310,88 +1305,88 @@ psrsave = loc2
1310entry = loc3 1305entry = loc3
1311hostret = r24 1306hostret = r24
1312 1307
1313 alloc pfssave=ar.pfs,4,4,0,0 1308 alloc pfssave=ar.pfs,4,4,0,0
1314 mov rpsave=rp 1309 mov rpsave=rp
1315 adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 1310 adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
1316 ;; 1311 ;;
1317 ld8 entry=[entry] 1312 ld8 entry=[entry]
13181: mov hostret=ip 13131: mov hostret=ip
1319 mov r25=in1 // copy arguments 1314 mov r25=in1 // copy arguments
1320 mov r26=in2 1315 mov r26=in2
1321 mov r27=in3 1316 mov r27=in3
1322 mov psrsave=psr 1317 mov psrsave=psr
1323 ;; 1318 ;;
1324 tbit.nz p6,p0=psrsave,14 // IA64_PSR_I 1319 tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
1325 tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC 1320 tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
1326 ;; 1321 ;;
1327 add hostret=2f-1b,hostret // calculate return address 1322 add hostret=2f-1b,hostret // calculate return address
1328 add entry=entry,in0 1323 add entry=entry,in0
1329 ;; 1324 ;;
1330 rsm psr.i | psr.ic 1325 rsm psr.i | psr.ic
1331 ;; 1326 ;;
1332 srlz.i 1327 srlz.i
1333 mov b6=entry 1328 mov b6=entry
1334 br.cond.sptk b6 // call the service 1329 br.cond.sptk b6 // call the service
13352: 13302:
1336 // Architectural sequence for enabling interrupts if necessary 1331// Architectural sequence for enabling interrupts if necessary
1337(p7) ssm psr.ic 1332(p7) ssm psr.ic
1338 ;; 1333 ;;
1339(p7) srlz.i 1334(p7) srlz.i
1340 ;; 1335 ;;
1341//(p6) ssm psr.i 1336//(p6) ssm psr.i
1342 ;; 1337 ;;
1343 mov rp=rpsave 1338 mov rp=rpsave
1344 mov ar.pfs=pfssave 1339 mov ar.pfs=pfssave
1345 mov r8=r31 1340 mov r8=r31
1346 ;; 1341 ;;
1347 srlz.d 1342 srlz.d
1348 br.ret.sptk rp 1343 br.ret.sptk rp
1349 1344
1350END(ia64_call_vsa) 1345END(ia64_call_vsa)
1351 1346
1352#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) 1347#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
1353 1348
1354GLOBAL_ENTRY(vmm_reset_entry) 1349GLOBAL_ENTRY(vmm_reset_entry)
1355 //set up ipsr, iip, vpd.vpsr, dcr 1350 //set up ipsr, iip, vpd.vpsr, dcr
1356 // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 1351 // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
1357 // For DCR: all bits 0 1352 // For DCR: all bits 0
1358 bsw.0 1353 bsw.0
1359 ;; 1354 ;;
1360 mov r21 =r13 1355 mov r21 =r13
1361 adds r14=-VMM_PT_REGS_SIZE, r12 1356 adds r14=-VMM_PT_REGS_SIZE, r12
1362 ;; 1357 ;;
1363 movl r6=0x501008826000 // IPSR dt/rt/it:1; i/ic:1, si:1, vm/bn:1 1358 movl r6=0x501008826000 // IPSR dt/rt/it:1; i/ic:1, si:1, vm/bn:1
1364 movl r10=0x8000000000000000 1359 movl r10=0x8000000000000000
1365 adds r16=PT(CR_IIP), r14 1360 adds r16=PT(CR_IIP), r14
1366 adds r20=PT(R1), r14 1361 adds r20=PT(R1), r14
1367 ;; 1362 ;;
1368 rsm psr.ic | psr.i 1363 rsm psr.ic | psr.i
1369 ;; 1364 ;;
1370 srlz.i 1365 srlz.i
1371 ;; 1366 ;;
1372 mov ar.rsc = 0 1367 mov ar.rsc = 0
1373 ;; 1368 ;;
1374 flushrs 1369 flushrs
1375 ;; 1370 ;;
1376 mov ar.bspstore = 0 1371 mov ar.bspstore = 0
1377 // clear BSPSTORE 1372 // clear BSPSTORE
1378 ;; 1373 ;;
1379 mov cr.ipsr=r6 1374 mov cr.ipsr=r6
1380 mov cr.ifs=r10 1375 mov cr.ifs=r10
1381 ld8 r4 = [r16] // Set init iip for first run. 1376 ld8 r4 = [r16] // Set init iip for first run.
1382 ld8 r1 = [r20] 1377 ld8 r1 = [r20]
1383 ;; 1378 ;;
1384 mov cr.iip=r4 1379 mov cr.iip=r4
1385 adds r16=VMM_VPD_BASE_OFFSET,r13 1380 adds r16=VMM_VPD_BASE_OFFSET,r13
1386 ;; 1381 ;;
1387 ld8 r18=[r16] 1382 ld8 r18=[r16]
1388 ;; 1383 ;;
1389 adds r19=VMM_VPD_VPSR_OFFSET,r18 1384 adds r19=VMM_VPD_VPSR_OFFSET,r18
1390 ;; 1385 ;;
1391 ld8 r19=[r19] 1386 ld8 r19=[r19]
1392 mov r17=r0 1387 mov r17=r0
1393 mov r22=r0 1388 mov r22=r0
1394 mov r23=r0 1389 mov r23=r0
1395 br.cond.sptk ia64_vmm_entry 1390 br.cond.sptk ia64_vmm_entry
1396 br.ret.sptk b0 1391 br.ret.sptk b0
1397END(vmm_reset_entry) 1392END(vmm_reset_entry)
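The IPSR value loaded into r6 in vmm_reset_entry can be checked against the bit list in its comment. This standalone sketch assumes the standard Itanium PSR bit positions (ic=13, i=14, dt=17, si=23, rt=27, it=36, bn=44, vm=46) and confirms they reproduce 0x501008826000:

    #include <assert.h>

    int main(void)
    {
        unsigned long ipsr = (1UL << 13) | (1UL << 14)  /* ic, i */
                           | (1UL << 17)                /* dt    */
                           | (1UL << 23)                /* si    */
                           | (1UL << 27)                /* rt    */
                           | (1UL << 36)                /* it    */
                           | (1UL << 44)                /* bn    */
                           | (1UL << 46);               /* vm    */

        assert(ipsr == 0x501008826000UL);
        return 0;
    }
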
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index e22b93361e08..6b6307a3bd55 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -183,8 +183,8 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
183 u64 i, dirty_pages = 1; 183 u64 i, dirty_pages = 1;
184 u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; 184 u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
185 spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); 185 spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
186 void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE) 186 void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
187 + KVM_MEM_DIRTY_LOG_OFS; 187
188 dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; 188 dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
189 189
190 vmm_spin_lock(lock); 190 vmm_spin_lock(lock);
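The shift applied to dirty_pages above turns the mapping's page-size order ps into a host-page count: one page when ps <= PAGE_SHIFT, otherwise 2^(ps - PAGE_SHIFT). A standalone illustration, with a hypothetical 16KB host page (PAGE_SHIFT = 14):

    #include <stdio.h>

    #define PAGE_SHIFT 14  /* assumed host page size: 16KB */

    int main(void)
    {
        unsigned long ps = 24;  /* e.g. a 16MB guest mapping */
        unsigned long dirty_pages = 1;

        dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
        printf("%lu host pages marked dirty\n", dirty_pages);  /* 1024 */
        return 0;
    }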