author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
commit     66dcff86ba40eebb5133cccf450878f2bba102ef (patch)
tree       e7eb49ad9316989a529b00303d2dd2cffa61a7f5 /arch/ia64
parent     91ed9e8a32d9a76adc59c83f8b40024076cf8a02 (diff)
parent     2c4aa55a6af070262cca425745e8e54310e96b8d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM update from Paolo Bonzini:
"3.19 changes for KVM:
- spring cleaning: removed support for IA64, and for hardware-
assisted virtualization on the PPC970
- ARM, PPC, s390 all had only small fixes
For x86:
- small performance improvements (though only on weird guests)
- usual round of hardware-compliancy fixes from Nadav
- APICv fixes
- XSAVES support for hosts and guests. XSAVES hosts were broken
because the (non-KVM) XSAVES patches inadvertently changed the KVM
userspace ABI whenever XSAVES was enabled; hence, this part is
going to stable. Guest support is just a matter of exposing the
feature and CPUID leaves support"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (179 commits)
KVM: move APIC types to arch/x86/
KVM: PPC: Book3S: Enable in-kernel XICS emulation by default
KVM: PPC: Book3S HV: Improve H_CONFER implementation
KVM: PPC: Book3S HV: Fix endianness of instruction obtained from HEIR register
KVM: PPC: Book3S HV: Remove code for PPC970 processors
KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
KVM: PPC: Book3S HV: Simplify locking around stolen time calculations
arch: powerpc: kvm: book3s_paired_singles.c: Remove unused function
arch: powerpc: kvm: book3s_pr.c: Remove unused function
arch: powerpc: kvm: book3s.c: Remove some unused functions
arch: powerpc: kvm: book3s_32_mmu.c: Remove unused function
KVM: PPC: Book3S HV: Check wait conditions before sleeping in kvmppc_vcore_blocked
KVM: PPC: Book3S HV: ptes are big endian
KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
KVM: PPC: Book3S HV: Fix KSM memory corruption
KVM: PPC: Book3S HV: Fix an issue where guest is paused on receiving HMI
KVM: PPC: Book3S HV: Fix computation of tlbie operand
KVM: PPC: Book3S HV: Add missing HPTE unlock
KVM: PPC: BookE: Improve irq inject tracepoint
arm/arm64: KVM: Require in-kernel vgic for the arch timers
...
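
The XSAVES breakage called out in the pull message above is a userspace-ABI issue: tools such as QEMU snapshot and restore a vCPU's extended FPU state through the KVM_GET_XSAVE/KVM_SET_XSAVE ioctls and rely on the layout of the 4 KiB kvm_xsave region staying fixed, and the (non-KVM) XSAVES patches changed that layout whenever XSAVES was enabled. A minimal sketch of the userspace side (the helper names and vcpu_fd are illustrative, error handling elided):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Save a vCPU's extended FPU state; the 4 KiB region is an opaque
     * blob whose layout is kernel<->userspace ABI. */
    static int snapshot_xsave(int vcpu_fd, struct kvm_xsave *buf)
    {
            return ioctl(vcpu_fd, KVM_GET_XSAVE, buf);
    }

    /* Restore it, e.g. on the destination of a live migration; if the
     * kernel silently changes the blob's layout, this breaks. */
    static int restore_xsave(int vcpu_fd, const struct kvm_xsave *buf)
    {
            return ioctl(vcpu_fd, KVM_SET_XSAVE, buf);
    }
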
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                      3
-rw-r--r--  arch/ia64/Makefile                     1
-rw-r--r--  arch/ia64/include/asm/kvm_host.h     609
-rw-r--r--  arch/ia64/include/asm/pvclock-abi.h   48
-rw-r--r--  arch/ia64/include/uapi/asm/kvm.h     268
-rw-r--r--  arch/ia64/kvm/Kconfig                 66
-rw-r--r--  arch/ia64/kvm/Makefile                67
-rw-r--r--  arch/ia64/kvm/asm-offsets.c          241
-rw-r--r--  arch/ia64/kvm/irq.h                   33
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c            1942
-rw-r--r--  arch/ia64/kvm/kvm_fw.c               674
-rw-r--r--  arch/ia64/kvm/kvm_lib.c               21
-rw-r--r--  arch/ia64/kvm/kvm_minstate.h         266
-rw-r--r--  arch/ia64/kvm/lapic.h                 30
-rw-r--r--  arch/ia64/kvm/memcpy.S                 1
-rw-r--r--  arch/ia64/kvm/memset.S                 1
-rw-r--r--  arch/ia64/kvm/misc.h                  94
-rw-r--r--  arch/ia64/kvm/mmio.c                 336
-rw-r--r--  arch/ia64/kvm/optvfault.S           1090
-rw-r--r--  arch/ia64/kvm/process.c             1024
-rw-r--r--  arch/ia64/kvm/trampoline.S          1038
-rw-r--r--  arch/ia64/kvm/vcpu.c                2209
-rw-r--r--  arch/ia64/kvm/vcpu.h                 752
-rw-r--r--  arch/ia64/kvm/vmm.c                   99
-rw-r--r--  arch/ia64/kvm/vmm_ivt.S             1392
-rw-r--r--  arch/ia64/kvm/vti.h                  290
-rw-r--r--  arch/ia64/kvm/vtlb.c                 640
27 files changed, 0 insertions, 13235 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 536d13b0bea6..371b55bc5a6e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -20,7 +20,6 @@ config IA64
 	select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
 	select HAVE_FUNCTION_TRACER
 	select HAVE_DMA_ATTRS
-	select HAVE_KVM
 	select TTY
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
@@ -640,8 +639,6 @@ source "security/Kconfig"
 
 source "crypto/Kconfig"
 
-source "arch/ia64/kvm/Kconfig"
-
 source "lib/Kconfig"
 
 config IOMMU_HELPER
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 5441b14994fc..970d0bd99621 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -53,7 +53,6 @@ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
 core-$(CONFIG_IA64_SGI_UV)	+= arch/ia64/uv/
-core-$(CONFIG_KVM)		+= arch/ia64/kvm/
 
 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
deleted file mode 100644
index 4729752b7256..000000000000
--- a/arch/ia64/include/asm/kvm_host.h
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * kvm_host.h: used for kvm module, and hold ia64-specific sections.
- *
- * Copyright (C) 2007, Intel Corporation.
- *
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#ifndef __ASM_KVM_HOST_H
-#define __ASM_KVM_HOST_H
-
-#define KVM_USER_MEM_SLOTS 32
-
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
-
-/* define exit reasons from vmm to kvm*/
-#define EXIT_REASON_VM_PANIC		0
-#define EXIT_REASON_MMIO_INSTRUCTION	1
-#define EXIT_REASON_PAL_CALL		2
-#define EXIT_REASON_SAL_CALL		3
-#define EXIT_REASON_SWITCH_RR6		4
-#define EXIT_REASON_VM_DESTROY		5
-#define EXIT_REASON_EXTERNAL_INTERRUPT	6
-#define EXIT_REASON_IPI			7
-#define EXIT_REASON_PTC_G		8
-#define EXIT_REASON_DEBUG		20
-
-/*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE	(__IA64_UL_CONST(16)<<20)
-#define KVM_VMM_SHIFT	24
-#define KVM_VMM_BASE	0xD000000000000000
-#define VMM_SIZE	(__IA64_UL_CONST(8)<<20)
-
-/*
- * Define vm_buffer, used by PAL Services, base address.
- * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
- */
-#define KVM_VM_BUFFER_BASE	(KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE	(__IA64_UL_CONST(8)<<20)
-
-/*
- * kvm guest's data area looks as follow:
- *
- *          +----------------------+  -------  KVM_VM_DATA_SIZE
- *          |    vcpu[n]'s data    |   |    ___________________KVM_STK_OFFSET
- *          |                      |   |   /                  |
- *          |       ..........     |   |  /vcpu's struct&stack|
- *          |       ..........     |   | /---------------------|---- 0
- *          |    vcpu[5]'s data    |   |/        vpd          |
- *          |    vcpu[4]'s data    |   /-----------------------|
- *          |    vcpu[3]'s data    |  /|        vtlb          |
- *          |    vcpu[2]'s data    | / |------------------------|
- *          |    vcpu[1]'s data    |/  |        vhpt          |
- *          |    vcpu[0]'s data    |____________________________|
- *          +----------------------+   |
- *          |   memory dirty log   |   |
- *          +----------------------+   |
- *          |   vm's data struct   |   |
- *          +----------------------+   |
- *          |                      |   |
- *          |                      |   |
- *          |                      |   |
- *          |                      |   |
- *          |                      |   |
- *          |                      |   |
- *          |                      |   |
- *          |    vm's p2m table    |   |
- *          |                      |   |
- *          |                      |   |
- *          |                      |   |
- * vm's data->                     |   |
- *          +----------------------+  ------- 0
- * To support large memory, needs to increase the size of p2m.
- * To support more vcpus, needs to ensure it has enough space to
- * hold vcpus' data.
- */
-
-#define KVM_VM_DATA_SHIFT	26
-#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VM_DATA_SIZE)
-
-#define KVM_P2M_BASE		KVM_VM_DATA_BASE
-#define KVM_P2M_SIZE		(__IA64_UL_CONST(24) << 20)
-
-#define VHPT_SHIFT		16
-#define VHPT_SIZE		(__IA64_UL_CONST(1) << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES	(__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
-
-#define VTLB_SHIFT		16
-#define VTLB_SIZE		(__IA64_UL_CONST(1) << VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES	(1UL << (VHPT_SHIFT-5))
-
-#define VPD_SHIFT		16
-#define VPD_SIZE		(__IA64_UL_CONST(1) << VPD_SHIFT)
-
-#define VCPU_STRUCT_SHIFT	16
-#define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
-
-/*
- * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
- */
-#define KVM_STK_SHIFT		16
-#define KVM_STK_OFFSET		(__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
-
-#define KVM_VM_STRUCT_SHIFT	19
-#define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
-
-#define KVM_MEM_DIRY_LOG_SHIFT	19
-#define KVM_MEM_DIRTY_LOG_SIZE	(__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
-
-#ifndef __ASSEMBLY__
-
-/*Define the max vcpus and memory for Guests.*/
-#define KVM_MAX_VCPUS	(KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
-			KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
-#define KVM_MAX_MEM_SIZE	(KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
-
-#define VMM_LOG_LEN		256
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
-#include <linux/kvm_types.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/page.h>
-
-struct kvm_vcpu_data {
-	char vcpu_vhpt[VHPT_SIZE];
-	char vcpu_vtlb[VTLB_SIZE];
-	char vcpu_vpd[VPD_SIZE];
-	char vcpu_struct[VCPU_STRUCT_SIZE];
-};
-
-struct kvm_vm_data {
-	char kvm_p2m[KVM_P2M_SIZE];
-	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
-	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
-	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
-};
-
-#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, vcpu_data[n]))
-#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_vm_struct))
-#define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
-
-#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
-#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
-#define VPD_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
-#define VCPU_STRUCT_BASE(n)	(VCPU_BASE(n) + \
-				offsetof(struct kvm_vcpu_data, vcpu_struct))
-
-/*IO section definitions*/
-#define IOREQ_READ	1
-#define IOREQ_WRITE	0
-
-#define STATE_IOREQ_NONE	0
-#define STATE_IOREQ_READY	1
-#define STATE_IOREQ_INPROCESS	2
-#define STATE_IORESP_READY	3
-
-/*Guest Physical address layout.*/
-#define GPFN_MEM		(0UL << 60)	/* Guest pfn is normal mem */
-#define GPFN_FRAME_BUFFER	(1UL << 60)	/* VGA framebuffer */
-#define GPFN_LOW_MMIO		(2UL << 60)	/* Low MMIO range */
-#define GPFN_PIB		(3UL << 60)	/* PIB base */
-#define GPFN_IOSAPIC		(4UL << 60)	/* IOSAPIC base */
-#define GPFN_LEGACY_IO		(5UL << 60)	/* Legacy I/O base */
-#define GPFN_GFW		(6UL << 60)	/* Guest Firmware */
-#define GPFN_PHYS_MMIO		(7UL << 60)	/* Directed MMIO Range */
-
-#define GPFN_IO_MASK		(7UL << 60)	/* Guest pfn is I/O type */
-#define GPFN_INV_MASK		(1UL << 63)	/* Guest pfn is invalid */
-#define INVALID_MFN		(~0UL)
-#define MEM_G			(1UL << 30)
-#define MEM_M			(1UL << 20)
-#define MMIO_START		(3 * MEM_G)
-#define MMIO_SIZE		(512 * MEM_M)
-#define VGA_IO_START		0xA0000UL
-#define VGA_IO_SIZE		0x20000
-#define LEGACY_IO_START		(MMIO_START + MMIO_SIZE)
-#define LEGACY_IO_SIZE		(64 * MEM_M)
-#define IO_SAPIC_START		0xfec00000UL
-#define IO_SAPIC_SIZE		0x100000
-#define PIB_START		0xfee00000UL
-#define PIB_SIZE		0x200000
-#define GFW_START		(4 * MEM_G - 16 * MEM_M)
-#define GFW_SIZE		(16 * MEM_M)
-
-/*Deliver mode, defined for ioapic.c*/
-#define dest_Fixed	IOSAPIC_FIXED
-#define dest_LowestPrio	IOSAPIC_LOWEST_PRIORITY
-
-#define NMI_VECTOR			2
-#define ExtINT_VECTOR			0
-#define NULL_VECTOR			(-1)
-#define IA64_SPURIOUS_INT_VECTOR	0x0f
-
-#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
-
-/*
- *Delivery mode
- */
-#define SAPIC_DELIV_SHIFT	8
-#define SAPIC_FIXED		0x0
-#define SAPIC_LOWEST_PRIORITY	0x1
-#define SAPIC_PMI		0x2
-#define SAPIC_NMI		0x4
-#define SAPIC_INIT		0x5
-#define SAPIC_EXTINT		0x7
-
-/*
- * vcpu->requests bit members for arch
- */
-#define KVM_REQ_PTC_G		32
-#define KVM_REQ_RESUME		33
-
-struct kvm_mmio_req {
-	uint64_t addr;		/* physical address */
-	uint64_t size;		/* size in bytes */
-	uint64_t data;		/* data (or paddr of data) */
-	uint8_t state:4;
-	uint8_t dir:1;		/* 1=read, 0=write */
-};
-
-/*Pal data struct */
-struct kvm_pal_call{
-	/*In area*/
-	uint64_t gr28;
-	uint64_t gr29;
-	uint64_t gr30;
-	uint64_t gr31;
-	/*Out area*/
-	struct ia64_pal_retval ret;
-};
-
-/* Sal data structure */
-struct kvm_sal_call{
-	/*In area*/
-	uint64_t in0;
-	uint64_t in1;
-	uint64_t in2;
-	uint64_t in3;
-	uint64_t in4;
-	uint64_t in5;
-	uint64_t in6;
-	uint64_t in7;
-	struct sal_ret_values ret;
-};
-
-/*Guest change rr6*/
-struct kvm_switch_rr6 {
-	uint64_t old_rr;
-	uint64_t new_rr;
-};
-
-union ia64_ipi_a{
-	unsigned long val;
-	struct {
-		unsigned long rv  : 3;
-		unsigned long ir  : 1;
-		unsigned long eid : 8;
-		unsigned long id  : 8;
-		unsigned long ib_base : 44;
-	};
-};
-
-union ia64_ipi_d {
-	unsigned long val;
-	struct {
-		unsigned long vector : 8;
-		unsigned long dm  : 3;
-		unsigned long ig  : 53;
-	};
-};
-
-/*ipi check exit data*/
-struct kvm_ipi_data{
-	union ia64_ipi_a addr;
-	union ia64_ipi_d data;
-};
-
-/*global purge data*/
-struct kvm_ptc_g {
-	unsigned long vaddr;
-	unsigned long rr;
-	unsigned long ps;
-	struct kvm_vcpu *vcpu;
-};
-
-/*Exit control data */
-struct exit_ctl_data{
-	uint32_t exit_reason;
-	uint32_t vm_status;
-	union {
-		struct kvm_mmio_req	ioreq;
-		struct kvm_pal_call	pal_data;
-		struct kvm_sal_call	sal_data;
-		struct kvm_switch_rr6	rr_data;
-		struct kvm_ipi_data	ipi_data;
-		struct kvm_ptc_g	ptc_g_data;
-	} u;
-};
-
-union pte_flags {
-	unsigned long val;
-	struct {
-		unsigned long p    :  1; /*0      */
-		unsigned long      :  1; /* 1     */
-		unsigned long ma   :  3; /* 2-4   */
-		unsigned long a    :  1; /* 5     */
-		unsigned long d    :  1; /* 6     */
-		unsigned long pl   :  2; /* 7-8   */
-		unsigned long ar   :  3; /* 9-11  */
-		unsigned long ppn  : 38; /* 12-49 */
-		unsigned long      :  2; /* 50-51 */
-		unsigned long ed   :  1; /* 52    */
-	};
-};
-
-union ia64_pta {
-	unsigned long val;
-	struct {
-		unsigned long ve : 1;
-		unsigned long reserved0 : 1;
-		unsigned long size : 6;
-		unsigned long vf : 1;
-		unsigned long reserved1 : 6;
-		unsigned long base : 49;
-	};
-};
-
-struct thash_cb {
-	/* THASH base information */
-	struct thash_data *hash; /* hash table pointer */
-	union ia64_pta pta;
-	int num;
-};
-
-struct kvm_vcpu_stat {
-	u32 halt_wakeup;
-};
-
-struct kvm_vcpu_arch {
-	int launched;
-	int last_exit;
-	int last_run_cpu;
-	int vmm_tr_slot;
-	int vm_tr_slot;
-	int sn_rtc_tr_slot;
-
-#define KVM_MP_STATE_RUNNABLE		0
-#define KVM_MP_STATE_UNINITIALIZED	1
-#define KVM_MP_STATE_INIT_RECEIVED	2
-#define KVM_MP_STATE_HALTED		3
-	int mp_state;
-
-#define MAX_PTC_G_NUM			3
-	int ptc_g_count;
-	struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
-
-	/*halt timer to wake up sleepy vcpus*/
-	struct hrtimer hlt_timer;
-	long ht_active;
-
-	struct kvm_lapic *apic;	/* kernel irqchip context */
-	struct vpd *vpd;
-
-	/* Exit data for vmm_transition*/
-	struct exit_ctl_data exit_data;
-
-	cpumask_t cache_coherent_map;
-
-	unsigned long vmm_rr;
-	unsigned long host_rr6;
-	unsigned long psbits[8];
-	unsigned long cr_iipa;
-	unsigned long cr_isr;
-	unsigned long vsa_base;
-	unsigned long dirty_log_lock_pa;
-	unsigned long __gp;
-	/* TR and TC.  */
-	struct thash_data itrs[NITRS];
-	struct thash_data dtrs[NDTRS];
-	/* Bit is set if there is a tr/tc for the region.  */
-	unsigned char itr_regions;
-	unsigned char dtr_regions;
-	unsigned char tc_regions;
-	/* purge all */
-	unsigned long ptce_base;
-	unsigned long ptce_count[2];
-	unsigned long ptce_stride[2];
-	/* itc/itm */
-	unsigned long last_itc;
-	long itc_offset;
-	unsigned long itc_check;
-	unsigned long timer_check;
-	unsigned int timer_pending;
-	unsigned int timer_fired;
-
-	unsigned long vrr[8];
-	unsigned long ibr[8];
-	unsigned long dbr[8];
-	unsigned long insvc[4];		/* Interrupt in service. */
-	unsigned long xtp;
-
-	unsigned long metaphysical_rr0;	/* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_rr4;	/* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_saved_rr0; /* from kvm_arch */
-	unsigned long metaphysical_saved_rr4; /* from kvm_arch */
-	unsigned long fp_psr;		/*used for lazy float register */
-	unsigned long saved_gp;
-	/*for phycial  emulation */
-	int mode_flags;
-	struct thash_cb vtlb;
-	struct thash_cb vhpt;
-	char irq_check;
-	char irq_new_pending;
-
-	unsigned long opcode;
-	unsigned long cause;
-	char log_buf[VMM_LOG_LEN];
-	union context host;
-	union context guest;
-
-	char mmio_data[8];
-};
-
-struct kvm_vm_stat {
-	u64 remote_tlb_flush;
-};
-
-struct kvm_sal_data {
-	unsigned long boot_ip;
-	unsigned long boot_gp;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-struct kvm_arch {
-	spinlock_t dirty_log_lock;
-
-	unsigned long	vm_base;
-	unsigned long	metaphysical_rr0;
-	unsigned long	metaphysical_rr4;
-	unsigned long	vmm_init_rr;
-
-	int		is_sn2;
-
-	struct kvm_ioapic *vioapic;
-	struct kvm_vm_stat stat;
-	struct kvm_sal_data rdv_sal_data;
-
-	struct list_head assigned_dev_head;
-	struct iommu_domain *iommu_domain;
-	bool iommu_noncoherent;
-
-	unsigned long irq_sources_bitmap;
-	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
-};
-
-union cpuid3_t {
-	u64 value;
-	struct {
-		u64 number : 8;
-		u64 revision : 8;
-		u64 model : 8;
-		u64 family : 8;
-		u64 archrev : 8;
-		u64 rv : 24;
-	};
-};
-
-struct kvm_pt_regs {
-	/* The following registers are saved by SAVE_MIN: */
-	unsigned long b6;	/* scratch */
-	unsigned long b7;	/* scratch */
-
-	unsigned long ar_csd;	/* used by cmp8xchg16 (scratch) */
-	unsigned long ar_ssd;	/* reserved for future use (scratch) */
-
-	unsigned long r8;	/* scratch (return value register 0) */
-	unsigned long r9;	/* scratch (return value register 1) */
-	unsigned long r10;	/* scratch (return value register 2) */
-	unsigned long r11;	/* scratch (return value register 3) */
-
-	unsigned long cr_ipsr;	/* interrupted task's psr */
-	unsigned long cr_iip;	/* interrupted task's instruction pointer */
-	unsigned long cr_ifs;	/* interrupted task's function state */
-
-	unsigned long ar_unat;	/* interrupted task's NaT register (preserved) */
-	unsigned long ar_pfs;	/* prev function state */
-	unsigned long ar_rsc;	/* RSE configuration */
-	/* The following two are valid only if cr_ipsr.cpl > 0: */
-	unsigned long ar_rnat;	/* RSE NaT */
-	unsigned long ar_bspstore; /* RSE bspstore */
-
-	unsigned long pr;	/* 64 predicate registers (1 bit each) */
-	unsigned long b0;	/* return pointer (bp) */
-	unsigned long loadrs;	/* size of dirty partition << 16 */
-
-	unsigned long r1;	/* the gp pointer */
-	unsigned long r12;	/* interrupted task's memory stack pointer */
-	unsigned long r13;	/* thread pointer */
-
-	unsigned long ar_fpsr;	/* floating point status (preserved) */
-	unsigned long r15;	/* scratch */
-
-	/* The remaining registers are NOT saved for system calls. */
-	unsigned long r14;	/* scratch */
-	unsigned long r2;	/* scratch */
-	unsigned long r3;	/* scratch */
-	unsigned long r16;	/* scratch */
-	unsigned long r17;	/* scratch */
-	unsigned long r18;	/* scratch */
-	unsigned long r19;	/* scratch */
-	unsigned long r20;	/* scratch */
-	unsigned long r21;	/* scratch */
-	unsigned long r22;	/* scratch */
-	unsigned long r23;	/* scratch */
-	unsigned long r24;	/* scratch */
-	unsigned long r25;	/* scratch */
-	unsigned long r26;	/* scratch */
-	unsigned long r27;	/* scratch */
-	unsigned long r28;	/* scratch */
-	unsigned long r29;	/* scratch */
-	unsigned long r30;	/* scratch */
-	unsigned long r31;	/* scratch */
-	unsigned long ar_ccv;	/* compare/exchange value (scratch) */
-
-	/*
-	 * Floating point registers that the kernel considers scratch:
-	 */
-	struct ia64_fpreg f6;	/* scratch */
-	struct ia64_fpreg f7;	/* scratch */
-	struct ia64_fpreg f8;	/* scratch */
-	struct ia64_fpreg f9;	/* scratch */
-	struct ia64_fpreg f10;	/* scratch */
-	struct ia64_fpreg f11;	/* scratch */
-
-	unsigned long r4;	/* preserved */
-	unsigned long r5;	/* preserved */
-	unsigned long r6;	/* preserved */
-	unsigned long r7;	/* preserved */
-	unsigned long eml_unat;	/* used for emulating instruction */
-	unsigned long pad0;	/* alignment pad */
-};
-
-static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
-{
-	return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
-}
-
-typedef int kvm_vmm_entry(void);
-typedef void kvm_tramp_entry(union context *host, union context *guest);
-
-struct kvm_vmm_info{
-	struct module	*module;
-	kvm_vmm_entry	*vmm_entry;
-	kvm_tramp_entry *tramp_entry;
-	unsigned long	vmm_ivt;
-	unsigned long	patch_mov_ar;
-	unsigned long	patch_mov_ar_sn2;
-};
-
-int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_emulate_halt(struct kvm_vcpu *vcpu);
-int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-void kvm_sal_emul(struct kvm_vcpu *vcpu);
-
-#define __KVM_HAVE_ARCH_VM_ALLOC 1
-struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
-
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_free_memslot(struct kvm *kvm,
-		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
-static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
-		struct kvm_userspace_memory_region *mem,
-		const struct kvm_memory_slot *old,
-		enum kvm_mr_change change) {}
-static inline void kvm_arch_hardware_unsetup(void) {}
-
-#endif /* __ASSEMBLY__*/
-
-#endif
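
A worked check of the KVM_MAX_VCPUS arithmetic in the header just removed (illustrative, not code from the tree): the guest data block is 1 << 26 = 64 MiB, and each vcpu slot is four 64 KiB areas (vhpt, vtlb, vpd, vcpu struct) = 256 KiB, so the macro evaluates to 156 vcpus:

    #include <assert.h>

    int main(void)
    {
            unsigned long vm_data   = 1UL << 26;  /* KVM_VM_DATA_SIZE, 64 MiB */
            unsigned long p2m       = 24UL << 20; /* KVM_P2M_SIZE             */
            unsigned long vm_struct = 1UL << 19;  /* KVM_VM_STRUCT_SIZE       */
            unsigned long dirty_log = 1UL << 19;  /* KVM_MEM_DIRTY_LOG_SIZE   */
            unsigned long vcpu_data = 4UL << 16;  /* vhpt+vtlb+vpd+vcpu struct */

            /* (64M - 24M - 512K - 512K) / 256K == 156 vcpus */
            assert((vm_data - p2m - vm_struct - dirty_log) / vcpu_data == 156);
            return 0;
    }
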
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
deleted file mode 100644
index 42b233bedeb5..000000000000
--- a/arch/ia64/include/asm/pvclock-abi.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * same structure to x86's
- * Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic.
- * For now, define same duplicated definitions.
- */
-
-#ifndef _ASM_IA64__PVCLOCK_ABI_H
-#define _ASM_IA64__PVCLOCK_ABI_H
-#ifndef __ASSEMBLY__
-
-/*
- * These structs MUST NOT be changed.
- * They are the ABI between hypervisor and guest OS.
- * KVM is using this.
- *
- * pvclock_vcpu_time_info holds the system time and the tsc timestamp
- * of the last update. So the guest can use the tsc delta to get a
- * more precise system time. There is one per virtual cpu.
- *
- * pvclock_wall_clock references the point in time when the system
- * time was zero (usually boot time), thus the guest calculates the
- * current wall clock by adding the system time.
- *
- * Protocol for the "version" fields is: hypervisor raises it (making
- * it uneven) before it starts updating the fields and raises it again
- * (making it even) when it is done. Thus the guest can make sure the
- * time values it got are consistent by checking the version before
- * and after reading them.
- */
-
-struct pvclock_vcpu_time_info {
-	u32 version;
-	u32 pad0;
-	u64 tsc_timestamp;
-	u64 system_time;
-	u32 tsc_to_system_mul;
-	s8  tsc_shift;
-	u8  pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct pvclock_wall_clock {
-	u32 version;
-	u32 sec;
-	u32 nsec;
-} __attribute__((__packed__));
-
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_IA64__PVCLOCK_ABI_H */
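
The version protocol documented in the header comment above is worth making concrete. A sketch of the guest-side reader (illustrative; the real consumer lived in the x86 pvclock code, and rmb() stands in for whatever read barrier the architecture needs):

    /* Retry until we see a consistent snapshot: the hypervisor makes
     * 'version' odd before updating the fields and even when done. */
    static u64 pvclock_read_system_time(volatile struct pvclock_vcpu_time_info *ti)
    {
            u32 version;
            u64 time;

            do {
                    version = ti->version;
                    rmb();                  /* read fields only after version */
                    time = ti->system_time; /* real code also adds a scaled tsc delta */
                    rmb();                  /* re-check version only after fields */
            } while ((version & 1) || version != ti->version);

            return time;
    }
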
diff --git a/arch/ia64/include/uapi/asm/kvm.h b/arch/ia64/include/uapi/asm/kvm.h
deleted file mode 100644
index 99503c284400..000000000000
--- a/arch/ia64/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,268 +0,0 @@
-#ifndef __ASM_IA64_KVM_H
-#define __ASM_IA64_KVM_H
-
-/*
- * kvm structure definitions for ia64
- *
- * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/* Select x86 specific features in <linux/kvm.h> */
-#define __KVM_HAVE_IOAPIC
-#define __KVM_HAVE_IRQ_LINE
-
-/* Architectural interrupt line count. */
-#define KVM_NR_INTERRUPTS 256
-
-#define KVM_IOAPIC_NUM_PINS  48
-
-struct kvm_ioapic_state {
-	__u64 base_address;
-	__u32 ioregsel;
-	__u32 id;
-	__u32 irr;
-	__u32 pad;
-	union {
-		__u64 bits;
-		struct {
-			__u8 vector;
-			__u8 delivery_mode:3;
-			__u8 dest_mode:1;
-			__u8 delivery_status:1;
-			__u8 polarity:1;
-			__u8 remote_irr:1;
-			__u8 trig_mode:1;
-			__u8 mask:1;
-			__u8 reserve:7;
-			__u8 reserved[4];
-			__u8 dest_id;
-		} fields;
-	} redirtbl[KVM_IOAPIC_NUM_PINS];
-};
-
-#define KVM_IRQCHIP_PIC_MASTER	0
-#define KVM_IRQCHIP_PIC_SLAVE	1
-#define KVM_IRQCHIP_IOAPIC	2
-#define KVM_NR_IRQCHIPS		3
-
-#define KVM_CONTEXT_SIZE	8*1024
-
-struct kvm_fpreg {
-	union {
-		unsigned long bits[2];
-		long double __dummy;	/* force 16-byte alignment */
-	} u;
-};
-
-union context {
-	/* 8K size */
-	char	dummy[KVM_CONTEXT_SIZE];
-	struct {
-		unsigned long	psr;
-		unsigned long	pr;
-		unsigned long	caller_unat;
-		unsigned long	pad;
-		unsigned long	gr[32];
-		unsigned long	ar[128];
-		unsigned long	br[8];
-		unsigned long	cr[128];
-		unsigned long	rr[8];
-		unsigned long	ibr[8];
-		unsigned long	dbr[8];
-		unsigned long	pkr[8];
-		struct kvm_fpreg	fr[128];
-	};
-};
-
-struct thash_data {
-	union {
-		struct {
-			unsigned long p    :  1; /* 0 */
-			unsigned long rv1  :  1; /* 1 */
-			unsigned long ma   :  3; /* 2-4 */
-			unsigned long a    :  1; /* 5 */
-			unsigned long d    :  1; /* 6 */
-			unsigned long pl   :  2; /* 7-8 */
-			unsigned long ar   :  3; /* 9-11 */
-			unsigned long ppn  : 38; /* 12-49 */
-			unsigned long rv2  :  2; /* 50-51 */
-			unsigned long ed   :  1; /* 52 */
-			unsigned long ig1  : 11; /* 53-63 */
-		};
-		struct {
-			unsigned long __rv1 : 53;	/* 0-52 */
-			unsigned long contiguous : 1;	/*53 */
-			unsigned long tc : 1;		/* 54 TR or TC */
-			unsigned long cl : 1;
-			/* 55 I side or D side cache line */
-			unsigned long len : 4;		/* 56-59 */
-			unsigned long io : 1;	/* 60 entry is for io or not */
-			unsigned long nomap : 1;
-			/* 61 entry cann't be inserted into machine TLB.*/
-			unsigned long checked : 1;
-			/* 62 for VTLB/VHPT sanity check */
-			unsigned long invalid : 1;
-			/* 63 invalid entry */
-		};
-		unsigned long page_flags;
-	};			/* same for VHPT and TLB */
-
-	union {
-		struct {
-			unsigned long rv3 :  2;
-			unsigned long ps  :  6;
-			unsigned long key : 24;
-			unsigned long rv4 : 32;
-		};
-		unsigned long itir;
-	};
-	union {
-		struct {
-			unsigned long ig2 : 12;
-			unsigned long vpn : 49;
-			unsigned long vrn :  3;
-		};
-		unsigned long ifa;
-		unsigned long vadr;
-		struct {
-			unsigned long tag : 63;
-			unsigned long ti  :  1;
-		};
-		unsigned long etag;
-	};
-	union {
-		struct thash_data *next;
-		unsigned long rid;
-		unsigned long gpaddr;
-	};
-};
-
-#define NITRS	8
-#define NDTRS	8
-
-struct saved_vpd {
-	unsigned long vhpi;
-	unsigned long vgr[16];
-	unsigned long vbgr[16];
-	unsigned long vnat;
-	unsigned long vbnat;
-	unsigned long vcpuid[5];
-	unsigned long vpsr;
-	unsigned long vpr;
-	union {
-		unsigned long vcr[128];
-		struct {
-			unsigned long dcr;
-			unsigned long itm;
-			unsigned long iva;
-			unsigned long rsv1[5];
-			unsigned long pta;
-			unsigned long rsv2[7];
-			unsigned long ipsr;
-			unsigned long isr;
-			unsigned long rsv3;
-			unsigned long iip;
-			unsigned long ifa;
-			unsigned long itir;
-			unsigned long iipa;
-			unsigned long ifs;
-			unsigned long iim;
-			unsigned long iha;
-			unsigned long rsv4[38];
-			unsigned long lid;
-			unsigned long ivr;
-			unsigned long tpr;
-			unsigned long eoi;
-			unsigned long irr[4];
-			unsigned long itv;
-			unsigned long pmv;
-			unsigned long cmcv;
-			unsigned long rsv5[5];
-			unsigned long lrr0;
-			unsigned long lrr1;
-			unsigned long rsv6[46];
-		};
-	};
-};
-
-struct kvm_regs {
-	struct saved_vpd vpd;
-	/*Arch-regs*/
-	int mp_state;
-	unsigned long vmm_rr;
-	/* TR and TC.  */
-	struct thash_data itrs[NITRS];
-	struct thash_data dtrs[NDTRS];
-	/* Bit is set if there is a tr/tc for the region.  */
-	unsigned char itr_regions;
-	unsigned char dtr_regions;
-	unsigned char tc_regions;
-
-	char irq_check;
-	unsigned long saved_itc;
-	unsigned long itc_check;
-	unsigned long timer_check;
-	unsigned long timer_pending;
-	unsigned long last_itc;
-
-	unsigned long vrr[8];
-	unsigned long ibr[8];
-	unsigned long dbr[8];
-	unsigned long insvc[4];		/* Interrupt in service. */
-	unsigned long xtp;
-
-	unsigned long metaphysical_rr0;	/* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_rr4;	/* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_saved_rr0; /* from kvm_arch */
-	unsigned long metaphysical_saved_rr4; /* from kvm_arch */
-	unsigned long fp_psr;		/*used for lazy float register */
-	unsigned long saved_gp;
-	/*for phycial  emulation */
-
-	union context saved_guest;
-
-	unsigned long reserved[64];	/* for future use */
-};
-
-struct kvm_sregs {
-};
-
-struct kvm_fpu {
-};
-
-#define KVM_IA64_VCPU_STACK_SHIFT	16
-#define KVM_IA64_VCPU_STACK_SIZE	(1UL << KVM_IA64_VCPU_STACK_SHIFT)
-
-struct kvm_ia64_vcpu_stack {
-	unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
-};
-
-struct kvm_debug_exit_arch {
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
-};
-
-/* definition of registers in kvm_run */
-struct kvm_sync_regs {
-};
-
-#endif
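
Since C bitfield layout is implementation-defined, code that must parse a raw ia64 PTE portably would extract the fields by hand. A sketch mirroring the bit positions documented in the thash_data/pte_flags layouts above (the macro names are illustrative, not from the tree):

    #define PTE_P(pte)    (((pte) >> 0)  & 0x1)               /* present        */
    #define PTE_MA(pte)   (((pte) >> 2)  & 0x7)               /* memory attr    */
    #define PTE_A(pte)    (((pte) >> 5)  & 0x1)               /* accessed       */
    #define PTE_D(pte)    (((pte) >> 6)  & 0x1)               /* dirty          */
    #define PTE_PL(pte)   (((pte) >> 7)  & 0x3)               /* privilege lvl  */
    #define PTE_AR(pte)   (((pte) >> 9)  & 0x7)               /* access rights  */
    #define PTE_PPN(pte)  (((pte) >> 12) & ((1UL << 38) - 1)) /* phys page num  */
    #define PTE_ED(pte)   (((pte) >> 52) & 0x1)               /* exc. deferral  */
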
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
deleted file mode 100644
index 3d50ea955c4c..000000000000
--- a/arch/ia64/kvm/Kconfig
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# KVM configuration
-#
-
-source "virt/kvm/Kconfig"
-
-menuconfig VIRTUALIZATION
-	bool "Virtualization"
-	depends on HAVE_KVM || IA64
-	default y
-	---help---
-	  Say Y here to get to see options for using your Linux host to run other
-	  operating systems inside virtual machines (guests).
-	  This option alone does not add any kernel code.
-
-	  If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support"
-	depends on BROKEN
-	depends on HAVE_KVM && MODULES
-	depends on BROKEN
-	select PREEMPT_NOTIFIERS
-	select ANON_INODES
-	select HAVE_KVM_IRQCHIP
-	select HAVE_KVM_IRQFD
-	select HAVE_KVM_IRQ_ROUTING
-	select KVM_APIC_ARCHITECTURE
-	select KVM_MMIO
-	---help---
-	  Support hosting fully virtualized guest machines using hardware
-	  virtualization extensions.  You will need a fairly recent
-	  processor equipped with virtualization extensions. You will also
-	  need to select one or more of the processor modules below.
-
-	  This module provides access to the hardware capabilities through
-	  a character device node named /dev/kvm.
-
-	  To compile this as a module, choose M here: the module
-	  will be called kvm.
-
-	  If unsure, say N.
-
-config KVM_INTEL
-	tristate "KVM for Intel Itanium 2 processors support"
-	depends on KVM && m
-	---help---
-	  Provides support for KVM on Itanium 2 processors equipped with the VT
-	  extensions.
-
-config KVM_DEVICE_ASSIGNMENT
-	bool "KVM legacy PCI device assignment support"
-	depends on KVM && PCI && IOMMU_API
-	default y
-	---help---
-	  Provide support for legacy PCI device assignment through KVM.  The
-	  kernel now also supports a full featured userspace device driver
-	  framework through VFIO, which supersedes much of this support.
-
-	  If unsure, say Y.
-
-source drivers/vhost/Kconfig
-
-endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
deleted file mode 100644
index 18e45ec49bbf..000000000000
--- a/arch/ia64/kvm/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-#This Make file is to generate asm-offsets.h and build source.
-#
-
-#Generate asm-offsets.h for vmm module build
-offsets-file := asm-offsets.h
-
-always  := $(offsets-file)
-targets := $(offsets-file)
-targets += arch/ia64/kvm/asm-offsets.s
-
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
-	"/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN     $@
-define cmd_offsets
-	(set -e; \
-	 echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
-	 echo "#define __ASM_KVM_OFFSETS_H__"; \
-	 echo "/*"; \
-	 echo " * DO NOT MODIFY."; \
-	 echo " *"; \
-	 echo " * This file was generated by Makefile"; \
-	 echo " *"; \
-	 echo " */"; \
-	 echo ""; \
-	 sed -ne $(sed-y) $<; \
-	 echo ""; \
-	 echo "#endif" ) > $@
-endef
-
-# We use internal rules to avoid the "is up to date" message from make
-arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
-			$(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
-			$(wildcard $(srctree)/include/linux/*.h)
-	$(call if_changed_dep,cc_s_c)
-
-$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
-	$(call cmd,offsets)
-
-FORCE : $(obj)/$(offsets-file)
-
-#
-# Makefile for Kernel-based Virtual Machine module
-#
-
-ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-KVM := ../../../virt/kvm
-
-common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
-		$(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
-
-ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
-common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
-endif
-
-kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
-obj-$(CONFIG_KVM) += kvm.o
-
-CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
-kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
-	vtlb.o process.o kvm_lib.o
-#Add link memcpy and memset to avoid possible structure assignment error
-kvm-intel-objs += memcpy.o memset.o
-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
deleted file mode 100644
index 9324c875caf5..000000000000
--- a/arch/ia64/kvm/asm-offsets.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * asm-offsets.c Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- *
- * Anthony Xu    <anthony.xu@intel.com>
- * Xiantao Zhang <xiantao.zhang@intel.com>
- * Copyright (c) 2007 Intel Corporation  KVM support.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kbuild.h>
-
-#include "vcpu.h"
-
-void foo(void)
-{
-	DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
-	DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
-
-	BLANK();
-
-	DEFINE(VMM_VCPU_META_RR0_OFFSET,
-			offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
-	DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
-			offsetof(struct kvm_vcpu,
-				arch.metaphysical_saved_rr0));
-	DEFINE(VMM_VCPU_VRR0_OFFSET,
-			offsetof(struct kvm_vcpu, arch.vrr[0]));
-	DEFINE(VMM_VPD_IRR0_OFFSET,
-			offsetof(struct vpd, irr[0]));
-	DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
-			offsetof(struct kvm_vcpu, arch.itc_check));
-	DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
-			offsetof(struct kvm_vcpu, arch.irq_check));
-	DEFINE(VMM_VPD_VHPI_OFFSET,
-			offsetof(struct vpd, vhpi));
-	DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
-			offsetof(struct kvm_vcpu, arch.vsa_base));
-	DEFINE(VMM_VCPU_VPD_OFFSET,
-			offsetof(struct kvm_vcpu, arch.vpd));
-	DEFINE(VMM_VCPU_IRQ_CHECK,
-			offsetof(struct kvm_vcpu, arch.irq_check));
-	DEFINE(VMM_VCPU_TIMER_PENDING,
-			offsetof(struct kvm_vcpu, arch.timer_pending));
-	DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
-			offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
-	DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
-			offsetof(struct kvm_vcpu, arch.mode_flags));
-	DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
-			offsetof(struct kvm_vcpu, arch.itc_offset));
-	DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
-			offsetof(struct kvm_vcpu, arch.last_itc));
-	DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
-			offsetof(struct kvm_vcpu, arch.saved_gp));
-
-	BLANK();
-
-	DEFINE(VMM_PT_REGS_B6_OFFSET,
-			offsetof(struct kvm_pt_regs, b6));
-	DEFINE(VMM_PT_REGS_B7_OFFSET,
-			offsetof(struct kvm_pt_regs, b7));
-	DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_csd));
-	DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_ssd));
-	DEFINE(VMM_PT_REGS_R8_OFFSET,
-			offsetof(struct kvm_pt_regs, r8));
-	DEFINE(VMM_PT_REGS_R9_OFFSET,
-			offsetof(struct kvm_pt_regs, r9));
-	DEFINE(VMM_PT_REGS_R10_OFFSET,
-			offsetof(struct kvm_pt_regs, r10));
-	DEFINE(VMM_PT_REGS_R11_OFFSET,
-			offsetof(struct kvm_pt_regs, r11));
-	DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
-			offsetof(struct kvm_pt_regs, cr_ipsr));
-	DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
-			offsetof(struct kvm_pt_regs, cr_iip));
-	DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
-			offsetof(struct kvm_pt_regs, cr_ifs));
-	DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_unat));
-	DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_pfs));
-	DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_rsc));
-	DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_rnat));
-
-	DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_bspstore));
-	DEFINE(VMM_PT_REGS_PR_OFFSET,
-			offsetof(struct kvm_pt_regs, pr));
-	DEFINE(VMM_PT_REGS_B0_OFFSET,
-			offsetof(struct kvm_pt_regs, b0));
-	DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
-			offsetof(struct kvm_pt_regs, loadrs));
-	DEFINE(VMM_PT_REGS_R1_OFFSET,
-			offsetof(struct kvm_pt_regs, r1));
-	DEFINE(VMM_PT_REGS_R12_OFFSET,
-			offsetof(struct kvm_pt_regs, r12));
-	DEFINE(VMM_PT_REGS_R13_OFFSET,
-			offsetof(struct kvm_pt_regs, r13));
-	DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_fpsr));
-	DEFINE(VMM_PT_REGS_R15_OFFSET,
-			offsetof(struct kvm_pt_regs, r15));
-	DEFINE(VMM_PT_REGS_R14_OFFSET,
-			offsetof(struct kvm_pt_regs, r14));
-	DEFINE(VMM_PT_REGS_R2_OFFSET,
-			offsetof(struct kvm_pt_regs, r2));
-	DEFINE(VMM_PT_REGS_R3_OFFSET,
-			offsetof(struct kvm_pt_regs, r3));
-	DEFINE(VMM_PT_REGS_R16_OFFSET,
-			offsetof(struct kvm_pt_regs, r16));
-	DEFINE(VMM_PT_REGS_R17_OFFSET,
-			offsetof(struct kvm_pt_regs, r17));
-	DEFINE(VMM_PT_REGS_R18_OFFSET,
-			offsetof(struct kvm_pt_regs, r18));
-	DEFINE(VMM_PT_REGS_R19_OFFSET,
-			offsetof(struct kvm_pt_regs, r19));
-	DEFINE(VMM_PT_REGS_R20_OFFSET,
-			offsetof(struct kvm_pt_regs, r20));
-	DEFINE(VMM_PT_REGS_R21_OFFSET,
-			offsetof(struct kvm_pt_regs, r21));
-	DEFINE(VMM_PT_REGS_R22_OFFSET,
-			offsetof(struct kvm_pt_regs, r22));
-	DEFINE(VMM_PT_REGS_R23_OFFSET,
-			offsetof(struct kvm_pt_regs, r23));
-	DEFINE(VMM_PT_REGS_R24_OFFSET,
-			offsetof(struct kvm_pt_regs, r24));
-	DEFINE(VMM_PT_REGS_R25_OFFSET,
-			offsetof(struct kvm_pt_regs, r25));
-	DEFINE(VMM_PT_REGS_R26_OFFSET,
-			offsetof(struct kvm_pt_regs, r26));
-	DEFINE(VMM_PT_REGS_R27_OFFSET,
-			offsetof(struct kvm_pt_regs, r27));
-	DEFINE(VMM_PT_REGS_R28_OFFSET,
-			offsetof(struct kvm_pt_regs, r28));
-	DEFINE(VMM_PT_REGS_R29_OFFSET,
-			offsetof(struct kvm_pt_regs, r29));
-	DEFINE(VMM_PT_REGS_R30_OFFSET,
-			offsetof(struct kvm_pt_regs, r30));
-	DEFINE(VMM_PT_REGS_R31_OFFSET,
-			offsetof(struct kvm_pt_regs, r31));
-	DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
-			offsetof(struct kvm_pt_regs, ar_ccv));
-	DEFINE(VMM_PT_REGS_F6_OFFSET,
-			offsetof(struct kvm_pt_regs, f6));
-	DEFINE(VMM_PT_REGS_F7_OFFSET,
-			offsetof(struct kvm_pt_regs, f7));
-	DEFINE(VMM_PT_REGS_F8_OFFSET,
-			offsetof(struct kvm_pt_regs, f8));
-	DEFINE(VMM_PT_REGS_F9_OFFSET,
-			offsetof(struct kvm_pt_regs, f9));
-	DEFINE(VMM_PT_REGS_F10_OFFSET,
-			offsetof(struct kvm_pt_regs, f10));
-	DEFINE(VMM_PT_REGS_F11_OFFSET,
-			offsetof(struct kvm_pt_regs, f11));
-	DEFINE(VMM_PT_REGS_R4_OFFSET,
-			offsetof(struct kvm_pt_regs, r4));
-	DEFINE(VMM_PT_REGS_R5_OFFSET,
-			offsetof(struct kvm_pt_regs, r5));
-	DEFINE(VMM_PT_REGS_R6_OFFSET,
-			offsetof(struct kvm_pt_regs, r6));
-	DEFINE(VMM_PT_REGS_R7_OFFSET,
-			offsetof(struct kvm_pt_regs, r7));
-	DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
-			offsetof(struct kvm_pt_regs, eml_unat));
-	DEFINE(VMM_VCPU_IIPA_OFFSET,
-			offsetof(struct kvm_vcpu, arch.cr_iipa));
-	DEFINE(VMM_VCPU_OPCODE_OFFSET,
-			offsetof(struct kvm_vcpu, arch.opcode));
-	DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
-	DEFINE(VMM_VCPU_ISR_OFFSET,
-			offsetof(struct kvm_vcpu, arch.cr_isr));
-	DEFINE(VMM_PT_REGS_R16_SLOT,
-			(((offsetof(struct kvm_pt_regs, r16)
-			- sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
-	DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
-			offsetof(struct kvm_vcpu, arch.mode_flags));
-	DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
-	BLANK();
-
-	DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
-	DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
-	DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
-			offsetof(struct kvm_vcpu, arch.insvc[0]));
-	DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
-	DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
-
-	DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
-	DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
-	DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
-	DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
-	DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
-	DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
-	DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
-	DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
-	DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
-	DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
-	DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
-	DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
-	DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
-	DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
-	DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
-	DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
-	DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
-	DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
-	DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
-	DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
-	DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
-	DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
-	DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
-	DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
-	DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
-	DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
-	DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
-	DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
-	DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
-	DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
-	DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
-	DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
-	DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
-	DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
-	BLANK();
-}
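
The DEFINE()/BLANK() calls above come from <linux/kbuild.h> and only work together with the sed-y rule in the deleted Makefile: asm-offsets.c is compiled to assembly, never linked, and each DEFINE() plants a marker line in the .s output that sed rewrites into a #define. A sketch of the mechanism (the kbuild macros are quoted from memory, and the concrete offset value below is made up for illustration):

    /* Emit "->SYM $value comment" into the generated assembly, using an
     * "i" (immediate) constraint so the compiler evaluates the
     * offsetof()/sizeof() expression at compile time. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define BLANK() asm volatile("\n->" : : )

    /* Compiling asm-offsets.c with -S then leaves lines such as
     *     ->VMM_PT_REGS_R8_OFFSET $32 offsetof(struct kvm_pt_regs, r8)
     * in asm-offsets.s, which the Makefile's sed-y regexp rewrites as
     *     #define VMM_PT_REGS_R8_OFFSET 32
     * (with the original expression kept as a trailing comment) in the
     * generated asm-offsets.h consumed by the VMM assembly sources. */
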
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
deleted file mode 100644
index c0785a728271..000000000000
--- a/arch/ia64/kvm/irq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * irq.h: In-kernel interrupt controller related definitions
- * Copyright (c) 2008, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- *   Xiantao Zhang <xiantao.zhang@intel.com>
- *
- */
-
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#include "lapic.h"
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
-	return 1;
-}
-
-#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
deleted file mode 100644
index dbe46f43884d..000000000000
--- a/arch/ia64/kvm/kvm-ia64.c
+++ /dev/null
@@ -1,1942 +0,0 @@
1 | /* | ||
2 | * kvm_ia64.c: Basic KVM support On Itanium series processors | ||
3 | * | ||
4 | * | ||
5 | * Copyright (C) 2007, Intel Corporation. | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/percpu.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/kvm_host.h> | ||
30 | #include <linux/kvm.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <linux/hrtimer.h> | ||
33 | #include <linux/uaccess.h> | ||
34 | #include <linux/iommu.h> | ||
35 | #include <linux/intel-iommu.h> | ||
36 | #include <linux/pci.h> | ||
37 | |||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/gcc_intrin.h> | ||
40 | #include <asm/pal.h> | ||
41 | #include <asm/cacheflush.h> | ||
42 | #include <asm/div64.h> | ||
43 | #include <asm/tlb.h> | ||
44 | #include <asm/elf.h> | ||
45 | #include <asm/sn/addrs.h> | ||
46 | #include <asm/sn/clksupport.h> | ||
47 | #include <asm/sn/shub_mmr.h> | ||
48 | |||
49 | #include "misc.h" | ||
50 | #include "vti.h" | ||
51 | #include "iodev.h" | ||
52 | #include "ioapic.h" | ||
53 | #include "lapic.h" | ||
54 | #include "irq.h" | ||
55 | |||
56 | static unsigned long kvm_vmm_base; | ||
57 | static unsigned long kvm_vsa_base; | ||
58 | static unsigned long kvm_vm_buffer; | ||
59 | static unsigned long kvm_vm_buffer_size; | ||
60 | unsigned long kvm_vmm_gp; | ||
61 | |||
62 | static long vp_env_info; | ||
63 | |||
64 | static struct kvm_vmm_info *kvm_vmm_info; | ||
65 | |||
66 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); | ||
67 | |||
68 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
69 | { NULL } | ||
70 | }; | ||
71 | |||
72 | static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) | ||
73 | { | ||
74 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | ||
75 | if (vcpu->kvm->arch.is_sn2) | ||
76 | return rtc_time(); | ||
77 | else | ||
78 | #endif | ||
79 | return ia64_getreg(_IA64_REG_AR_ITC); | ||
80 | } | ||
81 | |||
82 | static void kvm_flush_icache(unsigned long start, unsigned long len) | ||
83 | { | ||
84 | int l; | ||
85 | |||
86 | for (l = 0; l < (len + 32); l += 32) | ||
87 | ia64_fc((void *)(start + l)); | ||
88 | |||
89 | ia64_sync_i(); | ||
90 | ia64_srlz_i(); | ||
91 | } | ||
92 | |||
93 | static void kvm_flush_tlb_all(void) | ||
94 | { | ||
95 | unsigned long i, j, count0, count1, stride0, stride1, addr; | ||
96 | long flags; | ||
97 | |||
98 | addr = local_cpu_data->ptce_base; | ||
99 | count0 = local_cpu_data->ptce_count[0]; | ||
100 | count1 = local_cpu_data->ptce_count[1]; | ||
101 | stride0 = local_cpu_data->ptce_stride[0]; | ||
102 | stride1 = local_cpu_data->ptce_stride[1]; | ||
103 | |||
104 | local_irq_save(flags); | ||
105 | for (i = 0; i < count0; ++i) { | ||
106 | for (j = 0; j < count1; ++j) { | ||
107 | ia64_ptce(addr); | ||
108 | addr += stride1; | ||
109 | } | ||
110 | addr += stride0; | ||
111 | } | ||
112 | local_irq_restore(flags); | ||
113 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | ||
114 | } | ||
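The nested loop above is the architected purge-all sequence: PAL_PTCE_INFO hands back a base address, two counts and two strides, and one ptc.e is issued per step. Since the address accumulates across both loops, the point visited at iteration (i, j) is

    addr(i, j) = ptce_base + i * stride0 + (i * count1 + j) * stride1,

for 0 <= i < count0 and 0 <= j < count1; together these purge the entire local TLB.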
115 | |||
116 | long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | ||
117 | { | ||
118 | struct ia64_pal_retval iprv; | ||
119 | |||
120 | PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, | ||
121 | (u64)opt_handler); | ||
122 | |||
123 | return iprv.status; | ||
124 | } | ||
125 | |||
126 | static DEFINE_SPINLOCK(vp_lock); | ||
127 | |||
128 | int kvm_arch_hardware_enable(void) | ||
129 | { | ||
130 | long status; | ||
131 | long tmp_base; | ||
132 | unsigned long pte; | ||
133 | unsigned long saved_psr; | ||
134 | int slot; | ||
135 | |||
136 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); | ||
137 | local_irq_save(saved_psr); | ||
138 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
139 | local_irq_restore(saved_psr); | ||
140 | if (slot < 0) | ||
141 | return -EINVAL; | ||
142 | |||
143 | spin_lock(&vp_lock); | ||
144 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | ||
145 | VP_INIT_ENV : VP_INIT_ENV_INITALIZE, | ||
146 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | ||
147 | if (status != 0) { | ||
148 | spin_unlock(&vp_lock); | ||
149 | printk(KERN_WARNING"kvm: Failed to enable VT support!\n"); | ||
150 | return -EINVAL; | ||
151 | } | ||
152 | |||
153 | if (!kvm_vsa_base) { | ||
154 | kvm_vsa_base = tmp_base; | ||
155 | printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); | ||
156 | } | ||
157 | spin_unlock(&vp_lock); | ||
158 | ia64_ptr_entry(0x3, slot); | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | void kvm_arch_hardware_disable(void) | ||
164 | { | ||
165 | |||
166 | long status; | ||
167 | int slot; | ||
168 | unsigned long pte; | ||
169 | unsigned long saved_psr; | ||
170 | unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); | ||
171 | |||
172 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | ||
173 | PAGE_KERNEL)); | ||
174 | |||
175 | local_irq_save(saved_psr); | ||
176 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
177 | local_irq_restore(saved_psr); | ||
178 | if (slot < 0) | ||
179 | return; | ||
180 | |||
181 | status = ia64_pal_vp_exit_env(host_iva); | ||
182 | if (status) | ||
183 | printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", | ||
184 | status); | ||
185 | ia64_ptr_entry(0x3, slot); | ||
186 | } | ||
187 | |||
188 | void kvm_arch_check_processor_compat(void *rtn) | ||
189 | { | ||
190 | *(int *)rtn = 0; | ||
191 | } | ||
192 | |||
193 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | ||
194 | { | ||
195 | |||
196 | int r; | ||
197 | |||
198 | switch (ext) { | ||
199 | case KVM_CAP_IRQCHIP: | ||
200 | case KVM_CAP_MP_STATE: | ||
201 | case KVM_CAP_IRQ_INJECT_STATUS: | ||
202 | case KVM_CAP_IOAPIC_POLARITY_IGNORED: | ||
203 | r = 1; | ||
204 | break; | ||
205 | case KVM_CAP_COALESCED_MMIO: | ||
206 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | ||
207 | break; | ||
208 | #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT | ||
209 | case KVM_CAP_IOMMU: | ||
210 | r = iommu_present(&pci_bus_type); | ||
211 | break; | ||
212 | #endif | ||
213 | default: | ||
214 | r = 0; | ||
215 | } | ||
216 | return r; | ||
217 | |||
218 | } | ||
219 | |||
220 | static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
221 | { | ||
222 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
223 | kvm_run->hw.hardware_exit_reason = 1; | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
228 | { | ||
229 | struct kvm_mmio_req *p; | ||
230 | struct kvm_io_device *mmio_dev; | ||
231 | int r; | ||
232 | |||
233 | p = kvm_get_vcpu_ioreq(vcpu); | ||
234 | |||
235 | if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) | ||
236 | goto mmio; | ||
237 | vcpu->mmio_needed = 1; | ||
238 | vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr; | ||
239 | vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size; | ||
240 | vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; | ||
241 | |||
242 | if (vcpu->mmio_is_write) | ||
243 | memcpy(vcpu->arch.mmio_data, &p->data, p->size); | ||
244 | memcpy(kvm_run->mmio.data, &p->data, p->size); | ||
245 | kvm_run->exit_reason = KVM_EXIT_MMIO; | ||
246 | return 0; | ||
247 | mmio: | ||
248 | if (p->dir) | ||
249 | r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, | ||
250 | p->size, &p->data); | ||
251 | else | ||
252 | r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, | ||
253 | p->size, &p->data); | ||
254 | if (r) | ||
255 | printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); | ||
256 | p->state = STATE_IORESP_READY; | ||
257 | |||
258 | return 1; | ||
259 | } | ||
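When handle_mmio() returns 0 with KVM_EXIT_MMIO, the access is completed in userspace. A minimal userspace-side sketch, assuming hypothetical bus_read()/bus_write() device-model hooks and an mmap'ed kvm_run:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* bus_read()/bus_write() are hypothetical device-model callbacks. */
    extern void bus_read(__u64 addr, void *data, __u32 len);
    extern void bus_write(__u64 addr, const void *data, __u32 len);

    static int complete_mmio_exit(int vcpu_fd, struct kvm_run *run)
    {
            if (run->mmio.is_write)
                    bus_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
            else
                    /* fill mmio.data; KVM copies it back in on the next KVM_RUN */
                    bus_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);

            return ioctl(vcpu_fd, KVM_RUN, 0);
    }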
260 | |||
261 | static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
262 | { | ||
263 | struct exit_ctl_data *p; | ||
264 | |||
265 | p = kvm_get_exit_data(vcpu); | ||
266 | |||
267 | if (p->exit_reason == EXIT_REASON_PAL_CALL) | ||
268 | return kvm_pal_emul(vcpu, kvm_run); | ||
269 | else { | ||
270 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
271 | kvm_run->hw.hardware_exit_reason = 2; | ||
272 | return 0; | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
277 | { | ||
278 | struct exit_ctl_data *p; | ||
279 | |||
280 | p = kvm_get_exit_data(vcpu); | ||
281 | |||
282 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
283 | kvm_sal_emul(vcpu); | ||
284 | return 1; | ||
285 | } else { | ||
286 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
287 | kvm_run->hw.hardware_exit_reason = 3; | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | } | ||
292 | |||
293 | static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) | ||
294 | { | ||
295 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
296 | |||
297 | if (!test_and_set_bit(vector, &vpd->irr[0])) { | ||
298 | vcpu->arch.irq_new_pending = 1; | ||
299 | kvm_vcpu_kick(vcpu); | ||
300 | return 1; | ||
301 | } | ||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | /* | ||
306 |  * dm: SAPIC delivery mode of the IPI. | ||
307 |  * vector: interrupt vector to deliver (fixed mode). | ||
308 |  */ | ||
309 | static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, | ||
310 | uint64_t vector) | ||
311 | { | ||
312 | switch (dm) { | ||
313 | case SAPIC_FIXED: | ||
314 | break; | ||
315 | case SAPIC_NMI: | ||
316 | vector = 2; | ||
317 | break; | ||
318 | case SAPIC_EXTINT: | ||
319 | vector = 0; | ||
320 | break; | ||
321 | case SAPIC_INIT: | ||
322 | case SAPIC_PMI: | ||
323 | default: | ||
324 | printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); | ||
325 | return; | ||
326 | } | ||
327 | __apic_accept_irq(vcpu, vector); | ||
328 | } | ||
329 | |||
330 | static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, | ||
331 | unsigned long eid) | ||
332 | { | ||
333 | union ia64_lid lid; | ||
334 | int i; | ||
335 | struct kvm_vcpu *vcpu; | ||
336 | |||
337 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
338 | lid.val = VCPU_LID(vcpu); | ||
339 | if (lid.id == id && lid.eid == eid) | ||
340 | return vcpu; | ||
341 | } | ||
342 | |||
343 | return NULL; | ||
344 | } | ||
345 | |||
346 | static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
347 | { | ||
348 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | ||
349 | struct kvm_vcpu *target_vcpu; | ||
350 | struct kvm_pt_regs *regs; | ||
351 | union ia64_ipi_a addr = p->u.ipi_data.addr; | ||
352 | union ia64_ipi_d data = p->u.ipi_data.data; | ||
353 | |||
354 | target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); | ||
355 | if (!target_vcpu) | ||
356 | return handle_vm_error(vcpu, kvm_run); | ||
357 | |||
358 | if (!target_vcpu->arch.launched) { | ||
359 | regs = vcpu_regs(target_vcpu); | ||
360 | |||
361 | regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; | ||
362 | regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; | ||
363 | |||
364 | target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | ||
365 | if (waitqueue_active(&target_vcpu->wq)) | ||
366 | wake_up_interruptible(&target_vcpu->wq); | ||
367 | } else { | ||
368 | vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); | ||
369 | if (target_vcpu != vcpu) | ||
370 | kvm_vcpu_kick(target_vcpu); | ||
371 | } | ||
372 | |||
373 | return 1; | ||
374 | } | ||
375 | |||
376 | struct call_data { | ||
377 | struct kvm_ptc_g ptc_g_data; | ||
378 | struct kvm_vcpu *vcpu; | ||
379 | }; | ||
380 | |||
381 | static void vcpu_global_purge(void *info) | ||
382 | { | ||
383 | struct call_data *p = (struct call_data *)info; | ||
384 | struct kvm_vcpu *vcpu = p->vcpu; | ||
385 | |||
386 | if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | ||
387 | return; | ||
388 | |||
389 | set_bit(KVM_REQ_PTC_G, &vcpu->requests); | ||
390 | if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { | ||
391 | vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = | ||
392 | p->ptc_g_data; | ||
393 | } else { | ||
394 | clear_bit(KVM_REQ_PTC_G, &vcpu->requests); | ||
395 | vcpu->arch.ptc_g_count = 0; | ||
396 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); | ||
397 | } | ||
398 | } | ||
399 | |||
400 | static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
401 | { | ||
402 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | ||
403 | struct kvm *kvm = vcpu->kvm; | ||
404 | struct call_data call_data; | ||
405 | int i; | ||
406 | struct kvm_vcpu *vcpui; | ||
407 | |||
408 | call_data.ptc_g_data = p->u.ptc_g_data; | ||
409 | |||
410 | kvm_for_each_vcpu(i, vcpui, kvm) { | ||
411 | if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || | ||
412 | vcpu == vcpui) | ||
413 | continue; | ||
414 | |||
415 | if (waitqueue_active(&vcpui->wq)) | ||
416 | wake_up_interruptible(&vcpui->wq); | ||
417 | |||
418 | if (vcpui->cpu != -1) { | ||
419 | call_data.vcpu = vcpui; | ||
420 | smp_call_function_single(vcpui->cpu, | ||
421 | vcpu_global_purge, &call_data, 1); | ||
422 | } else | ||
423 | printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); | ||
424 | |||
425 | } | ||
426 | return 1; | ||
427 | } | ||
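Note that the final argument 1 to smp_call_function_single() is the wait flag: each cross-CPU call runs synchronously before the loop moves on, which is what makes the stack-allocated call_data safe to reuse for every target vcpu.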
428 | |||
429 | static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
430 | { | ||
431 | return 1; | ||
432 | } | ||
433 | |||
434 | static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) | ||
435 | { | ||
436 | unsigned long pte, rtc_phys_addr, map_addr; | ||
437 | int slot; | ||
438 | |||
439 | map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); | ||
440 | rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; | ||
441 | pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); | ||
442 | slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); | ||
443 | vcpu->arch.sn_rtc_tr_slot = slot; | ||
444 | if (slot < 0) { | ||
445 | printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n"); | ||
446 | slot = 0; | ||
447 | } | ||
448 | return slot; | ||
449 | } | ||
450 | |||
451 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | ||
452 | { | ||
453 | |||
454 | ktime_t kt; | ||
455 | long itc_diff; | ||
456 | unsigned long vcpu_now_itc; | ||
457 | unsigned long expires; | ||
458 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | ||
459 | unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; | ||
460 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
461 | |||
462 | if (irqchip_in_kernel(vcpu->kvm)) { | ||
463 | |||
464 | vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; | ||
465 | |||
466 | if (time_after(vcpu_now_itc, vpd->itm)) { | ||
467 | vcpu->arch.timer_check = 1; | ||
468 | return 1; | ||
469 | } | ||
470 | itc_diff = vpd->itm - vcpu_now_itc; | ||
471 | if (itc_diff < 0) | ||
472 | itc_diff = -itc_diff; | ||
473 | |||
474 | expires = div64_u64(itc_diff, cyc_per_usec); | ||
475 | kt = ktime_set(0, 1000 * expires); | ||
476 | |||
477 | vcpu->arch.ht_active = 1; | ||
478 | hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); | ||
479 | |||
480 | vcpu->arch.mp_state = KVM_MP_STATE_HALTED; | ||
481 | kvm_vcpu_block(vcpu); | ||
482 | hrtimer_cancel(p_ht); | ||
483 | vcpu->arch.ht_active = 0; | ||
484 | |||
485 | if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || | ||
486 | kvm_cpu_has_pending_timer(vcpu)) | ||
487 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) | ||
488 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | ||
489 | |||
490 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) | ||
491 | return -EINTR; | ||
492 | return 1; | ||
493 | } else { | ||
494 | printk(KERN_ERR"kvm: Unsupported userspace halt!\n"); | ||
495 | return 0; | ||
496 | } | ||
497 | } | ||
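The expiry arithmetic above converts ITC cycles to microseconds and then to nanoseconds. As a sketch, the same computation factored into a helper, assuming the cyc_per_usec calibration used above:

    static inline ktime_t itc_cycles_to_ktime(unsigned long cycles,
                                              unsigned long cyc_per_usec)
    {
            unsigned long usecs = div64_u64(cycles, cyc_per_usec);

            return ktime_set(0, usecs * NSEC_PER_USEC);
    }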
498 | |||
499 | static int handle_vm_shutdown(struct kvm_vcpu *vcpu, | ||
500 | struct kvm_run *kvm_run) | ||
501 | { | ||
502 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | ||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | ||
507 | struct kvm_run *kvm_run) | ||
508 | { | ||
509 | return 1; | ||
510 | } | ||
511 | |||
512 | static int handle_vcpu_debug(struct kvm_vcpu *vcpu, | ||
513 | struct kvm_run *kvm_run) | ||
514 | { | ||
515 | printk("VMM: %s", vcpu->arch.log_buf); | ||
516 | return 1; | ||
517 | } | ||
518 | |||
519 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, | ||
520 | struct kvm_run *kvm_run) = { | ||
521 | [EXIT_REASON_VM_PANIC] = handle_vm_error, | ||
522 | [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, | ||
523 | [EXIT_REASON_PAL_CALL] = handle_pal_call, | ||
524 | [EXIT_REASON_SAL_CALL] = handle_sal_call, | ||
525 | [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, | ||
526 | [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, | ||
527 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | ||
528 | [EXIT_REASON_IPI] = handle_ipi, | ||
529 | [EXIT_REASON_PTC_G] = handle_global_purge, | ||
530 | [EXIT_REASON_DEBUG] = handle_vcpu_debug, | ||
531 | |||
532 | }; | ||
533 | |||
534 | static const int kvm_vti_max_exit_handlers = | ||
535 | sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); | ||
536 | |||
537 | static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) | ||
538 | { | ||
539 | struct exit_ctl_data *p_exit_data; | ||
540 | |||
541 | p_exit_data = kvm_get_exit_data(vcpu); | ||
542 | return p_exit_data->exit_reason; | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * The guest has exited. See if we can fix it or if we need userspace | ||
547 | * assistance. | ||
548 | */ | ||
549 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||
550 | { | ||
551 | u32 exit_reason = kvm_get_exit_reason(vcpu); | ||
552 | vcpu->arch.last_exit = exit_reason; | ||
553 | |||
554 | if (exit_reason < kvm_vti_max_exit_handlers | ||
555 | && kvm_vti_exit_handlers[exit_reason]) | ||
556 | return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); | ||
557 | else { | ||
558 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
559 | kvm_run->hw.hardware_exit_reason = exit_reason; | ||
560 | } | ||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | static inline void vti_set_rr6(unsigned long rr6) | ||
565 | { | ||
566 | ia64_set_rr(RR6, rr6); | ||
567 | ia64_srlz_i(); | ||
568 | } | ||
569 | |||
570 | static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) | ||
571 | { | ||
572 | unsigned long pte; | ||
573 | struct kvm *kvm = vcpu->kvm; | ||
574 | int r; | ||
575 | |||
576 | /*Insert a pair of tr to map vmm*/ | ||
577 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); | ||
578 | r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
579 | if (r < 0) | ||
580 | goto out; | ||
581 | vcpu->arch.vmm_tr_slot = r; | ||
582 | /*Insert a pair of tr to map data of vm*/ | ||
583 | pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); | ||
584 | r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, | ||
585 | pte, KVM_VM_DATA_SHIFT); | ||
586 | if (r < 0) | ||
587 | goto out; | ||
588 | vcpu->arch.vm_tr_slot = r; | ||
589 | |||
590 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | ||
591 | if (kvm->arch.is_sn2) { | ||
592 | r = kvm_sn2_setup_mappings(vcpu); | ||
593 | if (r < 0) | ||
594 | goto out; | ||
595 | } | ||
596 | #endif | ||
597 | |||
598 | r = 0; | ||
599 | out: | ||
600 | return r; | ||
601 | } | ||
602 | |||
603 | static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) | ||
604 | { | ||
605 | struct kvm *kvm = vcpu->kvm; | ||
606 | ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); | ||
607 | ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); | ||
608 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | ||
609 | if (kvm->arch.is_sn2) | ||
610 | ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); | ||
611 | #endif | ||
612 | } | ||
613 | |||
614 | static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) | ||
615 | { | ||
616 | unsigned long psr; | ||
617 | int r; | ||
618 | int cpu = smp_processor_id(); | ||
619 | |||
620 | if (vcpu->arch.last_run_cpu != cpu || | ||
621 | per_cpu(last_vcpu, cpu) != vcpu) { | ||
622 | per_cpu(last_vcpu, cpu) = vcpu; | ||
623 | vcpu->arch.last_run_cpu = cpu; | ||
624 | kvm_flush_tlb_all(); | ||
625 | } | ||
626 | |||
627 | vcpu->arch.host_rr6 = ia64_get_rr(RR6); | ||
628 | vti_set_rr6(vcpu->arch.vmm_rr); | ||
629 | local_irq_save(psr); | ||
630 | r = kvm_insert_vmm_mapping(vcpu); | ||
631 | local_irq_restore(psr); | ||
632 | return r; | ||
633 | } | ||
634 | |||
635 | static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) | ||
636 | { | ||
637 | kvm_purge_vmm_mapping(vcpu); | ||
638 | vti_set_rr6(vcpu->arch.host_rr6); | ||
639 | } | ||
640 | |||
641 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
642 | { | ||
643 | union context *host_ctx, *guest_ctx; | ||
644 | int r, idx; | ||
645 | |||
646 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
647 | |||
648 | again: | ||
649 | if (signal_pending(current)) { | ||
650 | r = -EINTR; | ||
651 | kvm_run->exit_reason = KVM_EXIT_INTR; | ||
652 | goto out; | ||
653 | } | ||
654 | |||
655 | preempt_disable(); | ||
656 | local_irq_disable(); | ||
657 | |||
658 | /*Get host and guest context with guest address space.*/ | ||
659 | host_ctx = kvm_get_host_context(vcpu); | ||
660 | guest_ctx = kvm_get_guest_context(vcpu); | ||
661 | |||
662 | clear_bit(KVM_REQ_KICK, &vcpu->requests); | ||
663 | |||
664 | r = kvm_vcpu_pre_transition(vcpu); | ||
665 | if (r < 0) | ||
666 | goto vcpu_run_fail; | ||
667 | |||
668 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
669 | vcpu->mode = IN_GUEST_MODE; | ||
670 | kvm_guest_enter(); | ||
671 | |||
672 | /* | ||
673 | * Transition to the guest | ||
674 | */ | ||
675 | kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); | ||
676 | |||
677 | kvm_vcpu_post_transition(vcpu); | ||
678 | |||
679 | vcpu->arch.launched = 1; | ||
680 | set_bit(KVM_REQ_KICK, &vcpu->requests); | ||
681 | local_irq_enable(); | ||
682 | |||
683 | /* | ||
684 | * We must have an instruction between local_irq_enable() and | ||
685 | * kvm_guest_exit(), so the timer interrupt isn't delayed by | ||
686 | * the interrupt shadow. The stat.exits increment will do nicely. | ||
687 | * But we need to prevent reordering, hence this barrier(): | ||
688 | */ | ||
689 | barrier(); | ||
690 | kvm_guest_exit(); | ||
691 | vcpu->mode = OUTSIDE_GUEST_MODE; | ||
692 | preempt_enable(); | ||
693 | |||
694 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
695 | |||
696 | r = kvm_handle_exit(kvm_run, vcpu); | ||
697 | |||
698 | if (r > 0) { | ||
699 | if (!need_resched()) | ||
700 | goto again; | ||
701 | } | ||
702 | |||
703 | out: | ||
704 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
705 | if (r > 0) { | ||
706 | cond_resched(); | ||
707 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
708 | goto again; | ||
709 | } | ||
710 | |||
711 | return r; | ||
712 | |||
713 | vcpu_run_fail: | ||
714 | local_irq_enable(); | ||
715 | preempt_enable(); | ||
716 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | ||
717 | goto out; | ||
718 | } | ||
719 | |||
720 | static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) | ||
721 | { | ||
722 | struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); | ||
723 | |||
724 | if (!vcpu->mmio_is_write) | ||
725 | memcpy(&p->data, vcpu->arch.mmio_data, 8); | ||
726 | p->state = STATE_IORESP_READY; | ||
727 | } | ||
728 | |||
729 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
730 | { | ||
731 | int r; | ||
732 | sigset_t sigsaved; | ||
733 | |||
734 | if (vcpu->sigset_active) | ||
735 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
736 | |||
737 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { | ||
738 | kvm_vcpu_block(vcpu); | ||
739 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
740 | r = -EAGAIN; | ||
741 | goto out; | ||
742 | } | ||
743 | |||
744 | if (vcpu->mmio_needed) { | ||
745 | memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8); | ||
746 | kvm_set_mmio_data(vcpu); | ||
747 | vcpu->mmio_read_completed = 1; | ||
748 | vcpu->mmio_needed = 0; | ||
749 | } | ||
750 | r = __vcpu_run(vcpu, kvm_run); | ||
751 | out: | ||
752 | if (vcpu->sigset_active) | ||
753 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | ||
754 | |||
755 | return r; | ||
756 | } | ||
757 | |||
758 | struct kvm *kvm_arch_alloc_vm(void) | ||
759 | { | ||
760 | |||
761 | struct kvm *kvm; | ||
762 | uint64_t vm_base; | ||
763 | |||
764 | BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); | ||
765 | |||
766 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); | ||
767 | |||
768 | if (!vm_base) | ||
769 | return NULL; | ||
770 | |||
771 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | ||
772 | kvm = (struct kvm *)(vm_base + | ||
773 | offsetof(struct kvm_vm_data, kvm_vm_struct)); | ||
774 | kvm->arch.vm_base = vm_base; | ||
775 | printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); | ||
776 | |||
777 | return kvm; | ||
778 | } | ||
779 | |||
780 | struct kvm_ia64_io_range { | ||
781 | unsigned long start; | ||
782 | unsigned long size; | ||
783 | unsigned long type; | ||
784 | }; | ||
785 | |||
786 | static const struct kvm_ia64_io_range io_ranges[] = { | ||
787 | {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, | ||
788 | {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, | ||
789 | {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, | ||
790 | {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, | ||
791 | {PIB_START, PIB_SIZE, GPFN_PIB}, | ||
792 | }; | ||
793 | |||
794 | static void kvm_build_io_pmt(struct kvm *kvm) | ||
795 | { | ||
796 | unsigned long i, j; | ||
797 | |||
798 | /* Mark I/O ranges */ | ||
799 | for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_ia64_io_range)); | ||
800 | i++) { | ||
801 | for (j = io_ranges[i].start; | ||
802 | j < io_ranges[i].start + io_ranges[i].size; | ||
803 | j += PAGE_SIZE) | ||
804 | kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, | ||
805 | io_ranges[i].type, 0); | ||
806 | } | ||
807 | |||
808 | } | ||
809 | |||
810 | /*Use unused rids to virtualize guest rid.*/ | ||
811 | #define GUEST_PHYSICAL_RR0 0x1739 | ||
812 | #define GUEST_PHYSICAL_RR4 0x2739 | ||
813 | #define VMM_INIT_RR 0x1660 | ||
814 | |||
815 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | ||
816 | { | ||
817 | BUG_ON(!kvm); | ||
818 | |||
819 | if (type) | ||
820 | return -EINVAL; | ||
821 | |||
822 | kvm->arch.is_sn2 = ia64_platform_is("sn2"); | ||
823 | |||
824 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; | ||
825 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; | ||
826 | kvm->arch.vmm_init_rr = VMM_INIT_RR; | ||
827 | |||
828 | /* | ||
829 | *Fill P2M entries for MMIO/IO ranges | ||
830 | */ | ||
831 | kvm_build_io_pmt(kvm); | ||
832 | |||
833 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); | ||
834 | |||
835 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ | ||
836 | set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); | ||
837 | |||
838 | return 0; | ||
839 | } | ||
840 | |||
841 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | ||
842 | struct kvm_irqchip *chip) | ||
843 | { | ||
844 | int r; | ||
845 | |||
846 | r = 0; | ||
847 | switch (chip->chip_id) { | ||
848 | case KVM_IRQCHIP_IOAPIC: | ||
849 | r = kvm_get_ioapic(kvm, &chip->chip.ioapic); | ||
850 | break; | ||
851 | default: | ||
852 | r = -EINVAL; | ||
853 | break; | ||
854 | } | ||
855 | return r; | ||
856 | } | ||
857 | |||
858 | static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | ||
859 | { | ||
860 | int r; | ||
861 | |||
862 | r = 0; | ||
863 | switch (chip->chip_id) { | ||
864 | case KVM_IRQCHIP_IOAPIC: | ||
865 | r = kvm_set_ioapic(kvm, &chip->chip.ioapic); | ||
866 | break; | ||
867 | default: | ||
868 | r = -EINVAL; | ||
869 | break; | ||
870 | } | ||
871 | return r; | ||
872 | } | ||
873 | |||
874 | #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x | ||
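The macro simply mirrors one field of the ioctl payload into the vcpu; RESTORE_REGS(mp_state), for instance, expands to

    vcpu->arch.mp_state = regs->mp_state;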
875 | |||
876 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
877 | { | ||
878 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
879 | int i; | ||
880 | |||
881 | for (i = 0; i < 16; i++) { | ||
882 | vpd->vgr[i] = regs->vpd.vgr[i]; | ||
883 | vpd->vbgr[i] = regs->vpd.vbgr[i]; | ||
884 | } | ||
885 | for (i = 0; i < 128; i++) | ||
886 | vpd->vcr[i] = regs->vpd.vcr[i]; | ||
887 | vpd->vhpi = regs->vpd.vhpi; | ||
888 | vpd->vnat = regs->vpd.vnat; | ||
889 | vpd->vbnat = regs->vpd.vbnat; | ||
890 | vpd->vpsr = regs->vpd.vpsr; | ||
891 | |||
892 | vpd->vpr = regs->vpd.vpr; | ||
893 | |||
894 | memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); | ||
895 | |||
896 | RESTORE_REGS(mp_state); | ||
897 | RESTORE_REGS(vmm_rr); | ||
898 | memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); | ||
899 | memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); | ||
900 | RESTORE_REGS(itr_regions); | ||
901 | RESTORE_REGS(dtr_regions); | ||
902 | RESTORE_REGS(tc_regions); | ||
903 | RESTORE_REGS(irq_check); | ||
904 | RESTORE_REGS(itc_check); | ||
905 | RESTORE_REGS(timer_check); | ||
906 | RESTORE_REGS(timer_pending); | ||
907 | RESTORE_REGS(last_itc); | ||
908 | for (i = 0; i < 8; i++) { | ||
909 | vcpu->arch.vrr[i] = regs->vrr[i]; | ||
910 | vcpu->arch.ibr[i] = regs->ibr[i]; | ||
911 | vcpu->arch.dbr[i] = regs->dbr[i]; | ||
912 | } | ||
913 | for (i = 0; i < 4; i++) | ||
914 | vcpu->arch.insvc[i] = regs->insvc[i]; | ||
915 | RESTORE_REGS(xtp); | ||
916 | RESTORE_REGS(metaphysical_rr0); | ||
917 | RESTORE_REGS(metaphysical_rr4); | ||
918 | RESTORE_REGS(metaphysical_saved_rr0); | ||
919 | RESTORE_REGS(metaphysical_saved_rr4); | ||
920 | RESTORE_REGS(fp_psr); | ||
921 | RESTORE_REGS(saved_gp); | ||
922 | |||
923 | vcpu->arch.irq_new_pending = 1; | ||
924 | vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); | ||
925 | set_bit(KVM_REQ_RESUME, &vcpu->requests); | ||
926 | |||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, | ||
931 | bool line_status) | ||
932 | { | ||
933 | if (!irqchip_in_kernel(kvm)) | ||
934 | return -ENXIO; | ||
935 | |||
936 | irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | ||
937 | irq_event->irq, irq_event->level, | ||
938 | line_status); | ||
939 | return 0; | ||
940 | } | ||
941 | |||
942 | long kvm_arch_vm_ioctl(struct file *filp, | ||
943 | unsigned int ioctl, unsigned long arg) | ||
944 | { | ||
945 | struct kvm *kvm = filp->private_data; | ||
946 | void __user *argp = (void __user *)arg; | ||
947 | int r = -ENOTTY; | ||
948 | |||
949 | switch (ioctl) { | ||
950 | case KVM_CREATE_IRQCHIP: | ||
951 | /* create the in-kernel IOAPIC model */ | ||
952 | r = kvm_ioapic_init(kvm); | ||
953 | if (r) | ||
954 | goto out; | ||
955 | r = kvm_setup_default_irq_routing(kvm); | ||
956 | if (r) { | ||
957 | mutex_lock(&kvm->slots_lock); | ||
958 | kvm_ioapic_destroy(kvm); | ||
959 | mutex_unlock(&kvm->slots_lock); | ||
960 | goto out; | ||
961 | } | ||
962 | break; | ||
963 | case KVM_GET_IRQCHIP: { | ||
964 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | ||
965 | struct kvm_irqchip chip; | ||
966 | |||
967 | r = -EFAULT; | ||
968 | if (copy_from_user(&chip, argp, sizeof chip)) | ||
969 | goto out; | ||
970 | r = -ENXIO; | ||
971 | if (!irqchip_in_kernel(kvm)) | ||
972 | goto out; | ||
973 | r = kvm_vm_ioctl_get_irqchip(kvm, &chip); | ||
974 | if (r) | ||
975 | goto out; | ||
976 | r = -EFAULT; | ||
977 | if (copy_to_user(argp, &chip, sizeof chip)) | ||
978 | goto out; | ||
979 | r = 0; | ||
980 | break; | ||
981 | } | ||
982 | case KVM_SET_IRQCHIP: { | ||
983 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | ||
984 | struct kvm_irqchip chip; | ||
985 | |||
986 | r = -EFAULT; | ||
987 | if (copy_from_user(&chip, argp, sizeof chip)) | ||
988 | goto out; | ||
989 | r = -ENXIO; | ||
990 | if (!irqchip_in_kernel(kvm)) | ||
991 | goto out; | ||
992 | r = kvm_vm_ioctl_set_irqchip(kvm, &chip); | ||
993 | if (r) | ||
994 | goto out; | ||
995 | r = 0; | ||
996 | break; | ||
997 | } | ||
998 | default: | ||
999 | ; | ||
1000 | } | ||
1001 | out: | ||
1002 | return r; | ||
1003 | } | ||
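Driven from userspace, these ioctls target the VM file descriptor. A hedged sketch (vm_fd is assumed to be an open KVM VM fd):

    #include <err.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void setup_irqchip(int vm_fd)
    {
            struct kvm_irqchip chip = { .chip_id = KVM_IRQCHIP_IOAPIC };

            if (ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0) < 0)
                    err(1, "KVM_CREATE_IRQCHIP");
            if (ioctl(vm_fd, KVM_GET_IRQCHIP, &chip) < 0)  /* snapshot IOAPIC state */
                    err(1, "KVM_GET_IRQCHIP");
            if (ioctl(vm_fd, KVM_SET_IRQCHIP, &chip) < 0)  /* write it back */
                    err(1, "KVM_SET_IRQCHIP");
    }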
1004 | |||
1005 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
1006 | struct kvm_sregs *sregs) | ||
1007 | { | ||
1008 | return -EINVAL; | ||
1009 | } | ||
1010 | |||
1011 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
1012 | struct kvm_sregs *sregs) | ||
1013 | { | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | } | ||
1017 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
1018 | struct kvm_translation *tr) | ||
1019 | { | ||
1020 | |||
1021 | return -EINVAL; | ||
1022 | } | ||
1023 | |||
1024 | static int kvm_alloc_vmm_area(void) | ||
1025 | { | ||
1026 | if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { | ||
1027 | kvm_vmm_base = __get_free_pages(GFP_KERNEL, | ||
1028 | get_order(KVM_VMM_SIZE)); | ||
1029 | if (!kvm_vmm_base) | ||
1030 | return -ENOMEM; | ||
1031 | |||
1032 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | ||
1033 | kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; | ||
1034 | |||
1035 | printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", | ||
1036 | kvm_vmm_base, kvm_vm_buffer); | ||
1037 | } | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
1042 | static void kvm_free_vmm_area(void) | ||
1043 | { | ||
1044 | if (kvm_vmm_base) { | ||
1045 | /* Zero this area before freeing it to avoid leaking stale data */ | ||
1046 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | ||
1047 | free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); | ||
1048 | kvm_vmm_base = 0; | ||
1049 | kvm_vm_buffer = 0; | ||
1050 | kvm_vsa_base = 0; | ||
1051 | } | ||
1052 | } | ||
1053 | |||
1054 | static int vti_init_vpd(struct kvm_vcpu *vcpu) | ||
1055 | { | ||
1056 | int i; | ||
1057 | union cpuid3_t cpuid3; | ||
1058 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1059 | |||
1060 | if (IS_ERR(vpd)) | ||
1061 | return PTR_ERR(vpd); | ||
1062 | |||
1063 | /* CPUID init */ | ||
1064 | for (i = 0; i < 5; i++) | ||
1065 | vpd->vcpuid[i] = ia64_get_cpuid(i); | ||
1066 | |||
1067 | /* Limit the CPUID number to 5 */ | ||
1068 | cpuid3.value = vpd->vcpuid[3]; | ||
1069 | cpuid3.number = 4; /* 5 - 1 */ | ||
1070 | vpd->vcpuid[3] = cpuid3.value; | ||
1071 | |||
1072 | /*Set vac and vdc fields*/ | ||
1073 | vpd->vac.a_from_int_cr = 1; | ||
1074 | vpd->vac.a_to_int_cr = 1; | ||
1075 | vpd->vac.a_from_psr = 1; | ||
1076 | vpd->vac.a_from_cpuid = 1; | ||
1077 | vpd->vac.a_cover = 1; | ||
1078 | vpd->vac.a_bsw = 1; | ||
1079 | vpd->vac.a_int = 1; | ||
1080 | vpd->vdc.d_vmsw = 1; | ||
1081 | |||
1082 | /*Set virtual buffer*/ | ||
1083 | vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; | ||
1084 | |||
1085 | return 0; | ||
1086 | } | ||
1087 | |||
1088 | static int vti_create_vp(struct kvm_vcpu *vcpu) | ||
1089 | { | ||
1090 | long ret; | ||
1091 | struct vpd *vpd = vcpu->arch.vpd; | ||
1092 | unsigned long vmm_ivt; | ||
1093 | |||
1094 | vmm_ivt = kvm_vmm_info->vmm_ivt; | ||
1095 | |||
1096 | printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); | ||
1097 | |||
1098 | ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); | ||
1099 | |||
1100 | if (ret) { | ||
1101 | printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); | ||
1102 | return -EINVAL; | ||
1103 | } | ||
1104 | return 0; | ||
1105 | } | ||
1106 | |||
1107 | static void init_ptce_info(struct kvm_vcpu *vcpu) | ||
1108 | { | ||
1109 | ia64_ptce_info_t ptce = {0}; | ||
1110 | |||
1111 | ia64_get_ptce(&ptce); | ||
1112 | vcpu->arch.ptce_base = ptce.base; | ||
1113 | vcpu->arch.ptce_count[0] = ptce.count[0]; | ||
1114 | vcpu->arch.ptce_count[1] = ptce.count[1]; | ||
1115 | vcpu->arch.ptce_stride[0] = ptce.stride[0]; | ||
1116 | vcpu->arch.ptce_stride[1] = ptce.stride[1]; | ||
1117 | } | ||
1118 | |||
1119 | static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) | ||
1120 | { | ||
1121 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | ||
1122 | |||
1123 | if (hrtimer_cancel(p_ht)) | ||
1124 | hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); | ||
1125 | } | ||
1126 | |||
1127 | static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) | ||
1128 | { | ||
1129 | struct kvm_vcpu *vcpu; | ||
1130 | wait_queue_head_t *q; | ||
1131 | |||
1132 | vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); | ||
1133 | q = &vcpu->wq; | ||
1134 | |||
1135 | if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) | ||
1136 | goto out; | ||
1137 | |||
1138 | if (waitqueue_active(q)) | ||
1139 | wake_up_interruptible(q); | ||
1140 | |||
1141 | out: | ||
1142 | vcpu->arch.timer_fired = 1; | ||
1143 | vcpu->arch.timer_check = 1; | ||
1144 | return HRTIMER_NORESTART; | ||
1145 | } | ||
1146 | |||
1147 | #define PALE_RESET_ENTRY 0x80000000ffffffb0UL | ||
1148 | |||
1149 | bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) | ||
1150 | { | ||
1151 | return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); | ||
1152 | } | ||
1153 | |||
1154 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | ||
1155 | { | ||
1156 | struct kvm_vcpu *v; | ||
1157 | int r; | ||
1158 | int i; | ||
1159 | long itc_offset; | ||
1160 | struct kvm *kvm = vcpu->kvm; | ||
1161 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1162 | |||
1163 | union context *p_ctx = &vcpu->arch.guest; | ||
1164 | struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); | ||
1165 | |||
1166 | /*Init vcpu context for first run.*/ | ||
1167 | if (IS_ERR(vmm_vcpu)) | ||
1168 | return PTR_ERR(vmm_vcpu); | ||
1169 | |||
1170 | if (kvm_vcpu_is_bsp(vcpu)) { | ||
1171 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | ||
1172 | |||
1173 | /*Set entry address for first run.*/ | ||
1174 | regs->cr_iip = PALE_RESET_ENTRY; | ||
1175 | |||
1176 | /*Initialize itc offset for vcpus*/ | ||
1177 | itc_offset = 0UL - kvm_get_itc(vcpu); | ||
1178 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | ||
1179 | v = (struct kvm_vcpu *)((char *)vcpu + | ||
1180 | sizeof(struct kvm_vcpu_data) * i); | ||
1181 | v->arch.itc_offset = itc_offset; | ||
1182 | v->arch.last_itc = 0; | ||
1183 | } | ||
1184 | } else | ||
1185 | vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; | ||
1186 | |||
1187 | r = -ENOMEM; | ||
1188 | vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); | ||
1189 | if (!vcpu->arch.apic) | ||
1190 | goto out; | ||
1191 | vcpu->arch.apic->vcpu = vcpu; | ||
1192 | |||
1193 | p_ctx->gr[1] = 0; | ||
1194 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); | ||
1195 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; | ||
1196 | p_ctx->psr = 0x1008522000UL; | ||
1197 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ | ||
1198 | p_ctx->caller_unat = 0; | ||
1199 | p_ctx->pr = 0x0; | ||
1200 | p_ctx->ar[36] = 0x0; /*unat*/ | ||
1201 | p_ctx->ar[19] = 0x0; /*rnat*/ | ||
1202 | p_ctx->ar[18] = (unsigned long)vmm_vcpu + | ||
1203 | ((sizeof(struct kvm_vcpu)+15) & ~15); | ||
1204 | p_ctx->ar[64] = 0x0; /*pfs*/ | ||
1205 | p_ctx->cr[0] = 0x7e04UL; | ||
1206 | p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; | ||
1207 | p_ctx->cr[8] = 0x3c; | ||
1208 | |||
1209 | /*Initialize region register*/ | ||
1210 | p_ctx->rr[0] = 0x30; | ||
1211 | p_ctx->rr[1] = 0x30; | ||
1212 | p_ctx->rr[2] = 0x30; | ||
1213 | p_ctx->rr[3] = 0x30; | ||
1214 | p_ctx->rr[4] = 0x30; | ||
1215 | p_ctx->rr[5] = 0x30; | ||
1216 | p_ctx->rr[7] = 0x30; | ||
1217 | |||
1218 | /*Initialize branch register 0*/ | ||
1219 | p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; | ||
1220 | |||
1221 | vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; | ||
1222 | vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; | ||
1223 | vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; | ||
1224 | |||
1225 | hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
1226 | vcpu->arch.hlt_timer.function = hlt_timer_fn; | ||
1227 | |||
1228 | vcpu->arch.last_run_cpu = -1; | ||
1229 | vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); | ||
1230 | vcpu->arch.vsa_base = kvm_vsa_base; | ||
1231 | vcpu->arch.__gp = kvm_vmm_gp; | ||
1232 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); | ||
1233 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); | ||
1234 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); | ||
1235 | init_ptce_info(vcpu); | ||
1236 | |||
1237 | r = 0; | ||
1238 | out: | ||
1239 | return r; | ||
1240 | } | ||
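The raw ar[]/cr[] indices in this initializer are the architectural register numbers (ar[16] rsc, ar[18] bspstore, ar[19] rnat, ar[36] unat, ar[40] fpsr, ar[64] pfs; cr[0] dcr, cr[2] iva, cr[8] pta), the same ones the VMM_CTX_*_OFFSET table emitted by asm-offsets.c earlier in this patch exposes to the VMM assembly.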
1241 | |||
1242 | static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) | ||
1243 | { | ||
1244 | unsigned long psr; | ||
1245 | int r; | ||
1246 | |||
1247 | local_irq_save(psr); | ||
1248 | r = kvm_insert_vmm_mapping(vcpu); | ||
1249 | local_irq_restore(psr); | ||
1250 | if (r) | ||
1251 | goto fail; | ||
1252 | r = kvm_vcpu_init(vcpu, vcpu->kvm, id); | ||
1253 | if (r) | ||
1254 | goto fail; | ||
1255 | |||
1256 | r = vti_init_vpd(vcpu); | ||
1257 | if (r) { | ||
1258 | printk(KERN_DEBUG"kvm: vpd init error!!\n"); | ||
1259 | goto uninit; | ||
1260 | } | ||
1261 | |||
1262 | r = vti_create_vp(vcpu); | ||
1263 | if (r) | ||
1264 | goto uninit; | ||
1265 | |||
1266 | kvm_purge_vmm_mapping(vcpu); | ||
1267 | |||
1268 | return 0; | ||
1269 | uninit: | ||
1270 | kvm_vcpu_uninit(vcpu); | ||
1271 | fail: | ||
1272 | return r; | ||
1273 | } | ||
1274 | |||
1275 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | ||
1276 | unsigned int id) | ||
1277 | { | ||
1278 | struct kvm_vcpu *vcpu; | ||
1279 | unsigned long vm_base = kvm->arch.vm_base; | ||
1280 | int r; | ||
1281 | int cpu; | ||
1282 | |||
1283 | BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); | ||
1284 | |||
1285 | r = -EINVAL; | ||
1286 | if (id >= KVM_MAX_VCPUS) { | ||
1287 | printk(KERN_ERR"kvm: Can't configure vcpus > %ld\n", | ||
1288 | KVM_MAX_VCPUS); | ||
1289 | goto fail; | ||
1290 | } | ||
1291 | |||
1292 | r = -ENOMEM; | ||
1293 | if (!vm_base) { | ||
1294 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); | ||
1295 | goto fail; | ||
1296 | } | ||
1297 | vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, | ||
1298 | vcpu_data[id].vcpu_struct)); | ||
1299 | vcpu->kvm = kvm; | ||
1300 | |||
1301 | cpu = get_cpu(); | ||
1302 | r = vti_vcpu_setup(vcpu, id); | ||
1303 | put_cpu(); | ||
1304 | |||
1305 | if (r) { | ||
1306 | printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); | ||
1307 | goto fail; | ||
1308 | } | ||
1309 | |||
1310 | return vcpu; | ||
1311 | fail: | ||
1312 | return ERR_PTR(r); | ||
1313 | } | ||
1314 | |||
1315 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | ||
1316 | { | ||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | ||
1321 | { | ||
1322 | return 0; | ||
1323 | } | ||
1324 | |||
1325 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1326 | { | ||
1327 | return -EINVAL; | ||
1328 | } | ||
1329 | |||
1330 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1331 | { | ||
1332 | return -EINVAL; | ||
1333 | } | ||
1334 | |||
1335 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | ||
1336 | struct kvm_guest_debug *dbg) | ||
1337 | { | ||
1338 | return -EINVAL; | ||
1339 | } | ||
1340 | |||
1341 | void kvm_arch_free_vm(struct kvm *kvm) | ||
1342 | { | ||
1343 | unsigned long vm_base = kvm->arch.vm_base; | ||
1344 | |||
1345 | if (vm_base) { | ||
1346 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | ||
1347 | free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); | ||
1348 | } | ||
1349 | |||
1350 | } | ||
1351 | |||
1352 | static void kvm_release_vm_pages(struct kvm *kvm) | ||
1353 | { | ||
1354 | struct kvm_memslots *slots; | ||
1355 | struct kvm_memory_slot *memslot; | ||
1356 | int j; | ||
1357 | |||
1358 | slots = kvm_memslots(kvm); | ||
1359 | kvm_for_each_memslot(memslot, slots) { | ||
1360 | for (j = 0; j < memslot->npages; j++) { | ||
1361 | if (memslot->rmap[j]) | ||
1362 | put_page((struct page *)memslot->rmap[j]); | ||
1363 | } | ||
1364 | } | ||
1365 | } | ||
1366 | |||
1367 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
1368 | { | ||
1369 | kvm_iommu_unmap_guest(kvm); | ||
1370 | kvm_free_all_assigned_devices(kvm); | ||
1371 | kfree(kvm->arch.vioapic); | ||
1372 | kvm_release_vm_pages(kvm); | ||
1373 | } | ||
1374 | |||
1375 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1376 | { | ||
1377 | if (cpu != vcpu->cpu) { | ||
1378 | vcpu->cpu = cpu; | ||
1379 | if (vcpu->arch.ht_active) | ||
1380 | kvm_migrate_hlt_timer(vcpu); | ||
1381 | } | ||
1382 | } | ||
1383 | |||
1384 | #define SAVE_REGS(_x) regs->_x = vcpu->arch._x | ||
1385 | |||
1386 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
1387 | { | ||
1388 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1389 | int i; | ||
1390 | |||
1391 | vcpu_load(vcpu); | ||
1392 | |||
1393 | for (i = 0; i < 16; i++) { | ||
1394 | regs->vpd.vgr[i] = vpd->vgr[i]; | ||
1395 | regs->vpd.vbgr[i] = vpd->vbgr[i]; | ||
1396 | } | ||
1397 | for (i = 0; i < 128; i++) | ||
1398 | regs->vpd.vcr[i] = vpd->vcr[i]; | ||
1399 | regs->vpd.vhpi = vpd->vhpi; | ||
1400 | regs->vpd.vnat = vpd->vnat; | ||
1401 | regs->vpd.vbnat = vpd->vbnat; | ||
1402 | regs->vpd.vpsr = vpd->vpsr; | ||
1403 | regs->vpd.vpr = vpd->vpr; | ||
1404 | |||
1405 | memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context)); | ||
1406 | |||
1407 | SAVE_REGS(mp_state); | ||
1408 | SAVE_REGS(vmm_rr); | ||
1409 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); | ||
1410 | memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); | ||
1411 | SAVE_REGS(itr_regions); | ||
1412 | SAVE_REGS(dtr_regions); | ||
1413 | SAVE_REGS(tc_regions); | ||
1414 | SAVE_REGS(irq_check); | ||
1415 | SAVE_REGS(itc_check); | ||
1416 | SAVE_REGS(timer_check); | ||
1417 | SAVE_REGS(timer_pending); | ||
1418 | SAVE_REGS(last_itc); | ||
1419 | for (i = 0; i < 8; i++) { | ||
1420 | regs->vrr[i] = vcpu->arch.vrr[i]; | ||
1421 | regs->ibr[i] = vcpu->arch.ibr[i]; | ||
1422 | regs->dbr[i] = vcpu->arch.dbr[i]; | ||
1423 | } | ||
1424 | for (i = 0; i < 4; i++) | ||
1425 | regs->insvc[i] = vcpu->arch.insvc[i]; | ||
1426 | regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); | ||
1427 | SAVE_REGS(xtp); | ||
1428 | SAVE_REGS(metaphysical_rr0); | ||
1429 | SAVE_REGS(metaphysical_rr4); | ||
1430 | SAVE_REGS(metaphysical_saved_rr0); | ||
1431 | SAVE_REGS(metaphysical_saved_rr4); | ||
1432 | SAVE_REGS(fp_psr); | ||
1433 | SAVE_REGS(saved_gp); | ||
1434 | |||
1435 | vcpu_put(vcpu); | ||
1436 | return 0; | ||
1437 | } | ||
1438 | |||
1439 | int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, | ||
1440 | struct kvm_ia64_vcpu_stack *stack) | ||
1441 | { | ||
1442 | memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); | ||
1443 | return 0; | ||
1444 | } | ||
1445 | |||
1446 | int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, | ||
1447 | struct kvm_ia64_vcpu_stack *stack) | ||
1448 | { | ||
1449 | memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), | ||
1450 | sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); | ||
1451 | |||
1452 | vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; | ||
1453 | return 0; | ||
1454 | } | ||
1455 | |||
1456 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
1457 | { | ||
1458 | |||
1459 | hrtimer_cancel(&vcpu->arch.hlt_timer); | ||
1460 | kfree(vcpu->arch.apic); | ||
1461 | } | ||
1462 | |||
1463 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
1464 | unsigned int ioctl, unsigned long arg) | ||
1465 | { | ||
1466 | struct kvm_vcpu *vcpu = filp->private_data; | ||
1467 | void __user *argp = (void __user *)arg; | ||
1468 | struct kvm_ia64_vcpu_stack *stack = NULL; | ||
1469 | long r; | ||
1470 | |||
1471 | switch (ioctl) { | ||
1472 | case KVM_IA64_VCPU_GET_STACK: { | ||
1473 | struct kvm_ia64_vcpu_stack __user *user_stack; | ||
1474 | void __user *first_p = argp; | ||
1475 | |||
1476 | r = -EFAULT; | ||
1477 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) | ||
1478 | goto out; | ||
1479 | |||
1480 | if (!access_ok(VERIFY_WRITE, user_stack, | ||
1481 | sizeof(struct kvm_ia64_vcpu_stack))) { | ||
1482 | printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " | ||
1483 | "Illegal user destination address for stack\n"); | ||
1484 | goto out; | ||
1485 | } | ||
1486 | stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); | ||
1487 | if (!stack) { | ||
1488 | r = -ENOMEM; | ||
1489 | goto out; | ||
1490 | } | ||
1491 | |||
1492 | r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); | ||
1493 | if (r) | ||
1494 | goto out; | ||
1495 | |||
1496 | if (copy_to_user(user_stack, stack, | ||
1497 | sizeof(struct kvm_ia64_vcpu_stack))) { | ||
1498 | r = -EFAULT; | ||
1499 | goto out; | ||
1500 | } | ||
1501 | |||
1502 | break; | ||
1503 | } | ||
1504 | case KVM_IA64_VCPU_SET_STACK: { | ||
1505 | struct kvm_ia64_vcpu_stack __user *user_stack; | ||
1506 | void __user *first_p = argp; | ||
1507 | |||
1508 | r = -EFAULT; | ||
1509 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) | ||
1510 | goto out; | ||
1511 | |||
1512 | if (!access_ok(VERIFY_READ, user_stack, | ||
1513 | sizeof(struct kvm_ia64_vcpu_stack))) { | ||
1514 | printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " | ||
1515 | "Illegal user address for stack\n"); | ||
1516 | goto out; | ||
1517 | } | ||
1518 | stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); | ||
1519 | if (!stack) { | ||
1520 | r = -ENOMEM; | ||
1521 | goto out; | ||
1522 | } | ||
1523 | if (copy_from_user(stack, user_stack, | ||
1524 | sizeof(struct kvm_ia64_vcpu_stack))) | ||
1525 | goto out; | ||
1526 | |||
1527 | r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); | ||
1528 | break; | ||
1529 | } | ||
1530 | |||
1531 | default: | ||
1532 | r = -EINVAL; | ||
1533 | } | ||
1534 | |||
1535 | out: | ||
1536 | kfree(stack); | ||
1537 | return r; | ||
1538 | } | ||
1539 | |||
1540 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | ||
1541 | { | ||
1542 | return VM_FAULT_SIGBUS; | ||
1543 | } | ||
1544 | |||
1545 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | ||
1546 | unsigned long npages) | ||
1547 | { | ||
1548 | return 0; | ||
1549 | } | ||
1550 | |||
1551 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | ||
1552 | struct kvm_memory_slot *memslot, | ||
1553 | struct kvm_userspace_memory_region *mem, | ||
1554 | enum kvm_mr_change change) | ||
1555 | { | ||
1556 | unsigned long i; | ||
1557 | unsigned long pfn; | ||
1558 | int npages = memslot->npages; | ||
1559 | unsigned long base_gfn = memslot->base_gfn; | ||
1560 | |||
1561 | if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) | ||
1562 | return -ENOMEM; | ||
1563 | |||
1564 | for (i = 0; i < npages; i++) { | ||
1565 | pfn = gfn_to_pfn(kvm, base_gfn + i); | ||
1566 | if (!kvm_is_reserved_pfn(pfn)) { | ||
1567 | kvm_set_pmt_entry(kvm, base_gfn + i, | ||
1568 | pfn << PAGE_SHIFT, | ||
1569 | _PAGE_AR_RWX | _PAGE_MA_WB); | ||
1570 | memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); | ||
1571 | } else { | ||
1572 | kvm_set_pmt_entry(kvm, base_gfn + i, | ||
1573 | GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), | ||
1574 | _PAGE_MA_UC); | ||
1575 | memslot->rmap[i] = 0; | ||
1576 | } | ||
1577 | } | ||
1578 | |||
1579 | return 0; | ||
1580 | } | ||
1581 | |||
1582 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | ||
1583 | { | ||
1584 | kvm_flush_remote_tlbs(kvm); | ||
1585 | } | ||
1586 | |||
1587 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | ||
1588 | struct kvm_memory_slot *slot) | ||
1589 | { | ||
1590 | kvm_arch_flush_shadow_all(kvm); | ||
1591 | } | ||
1592 | |||
1593 | long kvm_arch_dev_ioctl(struct file *filp, | ||
1594 | unsigned int ioctl, unsigned long arg) | ||
1595 | { | ||
1596 | return -EINVAL; | ||
1597 | } | ||
1598 | |||
1599 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
1600 | { | ||
1601 | kvm_vcpu_uninit(vcpu); | ||
1602 | } | ||
1603 | |||
1604 | static int vti_cpu_has_kvm_support(void) | ||
1605 | { | ||
1606 | long avail = 1, status = 1, control = 1; | ||
1607 | long ret; | ||
1608 | |||
1609 | ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); | ||
1610 | if (ret) | ||
1611 | goto out; | ||
1612 | |||
1613 | if (!(avail & PAL_PROC_VM_BIT)) | ||
1614 | goto out; | ||
1615 | |||
1616 | printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); | ||
1617 | |||
1618 | ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); | ||
1619 | if (ret) | ||
1620 | goto out; | ||
1621 | printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); | ||
1622 | |||
1623 | if (!(vp_env_info & VP_OPCODE)) { | ||
1624 | printk(KERN_WARNING"kvm: No opcode ability on hardware, " | ||
1625 | "vm_env_info:0x%lx\n", vp_env_info); | ||
1626 | } | ||
1627 | |||
1628 | return 1; | ||
1629 | out: | ||
1630 | return 0; | ||
1631 | } | ||
1632 | |||
1633 | |||
1634 | /* | ||
1635 | * On SN2, the ITC isn't stable, so copy in fast path code to use the | ||
1636 | * SN2 RTC, replacing the ITC-based default version. | ||
1637 | */ | ||
1638 | static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, | ||
1639 | struct module *module) | ||
1640 | { | ||
1641 | unsigned long new_ar, new_ar_sn2; | ||
1642 | unsigned long module_base; | ||
1643 | |||
1644 | if (!ia64_platform_is("sn2")) | ||
1645 | return; | ||
1646 | |||
1647 | module_base = (unsigned long)module->module_core; | ||
1648 | |||
1649 | new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; | ||
1650 | new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; | ||
1651 | |||
1652 | printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " | ||
1653 | "as source\n"); | ||
1654 | |||
1655 | /* | ||
1656 | * Copy the SN2 version of mov_ar into place. They are both | ||
1657 | * the same size, so 6 bundles is sufficient (6 * 0x10). | ||
1658 | */ | ||
1659 | memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); | ||
1660 | } | ||
1661 | |||
1662 | static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, | ||
1663 | struct module *module) | ||
1664 | { | ||
1665 | unsigned long module_base; | ||
1666 | unsigned long vmm_size; | ||
1667 | |||
1668 | unsigned long vmm_offset, func_offset, fdesc_offset; | ||
1669 | struct fdesc *p_fdesc; | ||
1670 | |||
1671 | BUG_ON(!module); | ||
1672 | |||
1673 | if (!kvm_vmm_base) { | ||
1674 | printk(KERN_ERR "kvm: kvm area hasn't been initialized yet!\n"); | ||
1675 | return -EFAULT; | ||
1676 | } | ||
1677 | |||
1678 | /*Calculate new position of relocated vmm module.*/ | ||
1679 | module_base = (unsigned long)module->module_core; | ||
1680 | vmm_size = module->core_size; | ||
1681 | if (unlikely(vmm_size > KVM_VMM_SIZE)) | ||
1682 | return -EFAULT; | ||
1683 | |||
1684 | memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); | ||
1685 | kvm_patch_vmm(vmm_info, module); | ||
1686 | kvm_flush_icache(kvm_vmm_base, vmm_size); | ||
1687 | |||
1688 | /*Recalculate kvm_vmm_info based on new VMM*/ | ||
1689 | vmm_offset = vmm_info->vmm_ivt - module_base; | ||
1690 | kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; | ||
1691 | printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", | ||
1692 | kvm_vmm_info->vmm_ivt); | ||
1693 | |||
1694 | fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; | ||
1695 | kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + | ||
1696 | fdesc_offset); | ||
1697 | func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; | ||
1698 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | ||
1699 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | ||
1700 | p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); | ||
1701 | |||
1702 | printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", | ||
1703 | KVM_VMM_BASE+func_offset); | ||
1704 | |||
1705 | fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; | ||
1706 | kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + | ||
1707 | fdesc_offset); | ||
1708 | func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; | ||
1709 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | ||
1710 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | ||
1711 | p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); | ||
1712 | |||
1713 | kvm_vmm_gp = p_fdesc->gp; | ||
1714 | |||
1715 | printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", | ||
1716 | kvm_vmm_info->vmm_entry); | ||
1717 | printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", | ||
1718 | KVM_VMM_BASE + func_offset); | ||
1719 | |||
1720 | return 0; | ||
1721 | } | ||
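The double rewrite of ip and gp above follows from the ia64 ABI: a function "pointer" is really the address of a descriptor holding both the code entry point and the owning module's global pointer, so relocating code means patching both words. A minimal sketch of that idea (relocate_fdesc is a hypothetical helper; the two-field layout matches the kernel's struct fdesc):

struct fdesc {
	unsigned long ip;	/* code entry address */
	unsigned long gp;	/* global pointer of the owning module */
};

/* Rebase one descriptor from old_base to new_base. */
static void relocate_fdesc(struct fdesc *f, unsigned long old_base,
			   unsigned long new_base)
{
	f->ip = new_base + (f->ip - old_base);
	f->gp = new_base + (f->gp - old_base);
}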
1722 | |||
1723 | int kvm_arch_init(void *opaque) | ||
1724 | { | ||
1725 | int r; | ||
1726 | struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; | ||
1727 | |||
1728 | if (!vti_cpu_has_kvm_support()) { | ||
1729 | printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); | ||
1730 | r = -EOPNOTSUPP; | ||
1731 | goto out; | ||
1732 | } | ||
1733 | |||
1734 | if (kvm_vmm_info) { | ||
1735 | printk(KERN_ERR "kvm: Already loaded VMM module!\n"); | ||
1736 | r = -EEXIST; | ||
1737 | goto out; | ||
1738 | } | ||
1739 | |||
1740 | r = -ENOMEM; | ||
1741 | kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); | ||
1742 | if (!kvm_vmm_info) | ||
1743 | goto out; | ||
1744 | |||
1745 | if (kvm_alloc_vmm_area()) | ||
1746 | goto out_free0; | ||
1747 | |||
1748 | r = kvm_relocate_vmm(vmm_info, vmm_info->module); | ||
1749 | if (r) | ||
1750 | goto out_free1; | ||
1751 | |||
1752 | return 0; | ||
1753 | |||
1754 | out_free1: | ||
1755 | kvm_free_vmm_area(); | ||
1756 | out_free0: | ||
1757 | kfree(kvm_vmm_info); | ||
1758 | out: | ||
1759 | return r; | ||
1760 | } | ||
1761 | |||
1762 | void kvm_arch_exit(void) | ||
1763 | { | ||
1764 | kvm_free_vmm_area(); | ||
1765 | kfree(kvm_vmm_info); | ||
1766 | kvm_vmm_info = NULL; | ||
1767 | } | ||
1768 | |||
1769 | static void kvm_ia64_sync_dirty_log(struct kvm *kvm, | ||
1770 | struct kvm_memory_slot *memslot) | ||
1771 | { | ||
1772 | int i; | ||
1773 | long base; | ||
1774 | unsigned long n; | ||
1775 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + | ||
1776 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); | ||
1777 | |||
1778 | n = kvm_dirty_bitmap_bytes(memslot); | ||
1779 | base = memslot->base_gfn / BITS_PER_LONG; | ||
1780 | |||
1781 | spin_lock(&kvm->arch.dirty_log_lock); | ||
1782 | for (i = 0; i < n/sizeof(long); ++i) { | ||
1783 | memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; | ||
1784 | dirty_bitmap[base + i] = 0; | ||
1785 | } | ||
1786 | spin_unlock(&kvm->arch.dirty_log_lock); | ||
1787 | } | ||
1788 | |||
1789 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | ||
1790 | struct kvm_dirty_log *log) | ||
1791 | { | ||
1792 | int r; | ||
1793 | unsigned long n; | ||
1794 | struct kvm_memory_slot *memslot; | ||
1795 | int is_dirty = 0; | ||
1796 | |||
1797 | mutex_lock(&kvm->slots_lock); | ||
1798 | |||
1799 | r = -EINVAL; | ||
1800 | if (log->slot >= KVM_USER_MEM_SLOTS) | ||
1801 | goto out; | ||
1802 | |||
1803 | memslot = id_to_memslot(kvm->memslots, log->slot); | ||
1804 | r = -ENOENT; | ||
1805 | if (!memslot->dirty_bitmap) | ||
1806 | goto out; | ||
1807 | |||
1808 | kvm_ia64_sync_dirty_log(kvm, memslot); | ||
1809 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | ||
1810 | if (r) | ||
1811 | goto out; | ||
1812 | |||
1813 | /* If nothing is dirty, don't bother messing with page tables. */ | ||
1814 | if (is_dirty) { | ||
1815 | kvm_flush_remote_tlbs(kvm); | ||
1816 | n = kvm_dirty_bitmap_bytes(memslot); | ||
1817 | memset(memslot->dirty_bitmap, 0, n); | ||
1818 | } | ||
1819 | r = 0; | ||
1820 | out: | ||
1821 | mutex_unlock(&kvm->slots_lock); | ||
1822 | return r; | ||
1823 | } | ||
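For context, this handler backs the standard KVM_GET_DIRTY_LOG ioctl. A hedged userspace sketch of how it is typically consumed (vm_fd, slot, and npages are illustrative assumptions, not values from this file):

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static uint64_t *fetch_dirty_log(int vm_fd, uint32_t slot, size_t npages)
{
	struct kvm_dirty_log log = { .slot = slot };
	size_t bytes = ((npages + 63) / 64) * 8;	/* one bit per guest page */
	uint64_t *bitmap = calloc(1, bytes);

	if (!bitmap)
		return NULL;
	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;	/* bit n set => page n dirtied since the last call */
}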
1824 | |||
1825 | int kvm_arch_hardware_setup(void) | ||
1826 | { | ||
1827 | return 0; | ||
1828 | } | ||
1829 | |||
1830 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) | ||
1831 | { | ||
1832 | return __apic_accept_irq(vcpu, irq->vector); | ||
1833 | } | ||
1834 | |||
1835 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) | ||
1836 | { | ||
1837 | return apic->vcpu->vcpu_id == dest; | ||
1838 | } | ||
1839 | |||
1840 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) | ||
1841 | { | ||
1842 | return 0; | ||
1843 | } | ||
1844 | |||
1845 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) | ||
1846 | { | ||
1847 | return vcpu1->arch.xtp - vcpu2->arch.xtp; | ||
1848 | } | ||
1849 | |||
1850 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | ||
1851 | int short_hand, int dest, int dest_mode) | ||
1852 | { | ||
1853 | struct kvm_lapic *target = vcpu->arch.apic; | ||
1854 | return (dest_mode == 0) ? | ||
1855 | kvm_apic_match_physical_addr(target, dest) : | ||
1856 | kvm_apic_match_logical_addr(target, dest); | ||
1857 | } | ||
1858 | |||
1859 | static int find_highest_bits(int *dat) | ||
1860 | { | ||
1861 | u32 bits, bitnum; | ||
1862 | int i; | ||
1863 | |||
1864 | /* loop for all 256 bits */ | ||
1865 | for (i = 7; i >= 0 ; i--) { | ||
1866 | bits = dat[i]; | ||
1867 | if (bits) { | ||
1868 | bitnum = fls(bits); | ||
1869 | return i * 32 + bitnum - 1; | ||
1870 | } | ||
1871 | } | ||
1872 | |||
1873 | return -1; | ||
1874 | } | ||
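The scan above walks the 256-bit IRR as eight 32-bit words from the top; fls() returns the 1-based position of the highest set bit, so the result is the highest pending vector number. A standalone user-space version of the same logic, for illustration (the kernel's fls() is modeled with a compiler builtin here, which is an assumption):

#include <stdio.h>

static int my_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int highest_bit_of_256(const int *dat)
{
	for (int i = 7; i >= 0; i--)	/* 8 x 32 = 256 bits, top word first */
		if (dat[i])
			return i * 32 + my_fls((unsigned int)dat[i]) - 1;
	return -1;			/* nothing pending */
}

int main(void)
{
	int irr[8] = { 0 };

	irr[2] = 1 << 5;				/* vector 2*32 + 5 = 69 */
	printf("%d\n", highest_bit_of_256(irr));	/* prints 69 */
	return 0;
}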
1875 | |||
1876 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) | ||
1877 | { | ||
1878 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1879 | |||
1880 | if (vpd->irr[0] & (1UL << NMI_VECTOR)) | ||
1881 | return NMI_VECTOR; | ||
1882 | if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) | ||
1883 | return ExtINT_VECTOR; | ||
1884 | |||
1885 | return find_highest_bits((int *)&vpd->irr[0]); | ||
1886 | } | ||
1887 | |||
1888 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | ||
1889 | { | ||
1890 | return vcpu->arch.timer_fired; | ||
1891 | } | ||
1892 | |||
1893 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | ||
1894 | { | ||
1895 | return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || | ||
1896 | (kvm_highest_pending_irq(vcpu) != -1); | ||
1897 | } | ||
1898 | |||
1899 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | ||
1900 | { | ||
1901 | return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)); | ||
1902 | } | ||
1903 | |||
1904 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | ||
1905 | struct kvm_mp_state *mp_state) | ||
1906 | { | ||
1907 | mp_state->mp_state = vcpu->arch.mp_state; | ||
1908 | return 0; | ||
1909 | } | ||
1910 | |||
1911 | static int vcpu_reset(struct kvm_vcpu *vcpu) | ||
1912 | { | ||
1913 | int r; | ||
1914 | long psr; | ||
1915 | local_irq_save(psr); | ||
1916 | r = kvm_insert_vmm_mapping(vcpu); | ||
1917 | local_irq_restore(psr); | ||
1918 | if (r) | ||
1919 | goto fail; | ||
1920 | |||
1921 | vcpu->arch.launched = 0; | ||
1922 | kvm_arch_vcpu_uninit(vcpu); | ||
1923 | r = kvm_arch_vcpu_init(vcpu); | ||
1924 | if (r) | ||
1925 | goto fail; | ||
1926 | |||
1927 | kvm_purge_vmm_mapping(vcpu); | ||
1928 | r = 0; | ||
1929 | fail: | ||
1930 | return r; | ||
1931 | } | ||
1932 | |||
1933 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | ||
1934 | struct kvm_mp_state *mp_state) | ||
1935 | { | ||
1936 | int r = 0; | ||
1937 | |||
1938 | vcpu->arch.mp_state = mp_state->mp_state; | ||
1939 | if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) | ||
1940 | r = vcpu_reset(vcpu); | ||
1941 | return r; | ||
1942 | } | ||
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
deleted file mode 100644
index cb548ee9fcae..000000000000
--- a/arch/ia64/kvm/kvm_fw.c
+++ /dev/null
@@ -1,674 +0,0 @@ | |||
1 | /* | ||
2 | * PAL/SAL call delegation | ||
3 | * | ||
4 | * Copyright (c) 2004 Li Susie <susie.li@intel.com> | ||
5 | * Copyright (c) 2005 Yu Ke <ke.yu@intel.com> | ||
6 | * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kvm_host.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <asm/sn/addrs.h> | ||
25 | #include <asm/sn/clksupport.h> | ||
26 | #include <asm/sn/shub_mmr.h> | ||
27 | |||
28 | #include "vti.h" | ||
29 | #include "misc.h" | ||
30 | |||
31 | #include <asm/pal.h> | ||
32 | #include <asm/sal.h> | ||
33 | #include <asm/tlb.h> | ||
34 | |||
35 | /* | ||
36 | * Handy macros to make sure that the PAL return values start out | ||
37 | * as something meaningful. | ||
38 | */ | ||
39 | #define INIT_PAL_STATUS_UNIMPLEMENTED(x) \ | ||
40 | { \ | ||
41 | x.status = PAL_STATUS_UNIMPLEMENTED; \ | ||
42 | x.v0 = 0; \ | ||
43 | x.v1 = 0; \ | ||
44 | x.v2 = 0; \ | ||
45 | } | ||
46 | |||
47 | #define INIT_PAL_STATUS_SUCCESS(x) \ | ||
48 | { \ | ||
49 | x.status = PAL_STATUS_SUCCESS; \ | ||
50 | x.v0 = 0; \ | ||
51 | x.v1 = 0; \ | ||
52 | x.v2 = 0; \ | ||
53 | } | ||
54 | |||
55 | static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu, | ||
56 | u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) { | ||
57 | struct exit_ctl_data *p; | ||
58 | |||
59 | if (vcpu) { | ||
60 | p = &vcpu->arch.exit_data; | ||
61 | if (p->exit_reason == EXIT_REASON_PAL_CALL) { | ||
62 | *gr28 = p->u.pal_data.gr28; | ||
63 | *gr29 = p->u.pal_data.gr29; | ||
64 | *gr30 = p->u.pal_data.gr30; | ||
65 | *gr31 = p->u.pal_data.gr31; | ||
66 | return ; | ||
67 | } | ||
68 | } | ||
69 | printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n"); | ||
70 | } | ||
71 | |||
72 | static void set_pal_result(struct kvm_vcpu *vcpu, | ||
73 | struct ia64_pal_retval result) { | ||
74 | |||
75 | struct exit_ctl_data *p; | ||
76 | |||
77 | p = kvm_get_exit_data(vcpu); | ||
78 | if (p->exit_reason == EXIT_REASON_PAL_CALL) { | ||
79 | p->u.pal_data.ret = result; | ||
80 | return ; | ||
81 | } | ||
82 | INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret); | ||
83 | } | ||
84 | |||
85 | static void set_sal_result(struct kvm_vcpu *vcpu, | ||
86 | struct sal_ret_values result) { | ||
87 | struct exit_ctl_data *p; | ||
88 | |||
89 | p = kvm_get_exit_data(vcpu); | ||
90 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
91 | p->u.sal_data.ret = result; | ||
92 | return ; | ||
93 | } | ||
94 | printk(KERN_WARNING"Failed to set sal result!!\n"); | ||
95 | } | ||
96 | |||
97 | struct cache_flush_args { | ||
98 | u64 cache_type; | ||
99 | u64 operation; | ||
100 | u64 progress; | ||
101 | long status; | ||
102 | }; | ||
103 | |||
104 | cpumask_t cpu_cache_coherent_map; | ||
105 | |||
106 | static void remote_pal_cache_flush(void *data) | ||
107 | { | ||
108 | struct cache_flush_args *args = data; | ||
109 | long status; | ||
110 | u64 progress = args->progress; | ||
111 | |||
112 | status = ia64_pal_cache_flush(args->cache_type, args->operation, | ||
113 | &progress, NULL); | ||
114 | if (status != 0) | ||
115 | args->status = status; | ||
116 | } | ||
117 | |||
118 | static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu) | ||
119 | { | ||
120 | u64 gr28, gr29, gr30, gr31; | ||
121 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
122 | struct cache_flush_args args = {0, 0, 0, 0}; | ||
123 | long psr; | ||
124 | |||
125 | gr28 = gr29 = gr30 = gr31 = 0; | ||
126 | kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31); | ||
127 | |||
128 | if (gr31 != 0) | ||
129 | printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu); | ||
130 | |||
131 | /* Always call Host Pal in int=1 */ | ||
132 | gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS; | ||
133 | args.cache_type = gr29; | ||
134 | args.operation = gr30; | ||
135 | smp_call_function(remote_pal_cache_flush, | ||
136 | (void *)&args, 1); | ||
137 | if (args.status != 0) | ||
138 | printk(KERN_ERR"pal_cache_flush error!," | ||
139 | "status:0x%lx\n", args.status); | ||
140 | /* | ||
141 | * Call Host PAL cache flush | ||
142 | * Clear psr.ic when calling PAL_CACHE_FLUSH | ||
143 | */ | ||
144 | local_irq_save(psr); | ||
145 | result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1, | ||
146 | &result.v0); | ||
147 | local_irq_restore(psr); | ||
148 | if (result.status != 0) | ||
149 | printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld" | ||
150 | "in1:%lx,in2:%lx\n", | ||
151 | vcpu, result.status, gr29, gr30); | ||
152 | |||
153 | #if 0 | ||
154 | if (gr29 == PAL_CACHE_TYPE_COHERENT) { | ||
155 | cpus_setall(vcpu->arch.cache_coherent_map); | ||
156 | cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map); | ||
157 | cpus_setall(cpu_cache_coherent_map); | ||
158 | cpu_clear(vcpu->cpu, cpu_cache_coherent_map); | ||
159 | } | ||
160 | #endif | ||
161 | return result; | ||
162 | } | ||
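The flush above is a common broadcast pattern: smp_call_function() runs a callback on every other online CPU (waiting for completion when the last argument is 1), and the calling CPU then does its own share locally, here with interrupts disabled as PAL_CACHE_FLUSH requires. A schematic sketch of that shape (function names are hypothetical):

static void my_flush_one(void *info)
{
	/* per-CPU flush work; failures reported back through *info */
}

static void my_flush_all(void)
{
	long status = 0;

	smp_call_function(my_flush_one, &status, 1);	/* all other CPUs, wait */
	my_flush_one(&status);				/* then this CPU */
}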
163 | |||
164 | struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu) | ||
165 | { | ||
166 | |||
167 | struct ia64_pal_retval result; | ||
168 | |||
169 | PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0); | ||
170 | return result; | ||
171 | } | ||
172 | |||
173 | static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu) | ||
174 | { | ||
175 | |||
176 | struct ia64_pal_retval result; | ||
177 | |||
178 | PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0); | ||
179 | |||
180 | /* | ||
181 | * PAL_FREQ_BASE may not be implemented in some platforms, | ||
182 | * call SAL instead. | ||
183 | */ | ||
184 | if (result.v0 == 0) { | ||
185 | result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, | ||
186 | &result.v0, | ||
187 | &result.v1); | ||
188 | result.v2 = 0; | ||
189 | } | ||
190 | |||
191 | return result; | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2 | ||
196 | * RTC is used instead. This function patches the ratios from SAL | ||
197 | * to match the RTC before providing them to the guest. | ||
198 | */ | ||
199 | static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result) | ||
200 | { | ||
201 | struct pal_freq_ratio *ratio; | ||
202 | unsigned long sal_freq, sal_drift, factor; | ||
203 | |||
204 | result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, | ||
205 | &sal_freq, &sal_drift); | ||
206 | ratio = (struct pal_freq_ratio *)&result->v2; | ||
207 | factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) / | ||
208 | sn_rtc_cycles_per_second; | ||
209 | |||
210 | ratio->num = 3; | ||
211 | ratio->den = factor; | ||
212 | } | ||
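A worked illustration with hypothetical numbers: for sal_freq = 200 MHz and sn_rtc_cycles_per_second = 50 MHz, factor = (3 * 200e6 + 25e6) / 50e6 = 12, so the guest sees the ratio num/den = 3/12 and computes an ITC rate of 200 MHz * 3/12 = 50 MHz, the RTC rate that actually backs the emulated ITC.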
213 | |||
214 | static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu) | ||
215 | { | ||
216 | struct ia64_pal_retval result; | ||
217 | |||
218 | PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0); | ||
219 | |||
220 | if (vcpu->kvm->arch.is_sn2) | ||
221 | sn2_patch_itc_freq_ratios(&result); | ||
222 | |||
223 | return result; | ||
224 | } | ||
225 | |||
226 | static struct ia64_pal_retval pal_logical_to_physical(struct kvm_vcpu *vcpu) | ||
227 | { | ||
228 | struct ia64_pal_retval result; | ||
229 | |||
230 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | ||
231 | return result; | ||
232 | } | ||
233 | |||
234 | static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu) | ||
235 | { | ||
236 | |||
237 | struct ia64_pal_retval result; | ||
238 | |||
239 | INIT_PAL_STATUS_SUCCESS(result); | ||
240 | return result; | ||
241 | } | ||
242 | |||
243 | static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu) | ||
244 | { | ||
245 | |||
246 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
248 | u64 in0, in1, in2, in3; | ||
248 | |||
249 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
250 | result.status = ia64_pal_proc_get_features(&result.v0, &result.v1, | ||
251 | &result.v2, in2); | ||
252 | |||
253 | return result; | ||
254 | } | ||
255 | |||
256 | static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu) | ||
257 | { | ||
258 | |||
259 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
261 | u64 in0, in1, in2, in3; | ||
261 | |||
262 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
263 | result.status = ia64_pal_register_info(in1, &result.v1, &result.v2); | ||
264 | |||
265 | return result; | ||
266 | } | ||
267 | |||
268 | static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) | ||
269 | { | ||
270 | |||
271 | pal_cache_config_info_t ci; | ||
272 | long status; | ||
273 | unsigned long in0, in1, in2, in3, r9, r10; | ||
274 | |||
275 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
276 | status = ia64_pal_cache_config_info(in1, in2, &ci); | ||
277 | r9 = ci.pcci_info_1.pcci1_data; | ||
278 | r10 = ci.pcci_info_2.pcci2_data; | ||
279 | return ((struct ia64_pal_retval){status, r9, r10, 0}); | ||
280 | } | ||
281 | |||
282 | #define GUEST_IMPL_VA_MSB 59 | ||
283 | #define GUEST_RID_BITS 18 | ||
284 | |||
285 | static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu) | ||
286 | { | ||
287 | |||
288 | pal_vm_info_1_u_t vminfo1; | ||
289 | pal_vm_info_2_u_t vminfo2; | ||
290 | struct ia64_pal_retval result; | ||
291 | |||
292 | PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0); | ||
293 | if (!result.status) { | ||
294 | vminfo1.pvi1_val = result.v0; | ||
295 | vminfo1.pal_vm_info_1_s.max_itr_entry = 8; | ||
296 | vminfo1.pal_vm_info_1_s.max_dtr_entry = 8; | ||
297 | result.v0 = vminfo1.pvi1_val; | ||
298 | vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB; | ||
299 | vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS; | ||
300 | result.v1 = vminfo2.pvi2_val; | ||
301 | } | ||
302 | |||
303 | return result; | ||
304 | } | ||
305 | |||
306 | static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) | ||
307 | { | ||
308 | struct ia64_pal_retval result; | ||
309 | unsigned long in0, in1, in2, in3; | ||
310 | |||
311 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
312 | |||
313 | result.status = ia64_pal_vm_info(in1, in2, | ||
314 | (pal_tc_info_u_t *)&result.v1, &result.v2); | ||
315 | |||
316 | return result; | ||
317 | } | ||
318 | |||
319 | static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu) | ||
320 | { | ||
321 | u64 index = 0; | ||
322 | struct exit_ctl_data *p; | ||
323 | |||
324 | p = kvm_get_exit_data(vcpu); | ||
325 | if (p->exit_reason == EXIT_REASON_PAL_CALL) | ||
326 | index = p->u.pal_data.gr28; | ||
327 | |||
328 | return index; | ||
329 | } | ||
330 | |||
331 | static void prepare_for_halt(struct kvm_vcpu *vcpu) | ||
332 | { | ||
333 | vcpu->arch.timer_pending = 1; | ||
334 | vcpu->arch.timer_fired = 0; | ||
335 | } | ||
336 | |||
337 | static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu) | ||
338 | { | ||
339 | long status; | ||
340 | unsigned long in0, in1, in2, in3, r9; | ||
341 | unsigned long pm_buffer[16]; | ||
342 | |||
343 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
344 | status = ia64_pal_perf_mon_info(pm_buffer, | ||
345 | (pal_perf_mon_info_u_t *) &r9); | ||
346 | if (status != 0) { | ||
347 | printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status); | ||
348 | } else { | ||
349 | if (in1) | ||
350 | memcpy((void *)in1, pm_buffer, sizeof(pm_buffer)); | ||
351 | else { | ||
352 | status = PAL_STATUS_EINVAL; | ||
353 | printk(KERN_WARNING"Invalid parameters " | ||
354 | "for PAL call:0x%lx!\n", in0); | ||
355 | } | ||
356 | } | ||
357 | return (struct ia64_pal_retval){status, r9, 0, 0}; | ||
358 | } | ||
359 | |||
360 | static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu) | ||
361 | { | ||
362 | unsigned long in0, in1, in2, in3; | ||
363 | long status; | ||
364 | unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32) | ||
365 | | (1UL << 61) | (1UL << 60); | ||
366 | |||
367 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
368 | if (in1) { | ||
369 | memcpy((void *)in1, &res, sizeof(res)); | ||
370 | status = 0; | ||
371 | } else { | ||
372 | status = PAL_STATUS_EINVAL; | ||
373 | printk(KERN_WARNING"Invalid parameters " | ||
374 | "for PAL call:0x%lx!\n", in0); | ||
375 | } | ||
376 | |||
377 | return (struct ia64_pal_retval){status, 0, 0, 0}; | ||
378 | } | ||
379 | |||
380 | static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu) | ||
381 | { | ||
382 | unsigned long r9; | ||
383 | long status; | ||
384 | |||
385 | status = ia64_pal_mem_attrib(&r9); | ||
386 | |||
387 | return (struct ia64_pal_retval){status, r9, 0, 0}; | ||
388 | } | ||
389 | |||
390 | static void remote_pal_prefetch_visibility(void *v) | ||
391 | { | ||
392 | s64 trans_type = (s64)v; | ||
393 | ia64_pal_prefetch_visibility(trans_type); | ||
394 | } | ||
395 | |||
396 | static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu) | ||
397 | { | ||
398 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
399 | unsigned long in0, in1, in2, in3; | ||
400 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
401 | result.status = ia64_pal_prefetch_visibility(in1); | ||
402 | if (result.status == 0) { | ||
403 | /* Must be performed on all remote processors | ||
404 | in the coherence domain. */ | ||
405 | smp_call_function(remote_pal_prefetch_visibility, | ||
406 | (void *)in1, 1); | ||
407 | /* Unnecessary on remote processor for other vcpus!*/ | ||
408 | result.status = 1; | ||
409 | } | ||
410 | return result; | ||
411 | } | ||
412 | |||
413 | static void remote_pal_mc_drain(void *v) | ||
414 | { | ||
415 | ia64_pal_mc_drain(); | ||
416 | } | ||
417 | |||
418 | static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu) | ||
419 | { | ||
420 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
421 | unsigned long in0, in1, in2, in3; | ||
422 | |||
423 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
424 | |||
425 | if (in1 == 0 && in2) { | ||
426 | char brand_info[128]; | ||
427 | result.status = ia64_pal_get_brand_info(brand_info); | ||
428 | if (result.status == PAL_STATUS_SUCCESS) | ||
429 | memcpy((void *)in2, brand_info, 128); | ||
430 | } else { | ||
431 | result.status = PAL_STATUS_REQUIRES_MEMORY; | ||
432 | printk(KERN_WARNING"Invalid parameters for " | ||
433 | "PAL call:0x%lx!\n", in0); | ||
434 | } | ||
435 | |||
436 | return result; | ||
437 | } | ||
438 | |||
439 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
440 | { | ||
441 | |||
442 | u64 gr28; | ||
443 | struct ia64_pal_retval result; | ||
444 | int ret = 1; | ||
445 | |||
446 | gr28 = kvm_get_pal_call_index(vcpu); | ||
447 | switch (gr28) { | ||
448 | case PAL_CACHE_FLUSH: | ||
449 | result = pal_cache_flush(vcpu); | ||
450 | break; | ||
451 | case PAL_MEM_ATTRIB: | ||
452 | result = pal_mem_attrib(vcpu); | ||
453 | break; | ||
454 | case PAL_CACHE_SUMMARY: | ||
455 | result = pal_cache_summary(vcpu); | ||
456 | break; | ||
457 | case PAL_PERF_MON_INFO: | ||
458 | result = pal_perf_mon_info(vcpu); | ||
459 | break; | ||
460 | case PAL_HALT_INFO: | ||
461 | result = pal_halt_info(vcpu); | ||
462 | break; | ||
463 | case PAL_HALT_LIGHT: | ||
464 | { | ||
465 | INIT_PAL_STATUS_SUCCESS(result); | ||
466 | prepare_for_halt(vcpu); | ||
467 | if (kvm_highest_pending_irq(vcpu) == -1) | ||
468 | ret = kvm_emulate_halt(vcpu); | ||
469 | } | ||
470 | break; | ||
471 | |||
472 | case PAL_PREFETCH_VISIBILITY: | ||
473 | result = pal_prefetch_visibility(vcpu); | ||
474 | break; | ||
475 | case PAL_MC_DRAIN: | ||
476 | result.status = ia64_pal_mc_drain(); | ||
477 | /* FIXME: All vcpus likely call PAL_MC_DRAIN. | ||
478 | That causes congestion. */ | ||
479 | smp_call_function(remote_pal_mc_drain, NULL, 1); | ||
480 | break; | ||
481 | |||
482 | case PAL_FREQ_RATIOS: | ||
483 | result = pal_freq_ratios(vcpu); | ||
484 | break; | ||
485 | |||
486 | case PAL_FREQ_BASE: | ||
487 | result = pal_freq_base(vcpu); | ||
488 | break; | ||
489 | |||
490 | case PAL_LOGICAL_TO_PHYSICAL : | ||
491 | result = pal_logical_to_physical(vcpu); | ||
492 | break; | ||
493 | |||
494 | case PAL_VM_SUMMARY : | ||
495 | result = pal_vm_summary(vcpu); | ||
496 | break; | ||
497 | |||
498 | case PAL_VM_INFO : | ||
499 | result = pal_vm_info(vcpu); | ||
500 | break; | ||
501 | case PAL_PLATFORM_ADDR : | ||
502 | result = pal_platform_addr(vcpu); | ||
503 | break; | ||
504 | case PAL_CACHE_INFO: | ||
505 | result = pal_cache_info(vcpu); | ||
506 | break; | ||
507 | case PAL_PTCE_INFO: | ||
508 | INIT_PAL_STATUS_SUCCESS(result); | ||
509 | result.v1 = (1L << 32) | 1L; | ||
510 | break; | ||
511 | case PAL_REGISTER_INFO: | ||
512 | result = pal_register_info(vcpu); | ||
513 | break; | ||
514 | case PAL_VM_PAGE_SIZE: | ||
515 | result.status = ia64_pal_vm_page_size(&result.v0, | ||
516 | &result.v1); | ||
517 | break; | ||
518 | case PAL_RSE_INFO: | ||
519 | result.status = ia64_pal_rse_info(&result.v0, | ||
520 | (pal_hints_u_t *)&result.v1); | ||
521 | break; | ||
522 | case PAL_PROC_GET_FEATURES: | ||
523 | result = pal_proc_get_features(vcpu); | ||
524 | break; | ||
525 | case PAL_DEBUG_INFO: | ||
526 | result.status = ia64_pal_debug_info(&result.v0, | ||
527 | &result.v1); | ||
528 | break; | ||
529 | case PAL_VERSION: | ||
530 | result.status = ia64_pal_version( | ||
531 | (pal_version_u_t *)&result.v0, | ||
532 | (pal_version_u_t *)&result.v1); | ||
533 | break; | ||
534 | case PAL_FIXED_ADDR: | ||
535 | result.status = PAL_STATUS_SUCCESS; | ||
536 | result.v0 = vcpu->vcpu_id; | ||
537 | break; | ||
538 | case PAL_BRAND_INFO: | ||
539 | result = pal_get_brand_info(vcpu); | ||
540 | break; | ||
541 | case PAL_GET_PSTATE: | ||
542 | case PAL_CACHE_SHARED_INFO: | ||
543 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | ||
544 | break; | ||
545 | default: | ||
546 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | ||
547 | printk(KERN_WARNING"kvm: Unsupported pal call," | ||
548 | " index:0x%lx\n", gr28); | ||
549 | } | ||
550 | set_pal_result(vcpu, result); | ||
551 | return ret; | ||
552 | } | ||
553 | |||
554 | static struct sal_ret_values sal_emulator(struct kvm *kvm, | ||
555 | long index, unsigned long in1, | ||
556 | unsigned long in2, unsigned long in3, | ||
557 | unsigned long in4, unsigned long in5, | ||
558 | unsigned long in6, unsigned long in7) | ||
559 | { | ||
560 | unsigned long r9 = 0; | ||
561 | unsigned long r10 = 0; | ||
562 | long r11 = 0; | ||
563 | long status; | ||
564 | |||
565 | status = 0; | ||
566 | switch (index) { | ||
567 | case SAL_FREQ_BASE: | ||
568 | status = ia64_sal_freq_base(in1, &r9, &r10); | ||
569 | break; | ||
570 | case SAL_PCI_CONFIG_READ: | ||
571 | printk(KERN_WARNING"kvm: Not allowed to call here!" | ||
572 | " SAL_PCI_CONFIG_READ\n"); | ||
573 | break; | ||
574 | case SAL_PCI_CONFIG_WRITE: | ||
575 | printk(KERN_WARNING"kvm: Not allowed to call here!" | ||
576 | " SAL_PCI_CONFIG_WRITE\n"); | ||
577 | break; | ||
578 | case SAL_SET_VECTORS: | ||
579 | if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) { | ||
580 | if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) { | ||
581 | status = -2; | ||
582 | } else { | ||
583 | kvm->arch.rdv_sal_data.boot_ip = in2; | ||
584 | kvm->arch.rdv_sal_data.boot_gp = in3; | ||
585 | } | ||
586 | printk("Rendvous called! iip:%lx\n\n", in2); | ||
587 | } else | ||
588 | printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu." | ||
589 | "ignored...\n", in1); | ||
590 | break; | ||
591 | case SAL_GET_STATE_INFO: | ||
592 | /* No more info. */ | ||
593 | status = -5; | ||
594 | r9 = 0; | ||
595 | break; | ||
596 | case SAL_GET_STATE_INFO_SIZE: | ||
597 | /* Return a dummy size. */ | ||
598 | status = 0; | ||
599 | r9 = 128; | ||
600 | break; | ||
601 | case SAL_CLEAR_STATE_INFO: | ||
602 | /* Noop. */ | ||
603 | break; | ||
604 | case SAL_MC_RENDEZ: | ||
605 | printk(KERN_WARNING | ||
606 | "kvm: called SAL_MC_RENDEZ. ignored...\n"); | ||
607 | break; | ||
608 | case SAL_MC_SET_PARAMS: | ||
609 | printk(KERN_WARNING | ||
610 | "kvm: called SAL_MC_SET_PARAMS.ignored!\n"); | ||
611 | break; | ||
612 | case SAL_CACHE_FLUSH: | ||
613 | if (1) { | ||
614 | /*Flush using SAL. | ||
615 | This method is faster but has a side | ||
616 | effect on other vcpus running on | ||
617 | this cpu. */ | ||
618 | status = ia64_sal_cache_flush(in1); | ||
619 | } else { | ||
620 | /*Maybe need to implement the method | ||
621 | without side effect!*/ | ||
622 | status = 0; | ||
623 | } | ||
624 | break; | ||
625 | case SAL_CACHE_INIT: | ||
626 | printk(KERN_WARNING | ||
627 | "kvm: called SAL_CACHE_INIT. ignored...\n"); | ||
628 | break; | ||
629 | case SAL_UPDATE_PAL: | ||
630 | printk(KERN_WARNING | ||
631 | "kvm: CALLED SAL_UPDATE_PAL. ignored...\n"); | ||
632 | break; | ||
633 | default: | ||
634 | printk(KERN_WARNING"kvm: called SAL_CALL with unknown index." | ||
635 | " index:%ld\n", index); | ||
636 | status = -1; | ||
637 | break; | ||
638 | } | ||
639 | return ((struct sal_ret_values) {status, r9, r10, r11}); | ||
640 | } | ||
641 | |||
642 | static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1, | ||
643 | u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){ | ||
644 | |||
645 | struct exit_ctl_data *p; | ||
646 | |||
647 | p = kvm_get_exit_data(vcpu); | ||
648 | |||
649 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
650 | *in0 = p->u.sal_data.in0; | ||
651 | *in1 = p->u.sal_data.in1; | ||
652 | *in2 = p->u.sal_data.in2; | ||
653 | *in3 = p->u.sal_data.in3; | ||
654 | *in4 = p->u.sal_data.in4; | ||
655 | *in5 = p->u.sal_data.in5; | ||
656 | *in6 = p->u.sal_data.in6; | ||
657 | *in7 = p->u.sal_data.in7; | ||
658 | return ; | ||
659 | } | ||
660 | *in0 = 0; | ||
661 | } | ||
662 | |||
663 | void kvm_sal_emul(struct kvm_vcpu *vcpu) | ||
664 | { | ||
665 | |||
666 | struct sal_ret_values result; | ||
667 | u64 index, in1, in2, in3, in4, in5, in6, in7; | ||
668 | |||
669 | kvm_get_sal_call_data(vcpu, &index, &in1, &in2, | ||
670 | &in3, &in4, &in5, &in6, &in7); | ||
671 | result = sal_emulator(vcpu->kvm, index, in1, in2, in3, | ||
672 | in4, in5, in6, in7); | ||
673 | set_sal_result(vcpu, result); | ||
674 | } | ||
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
deleted file mode 100644
index f1268b8e6f9e..000000000000
--- a/arch/ia64/kvm/kvm_lib.c
+++ /dev/null
@@ -1,21 +0,0 @@ | |||
1 | /* | ||
2 | * kvm_lib.c: Compile some libraries for kvm-intel module. | ||
3 | * | ||
4 | * Just include kernel's library, and disable symbols export. | ||
5 | * Copyright (C) 2008, Intel Corporation. | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | #undef CONFIG_MODULES | ||
14 | #include <linux/module.h> | ||
15 | #undef CONFIG_KALLSYMS | ||
16 | #undef EXPORT_SYMBOL | ||
17 | #undef EXPORT_SYMBOL_GPL | ||
18 | #define EXPORT_SYMBOL(sym) | ||
19 | #define EXPORT_SYMBOL_GPL(sym) | ||
20 | #include "../../../lib/vsprintf.c" | ||
21 | #include "../../../lib/ctype.c" | ||
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
deleted file mode 100644
index b2bcaa2787aa..000000000000
--- a/arch/ia64/kvm/kvm_minstate.h
+++ /dev/null
@@ -1,266 +0,0 @@ | |||
1 | /* | ||
2 | * kvm_minstate.h: min save macros | ||
3 | * Copyright (c) 2007, Intel Corporation. | ||
4 | * | ||
5 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | |||
24 | #include <asm/asmmacro.h> | ||
25 | #include <asm/types.h> | ||
26 | #include <asm/kregs.h> | ||
27 | #include <asm/kvm_host.h> | ||
28 | |||
29 | #include "asm-offsets.h" | ||
30 | |||
31 | #define KVM_MINSTATE_START_SAVE_MIN \ | ||
32 | mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\ | ||
33 | ;; \ | ||
34 | mov.m r28 = ar.rnat; \ | ||
35 | addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ | ||
36 | ;; \ | ||
37 | lfetch.fault.excl.nt1 [r22]; \ | ||
38 | addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \ | ||
39 | mov r23 = ar.bspstore; /* save ar.bspstore */ \ | ||
40 | ;; \ | ||
41 | mov ar.bspstore = r22; /* switch to kernel RBS */\ | ||
42 | ;; \ | ||
43 | mov r18 = ar.bsp; \ | ||
44 | mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ | ||
45 | |||
46 | |||
47 | |||
48 | #define KVM_MINSTATE_END_SAVE_MIN \ | ||
49 | bsw.1; /* switch back to bank 1 (must be last in insn group) */\ | ||
50 | ;; | ||
51 | |||
52 | |||
53 | #define PAL_VSA_SYNC_READ \ | ||
54 | /* begin to call pal vps sync_read */ \ | ||
55 | {.mii; \ | ||
56 | add r25 = VMM_VPD_BASE_OFFSET, r21; \ | ||
57 | nop 0x0; \ | ||
58 | mov r24=ip; \ | ||
59 | ;; \ | ||
60 | } \ | ||
61 | {.mmb \ | ||
62 | add r24=0x20, r24; \ | ||
63 | ld8 r25 = [r25]; /* read vpd base */ \ | ||
64 | br.cond.sptk kvm_vps_sync_read; /*call the service*/ \ | ||
65 | ;; \ | ||
66 | }; \ | ||
67 | |||
68 | |||
69 | #define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21 | ||
70 | |||
71 | /* | ||
72 | * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | ||
73 | * the minimum state necessary that allows us to turn psr.ic back | ||
74 | * on. | ||
75 | * | ||
76 | * Assumed state upon entry: | ||
77 | * psr.ic: off | ||
78 | * r31: contains saved predicates (pr) | ||
79 | * | ||
80 | * Upon exit, the state is as follows: | ||
81 | * psr.ic: off | ||
82 | * r2 = points to &pt_regs.r16 | ||
83 | * r8 = contents of ar.ccv | ||
84 | * r9 = contents of ar.csd | ||
85 | * r10 = contents of ar.ssd | ||
86 | * r11 = FPSR_DEFAULT | ||
87 | * r12 = kernel sp (kernel virtual address) | ||
88 | * r13 = points to current task_struct (kernel virtual address) | ||
89 | * p15 = TRUE if psr.i is set in cr.ipsr | ||
90 | * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: | ||
91 | * preserved | ||
92 | * | ||
93 | * Note that psr.ic is NOT turned on by this macro. This is so that | ||
94 | * we can pass interruption state as arguments to a handler. | ||
95 | */ | ||
96 | |||
97 | |||
98 | #define PT(f) (VMM_PT_REGS_##f##_OFFSET) | ||
99 | |||
100 | #define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ | ||
101 | KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ | ||
102 | mov r27 = ar.rsc; /* M */ \ | ||
103 | mov r20 = r1; /* A */ \ | ||
104 | mov r25 = ar.unat; /* M */ \ | ||
105 | mov r29 = cr.ipsr; /* M */ \ | ||
106 | mov r26 = ar.pfs; /* I */ \ | ||
107 | mov r18 = cr.isr; \ | ||
108 | COVER; /* B;; (or nothing) */ \ | ||
109 | ;; \ | ||
110 | tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \ | ||
111 | mov r1 = r16; \ | ||
112 | /* mov r21=r16; */ \ | ||
113 | /* switch from user to kernel RBS: */ \ | ||
114 | ;; \ | ||
115 | invala; /* M */ \ | ||
116 | SAVE_IFS; \ | ||
117 | ;; \ | ||
118 | KVM_MINSTATE_START_SAVE_MIN \ | ||
119 | adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \ | ||
120 | adds r16 = PT(CR_IPSR),r1; \ | ||
121 | ;; \ | ||
122 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ | ||
123 | st8 [r16] = r29; /* save cr.ipsr */ \ | ||
124 | ;; \ | ||
125 | lfetch.fault.excl.nt1 [r17]; \ | ||
126 | tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \ | ||
127 | mov r29 = b0 \ | ||
128 | ;; \ | ||
129 | adds r16 = PT(R8),r1; /* initialize first base pointer */\ | ||
130 | adds r17 = PT(R9),r1; /* initialize second base pointer */\ | ||
131 | ;; \ | ||
132 | .mem.offset 0,0; st8.spill [r16] = r8,16; \ | ||
133 | .mem.offset 8,0; st8.spill [r17] = r9,16; \ | ||
134 | ;; \ | ||
135 | .mem.offset 0,0; st8.spill [r16] = r10,24; \ | ||
136 | .mem.offset 8,0; st8.spill [r17] = r11,24; \ | ||
137 | ;; \ | ||
138 | mov r9 = cr.iip; /* M */ \ | ||
139 | mov r10 = ar.fpsr; /* M */ \ | ||
140 | ;; \ | ||
141 | st8 [r16] = r9,16; /* save cr.iip */ \ | ||
142 | st8 [r17] = r30,16; /* save cr.ifs */ \ | ||
143 | sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \ | ||
144 | ;; \ | ||
145 | st8 [r16] = r25,16; /* save ar.unat */ \ | ||
146 | st8 [r17] = r26,16; /* save ar.pfs */ \ | ||
147 | shl r18 = r18,16; /* calc ar.rsc used for "loadrs" */\ | ||
148 | ;; \ | ||
149 | st8 [r16] = r27,16; /* save ar.rsc */ \ | ||
150 | st8 [r17] = r28,16; /* save ar.rnat */ \ | ||
151 | ;; /* avoid RAW on r16 & r17 */ \ | ||
152 | st8 [r16] = r23,16; /* save ar.bspstore */ \ | ||
153 | st8 [r17] = r31,16; /* save predicates */ \ | ||
154 | ;; \ | ||
155 | st8 [r16] = r29,16; /* save b0 */ \ | ||
156 | st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\ | ||
157 | ;; \ | ||
158 | .mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \ | ||
159 | .mem.offset 8,0; st8.spill [r17] = r12,16; \ | ||
160 | adds r12 = -16,r1; /* switch to kernel memory stack */ \ | ||
161 | ;; \ | ||
162 | .mem.offset 0,0; st8.spill [r16] = r13,16; \ | ||
163 | .mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\ | ||
164 | mov r13 = r21; /* establish `current' */ \ | ||
165 | ;; \ | ||
166 | .mem.offset 0,0; st8.spill [r16] = r15,16; \ | ||
167 | .mem.offset 8,0; st8.spill [r17] = r14,16; \ | ||
168 | ;; \ | ||
169 | .mem.offset 0,0; st8.spill [r16] = r2,16; \ | ||
170 | .mem.offset 8,0; st8.spill [r17] = r3,16; \ | ||
171 | adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \ | ||
172 | ;; \ | ||
173 | adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \ | ||
174 | adds r17 = VMM_VCPU_ISR_OFFSET,r13; \ | ||
175 | mov r26 = cr.iipa; \ | ||
176 | mov r27 = cr.isr; \ | ||
177 | ;; \ | ||
178 | st8 [r16] = r26; \ | ||
179 | st8 [r17] = r27; \ | ||
180 | ;; \ | ||
181 | EXTRA; \ | ||
182 | mov r8 = ar.ccv; \ | ||
183 | mov r9 = ar.csd; \ | ||
184 | mov r10 = ar.ssd; \ | ||
185 | movl r11 = FPSR_DEFAULT; /* L-unit */ \ | ||
186 | adds r17 = VMM_VCPU_GP_OFFSET,r13; \ | ||
187 | ;; \ | ||
188 | ld8 r1 = [r17];/* establish kernel global pointer */ \ | ||
189 | ;; \ | ||
190 | PAL_VSA_SYNC_READ \ | ||
191 | KVM_MINSTATE_END_SAVE_MIN | ||
192 | |||
193 | /* | ||
194 | * SAVE_REST saves the remainder of pt_regs (with psr.ic on). | ||
195 | * | ||
196 | * Assumed state upon entry: | ||
197 | * psr.ic: on | ||
198 | * r2: points to &pt_regs.f6 | ||
199 | * r3: points to &pt_regs.f7 | ||
200 | * r8: contents of ar.ccv | ||
201 | * r9: contents of ar.csd | ||
202 | * r10: contents of ar.ssd | ||
203 | * r11: FPSR_DEFAULT | ||
204 | * | ||
205 | * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST. | ||
206 | */ | ||
207 | #define KVM_SAVE_REST \ | ||
208 | .mem.offset 0,0; st8.spill [r2] = r16,16; \ | ||
209 | .mem.offset 8,0; st8.spill [r3] = r17,16; \ | ||
210 | ;; \ | ||
211 | .mem.offset 0,0; st8.spill [r2] = r18,16; \ | ||
212 | .mem.offset 8,0; st8.spill [r3] = r19,16; \ | ||
213 | ;; \ | ||
214 | .mem.offset 0,0; st8.spill [r2] = r20,16; \ | ||
215 | .mem.offset 8,0; st8.spill [r3] = r21,16; \ | ||
216 | mov r18=b6; \ | ||
217 | ;; \ | ||
218 | .mem.offset 0,0; st8.spill [r2] = r22,16; \ | ||
219 | .mem.offset 8,0; st8.spill [r3] = r23,16; \ | ||
220 | mov r19 = b7; \ | ||
221 | ;; \ | ||
222 | .mem.offset 0,0; st8.spill [r2] = r24,16; \ | ||
223 | .mem.offset 8,0; st8.spill [r3] = r25,16; \ | ||
224 | ;; \ | ||
225 | .mem.offset 0,0; st8.spill [r2] = r26,16; \ | ||
226 | .mem.offset 8,0; st8.spill [r3] = r27,16; \ | ||
227 | ;; \ | ||
228 | .mem.offset 0,0; st8.spill [r2] = r28,16; \ | ||
229 | .mem.offset 8,0; st8.spill [r3] = r29,16; \ | ||
230 | ;; \ | ||
231 | .mem.offset 0,0; st8.spill [r2] = r30,16; \ | ||
232 | .mem.offset 8,0; st8.spill [r3] = r31,32; \ | ||
233 | ;; \ | ||
234 | mov ar.fpsr = r11; \ | ||
235 | st8 [r2] = r8,8; \ | ||
236 | adds r24 = PT(B6)-PT(F7),r3; \ | ||
237 | adds r25 = PT(B7)-PT(F7),r3; \ | ||
238 | ;; \ | ||
239 | st8 [r24] = r18,16; /* b6 */ \ | ||
240 | st8 [r25] = r19,16; /* b7 */ \ | ||
241 | adds r2 = PT(R4)-PT(F6),r2; \ | ||
242 | adds r3 = PT(R5)-PT(F7),r3; \ | ||
243 | ;; \ | ||
244 | st8 [r24] = r9; /* ar.csd */ \ | ||
245 | st8 [r25] = r10; /* ar.ssd */ \ | ||
246 | ;; \ | ||
247 | mov r18 = ar.unat; \ | ||
248 | adds r19 = PT(EML_UNAT)-PT(R4),r2; \ | ||
249 | ;; \ | ||
250 | st8 [r19] = r18; /* eml_unat */ \ | ||
251 | |||
252 | |||
253 | #define KVM_SAVE_EXTRA \ | ||
254 | .mem.offset 0,0; st8.spill [r2] = r4,16; \ | ||
255 | .mem.offset 8,0; st8.spill [r3] = r5,16; \ | ||
256 | ;; \ | ||
257 | .mem.offset 0,0; st8.spill [r2] = r6,16; \ | ||
258 | .mem.offset 8,0; st8.spill [r3] = r7; \ | ||
259 | ;; \ | ||
260 | mov r26 = ar.unat; \ | ||
261 | ;; \ | ||
262 | st8 [r2] = r26;/* eml_unat */ \ | ||
263 | |||
264 | #define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,) | ||
265 | #define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19) | ||
266 | #define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, ) | ||
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
deleted file mode 100644
index c5f92a926a9a..000000000000
--- a/arch/ia64/kvm/lapic.h
+++ /dev/null
@@ -1,30 +0,0 @@ | |||
1 | #ifndef __KVM_IA64_LAPIC_H | ||
2 | #define __KVM_IA64_LAPIC_H | ||
3 | |||
4 | #include <linux/kvm_host.h> | ||
5 | |||
6 | /* | ||
7 | * vlsapic | ||
8 | */ | ||
9 | struct kvm_lapic{ | ||
10 | struct kvm_vcpu *vcpu; | ||
11 | uint64_t insvc[4]; | ||
12 | uint64_t vhpi; | ||
13 | uint8_t xtp; | ||
14 | uint8_t pal_init_pending; | ||
15 | uint8_t pad[2]; | ||
16 | }; | ||
17 | |||
18 | int kvm_create_lapic(struct kvm_vcpu *vcpu); | ||
19 | void kvm_free_lapic(struct kvm_vcpu *vcpu); | ||
20 | |||
21 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest); | ||
22 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda); | ||
23 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | ||
24 | int short_hand, int dest, int dest_mode); | ||
25 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); | ||
26 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq); | ||
27 | #define kvm_apic_present(x) (true) | ||
28 | #define kvm_lapic_enabled(x) (true) | ||
29 | |||
30 | #endif | ||
diff --git a/arch/ia64/kvm/memcpy.S b/arch/ia64/kvm/memcpy.S
deleted file mode 100644
index c04cdbe9f80f..000000000000
--- a/arch/ia64/kvm/memcpy.S
+++ /dev/null
@@ -1 +0,0 @@ | |||
1 | #include "../lib/memcpy.S" | ||
diff --git a/arch/ia64/kvm/memset.S b/arch/ia64/kvm/memset.S
deleted file mode 100644
index 83c3066d844a..000000000000
--- a/arch/ia64/kvm/memset.S
+++ /dev/null
@@ -1 +0,0 @@ | |||
1 | #include "../lib/memset.S" | ||
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
deleted file mode 100644
index dd979e00b574..000000000000
--- a/arch/ia64/kvm/misc.h
+++ /dev/null
@@ -1,94 +0,0 @@ | |||
1 | #ifndef __KVM_IA64_MISC_H | ||
2 | #define __KVM_IA64_MISC_H | ||
3 | |||
4 | #include <linux/kvm_host.h> | ||
5 | /* | ||
6 | * misc.h | ||
7 | * Copyright (C) 2007, Intel Corporation. | ||
8 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms and conditions of the GNU General Public License, | ||
12 | * version 2, as published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
21 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * Return the p2m base address on the host side. | ||
27 | */ | ||
28 | static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) | ||
29 | { | ||
30 | return (uint64_t *)(kvm->arch.vm_base + | ||
31 | offsetof(struct kvm_vm_data, kvm_p2m)); | ||
32 | } | ||
33 | |||
34 | static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, | ||
35 | u64 paddr, u64 mem_flags) | ||
36 | { | ||
37 | uint64_t *pmt_base = kvm_host_get_pmt(kvm); | ||
38 | unsigned long pte; | ||
39 | |||
40 | pte = PAGE_ALIGN(paddr) | mem_flags; | ||
41 | pmt_base[gfn] = pte; | ||
42 | } | ||
43 | |||
44 | /*Function for translating host address to guest address*/ | ||
45 | |||
46 | static inline void *to_guest(struct kvm *kvm, void *addr) | ||
47 | { | ||
48 | return (void *)((unsigned long)(addr) - kvm->arch.vm_base + | ||
49 | KVM_VM_DATA_BASE); | ||
50 | } | ||
51 | |||
52 | /*Function for translating guest address to host address*/ | ||
53 | |||
54 | static inline void *to_host(struct kvm *kvm, void *addr) | ||
55 | { | ||
56 | return (void *)((unsigned long)addr - KVM_VM_DATA_BASE | ||
57 | + kvm->arch.vm_base); | ||
58 | } | ||
59 | |||
60 | /* Get host context of the vcpu */ | ||
61 | static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu) | ||
62 | { | ||
63 | union context *ctx = &vcpu->arch.host; | ||
64 | return to_guest(vcpu->kvm, ctx); | ||
65 | } | ||
66 | |||
67 | /* Get guest context of the vcpu */ | ||
68 | static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu) | ||
69 | { | ||
70 | union context *ctx = &vcpu->arch.guest; | ||
71 | return to_guest(vcpu->kvm, ctx); | ||
72 | } | ||
73 | |||
74 | /* Get the exit data recorded by the guest-side VMM (GVMM). */ | ||
75 | static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu) | ||
76 | { | ||
77 | return &vcpu->arch.exit_data; | ||
78 | } | ||
79 | |||
80 | /* Get the vcpu's pending MMIO request for the kvm module. */ | ||
81 | static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu) | ||
82 | { | ||
83 | struct exit_ctl_data *p_ctl_data; | ||
84 | |||
85 | if (vcpu) { | ||
86 | p_ctl_data = kvm_get_exit_data(vcpu); | ||
87 | if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION) | ||
88 | return &p_ctl_data->u.ioreq; | ||
89 | } | ||
90 | |||
91 | return NULL; | ||
92 | } | ||
93 | |||
94 | #endif | ||
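Since to_guest() and to_host() differ only by swapping the same two base addresses, they are exact inverses for any address inside the VM data area. A small illustrative check of that invariant (a hypothetical helper, not part of the original header):

static inline int kvm_vm_addr_roundtrips(struct kvm *kvm, void *addr)
{
	return to_host(kvm, to_guest(kvm, addr)) == addr;
}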
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
deleted file mode 100644
index f1e17d3d6cd9..000000000000
--- a/arch/ia64/kvm/mmio.c
+++ /dev/null
@@ -1,336 +0,0 @@ | |||
1 | /* | ||
2 | * mmio.c: MMIO emulation components. | ||
3 | * Copyright (c) 2004, Intel Corporation. | ||
4 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) | ||
5 | * Kun Tian (Kevin Tian) (Kevin.tian@intel.com) | ||
6 | * | ||
7 | * Copyright (c) 2007 Intel Corporation KVM support. | ||
8 | * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) | ||
9 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms and conditions of the GNU General Public License, | ||
13 | * version 2, as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
22 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/kvm_host.h> | ||
27 | |||
28 | #include "vcpu.h" | ||
29 | |||
30 | static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val) | ||
31 | { | ||
32 | VLSAPIC_XTP(v) = val; | ||
33 | } | ||
34 | |||
35 | /* | ||
36 | * LSAPIC OFFSET | ||
37 | */ | ||
38 | #define PIB_LOW_HALF(ofst) !(ofst & (1 << 20)) | ||
39 | #define PIB_OFST_INTA 0x1E0000 | ||
40 | #define PIB_OFST_XTP 0x1E0008 | ||
41 | |||
42 | /* | ||
43 | * execute write IPI op. | ||
44 | */ | ||
45 | static void vlsapic_write_ipi(struct kvm_vcpu *vcpu, | ||
46 | uint64_t addr, uint64_t data) | ||
47 | { | ||
48 | struct exit_ctl_data *p = ¤t_vcpu->arch.exit_data; | ||
49 | unsigned long psr; | ||
50 | |||
51 | local_irq_save(psr); | ||
52 | |||
53 | p->exit_reason = EXIT_REASON_IPI; | ||
54 | p->u.ipi_data.addr.val = addr; | ||
55 | p->u.ipi_data.data.val = data; | ||
56 | vmm_transition(current_vcpu); | ||
57 | |||
58 | local_irq_restore(psr); | ||
59 | |||
60 | } | ||
61 | |||
62 | void lsapic_write(struct kvm_vcpu *v, unsigned long addr, | ||
63 | unsigned long length, unsigned long val) | ||
64 | { | ||
65 | addr &= (PIB_SIZE - 1); | ||
66 | |||
67 | switch (addr) { | ||
68 | case PIB_OFST_INTA: | ||
69 | panic_vm(v, "Undefined write on PIB INTA\n"); | ||
70 | break; | ||
71 | case PIB_OFST_XTP: | ||
72 | if (length == 1) { | ||
73 | vlsapic_write_xtp(v, val); | ||
74 | } else { | ||
75 | panic_vm(v, "Undefined write on PIB XTP\n"); | ||
76 | } | ||
77 | break; | ||
78 | default: | ||
79 | if (PIB_LOW_HALF(addr)) { | ||
80 | /*Lower half */ | ||
81 | if (length != 8) | ||
82 | panic_vm(v, "Can't LHF write with size %ld!\n", | ||
83 | length); | ||
84 | else | ||
85 | vlsapic_write_ipi(v, addr, val); | ||
86 | } else { /*Upper half */ | ||
87 | panic_vm(v, "IPI-UHF write %lx\n", addr); | ||
88 | } | ||
89 | break; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr, | ||
94 | unsigned long length) | ||
95 | { | ||
96 | uint64_t result = 0; | ||
97 | |||
98 | addr &= (PIB_SIZE - 1); | ||
99 | |||
100 | switch (addr) { | ||
101 | case PIB_OFST_INTA: | ||
102 | if (length == 1) /* 1 byte load */ | ||
103 | ; /* There is no i8259, there is no INTA access*/ | ||
104 | else | ||
105 | panic_vm(v, "Undefined read on PIB INTA\n"); | ||
106 | |||
107 | break; | ||
108 | case PIB_OFST_XTP: | ||
109 | if (length == 1) { | ||
110 | result = VLSAPIC_XTP(v); | ||
111 | } else { | ||
112 | panic_vm(v, "Undefined read on PIB XTP\n"); | ||
113 | } | ||
114 | break; | ||
115 | default: | ||
116 | panic_vm(v, "Undefined addr access for lsapic!\n"); | ||
117 | break; | ||
118 | } | ||
119 | return result; | ||
120 | } | ||
121 | |||
122 | static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, | ||
123 | u16 s, int ma, int dir) | ||
124 | { | ||
125 | unsigned long iot; | ||
126 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
127 | unsigned long psr; | ||
128 | |||
129 | iot = __gpfn_is_io(src_pa >> PAGE_SHIFT); | ||
130 | |||
131 | local_irq_save(psr); | ||
132 | |||
133 | /*Intercept the access for PIB range*/ | ||
134 | if (iot == GPFN_PIB) { | ||
135 | if (!dir) | ||
136 | lsapic_write(vcpu, src_pa, s, *dest); | ||
137 | else | ||
138 | *dest = lsapic_read(vcpu, src_pa, s); | ||
139 | goto out; | ||
140 | } | ||
141 | p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION; | ||
142 | p->u.ioreq.addr = src_pa; | ||
143 | p->u.ioreq.size = s; | ||
144 | p->u.ioreq.dir = dir; | ||
145 | if (dir == IOREQ_WRITE) | ||
146 | p->u.ioreq.data = *dest; | ||
147 | p->u.ioreq.state = STATE_IOREQ_READY; | ||
148 | vmm_transition(vcpu); | ||
149 | |||
150 | if (p->u.ioreq.state == STATE_IORESP_READY) { | ||
151 | if (dir == IOREQ_READ) | ||
153 | /* it's necessary to ensure zero extension */ | ||
153 | *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); | ||
154 | } else | ||
155 | panic_vm(vcpu, "Unhandled mmio access returned!\n"); | ||
156 | out: | ||
157 | local_irq_restore(psr); | ||
158 | return ; | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | dir: 1 = read, 0 = write | ||
163 | inst_type: 0 = integer, 1 = floating point | ||
164 | */ | ||
165 | #define SL_INTEGER 0 /* store/load integer */ | ||
166 | #define SL_FLOATING 1 /* store/load floating */ | ||
167 | |||
168 | void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) | ||
169 | { | ||
170 | struct kvm_pt_regs *regs; | ||
171 | IA64_BUNDLE bundle; | ||
172 | int slot, dir = 0; | ||
173 | int inst_type = -1; | ||
174 | u16 size = 0; | ||
175 | u64 data, slot1a, slot1b, temp, update_reg; | ||
176 | s32 imm; | ||
177 | INST64 inst; | ||
178 | |||
179 | regs = vcpu_regs(vcpu); | ||
180 | |||
181 | if (fetch_code(vcpu, regs->cr_iip, &bundle)) { | ||
182 | /* if fetching the code fails, return and try again */ | ||
183 | return; | ||
184 | } | ||
185 | slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri; | ||
186 | if (!slot) | ||
187 | inst.inst = bundle.slot0; | ||
188 | else if (slot == 1) { | ||
189 | slot1a = bundle.slot1a; | ||
190 | slot1b = bundle.slot1b; | ||
191 | inst.inst = slot1a + (slot1b << 18); | ||
192 | } else if (slot == 2) | ||
193 | inst.inst = bundle.slot2; | ||
194 | |||
195 | /* Integer Load/Store */ | ||
196 | if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) { | ||
197 | inst_type = SL_INTEGER; | ||
198 | size = (inst.M1.x6 & 0x3); | ||
199 | if ((inst.M1.x6 >> 2) > 0xb) { | ||
200 | /*write*/ | ||
201 | dir = IOREQ_WRITE; | ||
202 | data = vcpu_get_gr(vcpu, inst.M4.r2); | ||
203 | } else if ((inst.M1.x6 >> 2) < 0xb) { | ||
204 | /*read*/ | ||
205 | dir = IOREQ_READ; | ||
206 | } | ||
207 | } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) { | ||
208 | /* Integer Load + Reg update */ | ||
209 | inst_type = SL_INTEGER; | ||
210 | dir = IOREQ_READ; | ||
211 | size = (inst.M2.x6 & 0x3); | ||
212 | temp = vcpu_get_gr(vcpu, inst.M2.r3); | ||
213 | update_reg = vcpu_get_gr(vcpu, inst.M2.r2); | ||
214 | temp += update_reg; | ||
215 | vcpu_set_gr(vcpu, inst.M2.r3, temp, 0); | ||
216 | } else if (inst.M3.major == 5) { | ||
217 | /*Integer Load/Store + Imm update*/ | ||
218 | inst_type = SL_INTEGER; | ||
219 | size = (inst.M3.x6&0x3); | ||
220 | if ((inst.M5.x6 >> 2) > 0xb) { | ||
221 | /*write*/ | ||
222 | dir = IOREQ_WRITE; | ||
223 | data = vcpu_get_gr(vcpu, inst.M5.r2); | ||
224 | temp = vcpu_get_gr(vcpu, inst.M5.r3); | ||
225 | imm = (inst.M5.s << 31) | (inst.M5.i << 30) | | ||
226 | (inst.M5.imm7 << 23); | ||
227 | temp += imm >> 23; | ||
228 | vcpu_set_gr(vcpu, inst.M5.r3, temp, 0); | ||
229 | |||
230 | } else if ((inst.M3.x6 >> 2) < 0xb) { | ||
231 | /*read*/ | ||
232 | dir = IOREQ_READ; | ||
233 | temp = vcpu_get_gr(vcpu, inst.M3.r3); | ||
234 | imm = (inst.M3.s << 31) | (inst.M3.i << 30) | | ||
235 | (inst.M3.imm7 << 23); | ||
236 | temp += imm >> 23; | ||
237 | vcpu_set_gr(vcpu, inst.M3.r3, temp, 0); | ||
238 | |||
239 | } | ||
240 | } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B | ||
241 | && inst.M9.m == 0 && inst.M9.x == 0) { | ||
242 | /* Floating-point spill*/ | ||
243 | struct ia64_fpreg v; | ||
244 | |||
245 | inst_type = SL_FLOATING; | ||
246 | dir = IOREQ_WRITE; | ||
247 | vcpu_get_fpreg(vcpu, inst.M9.f2, &v); | ||
248 | /* Write high word. FIXME: this is a kludge! */ | ||
249 | v.u.bits[1] &= 0x3ffff; | ||
250 | mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8, | ||
251 | ma, IOREQ_WRITE); | ||
252 | data = v.u.bits[0]; | ||
253 | size = 3; | ||
254 | } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) { | ||
255 | /* Floating-point spill + Imm update */ | ||
256 | struct ia64_fpreg v; | ||
257 | |||
258 | inst_type = SL_FLOATING; | ||
259 | dir = IOREQ_WRITE; | ||
260 | vcpu_get_fpreg(vcpu, inst.M10.f2, &v); | ||
261 | temp = vcpu_get_gr(vcpu, inst.M10.r3); | ||
262 | imm = (inst.M10.s << 31) | (inst.M10.i << 30) | | ||
263 | (inst.M10.imm7 << 23); | ||
264 | temp += imm >> 23; | ||
265 | vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); | ||
266 | |||
267 | /* Write high word. FIXME: this is a kludge! */ | ||
268 | v.u.bits[1] &= 0x3ffff; | ||
269 | mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], | ||
270 | 8, ma, IOREQ_WRITE); | ||
271 | data = v.u.bits[0]; | ||
272 | size = 3; | ||
273 | } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) { | ||
274 | /* Floating-point stf8 + Imm update */ | ||
275 | struct ia64_fpreg v; | ||
276 | inst_type = SL_FLOATING; | ||
277 | dir = IOREQ_WRITE; | ||
278 | size = 3; | ||
279 | vcpu_get_fpreg(vcpu, inst.M10.f2, &v); | ||
280 | data = v.u.bits[0]; /* Significand. */ | ||
281 | temp = vcpu_get_gr(vcpu, inst.M10.r3); | ||
282 | imm = (inst.M10.s << 31) | (inst.M10.i << 30) | | ||
283 | (inst.M10.imm7 << 23); | ||
284 | temp += imm >> 23; | ||
285 | vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); | ||
286 | } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c | ||
287 | && inst.M15.x6 <= 0x2f) { | ||
288 | temp = vcpu_get_gr(vcpu, inst.M15.r3); | ||
289 | imm = (inst.M15.s << 31) | (inst.M15.i << 30) | | ||
290 | (inst.M15.imm7 << 23); | ||
291 | temp += imm >> 23; | ||
292 | vcpu_set_gr(vcpu, inst.M15.r3, temp, 0); | ||
293 | |||
294 | vcpu_increment_iip(vcpu); | ||
295 | return; | ||
296 | } else if (inst.M12.major == 6 && inst.M12.m == 1 | ||
297 | && inst.M12.x == 1 && inst.M12.x6 == 1) { | ||
298 | /* Floating-point Load Pair + Imm ldfp8 M12 */ | ||
299 | struct ia64_fpreg v; | ||
300 | |||
301 | inst_type = SL_FLOATING; | ||
302 | dir = IOREQ_READ; | ||
303 | size = 8; /*ldfd*/ | ||
304 | mmio_access(vcpu, padr, &data, size, ma, dir); | ||
305 | v.u.bits[0] = data; | ||
306 | v.u.bits[1] = 0x1003E; | ||
307 | vcpu_set_fpreg(vcpu, inst.M12.f1, &v); | ||
308 | padr += 8; | ||
309 | mmio_access(vcpu, padr, &data, size, ma, dir); | ||
310 | v.u.bits[0] = data; | ||
311 | v.u.bits[1] = 0x1003E; | ||
312 | vcpu_set_fpreg(vcpu, inst.M12.f2, &v); | ||
313 | padr += 8; | ||
314 | vcpu_set_gr(vcpu, inst.M12.r3, padr, 0); | ||
315 | vcpu_increment_iip(vcpu); | ||
316 | return; | ||
317 | } else { | ||
318 | inst_type = -1; | ||
319 | panic_vm(vcpu, "Unsupported MMIO access instruction! " | ||
320 | "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n", | ||
321 | bundle.i64[0], bundle.i64[1]); | ||
322 | } | ||
323 | |||
324 | size = 1 << size; | ||
325 | if (dir == IOREQ_WRITE) { | ||
326 | mmio_access(vcpu, padr, &data, size, ma, dir); | ||
327 | } else { | ||
328 | mmio_access(vcpu, padr, &data, size, ma, dir); | ||
329 | if (inst_type == SL_INTEGER) | ||
330 | vcpu_set_gr(vcpu, inst.M1.r1, data, 0); | ||
331 | else | ||
332 | panic_vm(vcpu, "Unsupported instruction type!\n"); | ||
333 | |||
334 | } | ||
335 | vcpu_increment_iip(vcpu); | ||
336 | } | ||
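A note on the immediate arithmetic in the M3/M5/M10/M15 branches above: the decoder packs the scattered sign bit, i bit, and imm7 field into the top of a 32-bit value and then arithmetic-shifts right by 23, which sign-extends the 9-bit displacement in a single step (this assumes, as the arithmetic requires, that imm is a signed 32-bit type). A minimal standalone sketch of the idiom; the helper name is ours:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical helper mirroring "imm = (s << 31) | (i << 30) |
	 * (imm7 << 23); temp += imm >> 23;" from the decoder above. */
	static int64_t sign_extend_imm9(uint32_t s, uint32_t i, uint32_t imm7)
	{
		int32_t imm = (int32_t)((s << 31) | (i << 30) | (imm7 << 23));
		return imm >> 23;	/* arithmetic shift replicates the sign bit */
	}

	int main(void)
	{
		printf("%lld\n", (long long)sign_extend_imm9(1, 1, 0x7f)); /* -1 */
		printf("%lld\n", (long long)sign_extend_imm9(0, 0, 0x05)); /* +5 */
		return 0;
	}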
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S deleted file mode 100644 index f793be3effff..000000000000 --- a/arch/ia64/kvm/optvfault.S +++ /dev/null | |||
@@ -1,1090 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ia64/kvm/optvfault.S | ||
3 | * optimize virtualization fault handler | ||
4 | * | ||
5 | * Copyright (C) 2006 Intel Co | ||
6 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
7 | * Copyright (C) 2008 Intel Co | ||
8 | * Add support for Tukwila processors. | ||
9 | * Xiantao Zhang <xiantao.zhang@intel.com> | ||
10 | */ | ||
11 | |||
12 | #include <asm/asmmacro.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/kvm_host.h> | ||
15 | |||
16 | #include "vti.h" | ||
17 | #include "asm-offsets.h" | ||
18 | |||
19 | #define ACCE_MOV_FROM_AR | ||
20 | #define ACCE_MOV_FROM_RR | ||
21 | #define ACCE_MOV_TO_RR | ||
22 | #define ACCE_RSM | ||
23 | #define ACCE_SSM | ||
24 | #define ACCE_MOV_TO_PSR | ||
25 | #define ACCE_THASH | ||
26 | |||
27 | #define VMX_VPS_SYNC_READ \ | ||
28 | add r16=VMM_VPD_BASE_OFFSET,r21; \ | ||
29 | mov r17 = b0; \ | ||
30 | mov r18 = r24; \ | ||
31 | mov r19 = r25; \ | ||
32 | mov r20 = r31; \ | ||
33 | ;; \ | ||
34 | {.mii; \ | ||
35 | ld8 r16 = [r16]; \ | ||
36 | nop 0x0; \ | ||
37 | mov r24 = ip; \ | ||
38 | ;; \ | ||
39 | }; \ | ||
40 | {.mmb; \ | ||
41 | add r24=0x20, r24; \ | ||
42 | mov r25 =r16; \ | ||
43 | br.sptk.many kvm_vps_sync_read; \ | ||
44 | }; \ | ||
45 | mov b0 = r17; \ | ||
46 | mov r24 = r18; \ | ||
47 | mov r25 = r19; \ | ||
48 | mov r31 = r20 | ||
49 | |||
50 | ENTRY(kvm_vps_entry) | ||
51 | adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21 | ||
52 | ;; | ||
53 | ld8 r29 = [r29] | ||
54 | ;; | ||
55 | add r29 = r29, r30 | ||
56 | ;; | ||
57 | mov b0 = r29 | ||
58 | br.sptk.many b0 | ||
59 | END(kvm_vps_entry) | ||
60 | |||
61 | /* | ||
62 | * Inputs: | ||
63 | * r24 : return address | ||
64 | * r25 : vpd | ||
65 | * r29 : scratch | ||
66 | * | ||
67 | */ | ||
68 | GLOBAL_ENTRY(kvm_vps_sync_read) | ||
69 | movl r30 = PAL_VPS_SYNC_READ | ||
70 | ;; | ||
71 | br.sptk.many kvm_vps_entry | ||
72 | END(kvm_vps_sync_read) | ||
73 | |||
74 | /* | ||
75 | * Inputs: | ||
76 | * r24 : return address | ||
77 | * r25 : vpd | ||
78 | * r29 : scratch | ||
79 | * | ||
80 | */ | ||
81 | GLOBAL_ENTRY(kvm_vps_sync_write) | ||
82 | movl r30 = PAL_VPS_SYNC_WRITE | ||
83 | ;; | ||
84 | br.sptk.many kvm_vps_entry | ||
85 | END(kvm_vps_sync_write) | ||
86 | |||
87 | /* | ||
88 | * Inputs: | ||
89 | * r23 : pr | ||
90 | * r24 : guest b0 | ||
91 | * r25 : vpd | ||
92 | * | ||
93 | */ | ||
94 | GLOBAL_ENTRY(kvm_vps_resume_normal) | ||
95 | movl r30 = PAL_VPS_RESUME_NORMAL | ||
96 | ;; | ||
97 | mov pr=r23,-2 | ||
98 | br.sptk.many kvm_vps_entry | ||
99 | END(kvm_vps_resume_normal) | ||
100 | |||
101 | /* | ||
102 | * Inputs: | ||
103 | * r23 : pr | ||
104 | * r24 : guest b0 | ||
105 | * r25 : vpd | ||
106 | * r17 : isr | ||
107 | */ | ||
108 | GLOBAL_ENTRY(kvm_vps_resume_handler) | ||
109 | movl r30 = PAL_VPS_RESUME_HANDLER | ||
110 | ;; | ||
111 | ld8 r26=[r25] | ||
112 | shr r17=r17,IA64_ISR_IR_BIT | ||
113 | ;; | ||
114 | dep r26=r17,r26,63,1 // bit 63 of r26 indicates whether to enable CFLE | ||
115 | mov pr=r23,-2 | ||
116 | br.sptk.many kvm_vps_entry | ||
117 | END(kvm_vps_resume_handler) | ||
118 | |||
119 | //mov r1=ar3 | ||
120 | GLOBAL_ENTRY(kvm_asm_mov_from_ar) | ||
121 | #ifndef ACCE_MOV_FROM_AR | ||
122 | br.many kvm_virtualization_fault_back | ||
123 | #endif | ||
124 | add r18=VMM_VCPU_ITC_OFS_OFFSET, r21 | ||
125 | add r16=VMM_VCPU_LAST_ITC_OFFSET,r21 | ||
126 | extr.u r17=r25,6,7 | ||
127 | ;; | ||
128 | ld8 r18=[r18] | ||
129 | mov r19=ar.itc | ||
130 | mov r24=b0 | ||
131 | ;; | ||
132 | add r19=r19,r18 | ||
133 | addl r20=@gprel(asm_mov_to_reg),gp | ||
134 | ;; | ||
135 | st8 [r16] = r19 | ||
136 | adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20 | ||
137 | shladd r17=r17,4,r20 | ||
138 | ;; | ||
139 | mov b0=r17 | ||
140 | br.sptk.few b0 | ||
141 | ;; | ||
142 | END(kvm_asm_mov_from_ar) | ||
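In C terms, the fast path above reads the per-vcpu ITC offset, adds the host ar.itc, records the result in last_itc, and hands it to the guest. A sketch under assumed field names (not the kernel's actual structures):

	#include <stdint.h>

	/* Hypothetical sketch of kvm_asm_mov_from_ar; field names are ours. */
	struct vcpu_itc_state {
		uint64_t itc_offset;	/* VMM_VCPU_ITC_OFS_OFFSET */
		uint64_t last_itc;	/* VMM_VCPU_LAST_ITC_OFFSET */
	};

	static uint64_t read_guest_itc(struct vcpu_itc_state *v, uint64_t host_itc)
	{
		uint64_t itc = host_itc + v->itc_offset; /* mov r19=ar.itc; add */
		v->last_itc = itc;			 /* st8 [r16] = r19 */
		return itc;			/* delivered via asm_mov_to_reg */
	}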
143 | |||
144 | /* | ||
145 | * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC | ||
146 | * clock as its source for emulating the ITC. This version will be | ||
147 | * copied on top of the original version if the host is determined to | ||
148 | * be an SN2. | ||
149 | */ | ||
150 | GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2) | ||
151 | add r18=VMM_VCPU_ITC_OFS_OFFSET, r21 | ||
152 | movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT)) | ||
153 | |||
154 | add r16=VMM_VCPU_LAST_ITC_OFFSET,r21 | ||
155 | extr.u r17=r25,6,7 | ||
156 | mov r24=b0 | ||
157 | ;; | ||
158 | ld8 r18=[r18] | ||
159 | ld8 r19=[r19] | ||
160 | addl r20=@gprel(asm_mov_to_reg),gp | ||
161 | ;; | ||
162 | add r19=r19,r18 | ||
163 | shladd r17=r17,4,r20 | ||
164 | ;; | ||
165 | adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20 | ||
166 | st8 [r16] = r19 | ||
167 | mov b0=r17 | ||
168 | br.sptk.few b0 | ||
169 | ;; | ||
170 | END(kvm_asm_mov_from_ar_sn2) | ||
171 | |||
172 | |||
173 | |||
174 | // mov r1=rr[r3] | ||
175 | GLOBAL_ENTRY(kvm_asm_mov_from_rr) | ||
176 | #ifndef ACCE_MOV_FROM_RR | ||
177 | br.many kvm_virtualization_fault_back | ||
178 | #endif | ||
179 | extr.u r16=r25,20,7 | ||
180 | extr.u r17=r25,6,7 | ||
181 | addl r20=@gprel(asm_mov_from_reg),gp | ||
182 | ;; | ||
183 | adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20 | ||
184 | shladd r16=r16,4,r20 | ||
185 | mov r24=b0 | ||
186 | ;; | ||
187 | add r27=VMM_VCPU_VRR0_OFFSET,r21 | ||
188 | mov b0=r16 | ||
189 | br.many b0 | ||
190 | ;; | ||
191 | kvm_asm_mov_from_rr_back_1: | ||
192 | adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 | ||
193 | adds r22=asm_mov_to_reg-asm_mov_from_reg,r20 | ||
194 | shr.u r26=r19,61 | ||
195 | ;; | ||
196 | shladd r17=r17,4,r22 | ||
197 | shladd r27=r26,3,r27 | ||
198 | ;; | ||
199 | ld8 r19=[r27] | ||
200 | mov b0=r17 | ||
201 | br.many b0 | ||
202 | END(kvm_asm_mov_from_rr) | ||
203 | |||
204 | |||
205 | // mov rr[r3]=r2 | ||
206 | GLOBAL_ENTRY(kvm_asm_mov_to_rr) | ||
207 | #ifndef ACCE_MOV_TO_RR | ||
208 | br.many kvm_virtualization_fault_back | ||
209 | #endif | ||
210 | extr.u r16=r25,20,7 | ||
211 | extr.u r17=r25,13,7 | ||
212 | addl r20=@gprel(asm_mov_from_reg),gp | ||
213 | ;; | ||
214 | adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20 | ||
215 | shladd r16=r16,4,r20 | ||
216 | mov r22=b0 | ||
217 | ;; | ||
218 | add r27=VMM_VCPU_VRR0_OFFSET,r21 | ||
219 | mov b0=r16 | ||
220 | br.many b0 | ||
221 | ;; | ||
222 | kvm_asm_mov_to_rr_back_1: | ||
223 | adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20 | ||
224 | shr.u r23=r19,61 | ||
225 | shladd r17=r17,4,r20 | ||
226 | ;; | ||
227 | //if rr6, go back | ||
228 | cmp.eq p6,p0=6,r23 | ||
229 | mov b0=r22 | ||
230 | (p6) br.cond.dpnt.many kvm_virtualization_fault_back | ||
231 | ;; | ||
232 | mov r28=r19 | ||
233 | mov b0=r17 | ||
234 | br.many b0 | ||
235 | kvm_asm_mov_to_rr_back_2: | ||
236 | adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 | ||
237 | shladd r27=r23,3,r27 | ||
238 | ;; // vrr.rid<<4 |0xe | ||
239 | st8 [r27]=r19 | ||
240 | mov b0=r30 | ||
241 | ;; | ||
242 | extr.u r16=r19,8,26 | ||
243 | extr.u r18 =r19,2,6 | ||
244 | mov r17 =0xe | ||
245 | ;; | ||
246 | shladd r16 = r16, 4, r17 | ||
247 | extr.u r19 =r19,0,8 | ||
248 | ;; | ||
249 | shl r16 = r16,8 | ||
250 | ;; | ||
251 | add r19 = r19, r16 | ||
252 | ;; //set ve 1 | ||
253 | dep r19=-1,r19,0,1 | ||
254 | cmp.lt p6,p0=14,r18 | ||
255 | ;; | ||
256 | (p6) mov r18=14 | ||
257 | ;; | ||
258 | (p6) dep r19=r18,r19,2,6 | ||
259 | ;; | ||
260 | cmp.eq p6,p0=0,r23 | ||
261 | ;; | ||
262 | cmp.eq.or p6,p0=4,r23 | ||
263 | ;; | ||
264 | adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
265 | (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 | ||
266 | ;; | ||
267 | ld4 r16=[r16] | ||
268 | cmp.eq p7,p0=r0,r0 | ||
269 | (p6) shladd r17=r23,1,r17 | ||
270 | ;; | ||
271 | (p6) st8 [r17]=r19 | ||
272 | (p6) tbit.nz p6,p7=r16,0 | ||
273 | ;; | ||
274 | (p7) mov rr[r28]=r19 | ||
275 | mov r24=r22 | ||
276 | br.many b0 | ||
277 | END(kvm_asm_mov_to_rr) | ||
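The bit gymnastics after kvm_asm_mov_to_rr_back_2 can be restated in C. The sketch below is our restatement of the extr.u/shladd/dep sequence, not kernel code: the guest rid is widened to (rid << 4) | 0xe, the ve bit is forced on, and the page size is clamped to 14.

	#include <stdint.h>

	/* Hypothetical restatement of the physical-rr computation above. */
	static uint64_t mangle_rr(uint64_t vrr)
	{
		uint64_t rid = (vrr >> 8) & 0x3ffffff;	 /* extr.u r16=r19,8,26 */
		uint64_t ps  = (vrr >> 2) & 0x3f;	 /* extr.u r18=r19,2,6  */
		uint64_t rr  = (((rid << 4) | 0xe) << 8) /* shladd; shl r16,8   */
				| (vrr & 0xff);		 /* add low byte back   */

		rr |= 1;			/* dep r19=-1,r19,0,1: set ve */
		if (ps > 14) {			/* cmp.lt p6,p0=14,r18        */
			rr &= ~(0x3fUL << 2);	/* dep r19=r18,r19,2,6        */
			rr |= 14UL << 2;
		}
		return rr;
	}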
278 | |||
279 | |||
280 | //rsm | ||
281 | GLOBAL_ENTRY(kvm_asm_rsm) | ||
282 | #ifndef ACCE_RSM | ||
283 | br.many kvm_virtualization_fault_back | ||
284 | #endif | ||
285 | VMX_VPS_SYNC_READ | ||
286 | ;; | ||
287 | extr.u r26=r25,6,21 | ||
288 | extr.u r27=r25,31,2 | ||
289 | ;; | ||
290 | extr.u r28=r25,36,1 | ||
291 | dep r26=r27,r26,21,2 | ||
292 | ;; | ||
293 | add r17=VPD_VPSR_START_OFFSET,r16 | ||
294 | add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
295 | //r26 is imm24 | ||
296 | dep r26=r28,r26,23,1 | ||
297 | ;; | ||
298 | ld8 r18=[r17] | ||
299 | movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI | ||
300 | ld4 r23=[r22] | ||
301 | sub r27=-1,r26 | ||
302 | mov r24=b0 | ||
303 | ;; | ||
304 | mov r20=cr.ipsr | ||
305 | or r28=r27,r28 | ||
306 | and r19=r18,r27 | ||
307 | ;; | ||
308 | st8 [r17]=r19 | ||
309 | and r20=r20,r28 | ||
310 | /* Commented out due to lack of fp lazy algorithm support | ||
311 | adds r27=IA64_VCPU_FP_PSR_OFFSET,r21 | ||
312 | ;; | ||
313 | ld8 r27=[r27] | ||
314 | ;; | ||
315 | tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT | ||
316 | ;; | ||
317 | (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 | ||
318 | */ | ||
319 | ;; | ||
320 | mov cr.ipsr=r20 | ||
321 | tbit.nz p6,p0=r23,0 | ||
322 | ;; | ||
323 | tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT | ||
324 | (p6) br.dptk kvm_resume_to_guest_with_sync | ||
325 | ;; | ||
326 | add r26=VMM_VCPU_META_RR0_OFFSET,r21 | ||
327 | add r27=VMM_VCPU_META_RR0_OFFSET+8,r21 | ||
328 | dep r23=-1,r23,0,1 | ||
329 | ;; | ||
330 | ld8 r26=[r26] | ||
331 | ld8 r27=[r27] | ||
332 | st4 [r22]=r23 | ||
333 | dep.z r28=4,61,3 | ||
334 | ;; | ||
335 | mov rr[r0]=r26 | ||
336 | ;; | ||
337 | mov rr[r28]=r27 | ||
338 | ;; | ||
339 | srlz.d | ||
340 | br.many kvm_resume_to_guest_with_sync | ||
341 | END(kvm_asm_rsm) | ||
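rsm and ssm encode their 24-bit PSR mask in three scattered fields of the instruction word (held in r25 above), and the extr.u/dep sequence reassembles it. A hypothetical C equivalent, with the field positions taken directly from the assembly:

	#include <stdint.h>

	/* Hypothetical helper mirroring the imm24 reassembly in kvm_asm_rsm
	 * and kvm_asm_ssm. "slot" is the 41-bit instruction word (r25). */
	static uint32_t ssm_rsm_imm24(uint64_t slot)
	{
		uint32_t imm = (slot >> 6) & 0x1fffff;	/* extr.u r26=r25,6,21  */

		imm |= ((slot >> 31) & 0x3) << 21;	/* dep r26=r27,r26,21,2 */
		imm |= ((slot >> 36) & 0x1) << 23;	/* dep r26=r28,r26,23,1 */
		return imm;
	}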
342 | |||
343 | |||
344 | //ssm | ||
345 | GLOBAL_ENTRY(kvm_asm_ssm) | ||
346 | #ifndef ACCE_SSM | ||
347 | br.many kvm_virtualization_fault_back | ||
348 | #endif | ||
349 | VMX_VPS_SYNC_READ | ||
350 | ;; | ||
351 | extr.u r26=r25,6,21 | ||
352 | extr.u r27=r25,31,2 | ||
353 | ;; | ||
354 | extr.u r28=r25,36,1 | ||
355 | dep r26=r27,r26,21,2 | ||
356 | ;; //r26 is imm24 | ||
357 | add r27=VPD_VPSR_START_OFFSET,r16 | ||
358 | dep r26=r28,r26,23,1 | ||
359 | ;; //r19 vpsr | ||
360 | ld8 r29=[r27] | ||
361 | mov r24=b0 | ||
362 | ;; | ||
363 | add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
364 | mov r20=cr.ipsr | ||
365 | or r19=r29,r26 | ||
366 | ;; | ||
367 | ld4 r23=[r22] | ||
368 | st8 [r27]=r19 | ||
369 | or r20=r20,r26 | ||
370 | ;; | ||
371 | mov cr.ipsr=r20 | ||
372 | movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT | ||
373 | ;; | ||
374 | and r19=r28,r19 | ||
375 | tbit.z p6,p0=r23,0 | ||
376 | ;; | ||
377 | cmp.ne.or p6,p0=r28,r19 | ||
378 | (p6) br.dptk kvm_asm_ssm_1 | ||
379 | ;; | ||
380 | add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 | ||
381 | add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21 | ||
382 | dep r23=0,r23,0,1 | ||
383 | ;; | ||
384 | ld8 r26=[r26] | ||
385 | ld8 r27=[r27] | ||
386 | st4 [r22]=r23 | ||
387 | dep.z r28=4,61,3 | ||
388 | ;; | ||
389 | mov rr[r0]=r26 | ||
390 | ;; | ||
391 | mov rr[r28]=r27 | ||
392 | ;; | ||
393 | srlz.d | ||
394 | ;; | ||
395 | kvm_asm_ssm_1: | ||
396 | tbit.nz p6,p0=r29,IA64_PSR_I_BIT | ||
397 | ;; | ||
398 | tbit.z.or p6,p0=r19,IA64_PSR_I_BIT | ||
399 | (p6) br.dptk kvm_resume_to_guest_with_sync | ||
400 | ;; | ||
401 | add r29=VPD_VTPR_START_OFFSET,r16 | ||
402 | add r30=VPD_VHPI_START_OFFSET,r16 | ||
403 | ;; | ||
404 | ld8 r29=[r29] | ||
405 | ld8 r30=[r30] | ||
406 | ;; | ||
407 | extr.u r17=r29,4,4 | ||
408 | extr.u r18=r29,16,1 | ||
409 | ;; | ||
410 | dep r17=r18,r17,4,1 | ||
411 | ;; | ||
412 | cmp.gt p6,p0=r30,r17 | ||
413 | (p6) br.dpnt.few kvm_asm_dispatch_vexirq | ||
414 | br.many kvm_resume_to_guest_with_sync | ||
415 | END(kvm_asm_ssm) | ||
416 | |||
417 | |||
418 | //mov psr.l=r2 | ||
419 | GLOBAL_ENTRY(kvm_asm_mov_to_psr) | ||
420 | #ifndef ACCE_MOV_TO_PSR | ||
421 | br.many kvm_virtualization_fault_back | ||
422 | #endif | ||
423 | VMX_VPS_SYNC_READ | ||
424 | ;; | ||
425 | extr.u r26=r25,13,7 //r2 | ||
426 | addl r20=@gprel(asm_mov_from_reg),gp | ||
427 | ;; | ||
428 | adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20 | ||
429 | shladd r26=r26,4,r20 | ||
430 | mov r24=b0 | ||
431 | ;; | ||
432 | add r27=VPD_VPSR_START_OFFSET,r16 | ||
433 | mov b0=r26 | ||
434 | br.many b0 | ||
435 | ;; | ||
436 | kvm_asm_mov_to_psr_back: | ||
437 | ld8 r17=[r27] | ||
438 | add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
439 | dep r19=0,r19,32,32 | ||
440 | ;; | ||
441 | ld4 r23=[r22] | ||
442 | dep r18=0,r17,0,32 | ||
443 | ;; | ||
444 | add r30=r18,r19 | ||
445 | movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT | ||
446 | ;; | ||
447 | st8 [r27]=r30 | ||
448 | and r27=r28,r30 | ||
449 | and r29=r28,r17 | ||
450 | ;; | ||
451 | cmp.eq p5,p0=r29,r27 | ||
452 | cmp.eq p6,p7=r28,r27 | ||
453 | (p5) br.many kvm_asm_mov_to_psr_1 | ||
454 | ;; | ||
455 | //virtual to physical | ||
456 | (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21 | ||
457 | (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21 | ||
458 | (p7) dep r23=-1,r23,0,1 | ||
459 | ;; | ||
460 | //physical to virtual | ||
461 | (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 | ||
462 | (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21 | ||
463 | (p6) dep r23=0,r23,0,1 | ||
464 | ;; | ||
465 | ld8 r26=[r26] | ||
466 | ld8 r27=[r27] | ||
467 | st4 [r22]=r23 | ||
468 | dep.z r28=4,61,3 | ||
469 | ;; | ||
470 | mov rr[r0]=r26 | ||
471 | ;; | ||
472 | mov rr[r28]=r27 | ||
473 | ;; | ||
474 | srlz.d | ||
475 | ;; | ||
476 | kvm_asm_mov_to_psr_1: | ||
477 | mov r20=cr.ipsr | ||
478 | movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT | ||
479 | ;; | ||
480 | or r19=r19,r28 | ||
481 | dep r20=0,r20,0,32 | ||
482 | ;; | ||
483 | add r20=r19,r20 | ||
484 | mov b0=r24 | ||
485 | ;; | ||
486 | /* Commented out due to lack of fp lazy algorithm support | ||
487 | adds r27=IA64_VCPU_FP_PSR_OFFSET,r21 | ||
488 | ;; | ||
489 | ld8 r27=[r27] | ||
490 | ;; | ||
491 | tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT | ||
492 | ;; | ||
493 | (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 | ||
494 | ;; | ||
495 | */ | ||
496 | mov cr.ipsr=r20 | ||
497 | cmp.ne p6,p0=r0,r0 | ||
498 | ;; | ||
499 | tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT | ||
500 | tbit.z.or p6,p0=r30,IA64_PSR_I_BIT | ||
501 | (p6) br.dpnt.few kvm_resume_to_guest_with_sync | ||
502 | ;; | ||
503 | add r29=VPD_VTPR_START_OFFSET,r16 | ||
504 | add r30=VPD_VHPI_START_OFFSET,r16 | ||
505 | ;; | ||
506 | ld8 r29=[r29] | ||
507 | ld8 r30=[r30] | ||
508 | ;; | ||
509 | extr.u r17=r29,4,4 | ||
510 | extr.u r18=r29,16,1 | ||
511 | ;; | ||
512 | dep r17=r18,r17,4,1 | ||
513 | ;; | ||
514 | cmp.gt p6,p0=r30,r17 | ||
515 | (p6) br.dpnt.few kvm_asm_dispatch_vexirq | ||
516 | br.many kvm_resume_to_guest_with_sync | ||
517 | END(kvm_asm_mov_to_psr) | ||
518 | |||
519 | |||
520 | ENTRY(kvm_asm_dispatch_vexirq) | ||
521 | //increment iip | ||
522 | mov r17 = b0 | ||
523 | mov r18 = r31 | ||
524 | {.mii | ||
525 | add r25=VMM_VPD_BASE_OFFSET,r21 | ||
526 | nop 0x0 | ||
527 | mov r24 = ip | ||
528 | ;; | ||
529 | } | ||
530 | {.mmb | ||
531 | add r24 = 0x20, r24 | ||
532 | ld8 r25 = [r25] | ||
533 | br.sptk.many kvm_vps_sync_write | ||
534 | } | ||
535 | mov b0 =r17 | ||
536 | mov r16=cr.ipsr | ||
537 | mov r31 = r18 | ||
538 | mov r19 = 37 | ||
539 | ;; | ||
540 | extr.u r17=r16,IA64_PSR_RI_BIT,2 | ||
541 | tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 | ||
542 | ;; | ||
543 | (p6) mov r18=cr.iip | ||
544 | (p6) mov r17=r0 | ||
545 | (p7) add r17=1,r17 | ||
546 | ;; | ||
547 | (p6) add r18=0x10,r18 | ||
548 | dep r16=r17,r16,IA64_PSR_RI_BIT,2 | ||
549 | ;; | ||
550 | (p6) mov cr.iip=r18 | ||
551 | mov cr.ipsr=r16 | ||
552 | mov r30 =1 | ||
553 | br.many kvm_dispatch_vexirq | ||
554 | END(kvm_asm_dispatch_vexirq) | ||
555 | |||
556 | // thash | ||
557 | // TODO: add support when pta.vf = 1 | ||
558 | GLOBAL_ENTRY(kvm_asm_thash) | ||
559 | #ifndef ACCE_THASH | ||
560 | br.many kvm_virtualization_fault_back | ||
561 | #endif | ||
562 | extr.u r17=r25,20,7 // get r3 from opcode in r25 | ||
563 | extr.u r18=r25,6,7 // get r1 from opcode in r25 | ||
564 | addl r20=@gprel(asm_mov_from_reg),gp | ||
565 | ;; | ||
566 | adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20 | ||
567 | shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17) | ||
568 | adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs | ||
569 | ;; | ||
570 | mov r24=b0 | ||
571 | ;; | ||
572 | ld8 r16=[r16] // get VPD addr | ||
573 | mov b0=r17 | ||
574 | br.many b0 // r19 return value | ||
575 | ;; | ||
576 | kvm_asm_thash_back1: | ||
577 | shr.u r23=r19,61 // get RR number | ||
578 | adds r28=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr | ||
579 | adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta | ||
580 | ;; | ||
581 | shladd r27=r23,3,r28 // get vcpu->arch.vrr[r23]'s addr | ||
582 | ld8 r17=[r16] // get PTA | ||
583 | mov r26=1 | ||
584 | ;; | ||
585 | extr.u r29=r17,2,6 // get pta.size | ||
586 | ld8 r28=[r27] // get vcpu->arch.vrr[r23]'s value | ||
587 | ;; | ||
588 | mov b0=r24 | ||
589 | // Fall back to C if pta.vf is set | ||
590 | tbit.nz p6,p0=r17, 8 | ||
591 | ;; | ||
592 | (p6) mov r24=EVENT_THASH | ||
593 | (p6) br.cond.dpnt.many kvm_virtualization_fault_back | ||
594 | extr.u r28=r28,2,6 // get rr.ps | ||
595 | shl r22=r26,r29 // 1UL << pta.size | ||
596 | ;; | ||
597 | shr.u r23=r19,r28 // vaddr >> rr.ps | ||
598 | adds r26=3,r29 // pta.size + 3 | ||
599 | shl r27=r17,3 // pta << 3 | ||
600 | ;; | ||
601 | shl r23=r23,3 // (vaddr >> rr.ps) << 3 | ||
602 | shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3) | ||
603 | movl r16=7<<61 | ||
604 | ;; | ||
605 | adds r22=-1,r22 // (1UL << pta.size) - 1 | ||
606 | shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size | ||
607 | and r19=r19,r16 // vaddr & VRN_MASK | ||
608 | ;; | ||
609 | and r22=r22,r23 // vhpt_offset | ||
610 | or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size) | ||
611 | adds r26=asm_mov_to_reg-asm_mov_from_reg,r20 | ||
612 | ;; | ||
613 | or r19=r19,r22 // calc pval | ||
614 | shladd r17=r18,4,r26 | ||
615 | adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 | ||
616 | ;; | ||
617 | mov b0=r17 | ||
618 | br.many b0 | ||
619 | END(kvm_asm_thash) | ||
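Rewritten in C, the address arithmetic annotated above computes the short-format VHPT entry address (pta.vf == 0; the long format falls back to C as noted). A sketch with assumed parameter names:

	#include <stdint.h>

	/* Hypothetical restatement of the short-format VHPT hash above. */
	static uint64_t thash_short(uint64_t vaddr, uint64_t pta, uint64_t rr_ps)
	{
		uint64_t pta_size = (pta >> 2) & 0x3f;		/* pta.size */
		uint64_t pta_base = ((pta << 3) >> (pta_size + 3)) << pta_size;
		uint64_t vhpt_off = ((vaddr >> rr_ps) << 3)
					& ((1UL << pta_size) - 1); /* vhpt_offset */
		uint64_t vrn = vaddr & (7UL << 61);		/* VRN_MASK */

		return vrn | pta_base | vhpt_off;		/* pval */
	}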
620 | |||
621 | #define MOV_TO_REG0 \ | ||
622 | {; \ | ||
623 | nop.b 0x0; \ | ||
624 | nop.b 0x0; \ | ||
625 | nop.b 0x0; \ | ||
626 | ;; \ | ||
627 | }; | ||
628 | |||
629 | |||
630 | #define MOV_TO_REG(n) \ | ||
631 | {; \ | ||
632 | mov r##n##=r19; \ | ||
633 | mov b0=r30; \ | ||
634 | br.sptk.many b0; \ | ||
635 | ;; \ | ||
636 | }; | ||
637 | |||
638 | |||
639 | #define MOV_FROM_REG(n) \ | ||
640 | {; \ | ||
641 | mov r19=r##n##; \ | ||
642 | mov b0=r30; \ | ||
643 | br.sptk.many b0; \ | ||
644 | ;; \ | ||
645 | }; | ||
646 | |||
647 | |||
648 | #define MOV_TO_BANK0_REG(n) \ | ||
649 | ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \ | ||
650 | {; \ | ||
651 | mov r26=r2; \ | ||
652 | mov r2=r19; \ | ||
653 | bsw.1; \ | ||
654 | ;; \ | ||
655 | }; \ | ||
656 | {; \ | ||
657 | mov r##n##=r2; \ | ||
658 | nop.b 0x0; \ | ||
659 | bsw.0; \ | ||
660 | ;; \ | ||
661 | }; \ | ||
662 | {; \ | ||
663 | mov r2=r26; \ | ||
664 | mov b0=r30; \ | ||
665 | br.sptk.many b0; \ | ||
666 | ;; \ | ||
667 | }; \ | ||
668 | END(asm_mov_to_bank0_reg##n##) | ||
669 | |||
670 | |||
671 | #define MOV_FROM_BANK0_REG(n) \ | ||
672 | ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \ | ||
673 | {; \ | ||
674 | mov r26=r2; \ | ||
675 | nop.b 0x0; \ | ||
676 | bsw.1; \ | ||
677 | ;; \ | ||
678 | }; \ | ||
679 | {; \ | ||
680 | mov r2=r##n##; \ | ||
681 | nop.b 0x0; \ | ||
682 | bsw.0; \ | ||
683 | ;; \ | ||
684 | }; \ | ||
685 | {; \ | ||
686 | mov r19=r2; \ | ||
687 | mov r2=r26; \ | ||
688 | mov b0=r30; \ | ||
689 | }; \ | ||
690 | {; \ | ||
691 | nop.b 0x0; \ | ||
692 | nop.b 0x0; \ | ||
693 | br.sptk.many b0; \ | ||
694 | ;; \ | ||
695 | }; \ | ||
696 | END(asm_mov_from_bank0_reg##n##) | ||
697 | |||
698 | |||
699 | #define JMP_TO_MOV_TO_BANK0_REG(n) \ | ||
700 | {; \ | ||
701 | nop.b 0x0; \ | ||
702 | nop.b 0x0; \ | ||
703 | br.sptk.many asm_mov_to_bank0_reg##n##; \ | ||
704 | ;; \ | ||
705 | } | ||
706 | |||
707 | |||
708 | #define JMP_TO_MOV_FROM_BANK0_REG(n) \ | ||
709 | {; \ | ||
710 | nop.b 0x0; \ | ||
711 | nop.b 0x0; \ | ||
712 | br.sptk.many asm_mov_from_bank0_reg##n##; \ | ||
713 | ;; \ | ||
714 | } | ||
715 | |||
716 | |||
717 | MOV_FROM_BANK0_REG(16) | ||
718 | MOV_FROM_BANK0_REG(17) | ||
719 | MOV_FROM_BANK0_REG(18) | ||
720 | MOV_FROM_BANK0_REG(19) | ||
721 | MOV_FROM_BANK0_REG(20) | ||
722 | MOV_FROM_BANK0_REG(21) | ||
723 | MOV_FROM_BANK0_REG(22) | ||
724 | MOV_FROM_BANK0_REG(23) | ||
725 | MOV_FROM_BANK0_REG(24) | ||
726 | MOV_FROM_BANK0_REG(25) | ||
727 | MOV_FROM_BANK0_REG(26) | ||
728 | MOV_FROM_BANK0_REG(27) | ||
729 | MOV_FROM_BANK0_REG(28) | ||
730 | MOV_FROM_BANK0_REG(29) | ||
731 | MOV_FROM_BANK0_REG(30) | ||
732 | MOV_FROM_BANK0_REG(31) | ||
733 | |||
734 | |||
735 | // mov from reg table | ||
736 | ENTRY(asm_mov_from_reg) | ||
737 | MOV_FROM_REG(0) | ||
738 | MOV_FROM_REG(1) | ||
739 | MOV_FROM_REG(2) | ||
740 | MOV_FROM_REG(3) | ||
741 | MOV_FROM_REG(4) | ||
742 | MOV_FROM_REG(5) | ||
743 | MOV_FROM_REG(6) | ||
744 | MOV_FROM_REG(7) | ||
745 | MOV_FROM_REG(8) | ||
746 | MOV_FROM_REG(9) | ||
747 | MOV_FROM_REG(10) | ||
748 | MOV_FROM_REG(11) | ||
749 | MOV_FROM_REG(12) | ||
750 | MOV_FROM_REG(13) | ||
751 | MOV_FROM_REG(14) | ||
752 | MOV_FROM_REG(15) | ||
753 | JMP_TO_MOV_FROM_BANK0_REG(16) | ||
754 | JMP_TO_MOV_FROM_BANK0_REG(17) | ||
755 | JMP_TO_MOV_FROM_BANK0_REG(18) | ||
756 | JMP_TO_MOV_FROM_BANK0_REG(19) | ||
757 | JMP_TO_MOV_FROM_BANK0_REG(20) | ||
758 | JMP_TO_MOV_FROM_BANK0_REG(21) | ||
759 | JMP_TO_MOV_FROM_BANK0_REG(22) | ||
760 | JMP_TO_MOV_FROM_BANK0_REG(23) | ||
761 | JMP_TO_MOV_FROM_BANK0_REG(24) | ||
762 | JMP_TO_MOV_FROM_BANK0_REG(25) | ||
763 | JMP_TO_MOV_FROM_BANK0_REG(26) | ||
764 | JMP_TO_MOV_FROM_BANK0_REG(27) | ||
765 | JMP_TO_MOV_FROM_BANK0_REG(28) | ||
766 | JMP_TO_MOV_FROM_BANK0_REG(29) | ||
767 | JMP_TO_MOV_FROM_BANK0_REG(30) | ||
768 | JMP_TO_MOV_FROM_BANK0_REG(31) | ||
769 | MOV_FROM_REG(32) | ||
770 | MOV_FROM_REG(33) | ||
771 | MOV_FROM_REG(34) | ||
772 | MOV_FROM_REG(35) | ||
773 | MOV_FROM_REG(36) | ||
774 | MOV_FROM_REG(37) | ||
775 | MOV_FROM_REG(38) | ||
776 | MOV_FROM_REG(39) | ||
777 | MOV_FROM_REG(40) | ||
778 | MOV_FROM_REG(41) | ||
779 | MOV_FROM_REG(42) | ||
780 | MOV_FROM_REG(43) | ||
781 | MOV_FROM_REG(44) | ||
782 | MOV_FROM_REG(45) | ||
783 | MOV_FROM_REG(46) | ||
784 | MOV_FROM_REG(47) | ||
785 | MOV_FROM_REG(48) | ||
786 | MOV_FROM_REG(49) | ||
787 | MOV_FROM_REG(50) | ||
788 | MOV_FROM_REG(51) | ||
789 | MOV_FROM_REG(52) | ||
790 | MOV_FROM_REG(53) | ||
791 | MOV_FROM_REG(54) | ||
792 | MOV_FROM_REG(55) | ||
793 | MOV_FROM_REG(56) | ||
794 | MOV_FROM_REG(57) | ||
795 | MOV_FROM_REG(58) | ||
796 | MOV_FROM_REG(59) | ||
797 | MOV_FROM_REG(60) | ||
798 | MOV_FROM_REG(61) | ||
799 | MOV_FROM_REG(62) | ||
800 | MOV_FROM_REG(63) | ||
801 | MOV_FROM_REG(64) | ||
802 | MOV_FROM_REG(65) | ||
803 | MOV_FROM_REG(66) | ||
804 | MOV_FROM_REG(67) | ||
805 | MOV_FROM_REG(68) | ||
806 | MOV_FROM_REG(69) | ||
807 | MOV_FROM_REG(70) | ||
808 | MOV_FROM_REG(71) | ||
809 | MOV_FROM_REG(72) | ||
810 | MOV_FROM_REG(73) | ||
811 | MOV_FROM_REG(74) | ||
812 | MOV_FROM_REG(75) | ||
813 | MOV_FROM_REG(76) | ||
814 | MOV_FROM_REG(77) | ||
815 | MOV_FROM_REG(78) | ||
816 | MOV_FROM_REG(79) | ||
817 | MOV_FROM_REG(80) | ||
818 | MOV_FROM_REG(81) | ||
819 | MOV_FROM_REG(82) | ||
820 | MOV_FROM_REG(83) | ||
821 | MOV_FROM_REG(84) | ||
822 | MOV_FROM_REG(85) | ||
823 | MOV_FROM_REG(86) | ||
824 | MOV_FROM_REG(87) | ||
825 | MOV_FROM_REG(88) | ||
826 | MOV_FROM_REG(89) | ||
827 | MOV_FROM_REG(90) | ||
828 | MOV_FROM_REG(91) | ||
829 | MOV_FROM_REG(92) | ||
830 | MOV_FROM_REG(93) | ||
831 | MOV_FROM_REG(94) | ||
832 | MOV_FROM_REG(95) | ||
833 | MOV_FROM_REG(96) | ||
834 | MOV_FROM_REG(97) | ||
835 | MOV_FROM_REG(98) | ||
836 | MOV_FROM_REG(99) | ||
837 | MOV_FROM_REG(100) | ||
838 | MOV_FROM_REG(101) | ||
839 | MOV_FROM_REG(102) | ||
840 | MOV_FROM_REG(103) | ||
841 | MOV_FROM_REG(104) | ||
842 | MOV_FROM_REG(105) | ||
843 | MOV_FROM_REG(106) | ||
844 | MOV_FROM_REG(107) | ||
845 | MOV_FROM_REG(108) | ||
846 | MOV_FROM_REG(109) | ||
847 | MOV_FROM_REG(110) | ||
848 | MOV_FROM_REG(111) | ||
849 | MOV_FROM_REG(112) | ||
850 | MOV_FROM_REG(113) | ||
851 | MOV_FROM_REG(114) | ||
852 | MOV_FROM_REG(115) | ||
853 | MOV_FROM_REG(116) | ||
854 | MOV_FROM_REG(117) | ||
855 | MOV_FROM_REG(118) | ||
856 | MOV_FROM_REG(119) | ||
857 | MOV_FROM_REG(120) | ||
858 | MOV_FROM_REG(121) | ||
859 | MOV_FROM_REG(122) | ||
860 | MOV_FROM_REG(123) | ||
861 | MOV_FROM_REG(124) | ||
862 | MOV_FROM_REG(125) | ||
863 | MOV_FROM_REG(126) | ||
864 | MOV_FROM_REG(127) | ||
865 | END(asm_mov_from_reg) | ||
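These tables work because every MOV_FROM_REG(n)/MOV_TO_REG(n) stub is exactly one 16-byte bundle, so callers can turn a register number into a stub address with a single shladd (register number times 16 plus the table base), as in the "shladd r17=r17,4,r20" lines throughout this file. A schematic of the address computation (the helper is illustrative only):

	/* Hypothetical sketch: each table entry is one 16-byte IA-64 bundle,
	 * so entry n lives at table_base + n * 16. */
	static void *reg_stub(void *table_base, unsigned int regnum)
	{
		return (char *)table_base + ((unsigned long)regnum << 4);
	}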
866 | |||
867 | |||
868 | /* must be in bank 0 | ||
869 | * parameter: | ||
870 | * r31: pr | ||
871 | * r24: b0 | ||
872 | */ | ||
873 | ENTRY(kvm_resume_to_guest_with_sync) | ||
874 | adds r19=VMM_VPD_BASE_OFFSET,r21 | ||
875 | mov r16 = r31 | ||
876 | mov r17 = r24 | ||
877 | ;; | ||
878 | {.mii | ||
879 | ld8 r25 =[r19] | ||
880 | nop 0x0 | ||
881 | mov r24 = ip | ||
882 | ;; | ||
883 | } | ||
884 | {.mmb | ||
885 | add r24 =0x20, r24 | ||
886 | nop 0x0 | ||
887 | br.sptk.many kvm_vps_sync_write | ||
888 | } | ||
889 | |||
890 | mov r31 = r16 | ||
891 | mov r24 =r17 | ||
892 | ;; | ||
893 | br.sptk.many kvm_resume_to_guest | ||
894 | END(kvm_resume_to_guest_with_sync) | ||
895 | |||
896 | ENTRY(kvm_resume_to_guest) | ||
897 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | ||
898 | ;; | ||
899 | ld8 r1 =[r16] | ||
900 | adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21 | ||
901 | ;; | ||
902 | mov r16=cr.ipsr | ||
903 | ;; | ||
904 | ld8 r20 = [r20] | ||
905 | adds r19=VMM_VPD_BASE_OFFSET,r21 | ||
906 | ;; | ||
907 | ld8 r25=[r19] | ||
908 | extr.u r17=r16,IA64_PSR_RI_BIT,2 | ||
909 | tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 | ||
910 | ;; | ||
911 | (p6) mov r18=cr.iip | ||
912 | (p6) mov r17=r0 | ||
913 | ;; | ||
914 | (p6) add r18=0x10,r18 | ||
915 | (p7) add r17=1,r17 | ||
916 | ;; | ||
917 | (p6) mov cr.iip=r18 | ||
918 | dep r16=r17,r16,IA64_PSR_RI_BIT,2 | ||
919 | ;; | ||
920 | mov cr.ipsr=r16 | ||
921 | adds r19= VPD_VPSR_START_OFFSET,r25 | ||
922 | add r28=PAL_VPS_RESUME_NORMAL,r20 | ||
923 | add r29=PAL_VPS_RESUME_HANDLER,r20 | ||
924 | ;; | ||
925 | ld8 r19=[r19] | ||
926 | mov b0=r29 | ||
927 | mov r27=cr.isr | ||
928 | ;; | ||
929 | tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7=vpsr.ic | ||
930 | shr r27=r27,IA64_ISR_IR_BIT | ||
931 | ;; | ||
932 | (p6) ld8 r26=[r25] | ||
933 | (p7) mov b0=r28 | ||
934 | ;; | ||
935 | (p6) dep r26=r27,r26,63,1 | ||
936 | mov pr=r31,-2 | ||
937 | br.sptk.many b0 // call pal service | ||
938 | ;; | ||
939 | END(kvm_resume_to_guest) | ||
940 | |||
941 | |||
942 | MOV_TO_BANK0_REG(16) | ||
943 | MOV_TO_BANK0_REG(17) | ||
944 | MOV_TO_BANK0_REG(18) | ||
945 | MOV_TO_BANK0_REG(19) | ||
946 | MOV_TO_BANK0_REG(20) | ||
947 | MOV_TO_BANK0_REG(21) | ||
948 | MOV_TO_BANK0_REG(22) | ||
949 | MOV_TO_BANK0_REG(23) | ||
950 | MOV_TO_BANK0_REG(24) | ||
951 | MOV_TO_BANK0_REG(25) | ||
952 | MOV_TO_BANK0_REG(26) | ||
953 | MOV_TO_BANK0_REG(27) | ||
954 | MOV_TO_BANK0_REG(28) | ||
955 | MOV_TO_BANK0_REG(29) | ||
956 | MOV_TO_BANK0_REG(30) | ||
957 | MOV_TO_BANK0_REG(31) | ||
958 | |||
959 | |||
960 | // mov to reg table | ||
961 | ENTRY(asm_mov_to_reg) | ||
962 | MOV_TO_REG0 | ||
963 | MOV_TO_REG(1) | ||
964 | MOV_TO_REG(2) | ||
965 | MOV_TO_REG(3) | ||
966 | MOV_TO_REG(4) | ||
967 | MOV_TO_REG(5) | ||
968 | MOV_TO_REG(6) | ||
969 | MOV_TO_REG(7) | ||
970 | MOV_TO_REG(8) | ||
971 | MOV_TO_REG(9) | ||
972 | MOV_TO_REG(10) | ||
973 | MOV_TO_REG(11) | ||
974 | MOV_TO_REG(12) | ||
975 | MOV_TO_REG(13) | ||
976 | MOV_TO_REG(14) | ||
977 | MOV_TO_REG(15) | ||
978 | JMP_TO_MOV_TO_BANK0_REG(16) | ||
979 | JMP_TO_MOV_TO_BANK0_REG(17) | ||
980 | JMP_TO_MOV_TO_BANK0_REG(18) | ||
981 | JMP_TO_MOV_TO_BANK0_REG(19) | ||
982 | JMP_TO_MOV_TO_BANK0_REG(20) | ||
983 | JMP_TO_MOV_TO_BANK0_REG(21) | ||
984 | JMP_TO_MOV_TO_BANK0_REG(22) | ||
985 | JMP_TO_MOV_TO_BANK0_REG(23) | ||
986 | JMP_TO_MOV_TO_BANK0_REG(24) | ||
987 | JMP_TO_MOV_TO_BANK0_REG(25) | ||
988 | JMP_TO_MOV_TO_BANK0_REG(26) | ||
989 | JMP_TO_MOV_TO_BANK0_REG(27) | ||
990 | JMP_TO_MOV_TO_BANK0_REG(28) | ||
991 | JMP_TO_MOV_TO_BANK0_REG(29) | ||
992 | JMP_TO_MOV_TO_BANK0_REG(30) | ||
993 | JMP_TO_MOV_TO_BANK0_REG(31) | ||
994 | MOV_TO_REG(32) | ||
995 | MOV_TO_REG(33) | ||
996 | MOV_TO_REG(34) | ||
997 | MOV_TO_REG(35) | ||
998 | MOV_TO_REG(36) | ||
999 | MOV_TO_REG(37) | ||
1000 | MOV_TO_REG(38) | ||
1001 | MOV_TO_REG(39) | ||
1002 | MOV_TO_REG(40) | ||
1003 | MOV_TO_REG(41) | ||
1004 | MOV_TO_REG(42) | ||
1005 | MOV_TO_REG(43) | ||
1006 | MOV_TO_REG(44) | ||
1007 | MOV_TO_REG(45) | ||
1008 | MOV_TO_REG(46) | ||
1009 | MOV_TO_REG(47) | ||
1010 | MOV_TO_REG(48) | ||
1011 | MOV_TO_REG(49) | ||
1012 | MOV_TO_REG(50) | ||
1013 | MOV_TO_REG(51) | ||
1014 | MOV_TO_REG(52) | ||
1015 | MOV_TO_REG(53) | ||
1016 | MOV_TO_REG(54) | ||
1017 | MOV_TO_REG(55) | ||
1018 | MOV_TO_REG(56) | ||
1019 | MOV_TO_REG(57) | ||
1020 | MOV_TO_REG(58) | ||
1021 | MOV_TO_REG(59) | ||
1022 | MOV_TO_REG(60) | ||
1023 | MOV_TO_REG(61) | ||
1024 | MOV_TO_REG(62) | ||
1025 | MOV_TO_REG(63) | ||
1026 | MOV_TO_REG(64) | ||
1027 | MOV_TO_REG(65) | ||
1028 | MOV_TO_REG(66) | ||
1029 | MOV_TO_REG(67) | ||
1030 | MOV_TO_REG(68) | ||
1031 | MOV_TO_REG(69) | ||
1032 | MOV_TO_REG(70) | ||
1033 | MOV_TO_REG(71) | ||
1034 | MOV_TO_REG(72) | ||
1035 | MOV_TO_REG(73) | ||
1036 | MOV_TO_REG(74) | ||
1037 | MOV_TO_REG(75) | ||
1038 | MOV_TO_REG(76) | ||
1039 | MOV_TO_REG(77) | ||
1040 | MOV_TO_REG(78) | ||
1041 | MOV_TO_REG(79) | ||
1042 | MOV_TO_REG(80) | ||
1043 | MOV_TO_REG(81) | ||
1044 | MOV_TO_REG(82) | ||
1045 | MOV_TO_REG(83) | ||
1046 | MOV_TO_REG(84) | ||
1047 | MOV_TO_REG(85) | ||
1048 | MOV_TO_REG(86) | ||
1049 | MOV_TO_REG(87) | ||
1050 | MOV_TO_REG(88) | ||
1051 | MOV_TO_REG(89) | ||
1052 | MOV_TO_REG(90) | ||
1053 | MOV_TO_REG(91) | ||
1054 | MOV_TO_REG(92) | ||
1055 | MOV_TO_REG(93) | ||
1056 | MOV_TO_REG(94) | ||
1057 | MOV_TO_REG(95) | ||
1058 | MOV_TO_REG(96) | ||
1059 | MOV_TO_REG(97) | ||
1060 | MOV_TO_REG(98) | ||
1061 | MOV_TO_REG(99) | ||
1062 | MOV_TO_REG(100) | ||
1063 | MOV_TO_REG(101) | ||
1064 | MOV_TO_REG(102) | ||
1065 | MOV_TO_REG(103) | ||
1066 | MOV_TO_REG(104) | ||
1067 | MOV_TO_REG(105) | ||
1068 | MOV_TO_REG(106) | ||
1069 | MOV_TO_REG(107) | ||
1070 | MOV_TO_REG(108) | ||
1071 | MOV_TO_REG(109) | ||
1072 | MOV_TO_REG(110) | ||
1073 | MOV_TO_REG(111) | ||
1074 | MOV_TO_REG(112) | ||
1075 | MOV_TO_REG(113) | ||
1076 | MOV_TO_REG(114) | ||
1077 | MOV_TO_REG(115) | ||
1078 | MOV_TO_REG(116) | ||
1079 | MOV_TO_REG(117) | ||
1080 | MOV_TO_REG(118) | ||
1081 | MOV_TO_REG(119) | ||
1082 | MOV_TO_REG(120) | ||
1083 | MOV_TO_REG(121) | ||
1084 | MOV_TO_REG(122) | ||
1085 | MOV_TO_REG(123) | ||
1086 | MOV_TO_REG(124) | ||
1087 | MOV_TO_REG(125) | ||
1088 | MOV_TO_REG(126) | ||
1089 | MOV_TO_REG(127) | ||
1090 | END(asm_mov_to_reg) | ||
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c deleted file mode 100644 index b0398740b48d..000000000000 --- a/arch/ia64/kvm/process.c +++ /dev/null | |||
@@ -1,1024 +0,0 @@ | |||
1 | /* | ||
2 | * process.c: handle interruption inject for guests. | ||
3 | * Copyright (c) 2005, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
17 | * | ||
18 | * Shaofan Li (Susue Li) <susie.li@intel.com> | ||
19 | * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> | ||
20 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
21 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
22 | */ | ||
23 | #include "vcpu.h" | ||
24 | |||
25 | #include <asm/pal.h> | ||
26 | #include <asm/sal.h> | ||
27 | #include <asm/fpswa.h> | ||
28 | #include <asm/kregs.h> | ||
29 | #include <asm/tlb.h> | ||
30 | |||
31 | fpswa_interface_t *vmm_fpswa_interface; | ||
32 | |||
33 | #define IA64_VHPT_TRANS_VECTOR 0x0000 | ||
34 | #define IA64_INST_TLB_VECTOR 0x0400 | ||
35 | #define IA64_DATA_TLB_VECTOR 0x0800 | ||
36 | #define IA64_ALT_INST_TLB_VECTOR 0x0c00 | ||
37 | #define IA64_ALT_DATA_TLB_VECTOR 0x1000 | ||
38 | #define IA64_DATA_NESTED_TLB_VECTOR 0x1400 | ||
39 | #define IA64_INST_KEY_MISS_VECTOR 0x1800 | ||
40 | #define IA64_DATA_KEY_MISS_VECTOR 0x1c00 | ||
41 | #define IA64_DIRTY_BIT_VECTOR 0x2000 | ||
42 | #define IA64_INST_ACCESS_BIT_VECTOR 0x2400 | ||
43 | #define IA64_DATA_ACCESS_BIT_VECTOR 0x2800 | ||
44 | #define IA64_BREAK_VECTOR 0x2c00 | ||
45 | #define IA64_EXTINT_VECTOR 0x3000 | ||
46 | #define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000 | ||
47 | #define IA64_KEY_PERMISSION_VECTOR 0x5100 | ||
48 | #define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200 | ||
49 | #define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300 | ||
50 | #define IA64_GENEX_VECTOR 0x5400 | ||
51 | #define IA64_DISABLED_FPREG_VECTOR 0x5500 | ||
52 | #define IA64_NAT_CONSUMPTION_VECTOR 0x5600 | ||
53 | #define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */ | ||
54 | #define IA64_DEBUG_VECTOR 0x5900 | ||
55 | #define IA64_UNALIGNED_REF_VECTOR 0x5a00 | ||
56 | #define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00 | ||
57 | #define IA64_FP_FAULT_VECTOR 0x5c00 | ||
58 | #define IA64_FP_TRAP_VECTOR 0x5d00 | ||
59 | #define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00 | ||
60 | #define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00 | ||
61 | #define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000 | ||
62 | |||
63 | /* SDM vol2 5.5 - IVA based interruption handling */ | ||
64 | #define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\ | ||
65 | IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \ | ||
66 | IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT) | ||
67 | |||
68 | #define DOMN_PAL_REQUEST 0x110000 | ||
69 | #define DOMN_SAL_REQUEST 0x110001 | ||
70 | |||
71 | static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800, | ||
72 | 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00, | ||
73 | 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, | ||
74 | 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00, | ||
75 | 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600, | ||
76 | 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00, | ||
77 | 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800, | ||
78 | 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00 | ||
79 | }; | ||
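reflect_interruption() below indexes this table with a small vector number rather than the raw IVT offset; for instance kvm_ia64_handle_break() passes vec = 11, which this table maps to 0x2c00, the break vector. A sanity check of that mapping (illustrative only):

	#include <assert.h>

	static void vec2off_examples(void)
	{
		assert(vec2off[11] == 0x2c00);	/* IA64_BREAK_VECTOR    */
		assert(vec2off[12] == 0x3000);	/* IA64_EXTINT_VECTOR   */
		assert(vec2off[32] == 0x5c00);	/* IA64_FP_FAULT_VECTOR */
	}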
80 | |||
81 | static void collect_interruption(struct kvm_vcpu *vcpu) | ||
82 | { | ||
83 | u64 ipsr; | ||
84 | u64 vdcr; | ||
85 | u64 vifs; | ||
86 | unsigned long vpsr; | ||
87 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
88 | |||
89 | vpsr = vcpu_get_psr(vcpu); | ||
90 | vcpu_bsw0(vcpu); | ||
91 | if (vpsr & IA64_PSR_IC) { | ||
92 | |||
93 | /* Sync mpsr id/da/dd/ss/ed bits to vipsr, | ||
94 | * since after the guest does rfi we still want these | ||
95 | * bits set in mpsr | ||
96 | */ | ||
97 | |||
98 | ipsr = regs->cr_ipsr; | ||
99 | vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA | ||
100 | | IA64_PSR_DD | IA64_PSR_SS | ||
101 | | IA64_PSR_ED)); | ||
102 | vcpu_set_ipsr(vcpu, vpsr); | ||
103 | |||
104 | /* Currently, for a trap, we do not advance IIP to the next | ||
105 | * instruction, because we assume the caller has already | ||
106 | * set up IIP correctly | ||
107 | */ | ||
108 | |||
109 | vcpu_set_iip(vcpu , regs->cr_iip); | ||
110 | |||
111 | /* set vifs.v to zero */ | ||
112 | vifs = VCPU(vcpu, ifs); | ||
113 | vifs &= ~IA64_IFS_V; | ||
114 | vcpu_set_ifs(vcpu, vifs); | ||
115 | |||
116 | vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa)); | ||
117 | } | ||
118 | |||
119 | vdcr = VCPU(vcpu, dcr); | ||
120 | |||
121 | /* Set guest psr | ||
122 | * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged | ||
123 | * be: set to the value of dcr.be | ||
124 | * pp: set to the value of dcr.pp | ||
125 | */ | ||
126 | vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION; | ||
127 | vpsr |= (vdcr & IA64_DCR_BE); | ||
128 | |||
129 | /* VDCR pp bit position is different from VPSR pp bit */ | ||
130 | if (vdcr & IA64_DCR_PP) { | ||
131 | vpsr |= IA64_PSR_PP; | ||
132 | } else { | ||
133 | vpsr &= ~IA64_PSR_PP; | ||
134 | } | ||
135 | |||
136 | vcpu_set_psr(vcpu, vpsr); | ||
137 | |||
138 | } | ||
139 | |||
140 | void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec) | ||
141 | { | ||
142 | u64 viva; | ||
143 | struct kvm_pt_regs *regs; | ||
144 | union ia64_isr pt_isr; | ||
145 | |||
146 | regs = vcpu_regs(vcpu); | ||
147 | |||
148 | /* clear cr.isr.ir (incomplete register frame) */ | ||
149 | pt_isr.val = VMX(vcpu, cr_isr); | ||
150 | pt_isr.ir = 0; | ||
151 | VMX(vcpu, cr_isr) = pt_isr.val; | ||
152 | |||
153 | collect_interruption(vcpu); | ||
154 | |||
155 | viva = vcpu_get_iva(vcpu); | ||
156 | regs->cr_iip = viva + vec; | ||
157 | } | ||
158 | |||
159 | static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa) | ||
160 | { | ||
161 | union ia64_rr rr, rr1; | ||
162 | |||
163 | rr.val = vcpu_get_rr(vcpu, ifa); | ||
164 | rr1.val = 0; | ||
165 | rr1.ps = rr.ps; | ||
166 | rr1.rid = rr.rid; | ||
167 | return (rr1.val); | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Set vIFA & vITIR & vIHA when vPSR.ic = 1 | ||
172 | * Parameter: | ||
173 | * set_ifa: if true, set vIFA | ||
174 | * set_itir: if true, set vITIR | ||
175 | * set_iha: if true, set vIHA | ||
176 | */ | ||
177 | void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr, | ||
178 | int set_ifa, int set_itir, int set_iha) | ||
179 | { | ||
180 | long vpsr; | ||
181 | u64 value; | ||
182 | |||
183 | vpsr = VCPU(vcpu, vpsr); | ||
184 | /* Vol2, Table 8-1 */ | ||
185 | if (vpsr & IA64_PSR_IC) { | ||
186 | if (set_ifa) | ||
187 | vcpu_set_ifa(vcpu, vadr); | ||
188 | if (set_itir) { | ||
189 | value = vcpu_get_itir_on_fault(vcpu, vadr); | ||
190 | vcpu_set_itir(vcpu, value); | ||
191 | } | ||
192 | |||
193 | if (set_iha) { | ||
194 | value = vcpu_thash(vcpu, vadr); | ||
195 | vcpu_set_iha(vcpu, value); | ||
196 | } | ||
197 | } | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * Data TLB Fault | ||
202 | * @ Data TLB vector | ||
203 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
204 | */ | ||
205 | void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr) | ||
206 | { | ||
207 | /* If vPSR.ic, IFA, ITIR, IHA */ | ||
208 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); | ||
209 | inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR); | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Instruction TLB Fault | ||
214 | * @ Instruction TLB vector | ||
215 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
216 | */ | ||
217 | void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr) | ||
218 | { | ||
219 | /* If vPSR.ic, IFA, ITIR, IHA */ | ||
220 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); | ||
221 | inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Data Nested TLB Fault | ||
226 | * @ Data Nested TLB Vector | ||
227 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
228 | */ | ||
229 | void nested_dtlb(struct kvm_vcpu *vcpu) | ||
230 | { | ||
231 | inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR); | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * Alternate Data TLB Fault | ||
236 | * @ Alternate Data TLB vector | ||
237 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
238 | */ | ||
239 | void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr) | ||
240 | { | ||
241 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); | ||
242 | inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Alternate Instruction TLB Fault | ||
247 | * @ Alternate Instruction TLB vector | ||
248 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
249 | */ | ||
250 | void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr) | ||
251 | { | ||
252 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); | ||
253 | inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR); | ||
254 | } | ||
255 | |||
256 | /* Deal with: | ||
257 | * VHPT Translation Vector | ||
258 | */ | ||
259 | static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) | ||
260 | { | ||
261 | /* If vPSR.ic, IFA, ITIR, IHA*/ | ||
262 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); | ||
263 | inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * VHPT Instruction Fault | ||
268 | * @ VHPT Translation vector | ||
269 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
270 | */ | ||
271 | void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) | ||
272 | { | ||
273 | _vhpt_fault(vcpu, vadr); | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * VHPT Data Fault | ||
278 | * @ VHPT Translation vector | ||
279 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
280 | */ | ||
281 | void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) | ||
282 | { | ||
283 | _vhpt_fault(vcpu, vadr); | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * Deal with: | ||
288 | * General Exception vector | ||
289 | */ | ||
290 | void _general_exception(struct kvm_vcpu *vcpu) | ||
291 | { | ||
292 | inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Illegal Operation Fault | ||
297 | * @ General Exception Vector | ||
298 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
299 | */ | ||
300 | void illegal_op(struct kvm_vcpu *vcpu) | ||
301 | { | ||
302 | _general_exception(vcpu); | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * Illegal Dependency Fault | ||
307 | * @ General Exception Vector | ||
308 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
309 | */ | ||
310 | void illegal_dep(struct kvm_vcpu *vcpu) | ||
311 | { | ||
312 | _general_exception(vcpu); | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * Reserved Register/Field Fault | ||
317 | * @ General Exception Vector | ||
318 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
319 | */ | ||
320 | void rsv_reg_field(struct kvm_vcpu *vcpu) | ||
321 | { | ||
322 | _general_exception(vcpu); | ||
323 | } | ||
324 | /* | ||
325 | * Privileged Operation Fault | ||
326 | * @ General Exception Vector | ||
327 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
328 | */ | ||
329 | |||
330 | void privilege_op(struct kvm_vcpu *vcpu) | ||
331 | { | ||
332 | _general_exception(vcpu); | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Unimplemented Data Address Fault | ||
337 | * @ General Exception Vector | ||
338 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
339 | */ | ||
340 | void unimpl_daddr(struct kvm_vcpu *vcpu) | ||
341 | { | ||
342 | _general_exception(vcpu); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * Privileged Register Fault | ||
347 | * @ General Exception Vector | ||
348 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
349 | */ | ||
350 | void privilege_reg(struct kvm_vcpu *vcpu) | ||
351 | { | ||
352 | _general_exception(vcpu); | ||
353 | } | ||
354 | |||
355 | /* Deal with | ||
356 | * Nat consumption vector | ||
357 | * Parameter: | ||
358 | * vadr: optional; used only when t == DATA or INSTRUCTION | ||
359 | */ | ||
360 | static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr, | ||
361 | enum tlb_miss_type t) | ||
362 | { | ||
363 | /* If vPSR.ic && t == DATA/INST, IFA */ | ||
364 | if (t == DATA || t == INSTRUCTION) { | ||
365 | /* IFA */ | ||
366 | set_ifa_itir_iha(vcpu, vadr, 1, 0, 0); | ||
367 | } | ||
368 | |||
369 | inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR); | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Instruction Nat Page Consumption Fault | ||
374 | * @ Nat Consumption Vector | ||
375 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
376 | */ | ||
377 | void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) | ||
378 | { | ||
379 | _nat_consumption_fault(vcpu, vadr, INSTRUCTION); | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * Register Nat Consumption Fault | ||
384 | * @ Nat Consumption Vector | ||
385 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
386 | */ | ||
387 | void rnat_consumption(struct kvm_vcpu *vcpu) | ||
388 | { | ||
389 | _nat_consumption_fault(vcpu, 0, REGISTER); | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * Data Nat Page Consumption Fault | ||
394 | * @ Nat Consumption Vector | ||
395 | * Refer to SDM Vol2 Table 5-6 & 8-1 | ||
396 | */ | ||
397 | void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) | ||
398 | { | ||
399 | _nat_consumption_fault(vcpu, vadr, DATA); | ||
400 | } | ||
401 | |||
402 | /* Deal with | ||
403 | * Page not present vector | ||
404 | */ | ||
405 | static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr) | ||
406 | { | ||
407 | /* If vPSR.ic, IFA, ITIR */ | ||
408 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); | ||
409 | inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); | ||
410 | } | ||
411 | |||
412 | void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) | ||
413 | { | ||
414 | __page_not_present(vcpu, vadr); | ||
415 | } | ||
416 | |||
417 | void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) | ||
418 | { | ||
419 | __page_not_present(vcpu, vadr); | ||
420 | } | ||
421 | |||
422 | /* Deal with | ||
423 | * Data access rights vector | ||
424 | */ | ||
425 | void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr) | ||
426 | { | ||
427 | /* If vPSR.ic, IFA, ITIR */ | ||
428 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); | ||
429 | inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR); | ||
430 | } | ||
431 | |||
432 | fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr, | ||
433 | unsigned long *fpsr, unsigned long *isr, unsigned long *pr, | ||
434 | unsigned long *ifs, struct kvm_pt_regs *regs) | ||
435 | { | ||
436 | fp_state_t fp_state; | ||
437 | fpswa_ret_t ret; | ||
438 | struct kvm_vcpu *vcpu = current_vcpu; | ||
439 | |||
440 | uint64_t old_rr7 = ia64_get_rr(7UL<<61); | ||
441 | |||
442 | if (!vmm_fpswa_interface) | ||
443 | return (fpswa_ret_t) {-1, 0, 0, 0}; | ||
444 | |||
445 | memset(&fp_state, 0, sizeof(fp_state_t)); | ||
446 | |||
447 | /* | ||
448 | * compute fp_state. only FP registers f6 - f11 are used by the | ||
449 | * vmm, so set those bits in the mask and set the low volatile | ||
450 | * pointer to point to these registers. | ||
451 | */ | ||
452 | fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */ | ||
453 | |||
454 | fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) ®s->f6; | ||
455 | |||
456 | /* | ||
457 | * unsigned long (*EFI_FPSWA) ( | ||
458 | * unsigned long trap_type, | ||
459 | * void *Bundle, | ||
460 | * unsigned long *pipsr, | ||
461 | * unsigned long *pfsr, | ||
462 | * unsigned long *pisr, | ||
463 | * unsigned long *ppreds, | ||
464 | * unsigned long *pifs, | ||
465 | * void *fp_state); | ||
466 | */ | ||
467 | /* Call host fpswa interface directly to virtualize | ||
468 | * guest fpswa requests. | ||
469 | */ | ||
470 | ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]); | ||
471 | ia64_srlz_d(); | ||
472 | |||
473 | ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle, | ||
474 | ipsr, fpsr, isr, pr, ifs, &fp_state); | ||
475 | ia64_set_rr(7UL << 61, old_rr7); | ||
476 | ia64_srlz_d(); | ||
477 | return ret; | ||
478 | } | ||
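The 0xfc0 mask above is simply bits 6 through 11, one per live register f6..f11, as the comment says. A quick illustrative check:

	#include <assert.h>

	static void fp_bitmask_check(void)
	{
		unsigned long mask = 0;
		int i;

		for (i = 6; i <= 11; i++)	/* f6 .. f11 */
			mask |= 1UL << i;
		assert(mask == 0xfc0);
	}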
479 | |||
480 | /* | ||
481 | * Handle floating-point assist faults and traps for domain. | ||
482 | */ | ||
483 | unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs, | ||
484 | unsigned long isr) | ||
485 | { | ||
486 | struct kvm_vcpu *v = current_vcpu; | ||
487 | IA64_BUNDLE bundle; | ||
488 | unsigned long fault_ip; | ||
489 | fpswa_ret_t ret; | ||
490 | |||
491 | fault_ip = regs->cr_iip; | ||
492 | /* | ||
493 | * When the FP trap occurs, the trapping instruction is completed. | ||
494 | * If ipsr.ri == 0, the trapping instruction is in the previous | ||
495 | * bundle. | ||
496 | */ | ||
497 | if (!fp_fault && (ia64_psr(regs)->ri == 0)) | ||
498 | fault_ip -= 16; | ||
499 | |||
500 | if (fetch_code(v, fault_ip, &bundle)) | ||
501 | return -EAGAIN; | ||
502 | |||
503 | if (!bundle.i64[0] && !bundle.i64[1]) | ||
504 | return -EACCES; | ||
505 | |||
506 | ret = vmm_fp_emulate(fp_fault, &bundle, ®s->cr_ipsr, ®s->ar_fpsr, | ||
507 | &isr, ®s->pr, ®s->cr_ifs, regs); | ||
508 | return ret.status; | ||
509 | } | ||
510 | |||
511 | void reflect_interruption(u64 ifa, u64 isr, u64 iim, | ||
512 | u64 vec, struct kvm_pt_regs *regs) | ||
513 | { | ||
514 | u64 vector; | ||
515 | int status; | ||
516 | struct kvm_vcpu *vcpu = current_vcpu; | ||
517 | u64 vpsr = VCPU(vcpu, vpsr); | ||
518 | |||
519 | vector = vec2off[vec]; | ||
520 | |||
521 | if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { | ||
522 | panic_vm(vcpu, "Interruption with vector 0x%lx occurs " | ||
523 | "with psr.ic = 0\n", vector); | ||
524 | return; | ||
525 | } | ||
526 | |||
527 | switch (vec) { | ||
528 | case 32: /*IA64_FP_FAULT_VECTOR*/ | ||
529 | status = vmm_handle_fpu_swa(1, regs, isr); | ||
530 | if (!status) { | ||
531 | vcpu_increment_iip(vcpu); | ||
532 | return; | ||
533 | } else if (-EAGAIN == status) | ||
534 | return; | ||
535 | break; | ||
536 | case 33: /*IA64_FP_TRAP_VECTOR*/ | ||
537 | status = vmm_handle_fpu_swa(0, regs, isr); | ||
538 | if (!status) | ||
539 | return; | ||
540 | break; | ||
541 | } | ||
542 | |||
543 | VCPU(vcpu, isr) = isr; | ||
544 | VCPU(vcpu, iipa) = regs->cr_iip; | ||
545 | if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR) | ||
546 | VCPU(vcpu, iim) = iim; | ||
547 | else | ||
548 | set_ifa_itir_iha(vcpu, ifa, 1, 1, 1); | ||
549 | |||
550 | inject_guest_interruption(vcpu, vector); | ||
551 | } | ||
552 | |||
553 | static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu, | ||
554 | unsigned long arg) | ||
555 | { | ||
556 | struct thash_data *data; | ||
557 | unsigned long gpa, poff; | ||
558 | |||
559 | if (!is_physical_mode(vcpu)) { | ||
560 | /* Depends on caller to provide the DTR or DTC mapping.*/ | ||
561 | data = vtlb_lookup(vcpu, arg, D_TLB); | ||
562 | if (data) | ||
563 | gpa = data->page_flags & _PAGE_PPN_MASK; | ||
564 | else { | ||
565 | data = vhpt_lookup(arg); | ||
566 | if (!data) | ||
567 | return 0; | ||
568 | gpa = data->gpaddr & _PAGE_PPN_MASK; | ||
569 | } | ||
570 | |||
571 | poff = arg & (PSIZE(data->ps) - 1); | ||
572 | arg = PAGEALIGN(gpa, data->ps) | poff; | ||
573 | } | ||
574 | arg = kvm_gpa_to_mpa(arg << 1 >> 1); | ||
575 | |||
576 | return (unsigned long)__va(arg); | ||
577 | } | ||
578 | |||
579 | static void set_pal_call_data(struct kvm_vcpu *vcpu) | ||
580 | { | ||
581 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
582 | unsigned long gr28 = vcpu_get_gr(vcpu, 28); | ||
583 | unsigned long gr29 = vcpu_get_gr(vcpu, 29); | ||
584 | unsigned long gr30 = vcpu_get_gr(vcpu, 30); | ||
585 | |||
586 | /* FIXME: For the static and stacked conventions, firmware | ||
587 | * has put the parameters in gr28-gr31 before | ||
588 | * breaking to the vmm. */ | ||
589 | |||
590 | switch (gr28) { | ||
591 | case PAL_PERF_MON_INFO: | ||
592 | case PAL_HALT_INFO: | ||
593 | p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29); | ||
594 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | ||
595 | break; | ||
596 | case PAL_BRAND_INFO: | ||
597 | p->u.pal_data.gr29 = gr29; | ||
598 | p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); | ||
599 | break; | ||
600 | default: | ||
601 | p->u.pal_data.gr29 = gr29; | ||
602 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | ||
603 | } | ||
604 | p->u.pal_data.gr28 = gr28; | ||
605 | p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); | ||
606 | |||
607 | p->exit_reason = EXIT_REASON_PAL_CALL; | ||
608 | } | ||
609 | |||
610 | static void get_pal_call_result(struct kvm_vcpu *vcpu) | ||
611 | { | ||
612 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
613 | |||
614 | if (p->exit_reason == EXIT_REASON_PAL_CALL) { | ||
615 | vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0); | ||
616 | vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0); | ||
617 | vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); | ||
618 | vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); | ||
619 | } else | ||
620 | panic_vm(vcpu, "Mis-set for exit reason!\n"); | ||
621 | } | ||
622 | |||
623 | static void set_sal_call_data(struct kvm_vcpu *vcpu) | ||
624 | { | ||
625 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
626 | |||
627 | p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32); | ||
628 | p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33); | ||
629 | p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34); | ||
630 | p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35); | ||
631 | p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36); | ||
632 | p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37); | ||
633 | p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38); | ||
634 | p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39); | ||
635 | p->exit_reason = EXIT_REASON_SAL_CALL; | ||
636 | } | ||
637 | |||
638 | static void get_sal_call_result(struct kvm_vcpu *vcpu) | ||
639 | { | ||
640 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
641 | |||
642 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
643 | vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0); | ||
644 | vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0); | ||
645 | vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); | ||
646 | vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); | ||
647 | } else | ||
648 | panic_vm(vcpu, "Mis-set for exit reason!\n"); | ||
649 | } | ||
650 | |||
651 | void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, | ||
652 | unsigned long isr, unsigned long iim) | ||
653 | { | ||
654 | struct kvm_vcpu *v = current_vcpu; | ||
655 | long psr; | ||
656 | |||
657 | if (ia64_psr(regs)->cpl == 0) { | ||
658 | /* Allow hypercalls only when cpl = 0. */ | ||
659 | if (iim == DOMN_PAL_REQUEST) { | ||
660 | local_irq_save(psr); | ||
661 | set_pal_call_data(v); | ||
662 | vmm_transition(v); | ||
663 | get_pal_call_result(v); | ||
664 | vcpu_increment_iip(v); | ||
665 | local_irq_restore(psr); | ||
666 | return; | ||
667 | } else if (iim == DOMN_SAL_REQUEST) { | ||
668 | local_irq_save(psr); | ||
669 | set_sal_call_data(v); | ||
670 | vmm_transition(v); | ||
671 | get_sal_call_result(v); | ||
672 | vcpu_increment_iip(v); | ||
673 | local_irq_restore(psr); | ||
674 | return; | ||
675 | } | ||
676 | } | ||
677 | reflect_interruption(ifa, isr, iim, 11, regs); | ||
678 | } | ||
679 | |||
680 | void check_pending_irq(struct kvm_vcpu *vcpu) | ||
681 | { | ||
682 | int mask, h_pending, h_inservice; | ||
683 | u64 isr; | ||
684 | unsigned long vpsr; | ||
685 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
686 | |||
687 | h_pending = highest_pending_irq(vcpu); | ||
688 | if (h_pending == NULL_VECTOR) { | ||
689 | update_vhpi(vcpu, NULL_VECTOR); | ||
690 | return; | ||
691 | } | ||
692 | h_inservice = highest_inservice_irq(vcpu); | ||
693 | |||
694 | vpsr = VCPU(vcpu, vpsr); | ||
695 | mask = irq_masked(vcpu, h_pending, h_inservice); | ||
696 | if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) { | ||
697 | isr = vpsr & IA64_PSR_RI; | ||
698 | update_vhpi(vcpu, h_pending); | ||
699 | reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ | ||
700 | } else if (mask == IRQ_MASKED_BY_INSVC) { | ||
701 | if (VCPU(vcpu, vhpi)) | ||
702 | update_vhpi(vcpu, NULL_VECTOR); | ||
703 | } else { | ||
704 | /* Masked by vpsr.i or vtpr. */ | ||
705 | update_vhpi(vcpu, h_pending); | ||
706 | } | ||
707 | } | ||
708 | |||
709 | static void generate_exirq(struct kvm_vcpu *vcpu) | ||
710 | { | ||
711 | unsigned long vpsr; | ||
712 | uint64_t isr; | ||
713 | |||
714 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
715 | |||
716 | vpsr = VCPU(vcpu, vpsr); | ||
717 | isr = vpsr & IA64_PSR_RI; | ||
718 | if (!(vpsr & IA64_PSR_IC)) | ||
719 | panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n"); | ||
720 | reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ | ||
721 | } | ||
722 | |||
723 | void vhpi_detection(struct kvm_vcpu *vcpu) | ||
724 | { | ||
725 | uint64_t threshold, vhpi; | ||
726 | union ia64_tpr vtpr; | ||
727 | struct ia64_psr vpsr; | ||
728 | |||
729 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
730 | vtpr.val = VCPU(vcpu, tpr); | ||
731 | |||
732 | threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic; | ||
733 | vhpi = VCPU(vcpu, vhpi); | ||
734 | if (vhpi > threshold) { | ||
735 | /* interrupt activated */ | ||
736 | generate_exirq(vcpu); | ||
737 | } | ||
738 | } | ||
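The threshold computed in vhpi_detection() packs three masking conditions into one integer so that a single compare decides delivery: bit 5 is set whenever vpsr.i is clear (everything masked), bit 4 mirrors vtpr.mmi (all external interrupts masked), and bits 0-3 carry vtpr.mic (priority classes up to that value masked). vhpi holds the pending interrupt's priority class (roughly vector/16 in the VPD encoding), so vhpi > threshold is true only when no mask applies. A standalone sketch of the same test:

    #include <stdio.h>

    /* Field widths follow the threshold computation in vhpi_detection(). */
    static int vhpi_would_fire(int psr_i, int mmi, int mic, int vhpi)
    {
            int threshold = ((!psr_i) << 5) | (mmi << 4) | mic;

            return vhpi > threshold;
    }

    int main(void)
    {
            /* interrupts enabled, tpr.mic = 3: class 5 fires, class 2 does not */
            printf("%d %d\n", vhpi_would_fire(1, 0, 3, 5),
                              vhpi_would_fire(1, 0, 3, 2));     /* 1 0 */
            /* psr.i clear: even the highest class stays pending */
            printf("%d\n", vhpi_would_fire(0, 0, 0, 15));       /* 0 */
            return 0;
    }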
739 | |||
740 | void leave_hypervisor_tail(void) | ||
741 | { | ||
742 | struct kvm_vcpu *v = current_vcpu; | ||
743 | |||
744 | if (VMX(v, timer_check)) { | ||
745 | VMX(v, timer_check) = 0; | ||
746 | if (VMX(v, itc_check)) { | ||
747 | if (vcpu_get_itc(v) > VCPU(v, itm)) { | ||
748 | if (!(VCPU(v, itv) & (1 << 16))) { | ||
749 | vcpu_pend_interrupt(v, VCPU(v, itv) | ||
750 | & 0xff); | ||
751 | VMX(v, itc_check) = 0; | ||
752 | } else { | ||
753 | v->arch.timer_pending = 1; | ||
754 | } | ||
755 | VMX(v, last_itc) = VCPU(v, itm) + 1; | ||
756 | } | ||
757 | } | ||
758 | } | ||
759 | |||
760 | rmb(); | ||
761 | if (v->arch.irq_new_pending) { | ||
762 | v->arch.irq_new_pending = 0; | ||
763 | VMX(v, irq_check) = 0; | ||
764 | check_pending_irq(v); | ||
765 | return; | ||
766 | } | ||
767 | if (VMX(v, irq_check)) { | ||
768 | VMX(v, irq_check) = 0; | ||
769 | vhpi_detection(v); | ||
770 | } | ||
771 | } | ||
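The timer block above is the guest interval-timer contract in miniature: the virtual ITC (interval time counter) is compared against ITM (the match register), and once ITC has passed ITM, the vector held in the low byte of ITV is injected, unless ITV bit 16, the mask bit, is set, in which case the expiry is only recorded as pending. A minimal sketch of that decision with illustrative names (these are not the kernel's accessors):

    #include <stdio.h>
    #include <stdint.h>

    #define ITV_MASK_BIT    (1ULL << 16)    /* itv.m: timer interrupt masked */

    /* Standalone restatement of the timer check in leave_hypervisor_tail(). */
    static int timer_vector_to_inject(uint64_t itc, uint64_t itm, uint64_t itv)
    {
            if (itc <= itm)
                    return -1;              /* not expired yet */
            if (itv & ITV_MASK_BIT)
                    return -2;              /* expired but masked: stays pending */
            return (int)(itv & 0xff);       /* vector number to inject */
    }

    int main(void)
    {
            printf("%d\n", timer_vector_to_inject(101, 100, 0xef));      /* 239 */
            printf("%d\n", timer_vector_to_inject(101, 100,
                                                  0xef | ITV_MASK_BIT)); /* -2 */
            return 0;
    }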
772 | |||
773 | static inline void handle_lds(struct kvm_pt_regs *regs) | ||
774 | { | ||
775 | regs->cr_ipsr |= IA64_PSR_ED; | ||
776 | } | ||
777 | |||
778 | void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type) | ||
779 | { | ||
780 | unsigned long pte; | ||
781 | union ia64_rr rr; | ||
782 | |||
783 | rr.val = ia64_get_rr(vadr); | ||
784 | pte = vadr & _PAGE_PPN_MASK; | ||
785 | pte = pte | PHY_PAGE_WB; | ||
786 | thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type); | ||
787 | return; | ||
788 | } | ||
789 | |||
790 | void kvm_page_fault(u64 vadr, u64 vec, struct kvm_pt_regs *regs) | ||
791 | { | ||
792 | unsigned long vpsr; | ||
793 | int type; | ||
794 | |||
795 | u64 vhpt_adr, gppa, pteval, rr, itir; | ||
796 | union ia64_isr misr; | ||
797 | union ia64_pta vpta; | ||
798 | struct thash_data *data; | ||
799 | struct kvm_vcpu *v = current_vcpu; | ||
800 | |||
801 | vpsr = VCPU(v, vpsr); | ||
802 | misr.val = VMX(v, cr_isr); | ||
803 | |||
804 | type = vec; | ||
805 | |||
806 | if (is_physical_mode(v) && (!(vadr << 1 >> 62))) { | ||
807 | if (vec == 2) { | ||
808 | if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) { | ||
809 | emulate_io_inst(v, ((vadr << 1) >> 1), 4); | ||
810 | return; | ||
811 | } | ||
812 | } | ||
813 | physical_tlb_miss(v, vadr, type); | ||
814 | return; | ||
815 | } | ||
816 | data = vtlb_lookup(v, vadr, type); | ||
817 | if (data != NULL) { | ||
818 | if (type == D_TLB) { | ||
819 | gppa = (vadr & ((1UL << data->ps) - 1)) | ||
820 | + (data->ppn >> (data->ps - 12) << data->ps); | ||
821 | if (__gpfn_is_io(gppa >> PAGE_SHIFT)) { | ||
822 | if (data->pl >= ((regs->cr_ipsr >> | ||
823 | IA64_PSR_CPL0_BIT) & 3)) | ||
824 | emulate_io_inst(v, gppa, data->ma); | ||
825 | else { | ||
826 | vcpu_set_isr(v, misr.val); | ||
827 | data_access_rights(v, vadr); | ||
828 | } | ||
829 | return; | ||
830 | } | ||
831 | } | ||
832 | thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type); | ||
833 | |||
834 | } else if (type == D_TLB) { | ||
835 | if (misr.sp) { | ||
836 | handle_lds(regs); | ||
837 | return; | ||
838 | } | ||
839 | |||
840 | rr = vcpu_get_rr(v, vadr); | ||
841 | itir = rr & (RR_RID_MASK | RR_PS_MASK); | ||
842 | |||
843 | if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) { | ||
844 | if (vpsr & IA64_PSR_IC) { | ||
845 | vcpu_set_isr(v, misr.val); | ||
846 | alt_dtlb(v, vadr); | ||
847 | } else { | ||
848 | nested_dtlb(v); | ||
849 | } | ||
850 | return; | ||
851 | } | ||
852 | |||
853 | vpta.val = vcpu_get_pta(v); | ||
854 | /* avoid recursively walking (short format) VHPT */ | ||
855 | |||
856 | vhpt_adr = vcpu_thash(v, vadr); | ||
857 | if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { | ||
858 | /* VHPT successfully read. */ | ||
859 | if (!(pteval & _PAGE_P)) { | ||
860 | if (vpsr & IA64_PSR_IC) { | ||
861 | vcpu_set_isr(v, misr.val); | ||
862 | dtlb_fault(v, vadr); | ||
863 | } else { | ||
864 | nested_dtlb(v); | ||
865 | } | ||
866 | } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) { | ||
867 | thash_purge_and_insert(v, pteval, itir, | ||
868 | vadr, D_TLB); | ||
869 | } else if (vpsr & IA64_PSR_IC) { | ||
870 | vcpu_set_isr(v, misr.val); | ||
871 | dtlb_fault(v, vadr); | ||
872 | } else { | ||
873 | nested_dtlb(v); | ||
874 | } | ||
875 | } else { | ||
876 | /* Can't read VHPT. */ | ||
877 | if (vpsr & IA64_PSR_IC) { | ||
878 | vcpu_set_isr(v, misr.val); | ||
879 | dvhpt_fault(v, vadr); | ||
880 | } else { | ||
881 | nested_dtlb(v); | ||
882 | } | ||
883 | } | ||
884 | } else if (type == I_TLB) { | ||
885 | if (!(vpsr & IA64_PSR_IC)) | ||
886 | misr.ni = 1; | ||
887 | if (!vhpt_enabled(v, vadr, INST_REF)) { | ||
888 | vcpu_set_isr(v, misr.val); | ||
889 | alt_itlb(v, vadr); | ||
890 | return; | ||
891 | } | ||
892 | |||
893 | vpta.val = vcpu_get_pta(v); | ||
894 | |||
895 | vhpt_adr = vcpu_thash(v, vadr); | ||
896 | if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { | ||
897 | /* VHPT successfully read. */ | ||
898 | if (pteval & _PAGE_P) { | ||
899 | if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) { | ||
900 | vcpu_set_isr(v, misr.val); | ||
901 | itlb_fault(v, vadr); | ||
902 | return; | ||
903 | } | ||
904 | rr = vcpu_get_rr(v, vadr); | ||
905 | itir = rr & (RR_RID_MASK | RR_PS_MASK); | ||
906 | thash_purge_and_insert(v, pteval, itir, | ||
907 | vadr, I_TLB); | ||
908 | } else { | ||
909 | vcpu_set_isr(v, misr.val); | ||
910 | inst_page_not_present(v, vadr); | ||
911 | } | ||
912 | } else { | ||
913 | vcpu_set_isr(v, misr.val); | ||
914 | ivhpt_fault(v, vadr); | ||
915 | } | ||
916 | } | ||
917 | } | ||
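kvm_page_fault() is the densest function in this file, so a compressed reading aid may help; this restates the branch order of the code above and adds no behavior:

    /*
     * kvm_page_fault(vadr, vec) decision tree:
     *
     *   guest in physical mode and vadr in a low region?
     *       vec == 2 and the gpfn is I/O space -> emulate_io_inst()
     *       otherwise                          -> physical_tlb_miss()
     *   hit in the software VTLB?
     *       D_TLB hit whose gppa is I/O space  -> emulate_io_inst(), or a
     *                                             data-access-rights fault if
     *                                             the privilege check fails
     *       otherwise                          -> thash_vhpt_insert()
     *   D_TLB miss:
     *       speculative load (isr.sp)          -> handle_lds(): set ipsr.ed
     *       guest VHPT walker disabled         -> alt_dtlb() / nested_dtlb()
     *       guest VHPT readable                -> dtlb_fault(), insert, or
     *                                             nested_dtlb(), by pte bits
     *       guest VHPT unreadable              -> dvhpt_fault() / nested_dtlb()
     *   I_TLB miss: the same shape, using alt_itlb(), itlb_fault(),
     *   inst_page_not_present() and ivhpt_fault().
     */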
918 | |||
919 | void kvm_vexirq(struct kvm_vcpu *vcpu) | ||
920 | { | ||
921 | u64 vpsr, isr; | ||
922 | struct kvm_pt_regs *regs; | ||
923 | |||
924 | regs = vcpu_regs(vcpu); | ||
925 | vpsr = VCPU(vcpu, vpsr); | ||
926 | isr = vpsr & IA64_PSR_RI; | ||
927 | reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/ | ||
928 | } | ||
929 | |||
930 | void kvm_ia64_handle_irq(struct kvm_vcpu *v) | ||
931 | { | ||
932 | struct exit_ctl_data *p = &v->arch.exit_data; | ||
933 | long psr; | ||
934 | |||
935 | local_irq_save(psr); | ||
936 | p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT; | ||
937 | vmm_transition(v); | ||
938 | local_irq_restore(psr); | ||
939 | |||
940 | VMX(v, timer_check) = 1; | ||
942 | } | ||
943 | |||
944 | static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos) | ||
945 | { | ||
946 | u64 oldrid, moldrid, oldpsbits, vaddr; | ||
947 | struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos]; | ||
948 | vaddr = p->vaddr; | ||
949 | |||
950 | oldrid = VMX(v, vrr[0]); | ||
951 | VMX(v, vrr[0]) = p->rr; | ||
952 | oldpsbits = VMX(v, psbits[0]); | ||
953 | VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]); | ||
954 | moldrid = ia64_get_rr(0x0); | ||
955 | ia64_set_rr(0x0, vrrtomrr(p->rr)); | ||
956 | ia64_srlz_d(); | ||
957 | |||
958 | vaddr = PAGEALIGN(vaddr, p->ps); | ||
959 | thash_purge_entries_remote(v, vaddr, p->ps); | ||
960 | |||
961 | VMX(v, vrr[0]) = oldrid; | ||
962 | VMX(v, psbits[0]) = oldpsbits; | ||
963 | ia64_set_rr(0x0, moldrid); | ||
964 | ia64_dv_serialize_data(); | ||
965 | } | ||
966 | |||
967 | static void vcpu_do_resume(struct kvm_vcpu *vcpu) | ||
968 | { | ||
969 | /* Re-initialize the VHPT and VTLB once on resume */ | ||
970 | vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES; | ||
971 | thash_init(&vcpu->arch.vhpt, VHPT_SHIFT); | ||
972 | vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES; | ||
973 | thash_init(&vcpu->arch.vtlb, VTLB_SHIFT); | ||
974 | |||
975 | ia64_set_pta(vcpu->arch.vhpt.pta.val); | ||
976 | } | ||
977 | |||
978 | static void vmm_sanity_check(struct kvm_vcpu *vcpu) | ||
979 | { | ||
980 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
981 | |||
982 | if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) { | ||
983 | panic_vm(vcpu, "VMM sanity check failed; " | ||
984 | "it may be caused by a crashed VMM!\n"); | ||
985 | } | ||
986 | } | ||
987 | |||
988 | static void kvm_do_resume_op(struct kvm_vcpu *vcpu) | ||
989 | { | ||
990 | vmm_sanity_check(vcpu); /* Guarantee the vcpu is running on a healthy VMM */ | ||
991 | |||
992 | if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { | ||
993 | vcpu_do_resume(vcpu); | ||
994 | return; | ||
995 | } | ||
996 | |||
997 | if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) { | ||
998 | thash_purge_all(vcpu); | ||
999 | return; | ||
1000 | } | ||
1001 | |||
1002 | if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) { | ||
1003 | while (vcpu->arch.ptc_g_count > 0) | ||
1004 | ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count); | ||
1005 | } | ||
1006 | } | ||
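The three branches in kvm_do_resume_op() are instances of the usual KVM request-bit pattern: another context sets a bit in vcpu->requests, and the vcpu consumes it with an atomic test-and-clear on its way back toward the guest, so each request is handled exactly once. A generic standalone sketch of the consuming side (illustrative only; the kernel uses test_and_clear_bit()):

    #include <stdio.h>
    #include <stdatomic.h>

    /* Atomically clear one request bit and report whether it was pending. */
    static int consume_request(atomic_ulong *requests, int bit)
    {
            unsigned long mask = 1UL << bit;

            return (atomic_fetch_and(requests, ~mask) & mask) != 0;
    }

    int main(void)
    {
            atomic_ulong requests = 1UL << 3;       /* one request pending */

            printf("%d %d\n", consume_request(&requests, 3),
                              consume_request(&requests, 3));   /* 1 0 */
            return 0;
    }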
1007 | |||
1008 | void vmm_transition(struct kvm_vcpu *vcpu) | ||
1009 | { | ||
1010 | ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd, | ||
1011 | 1, 0, 0, 0, 0, 0); | ||
1012 | vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host); | ||
1013 | ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd, | ||
1014 | 1, 0, 0, 0, 0, 0); | ||
1015 | kvm_do_resume_op(vcpu); | ||
1016 | } | ||
1017 | |||
1018 | void vmm_panic_handler(u64 vec) | ||
1019 | { | ||
1020 | struct kvm_vcpu *vcpu = current_vcpu; | ||
1021 | vmm_sanity = 0; | ||
1022 | panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n", | ||
1023 | vec2off[vec]); | ||
1024 | } | ||
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
deleted file mode 100644
index 30897d44d61e..000000000000
--- a/arch/ia64/kvm/trampoline.S
+++ /dev/null
@@ -1,1038 +0,0 @@
1 | /* Save all processor states | ||
2 | * | ||
3 | * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com> | ||
4 | * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com> | ||
5 | */ | ||
6 | |||
7 | #include <asm/asmmacro.h> | ||
8 | #include "asm-offsets.h" | ||
9 | |||
10 | |||
11 | #define CTX(name) VMM_CTX_##name##_OFFSET | ||
12 | |||
13 | /* | ||
14 | * r32: context_t base address | ||
15 | */ | ||
16 | #define SAVE_BRANCH_REGS \ | ||
17 | add r2 = CTX(B0),r32; \ | ||
18 | add r3 = CTX(B1),r32; \ | ||
19 | mov r16 = b0; \ | ||
20 | mov r17 = b1; \ | ||
21 | ;; \ | ||
22 | st8 [r2]=r16,16; \ | ||
23 | st8 [r3]=r17,16; \ | ||
24 | ;; \ | ||
25 | mov r16 = b2; \ | ||
26 | mov r17 = b3; \ | ||
27 | ;; \ | ||
28 | st8 [r2]=r16,16; \ | ||
29 | st8 [r3]=r17,16; \ | ||
30 | ;; \ | ||
31 | mov r16 = b4; \ | ||
32 | mov r17 = b5; \ | ||
33 | ;; \ | ||
34 | st8 [r2]=r16; \ | ||
35 | st8 [r3]=r17; \ | ||
36 | ;; | ||
37 | |||
38 | /* | ||
39 | * r33: context_t base address | ||
40 | */ | ||
41 | #define RESTORE_BRANCH_REGS \ | ||
42 | add r2 = CTX(B0),r33; \ | ||
43 | add r3 = CTX(B1),r33; \ | ||
44 | ;; \ | ||
45 | ld8 r16=[r2],16; \ | ||
46 | ld8 r17=[r3],16; \ | ||
47 | ;; \ | ||
48 | mov b0 = r16; \ | ||
49 | mov b1 = r17; \ | ||
50 | ;; \ | ||
51 | ld8 r16=[r2],16; \ | ||
52 | ld8 r17=[r3],16; \ | ||
53 | ;; \ | ||
54 | mov b2 = r16; \ | ||
55 | mov b3 = r17; \ | ||
56 | ;; \ | ||
57 | ld8 r16=[r2]; \ | ||
58 | ld8 r17=[r3]; \ | ||
59 | ;; \ | ||
60 | mov b4=r16; \ | ||
61 | mov b5=r17; \ | ||
62 | ;; | ||
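A pattern worth noting in these save/restore macros: two address registers, r2 and r3, walk the even and odd 8-byte slots of the context area in parallel, with each st8/ld8 post-incrementing its pointer by 16, so every instruction group moves two registers with no address dependency between them. In C terms the only layout assumption is an array of adjacent slots; a sketch with an illustrative struct (not the real context_t):

    #include <stdint.h>

    /* Illustrative slice of a save area: b0..b5 in adjacent 8-byte slots. */
    struct branch_ctx {
            uint64_t b[6];
    };

    static void save_pairwise(struct branch_ctx *ctx, const uint64_t src[6])
    {
            uint64_t *even = &ctx->b[0];                /* plays the role of r2 */
            uint64_t *odd  = &ctx->b[1];                /* plays the role of r3 */

            for (int i = 0; i < 3; i++) {
                    *even = src[2 * i];     even += 2;  /* st8 [r2]=...,16 */
                    *odd  = src[2 * i + 1]; odd  += 2;  /* st8 [r3]=...,16 */
            }
    }

    int main(void)
    {
            struct branch_ctx ctx;
            const uint64_t regs[6] = { 0, 1, 2, 3, 4, 5 };

            save_pairwise(&ctx, regs);
            return ctx.b[5] == 5 ? 0 : 1;
    }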
63 | |||
64 | |||
65 | /* | ||
66 | * r32: context_t base address | ||
67 | * bsw == 1 | ||
68 | * Save all bank1 general registers, r4 ~ r7 | ||
69 | */ | ||
70 | #define SAVE_GENERAL_REGS \ | ||
71 | add r2=CTX(R4),r32; \ | ||
72 | add r3=CTX(R5),r32; \ | ||
73 | ;; \ | ||
74 | .mem.offset 0,0; \ | ||
75 | st8.spill [r2]=r4,16; \ | ||
76 | .mem.offset 8,0; \ | ||
77 | st8.spill [r3]=r5,16; \ | ||
78 | ;; \ | ||
79 | .mem.offset 0,0; \ | ||
80 | st8.spill [r2]=r6,48; \ | ||
81 | .mem.offset 8,0; \ | ||
82 | st8.spill [r3]=r7,48; \ | ||
83 | ;; \ | ||
84 | .mem.offset 0,0; \ | ||
85 | st8.spill [r2]=r12; \ | ||
86 | .mem.offset 8,0; \ | ||
87 | st8.spill [r3]=r13; \ | ||
88 | ;; | ||
89 | |||
90 | /* | ||
91 | * r33: context_t base address | ||
92 | * bsw == 1 | ||
93 | */ | ||
94 | #define RESTORE_GENERAL_REGS \ | ||
95 | add r2=CTX(R4),r33; \ | ||
96 | add r3=CTX(R5),r33; \ | ||
97 | ;; \ | ||
98 | ld8.fill r4=[r2],16; \ | ||
99 | ld8.fill r5=[r3],16; \ | ||
100 | ;; \ | ||
101 | ld8.fill r6=[r2],48; \ | ||
102 | ld8.fill r7=[r3],48; \ | ||
103 | ;; \ | ||
104 | ld8.fill r12=[r2]; \ | ||
105 | ld8.fill r13 =[r3]; \ | ||
106 | ;; | ||
107 | |||
108 | |||
109 | |||
110 | |||
111 | /* | ||
112 | * r32: context_t base address | ||
113 | */ | ||
114 | #define SAVE_KERNEL_REGS \ | ||
115 | add r2 = CTX(KR0),r32; \ | ||
116 | add r3 = CTX(KR1),r32; \ | ||
117 | mov r16 = ar.k0; \ | ||
118 | mov r17 = ar.k1; \ | ||
119 | ;; \ | ||
120 | st8 [r2] = r16,16; \ | ||
121 | st8 [r3] = r17,16; \ | ||
122 | ;; \ | ||
123 | mov r16 = ar.k2; \ | ||
124 | mov r17 = ar.k3; \ | ||
125 | ;; \ | ||
126 | st8 [r2] = r16,16; \ | ||
127 | st8 [r3] = r17,16; \ | ||
128 | ;; \ | ||
129 | mov r16 = ar.k4; \ | ||
130 | mov r17 = ar.k5; \ | ||
131 | ;; \ | ||
132 | st8 [r2] = r16,16; \ | ||
133 | st8 [r3] = r17,16; \ | ||
134 | ;; \ | ||
135 | mov r16 = ar.k6; \ | ||
136 | mov r17 = ar.k7; \ | ||
137 | ;; \ | ||
138 | st8 [r2] = r16; \ | ||
139 | st8 [r3] = r17; \ | ||
140 | ;; | ||
141 | |||
142 | |||
143 | |||
144 | /* | ||
145 | * r33: context_t base address | ||
146 | */ | ||
147 | #define RESTORE_KERNEL_REGS \ | ||
148 | add r2 = CTX(KR0),r33; \ | ||
149 | add r3 = CTX(KR1),r33; \ | ||
150 | ;; \ | ||
151 | ld8 r16=[r2],16; \ | ||
152 | ld8 r17=[r3],16; \ | ||
153 | ;; \ | ||
154 | mov ar.k0=r16; \ | ||
155 | mov ar.k1=r17; \ | ||
156 | ;; \ | ||
157 | ld8 r16=[r2],16; \ | ||
158 | ld8 r17=[r3],16; \ | ||
159 | ;; \ | ||
160 | mov ar.k2=r16; \ | ||
161 | mov ar.k3=r17; \ | ||
162 | ;; \ | ||
163 | ld8 r16=[r2],16; \ | ||
164 | ld8 r17=[r3],16; \ | ||
165 | ;; \ | ||
166 | mov ar.k4=r16; \ | ||
167 | mov ar.k5=r17; \ | ||
168 | ;; \ | ||
169 | ld8 r16=[r2],16; \ | ||
170 | ld8 r17=[r3],16; \ | ||
171 | ;; \ | ||
172 | mov ar.k6=r16; \ | ||
173 | mov ar.k7=r17; \ | ||
174 | ;; | ||
175 | |||
176 | |||
177 | |||
178 | /* | ||
179 | * r32: context_t base address | ||
180 | */ | ||
181 | #define SAVE_APP_REGS \ | ||
182 | add r2 = CTX(BSPSTORE),r32; \ | ||
183 | mov r16 = ar.bspstore; \ | ||
184 | ;; \ | ||
185 | st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\ | ||
186 | mov r16 = ar.rnat; \ | ||
187 | ;; \ | ||
188 | st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \ | ||
189 | mov r16 = ar.fcr; \ | ||
190 | ;; \ | ||
191 | st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \ | ||
192 | mov r16 = ar.eflag; \ | ||
193 | ;; \ | ||
194 | st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \ | ||
195 | mov r16 = ar.cflg; \ | ||
196 | ;; \ | ||
197 | st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \ | ||
198 | mov r16 = ar.fsr; \ | ||
199 | ;; \ | ||
200 | st8 [r2] = r16,CTX(FIR)-CTX(FSR); \ | ||
201 | mov r16 = ar.fir; \ | ||
202 | ;; \ | ||
203 | st8 [r2] = r16,CTX(FDR)-CTX(FIR); \ | ||
204 | mov r16 = ar.fdr; \ | ||
205 | ;; \ | ||
206 | st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \ | ||
207 | mov r16 = ar.unat; \ | ||
208 | ;; \ | ||
209 | st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \ | ||
210 | mov r16 = ar.fpsr; \ | ||
211 | ;; \ | ||
212 | st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \ | ||
213 | mov r16 = ar.pfs; \ | ||
214 | ;; \ | ||
215 | st8 [r2] = r16,CTX(LC)-CTX(PFS); \ | ||
216 | mov r16 = ar.lc; \ | ||
217 | ;; \ | ||
218 | st8 [r2] = r16; \ | ||
219 | ;; | ||
220 | |||
221 | /* | ||
222 | * r33: context_t base address | ||
223 | */ | ||
224 | #define RESTORE_APP_REGS \ | ||
225 | add r2=CTX(BSPSTORE),r33; \ | ||
226 | ;; \ | ||
227 | ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \ | ||
228 | ;; \ | ||
229 | mov ar.bspstore=r16; \ | ||
230 | ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \ | ||
231 | ;; \ | ||
232 | mov ar.rnat=r16; \ | ||
233 | ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \ | ||
234 | ;; \ | ||
235 | mov ar.fcr=r16; \ | ||
236 | ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \ | ||
237 | ;; \ | ||
238 | mov ar.eflag=r16; \ | ||
239 | ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \ | ||
240 | ;; \ | ||
241 | mov ar.cflg=r16; \ | ||
242 | ld8 r16=[r2],CTX(FIR)-CTX(FSR); \ | ||
243 | ;; \ | ||
244 | mov ar.fsr=r16; \ | ||
245 | ld8 r16=[r2],CTX(FDR)-CTX(FIR); \ | ||
246 | ;; \ | ||
247 | mov ar.fir=r16; \ | ||
248 | ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \ | ||
249 | ;; \ | ||
250 | mov ar.fdr=r16; \ | ||
251 | ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \ | ||
252 | ;; \ | ||
253 | mov ar.unat=r16; \ | ||
254 | ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \ | ||
255 | ;; \ | ||
256 | mov ar.fpsr=r16; \ | ||
257 | ld8 r16=[r2],CTX(LC)-CTX(PFS); \ | ||
258 | ;; \ | ||
259 | mov ar.pfs=r16; \ | ||
260 | ld8 r16=[r2]; \ | ||
261 | ;; \ | ||
262 | mov ar.lc=r16; \ | ||
263 | ;; | ||
264 | |||
265 | /* | ||
266 | * r32: context_t base address | ||
267 | */ | ||
268 | #define SAVE_CTL_REGS \ | ||
269 | add r2 = CTX(DCR),r32; \ | ||
270 | mov r16 = cr.dcr; \ | ||
271 | ;; \ | ||
272 | st8 [r2] = r16,CTX(IVA)-CTX(DCR); \ | ||
273 | ;; \ | ||
274 | mov r16 = cr.iva; \ | ||
275 | ;; \ | ||
276 | st8 [r2] = r16,CTX(PTA)-CTX(IVA); \ | ||
277 | ;; \ | ||
278 | mov r16 = cr.pta; \ | ||
279 | ;; \ | ||
280 | st8 [r2] = r16 ; \ | ||
281 | ;; | ||
282 | |||
283 | /* | ||
284 | * r33: context_t base address | ||
285 | */ | ||
286 | #define RESTORE_CTL_REGS \ | ||
287 | add r2 = CTX(DCR),r33; \ | ||
288 | ;; \ | ||
289 | ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \ | ||
290 | ;; \ | ||
291 | mov cr.dcr = r16; \ | ||
292 | dv_serialize_data; \ | ||
293 | ;; \ | ||
294 | ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \ | ||
295 | ;; \ | ||
296 | mov cr.iva = r16; \ | ||
297 | dv_serialize_data; \ | ||
298 | ;; \ | ||
299 | ld8 r16 = [r2]; \ | ||
300 | ;; \ | ||
301 | mov cr.pta = r16; \ | ||
302 | dv_serialize_data; \ | ||
303 | ;; | ||
304 | |||
305 | |||
306 | /* | ||
307 | * r32: context_t base address | ||
308 | */ | ||
309 | #define SAVE_REGION_REGS \ | ||
310 | add r2=CTX(RR0),r32; \ | ||
311 | mov r16=rr[r0]; \ | ||
312 | dep.z r18=1,61,3; \ | ||
313 | ;; \ | ||
314 | st8 [r2]=r16,8; \ | ||
315 | mov r17=rr[r18]; \ | ||
316 | dep.z r18=2,61,3; \ | ||
317 | ;; \ | ||
318 | st8 [r2]=r17,8; \ | ||
319 | mov r16=rr[r18]; \ | ||
320 | dep.z r18=3,61,3; \ | ||
321 | ;; \ | ||
322 | st8 [r2]=r16,8; \ | ||
323 | mov r17=rr[r18]; \ | ||
324 | dep.z r18=4,61,3; \ | ||
325 | ;; \ | ||
326 | st8 [r2]=r17,8; \ | ||
327 | mov r16=rr[r18]; \ | ||
328 | dep.z r18=5,61,3; \ | ||
329 | ;; \ | ||
330 | st8 [r2]=r16,8; \ | ||
331 | mov r17=rr[r18]; \ | ||
332 | dep.z r18=7,61,3; \ | ||
333 | ;; \ | ||
334 | st8 [r2]=r17,16; \ | ||
335 | mov r16=rr[r18]; \ | ||
336 | ;; \ | ||
337 | st8 [r2]=r16,8; \ | ||
338 | ;; | ||
339 | |||
340 | /* | ||
341 | * r33:context_t base address | ||
342 | */ | ||
343 | #define RESTORE_REGION_REGS \ | ||
344 | add r2=CTX(RR0),r33;\ | ||
345 | mov r18=r0; \ | ||
346 | ;; \ | ||
347 | ld8 r20=[r2],8; \ | ||
348 | ;; /* rr0 */ \ | ||
349 | ld8 r21=[r2],8; \ | ||
350 | ;; /* rr1 */ \ | ||
351 | ld8 r22=[r2],8; \ | ||
352 | ;; /* rr2 */ \ | ||
353 | ld8 r23=[r2],8; \ | ||
354 | ;; /* rr3 */ \ | ||
355 | ld8 r24=[r2],8; \ | ||
356 | ;; /* rr4 */ \ | ||
357 | ld8 r25=[r2],16; \ | ||
358 | ;; /* rr5 */ \ | ||
359 | ld8 r27=[r2]; \ | ||
360 | ;; /* rr7 */ \ | ||
361 | mov rr[r18]=r20; \ | ||
362 | dep.z r18=1,61,3; \ | ||
363 | ;; /* rr1 */ \ | ||
364 | mov rr[r18]=r21; \ | ||
365 | dep.z r18=2,61,3; \ | ||
366 | ;; /* rr2 */ \ | ||
367 | mov rr[r18]=r22; \ | ||
368 | dep.z r18=3,61,3; \ | ||
369 | ;; /* rr3 */ \ | ||
370 | mov rr[r18]=r23; \ | ||
371 | dep.z r18=4,61,3; \ | ||
372 | ;; /* rr4 */ \ | ||
373 | mov rr[r18]=r24; \ | ||
374 | dep.z r18=5,61,3; \ | ||
375 | ;; /* rr5 */ \ | ||
376 | mov rr[r18]=r25; \ | ||
377 | dep.z r18=7,61,3; \ | ||
378 | ;; /* rr7 */ \ | ||
379 | mov rr[r18]=r27; \ | ||
380 | ;; \ | ||
381 | srlz.i; \ | ||
382 | ;; | ||
383 | |||
384 | |||
385 | |||
386 | /* | ||
387 | * r32: context_t base address | ||
388 | * r36~r39:scratch registers | ||
389 | */ | ||
390 | #define SAVE_DEBUG_REGS \ | ||
391 | add r2=CTX(IBR0),r32; \ | ||
392 | add r3=CTX(DBR0),r32; \ | ||
393 | mov r16=ibr[r0]; \ | ||
394 | mov r17=dbr[r0]; \ | ||
395 | ;; \ | ||
396 | st8 [r2]=r16,8; \ | ||
397 | st8 [r3]=r17,8; \ | ||
398 | add r18=1,r0; \ | ||
399 | ;; \ | ||
400 | mov r16=ibr[r18]; \ | ||
401 | mov r17=dbr[r18]; \ | ||
402 | ;; \ | ||
403 | st8 [r2]=r16,8; \ | ||
404 | st8 [r3]=r17,8; \ | ||
405 | add r18=2,r0; \ | ||
406 | ;; \ | ||
407 | mov r16=ibr[r18]; \ | ||
408 | mov r17=dbr[r18]; \ | ||
409 | ;; \ | ||
410 | st8 [r2]=r16,8; \ | ||
411 | st8 [r3]=r17,8; \ | ||
419 | add r18=3,r0; \ | ||
420 | ;; \ | ||
421 | mov r16=ibr[r18]; \ | ||
422 | mov r17=dbr[r18]; \ | ||
423 | ;; \ | ||
424 | st8 [r2]=r16,8; \ | ||
425 | st8 [r3]=r17,8; \ | ||
426 | add r18=4,r0; \ | ||
427 | ;; \ | ||
428 | mov r16=ibr[r18]; \ | ||
429 | mov r17=dbr[r18]; \ | ||
430 | ;; \ | ||
431 | st8 [r2]=r16,8; \ | ||
432 | st8 [r3]=r17,8; \ | ||
433 | add r18=5,r0; \ | ||
434 | ;; \ | ||
435 | mov r16=ibr[r18]; \ | ||
436 | mov r17=dbr[r18]; \ | ||
437 | ;; \ | ||
438 | st8 [r2]=r16,8; \ | ||
439 | st8 [r3]=r17,8; \ | ||
440 | add r18=6,r0; \ | ||
441 | ;; \ | ||
442 | mov r16=ibr[r18]; \ | ||
443 | mov r17=dbr[r18]; \ | ||
444 | ;; \ | ||
445 | st8 [r2]=r16,8; \ | ||
446 | st8 [r3]=r17,8; \ | ||
447 | add r18=7,r0; \ | ||
448 | ;; \ | ||
449 | mov r16=ibr[r18]; \ | ||
450 | mov r17=dbr[r18]; \ | ||
451 | ;; \ | ||
452 | st8 [r2]=r16,8; \ | ||
453 | st8 [r3]=r17,8; \ | ||
454 | ;; | ||
455 | |||
456 | |||
457 | /* | ||
458 | * r33: point to context_t structure | ||
459 | * ar.lc is clobbered. | ||
460 | */ | ||
461 | #define RESTORE_DEBUG_REGS \ | ||
462 | add r2=CTX(IBR0),r33; \ | ||
463 | add r3=CTX(DBR0),r33; \ | ||
464 | mov r16=7; \ | ||
465 | mov r17=r0; \ | ||
466 | ;; \ | ||
467 | mov ar.lc = r16; \ | ||
468 | ;; \ | ||
469 | 1: \ | ||
470 | ld8 r18=[r2],8; \ | ||
471 | ld8 r19=[r3],8; \ | ||
472 | ;; \ | ||
473 | mov ibr[r17]=r18; \ | ||
474 | mov dbr[r17]=r19; \ | ||
475 | ;; \ | ||
476 | srlz.i; \ | ||
477 | ;; \ | ||
478 | add r17=1,r17; \ | ||
479 | br.cloop.sptk 1b; \ | ||
480 | ;; | ||
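RESTORE_DEBUG_REGS above (and the SAVE_PTK_REGS/RESTORE_PTK_REGS macros further down) use the ia64 counted-loop idiom: ar.lc is loaded with the trip count minus one, and br.cloop branches back while decrementing it, so mov r16=7 yields eight iterations. The same loop in plain C, with stand-in setters for the mov-to-ibr/dbr instructions (hypothetical names, no-ops here):

    #include <stdint.h>

    /* Hypothetical stand-ins for "mov ibr[i]=..." / "mov dbr[i]=..." */
    static void set_ibr(int i, uint64_t v) { (void)i; (void)v; }
    static void set_dbr(int i, uint64_t v) { (void)i; (void)v; }

    static void restore_debug_regs(const uint64_t *ibr, const uint64_t *dbr)
    {
            /* ar.lc = 7 gives eight trips through the body: indices 0..7 */
            for (int i = 0; i <= 7; i++) {
                    set_ibr(i, ibr[i]);
                    set_dbr(i, dbr[i]);
            }
    }

    int main(void)
    {
            uint64_t ibr[8] = { 0 }, dbr[8] = { 0 };

            restore_debug_regs(ibr, dbr);
            return 0;
    }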
481 | |||
482 | |||
483 | /* | ||
484 | * r32: context_t base address | ||
485 | */ | ||
486 | #define SAVE_FPU_LOW \ | ||
487 | add r2=CTX(F2),r32; \ | ||
488 | add r3=CTX(F3),r32; \ | ||
489 | ;; \ | ||
490 | stf.spill.nta [r2]=f2,32; \ | ||
491 | stf.spill.nta [r3]=f3,32; \ | ||
492 | ;; \ | ||
493 | stf.spill.nta [r2]=f4,32; \ | ||
494 | stf.spill.nta [r3]=f5,32; \ | ||
495 | ;; \ | ||
496 | stf.spill.nta [r2]=f6,32; \ | ||
497 | stf.spill.nta [r3]=f7,32; \ | ||
498 | ;; \ | ||
499 | stf.spill.nta [r2]=f8,32; \ | ||
500 | stf.spill.nta [r3]=f9,32; \ | ||
501 | ;; \ | ||
502 | stf.spill.nta [r2]=f10,32; \ | ||
503 | stf.spill.nta [r3]=f11,32; \ | ||
504 | ;; \ | ||
505 | stf.spill.nta [r2]=f12,32; \ | ||
506 | stf.spill.nta [r3]=f13,32; \ | ||
507 | ;; \ | ||
508 | stf.spill.nta [r2]=f14,32; \ | ||
509 | stf.spill.nta [r3]=f15,32; \ | ||
510 | ;; \ | ||
511 | stf.spill.nta [r2]=f16,32; \ | ||
512 | stf.spill.nta [r3]=f17,32; \ | ||
513 | ;; \ | ||
514 | stf.spill.nta [r2]=f18,32; \ | ||
515 | stf.spill.nta [r3]=f19,32; \ | ||
516 | ;; \ | ||
517 | stf.spill.nta [r2]=f20,32; \ | ||
518 | stf.spill.nta [r3]=f21,32; \ | ||
519 | ;; \ | ||
520 | stf.spill.nta [r2]=f22,32; \ | ||
521 | stf.spill.nta [r3]=f23,32; \ | ||
522 | ;; \ | ||
523 | stf.spill.nta [r2]=f24,32; \ | ||
524 | stf.spill.nta [r3]=f25,32; \ | ||
525 | ;; \ | ||
526 | stf.spill.nta [r2]=f26,32; \ | ||
527 | stf.spill.nta [r3]=f27,32; \ | ||
528 | ;; \ | ||
529 | stf.spill.nta [r2]=f28,32; \ | ||
530 | stf.spill.nta [r3]=f29,32; \ | ||
531 | ;; \ | ||
532 | stf.spill.nta [r2]=f30; \ | ||
533 | stf.spill.nta [r3]=f31; \ | ||
534 | ;; | ||
535 | |||
536 | /* | ||
537 | * r32: context_t base address | ||
538 | */ | ||
539 | #define SAVE_FPU_HIGH \ | ||
540 | add r2=CTX(F32),r32; \ | ||
541 | add r3=CTX(F33),r32; \ | ||
542 | ;; \ | ||
543 | stf.spill.nta [r2]=f32,32; \ | ||
544 | stf.spill.nta [r3]=f33,32; \ | ||
545 | ;; \ | ||
546 | stf.spill.nta [r2]=f34,32; \ | ||
547 | stf.spill.nta [r3]=f35,32; \ | ||
548 | ;; \ | ||
549 | stf.spill.nta [r2]=f36,32; \ | ||
550 | stf.spill.nta [r3]=f37,32; \ | ||
551 | ;; \ | ||
552 | stf.spill.nta [r2]=f38,32; \ | ||
553 | stf.spill.nta [r3]=f39,32; \ | ||
554 | ;; \ | ||
555 | stf.spill.nta [r2]=f40,32; \ | ||
556 | stf.spill.nta [r3]=f41,32; \ | ||
557 | ;; \ | ||
558 | stf.spill.nta [r2]=f42,32; \ | ||
559 | stf.spill.nta [r3]=f43,32; \ | ||
560 | ;; \ | ||
561 | stf.spill.nta [r2]=f44,32; \ | ||
562 | stf.spill.nta [r3]=f45,32; \ | ||
563 | ;; \ | ||
564 | stf.spill.nta [r2]=f46,32; \ | ||
565 | stf.spill.nta [r3]=f47,32; \ | ||
566 | ;; \ | ||
567 | stf.spill.nta [r2]=f48,32; \ | ||
568 | stf.spill.nta [r3]=f49,32; \ | ||
569 | ;; \ | ||
570 | stf.spill.nta [r2]=f50,32; \ | ||
571 | stf.spill.nta [r3]=f51,32; \ | ||
572 | ;; \ | ||
573 | stf.spill.nta [r2]=f52,32; \ | ||
574 | stf.spill.nta [r3]=f53,32; \ | ||
575 | ;; \ | ||
576 | stf.spill.nta [r2]=f54,32; \ | ||
577 | stf.spill.nta [r3]=f55,32; \ | ||
578 | ;; \ | ||
579 | stf.spill.nta [r2]=f56,32; \ | ||
580 | stf.spill.nta [r3]=f57,32; \ | ||
581 | ;; \ | ||
582 | stf.spill.nta [r2]=f58,32; \ | ||
583 | stf.spill.nta [r3]=f59,32; \ | ||
584 | ;; \ | ||
585 | stf.spill.nta [r2]=f60,32; \ | ||
586 | stf.spill.nta [r3]=f61,32; \ | ||
587 | ;; \ | ||
588 | stf.spill.nta [r2]=f62,32; \ | ||
589 | stf.spill.nta [r3]=f63,32; \ | ||
590 | ;; \ | ||
591 | stf.spill.nta [r2]=f64,32; \ | ||
592 | stf.spill.nta [r3]=f65,32; \ | ||
593 | ;; \ | ||
594 | stf.spill.nta [r2]=f66,32; \ | ||
595 | stf.spill.nta [r3]=f67,32; \ | ||
596 | ;; \ | ||
597 | stf.spill.nta [r2]=f68,32; \ | ||
598 | stf.spill.nta [r3]=f69,32; \ | ||
599 | ;; \ | ||
600 | stf.spill.nta [r2]=f70,32; \ | ||
601 | stf.spill.nta [r3]=f71,32; \ | ||
602 | ;; \ | ||
603 | stf.spill.nta [r2]=f72,32; \ | ||
604 | stf.spill.nta [r3]=f73,32; \ | ||
605 | ;; \ | ||
606 | stf.spill.nta [r2]=f74,32; \ | ||
607 | stf.spill.nta [r3]=f75,32; \ | ||
608 | ;; \ | ||
609 | stf.spill.nta [r2]=f76,32; \ | ||
610 | stf.spill.nta [r3]=f77,32; \ | ||
611 | ;; \ | ||
612 | stf.spill.nta [r2]=f78,32; \ | ||
613 | stf.spill.nta [r3]=f79,32; \ | ||
614 | ;; \ | ||
615 | stf.spill.nta [r2]=f80,32; \ | ||
616 | stf.spill.nta [r3]=f81,32; \ | ||
617 | ;; \ | ||
618 | stf.spill.nta [r2]=f82,32; \ | ||
619 | stf.spill.nta [r3]=f83,32; \ | ||
620 | ;; \ | ||
621 | stf.spill.nta [r2]=f84,32; \ | ||
622 | stf.spill.nta [r3]=f85,32; \ | ||
623 | ;; \ | ||
624 | stf.spill.nta [r2]=f86,32; \ | ||
625 | stf.spill.nta [r3]=f87,32; \ | ||
626 | ;; \ | ||
627 | stf.spill.nta [r2]=f88,32; \ | ||
628 | stf.spill.nta [r3]=f89,32; \ | ||
629 | ;; \ | ||
630 | stf.spill.nta [r2]=f90,32; \ | ||
631 | stf.spill.nta [r3]=f91,32; \ | ||
632 | ;; \ | ||
633 | stf.spill.nta [r2]=f92,32; \ | ||
634 | stf.spill.nta [r3]=f93,32; \ | ||
635 | ;; \ | ||
636 | stf.spill.nta [r2]=f94,32; \ | ||
637 | stf.spill.nta [r3]=f95,32; \ | ||
638 | ;; \ | ||
639 | stf.spill.nta [r2]=f96,32; \ | ||
640 | stf.spill.nta [r3]=f97,32; \ | ||
641 | ;; \ | ||
642 | stf.spill.nta [r2]=f98,32; \ | ||
643 | stf.spill.nta [r3]=f99,32; \ | ||
644 | ;; \ | ||
645 | stf.spill.nta [r2]=f100,32; \ | ||
646 | stf.spill.nta [r3]=f101,32; \ | ||
647 | ;; \ | ||
648 | stf.spill.nta [r2]=f102,32; \ | ||
649 | stf.spill.nta [r3]=f103,32; \ | ||
650 | ;; \ | ||
651 | stf.spill.nta [r2]=f104,32; \ | ||
652 | stf.spill.nta [r3]=f105,32; \ | ||
653 | ;; \ | ||
654 | stf.spill.nta [r2]=f106,32; \ | ||
655 | stf.spill.nta [r3]=f107,32; \ | ||
656 | ;; \ | ||
657 | stf.spill.nta [r2]=f108,32; \ | ||
658 | stf.spill.nta [r3]=f109,32; \ | ||
659 | ;; \ | ||
660 | stf.spill.nta [r2]=f110,32; \ | ||
661 | stf.spill.nta [r3]=f111,32; \ | ||
662 | ;; \ | ||
663 | stf.spill.nta [r2]=f112,32; \ | ||
664 | stf.spill.nta [r3]=f113,32; \ | ||
665 | ;; \ | ||
666 | stf.spill.nta [r2]=f114,32; \ | ||
667 | stf.spill.nta [r3]=f115,32; \ | ||
668 | ;; \ | ||
669 | stf.spill.nta [r2]=f116,32; \ | ||
670 | stf.spill.nta [r3]=f117,32; \ | ||
671 | ;; \ | ||
672 | stf.spill.nta [r2]=f118,32; \ | ||
673 | stf.spill.nta [r3]=f119,32; \ | ||
674 | ;; \ | ||
675 | stf.spill.nta [r2]=f120,32; \ | ||
676 | stf.spill.nta [r3]=f121,32; \ | ||
677 | ;; \ | ||
678 | stf.spill.nta [r2]=f122,32; \ | ||
679 | stf.spill.nta [r3]=f123,32; \ | ||
680 | ;; \ | ||
681 | stf.spill.nta [r2]=f124,32; \ | ||
682 | stf.spill.nta [r3]=f125,32; \ | ||
683 | ;; \ | ||
684 | stf.spill.nta [r2]=f126; \ | ||
685 | stf.spill.nta [r3]=f127; \ | ||
686 | ;; | ||
687 | |||
688 | /* | ||
689 | * r33: point to context_t structure | ||
690 | */ | ||
691 | #define RESTORE_FPU_LOW \ | ||
692 | add r2 = CTX(F2), r33; \ | ||
693 | add r3 = CTX(F3), r33; \ | ||
694 | ;; \ | ||
695 | ldf.fill.nta f2 = [r2], 32; \ | ||
696 | ldf.fill.nta f3 = [r3], 32; \ | ||
697 | ;; \ | ||
698 | ldf.fill.nta f4 = [r2], 32; \ | ||
699 | ldf.fill.nta f5 = [r3], 32; \ | ||
700 | ;; \ | ||
701 | ldf.fill.nta f6 = [r2], 32; \ | ||
702 | ldf.fill.nta f7 = [r3], 32; \ | ||
703 | ;; \ | ||
704 | ldf.fill.nta f8 = [r2], 32; \ | ||
705 | ldf.fill.nta f9 = [r3], 32; \ | ||
706 | ;; \ | ||
707 | ldf.fill.nta f10 = [r2], 32; \ | ||
708 | ldf.fill.nta f11 = [r3], 32; \ | ||
709 | ;; \ | ||
710 | ldf.fill.nta f12 = [r2], 32; \ | ||
711 | ldf.fill.nta f13 = [r3], 32; \ | ||
712 | ;; \ | ||
713 | ldf.fill.nta f14 = [r2], 32; \ | ||
714 | ldf.fill.nta f15 = [r3], 32; \ | ||
715 | ;; \ | ||
716 | ldf.fill.nta f16 = [r2], 32; \ | ||
717 | ldf.fill.nta f17 = [r3], 32; \ | ||
718 | ;; \ | ||
719 | ldf.fill.nta f18 = [r2], 32; \ | ||
720 | ldf.fill.nta f19 = [r3], 32; \ | ||
721 | ;; \ | ||
722 | ldf.fill.nta f20 = [r2], 32; \ | ||
723 | ldf.fill.nta f21 = [r3], 32; \ | ||
724 | ;; \ | ||
725 | ldf.fill.nta f22 = [r2], 32; \ | ||
726 | ldf.fill.nta f23 = [r3], 32; \ | ||
727 | ;; \ | ||
728 | ldf.fill.nta f24 = [r2], 32; \ | ||
729 | ldf.fill.nta f25 = [r3], 32; \ | ||
730 | ;; \ | ||
731 | ldf.fill.nta f26 = [r2], 32; \ | ||
732 | ldf.fill.nta f27 = [r3], 32; \ | ||
733 | ;; \ | ||
734 | ldf.fill.nta f28 = [r2], 32; \ | ||
735 | ldf.fill.nta f29 = [r3], 32; \ | ||
736 | ;; \ | ||
737 | ldf.fill.nta f30 = [r2], 32; \ | ||
738 | ldf.fill.nta f31 = [r3], 32; \ | ||
739 | ;; | ||
740 | |||
741 | |||
742 | |||
743 | /* | ||
744 | * r33: point to context_t structure | ||
745 | */ | ||
746 | #define RESTORE_FPU_HIGH \ | ||
747 | add r2 = CTX(F32), r33; \ | ||
748 | add r3 = CTX(F33), r33; \ | ||
749 | ;; \ | ||
750 | ldf.fill.nta f32 = [r2], 32; \ | ||
751 | ldf.fill.nta f33 = [r3], 32; \ | ||
752 | ;; \ | ||
753 | ldf.fill.nta f34 = [r2], 32; \ | ||
754 | ldf.fill.nta f35 = [r3], 32; \ | ||
755 | ;; \ | ||
756 | ldf.fill.nta f36 = [r2], 32; \ | ||
757 | ldf.fill.nta f37 = [r3], 32; \ | ||
758 | ;; \ | ||
759 | ldf.fill.nta f38 = [r2], 32; \ | ||
760 | ldf.fill.nta f39 = [r3], 32; \ | ||
761 | ;; \ | ||
762 | ldf.fill.nta f40 = [r2], 32; \ | ||
763 | ldf.fill.nta f41 = [r3], 32; \ | ||
764 | ;; \ | ||
765 | ldf.fill.nta f42 = [r2], 32; \ | ||
766 | ldf.fill.nta f43 = [r3], 32; \ | ||
767 | ;; \ | ||
768 | ldf.fill.nta f44 = [r2], 32; \ | ||
769 | ldf.fill.nta f45 = [r3], 32; \ | ||
770 | ;; \ | ||
771 | ldf.fill.nta f46 = [r2], 32; \ | ||
772 | ldf.fill.nta f47 = [r3], 32; \ | ||
773 | ;; \ | ||
774 | ldf.fill.nta f48 = [r2], 32; \ | ||
775 | ldf.fill.nta f49 = [r3], 32; \ | ||
776 | ;; \ | ||
777 | ldf.fill.nta f50 = [r2], 32; \ | ||
778 | ldf.fill.nta f51 = [r3], 32; \ | ||
779 | ;; \ | ||
780 | ldf.fill.nta f52 = [r2], 32; \ | ||
781 | ldf.fill.nta f53 = [r3], 32; \ | ||
782 | ;; \ | ||
783 | ldf.fill.nta f54 = [r2], 32; \ | ||
784 | ldf.fill.nta f55 = [r3], 32; \ | ||
785 | ;; \ | ||
786 | ldf.fill.nta f56 = [r2], 32; \ | ||
787 | ldf.fill.nta f57 = [r3], 32; \ | ||
788 | ;; \ | ||
789 | ldf.fill.nta f58 = [r2], 32; \ | ||
790 | ldf.fill.nta f59 = [r3], 32; \ | ||
791 | ;; \ | ||
792 | ldf.fill.nta f60 = [r2], 32; \ | ||
793 | ldf.fill.nta f61 = [r3], 32; \ | ||
794 | ;; \ | ||
795 | ldf.fill.nta f62 = [r2], 32; \ | ||
796 | ldf.fill.nta f63 = [r3], 32; \ | ||
797 | ;; \ | ||
798 | ldf.fill.nta f64 = [r2], 32; \ | ||
799 | ldf.fill.nta f65 = [r3], 32; \ | ||
800 | ;; \ | ||
801 | ldf.fill.nta f66 = [r2], 32; \ | ||
802 | ldf.fill.nta f67 = [r3], 32; \ | ||
803 | ;; \ | ||
804 | ldf.fill.nta f68 = [r2], 32; \ | ||
805 | ldf.fill.nta f69 = [r3], 32; \ | ||
806 | ;; \ | ||
807 | ldf.fill.nta f70 = [r2], 32; \ | ||
808 | ldf.fill.nta f71 = [r3], 32; \ | ||
809 | ;; \ | ||
810 | ldf.fill.nta f72 = [r2], 32; \ | ||
811 | ldf.fill.nta f73 = [r3], 32; \ | ||
812 | ;; \ | ||
813 | ldf.fill.nta f74 = [r2], 32; \ | ||
814 | ldf.fill.nta f75 = [r3], 32; \ | ||
815 | ;; \ | ||
816 | ldf.fill.nta f76 = [r2], 32; \ | ||
817 | ldf.fill.nta f77 = [r3], 32; \ | ||
818 | ;; \ | ||
819 | ldf.fill.nta f78 = [r2], 32; \ | ||
820 | ldf.fill.nta f79 = [r3], 32; \ | ||
821 | ;; \ | ||
822 | ldf.fill.nta f80 = [r2], 32; \ | ||
823 | ldf.fill.nta f81 = [r3], 32; \ | ||
824 | ;; \ | ||
825 | ldf.fill.nta f82 = [r2], 32; \ | ||
826 | ldf.fill.nta f83 = [r3], 32; \ | ||
827 | ;; \ | ||
828 | ldf.fill.nta f84 = [r2], 32; \ | ||
829 | ldf.fill.nta f85 = [r3], 32; \ | ||
830 | ;; \ | ||
831 | ldf.fill.nta f86 = [r2], 32; \ | ||
832 | ldf.fill.nta f87 = [r3], 32; \ | ||
833 | ;; \ | ||
834 | ldf.fill.nta f88 = [r2], 32; \ | ||
835 | ldf.fill.nta f89 = [r3], 32; \ | ||
836 | ;; \ | ||
837 | ldf.fill.nta f90 = [r2], 32; \ | ||
838 | ldf.fill.nta f91 = [r3], 32; \ | ||
839 | ;; \ | ||
840 | ldf.fill.nta f92 = [r2], 32; \ | ||
841 | ldf.fill.nta f93 = [r3], 32; \ | ||
842 | ;; \ | ||
843 | ldf.fill.nta f94 = [r2], 32; \ | ||
844 | ldf.fill.nta f95 = [r3], 32; \ | ||
845 | ;; \ | ||
846 | ldf.fill.nta f96 = [r2], 32; \ | ||
847 | ldf.fill.nta f97 = [r3], 32; \ | ||
848 | ;; \ | ||
849 | ldf.fill.nta f98 = [r2], 32; \ | ||
850 | ldf.fill.nta f99 = [r3], 32; \ | ||
851 | ;; \ | ||
852 | ldf.fill.nta f100 = [r2], 32; \ | ||
853 | ldf.fill.nta f101 = [r3], 32; \ | ||
854 | ;; \ | ||
855 | ldf.fill.nta f102 = [r2], 32; \ | ||
856 | ldf.fill.nta f103 = [r3], 32; \ | ||
857 | ;; \ | ||
858 | ldf.fill.nta f104 = [r2], 32; \ | ||
859 | ldf.fill.nta f105 = [r3], 32; \ | ||
860 | ;; \ | ||
861 | ldf.fill.nta f106 = [r2], 32; \ | ||
862 | ldf.fill.nta f107 = [r3], 32; \ | ||
863 | ;; \ | ||
864 | ldf.fill.nta f108 = [r2], 32; \ | ||
865 | ldf.fill.nta f109 = [r3], 32; \ | ||
866 | ;; \ | ||
867 | ldf.fill.nta f110 = [r2], 32; \ | ||
868 | ldf.fill.nta f111 = [r3], 32; \ | ||
869 | ;; \ | ||
870 | ldf.fill.nta f112 = [r2], 32; \ | ||
871 | ldf.fill.nta f113 = [r3], 32; \ | ||
872 | ;; \ | ||
873 | ldf.fill.nta f114 = [r2], 32; \ | ||
874 | ldf.fill.nta f115 = [r3], 32; \ | ||
875 | ;; \ | ||
876 | ldf.fill.nta f116 = [r2], 32; \ | ||
877 | ldf.fill.nta f117 = [r3], 32; \ | ||
878 | ;; \ | ||
879 | ldf.fill.nta f118 = [r2], 32; \ | ||
880 | ldf.fill.nta f119 = [r3], 32; \ | ||
881 | ;; \ | ||
882 | ldf.fill.nta f120 = [r2], 32; \ | ||
883 | ldf.fill.nta f121 = [r3], 32; \ | ||
884 | ;; \ | ||
885 | ldf.fill.nta f122 = [r2], 32; \ | ||
886 | ldf.fill.nta f123 = [r3], 32; \ | ||
887 | ;; \ | ||
888 | ldf.fill.nta f124 = [r2], 32; \ | ||
889 | ldf.fill.nta f125 = [r3], 32; \ | ||
890 | ;; \ | ||
891 | ldf.fill.nta f126 = [r2], 32; \ | ||
892 | ldf.fill.nta f127 = [r3], 32; \ | ||
893 | ;; | ||
894 | |||
895 | /* | ||
896 | * r32: context_t base address | ||
897 | */ | ||
898 | #define SAVE_PTK_REGS \ | ||
899 | add r2=CTX(PKR0), r32; \ | ||
900 | mov r16=7; \ | ||
901 | ;; \ | ||
902 | mov ar.lc=r16; \ | ||
903 | mov r17=r0; \ | ||
904 | ;; \ | ||
905 | 1: \ | ||
906 | mov r18=pkr[r17]; \ | ||
907 | ;; \ | ||
908 | srlz.i; \ | ||
909 | ;; \ | ||
910 | st8 [r2]=r18, 8; \ | ||
911 | ;; \ | ||
912 | add r17 =1,r17; \ | ||
913 | ;; \ | ||
914 | br.cloop.sptk 1b; \ | ||
915 | ;; | ||
916 | |||
917 | /* | ||
918 | * r33: point to context_t structure | ||
919 | * ar.lc is clobbered. | ||
920 | */ | ||
921 | #define RESTORE_PTK_REGS \ | ||
922 | add r2=CTX(PKR0), r33; \ | ||
923 | mov r16=7; \ | ||
924 | ;; \ | ||
925 | mov ar.lc=r16; \ | ||
926 | mov r17=r0; \ | ||
927 | ;; \ | ||
928 | 1: \ | ||
929 | ld8 r18=[r2], 8; \ | ||
930 | ;; \ | ||
931 | mov pkr[r17]=r18; \ | ||
932 | ;; \ | ||
933 | srlz.i; \ | ||
934 | ;; \ | ||
935 | add r17 =1,r17; \ | ||
936 | ;; \ | ||
937 | br.cloop.sptk 1b; \ | ||
938 | ;; | ||
939 | |||
940 | |||
941 | /* | ||
942 | * void vmm_trampoline( context_t * from, | ||
943 | * context_t * to) | ||
944 | * | ||
945 | * from: r32 | ||
946 | * to: r33 | ||
947 | * note: interrupts must be disabled before calling this function. | ||
948 | */ | ||
949 | GLOBAL_ENTRY(vmm_trampoline) | ||
950 | mov r16 = psr | ||
951 | adds r2 = CTX(PSR), r32 | ||
952 | ;; | ||
953 | st8 [r2] = r16, 8 // psr | ||
954 | mov r17 = pr | ||
955 | ;; | ||
956 | st8 [r2] = r17, 8 // pr | ||
957 | mov r18 = ar.unat | ||
958 | ;; | ||
959 | st8 [r2] = r18 | ||
960 | mov r17 = ar.rsc | ||
961 | ;; | ||
962 | adds r2 = CTX(RSC),r32 | ||
963 | ;; | ||
964 | st8 [r2]= r17 | ||
965 | mov ar.rsc =0 | ||
966 | flushrs | ||
967 | ;; | ||
968 | SAVE_GENERAL_REGS | ||
969 | ;; | ||
970 | SAVE_KERNEL_REGS | ||
971 | ;; | ||
972 | SAVE_APP_REGS | ||
973 | ;; | ||
974 | SAVE_BRANCH_REGS | ||
975 | ;; | ||
976 | SAVE_CTL_REGS | ||
977 | ;; | ||
978 | SAVE_REGION_REGS | ||
979 | ;; | ||
980 | //SAVE_DEBUG_REGS | ||
981 | ;; | ||
982 | rsm psr.dfl | ||
983 | ;; | ||
984 | srlz.d | ||
985 | ;; | ||
986 | SAVE_FPU_LOW | ||
987 | ;; | ||
988 | rsm psr.dfh | ||
989 | ;; | ||
990 | srlz.d | ||
991 | ;; | ||
992 | SAVE_FPU_HIGH | ||
993 | ;; | ||
994 | SAVE_PTK_REGS | ||
995 | ;; | ||
996 | RESTORE_PTK_REGS | ||
997 | ;; | ||
998 | RESTORE_FPU_HIGH | ||
999 | ;; | ||
1000 | RESTORE_FPU_LOW | ||
1001 | ;; | ||
1002 | //RESTORE_DEBUG_REGS | ||
1003 | ;; | ||
1004 | RESTORE_REGION_REGS | ||
1005 | ;; | ||
1006 | RESTORE_CTL_REGS | ||
1007 | ;; | ||
1008 | RESTORE_BRANCH_REGS | ||
1009 | ;; | ||
1010 | RESTORE_APP_REGS | ||
1011 | ;; | ||
1012 | RESTORE_KERNEL_REGS | ||
1013 | ;; | ||
1014 | RESTORE_GENERAL_REGS | ||
1015 | ;; | ||
1016 | adds r2=CTX(PSR), r33 | ||
1017 | ;; | ||
1018 | ld8 r16=[r2], 8 // psr | ||
1019 | ;; | ||
1020 | mov psr.l=r16 | ||
1021 | ;; | ||
1022 | srlz.d | ||
1023 | ;; | ||
1024 | ld8 r16=[r2], 8 // pr | ||
1025 | ;; | ||
1026 | mov pr =r16,-1 | ||
1027 | ld8 r16=[r2] // unat | ||
1028 | ;; | ||
1029 | mov ar.unat=r16 | ||
1030 | ;; | ||
1031 | adds r2=CTX(RSC),r33 | ||
1032 | ;; | ||
1033 | ld8 r16 =[r2] | ||
1034 | ;; | ||
1035 | mov ar.rsc = r16 | ||
1036 | ;; | ||
1037 | br.ret.sptk.few b0 | ||
1038 | END(vmm_trampoline) | ||
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
deleted file mode 100644
index 958815c9787d..000000000000
--- a/arch/ia64/kvm/vcpu.c
+++ /dev/null
@@ -1,2209 +0,0 @@
1 | /* | ||
2 | * kvm_vcpu.c: handling all virtual cpu related things. | ||
3 | * Copyright (c) 2005, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
17 | * | ||
18 | * Shaofan Li (Susue Li) <susie.li@intel.com> | ||
19 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) | ||
20 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
21 | * Xiantao Zhang <xiantao.zhang@intel.com> | ||
22 | */ | ||
23 | |||
24 | #include <linux/kvm_host.h> | ||
25 | #include <linux/types.h> | ||
26 | |||
27 | #include <asm/processor.h> | ||
28 | #include <asm/ia64regs.h> | ||
29 | #include <asm/gcc_intrin.h> | ||
30 | #include <asm/kregs.h> | ||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/tlb.h> | ||
33 | |||
34 | #include "asm-offsets.h" | ||
35 | #include "vcpu.h" | ||
36 | |||
37 | /* | ||
38 | * Special notes: | ||
39 | * - Index by it/dt/rt sequence | ||
40 | * - Only existing mode transitions are allowed in this table | ||
41 | * - RSE is placed in lazy mode when emulating guest partial mode | ||
42 | * - If the gva happens to fall in rr0 or rr4, the only allowed case | ||
43 | * is identity mapping (gva=gpa), or panic! (How?) | ||
44 | */ | ||
45 | int mm_switch_table[8][8] = { | ||
46 | /* 2004/09/12(Kevin): Allow switch to self */ | ||
47 | /* | ||
48 | * (it,dt,rt): (0,0,0) -> (1,1,1) | ||
49 | * This kind of transition usually occurs in the very early | ||
50 | * stage of Linux boot up procedure. Another case is in efi | ||
51 | * and pal calls. (see "arch/ia64/kernel/head.S") | ||
52 | * | ||
53 | * (it,dt,rt): (0,0,0) -> (0,1,1) | ||
54 | * This kind of transition is found when OSYa exits efi boot | ||
55 | * service. Since gva = gpa in this case (same region), | ||
56 | * data accesses can be satisfied even though the itlb entry for | ||
57 | * physical emulation is hit. | ||
58 | */ | ||
59 | {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V}, | ||
60 | {0, 0, 0, 0, 0, 0, 0, 0}, | ||
61 | {0, 0, 0, 0, 0, 0, 0, 0}, | ||
62 | /* | ||
63 | * (it,dt,rt): (0,1,1) -> (1,1,1) | ||
64 | * This kind of transition is found in OSYa. | ||
65 | * | ||
66 | * (it,dt,rt): (0,1,1) -> (0,0,0) | ||
67 | * This kind of transition is found in OSYa | ||
68 | */ | ||
69 | {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V}, | ||
70 | /* (1,0,0)->(1,1,1) */ | ||
71 | {0, 0, 0, 0, 0, 0, 0, SW_P2V}, | ||
72 | /* | ||
73 | * (it,dt,rt): (1,0,1) -> (1,1,1) | ||
74 | * This kind of transition usually occurs when Linux returns | ||
75 | * from the low level TLB miss handlers. | ||
76 | * (see "arch/ia64/kernel/ivt.S") | ||
77 | */ | ||
78 | {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V}, | ||
79 | {0, 0, 0, 0, 0, 0, 0, 0}, | ||
80 | /* | ||
81 | * (it,dt,rt): (1,1,1) -> (1,0,1) | ||
82 | * This kind of transition usually occurs in Linux low level | ||
83 | * TLB miss handler. (see "arch/ia64/kernel/ivt.S") | ||
84 | * | ||
85 | * (it,dt,rt): (1,1,1) -> (0,0,0) | ||
86 | * This kind of transition usually occurs in pal and efi calls, | ||
87 | * which requires running in physical mode. | ||
88 | * (see "arch/ia64/kernel/head.S") | ||
89 | * (1,1,1)->(1,0,0) | ||
90 | */ | ||
91 | |||
92 | {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF}, | ||
93 | }; | ||
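The table is indexed by packing the three mode bits into a 3-bit value, it in the high bit, then dt, then rt; in this file that packing is done by the MODE_IND() macro from vcpu.h, whose shape the table layout implies. So (it,dt,rt) = (0,0,0) is index 0, (1,1,1) is index 7, and the physical-to-virtual transition described in the first comment block is mm_switch_table[0][7], which is indeed SW_P2V. As a sketch:

    /* Index packing implied by the table above (it is the high bit). */
    #define MODE_IND(psr)   (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)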
94 | |||
95 | void physical_mode_init(struct kvm_vcpu *vcpu) | ||
96 | { | ||
97 | vcpu->arch.mode_flags = GUEST_IN_PHY; | ||
98 | } | ||
99 | |||
100 | void switch_to_physical_rid(struct kvm_vcpu *vcpu) | ||
101 | { | ||
102 | unsigned long psr; | ||
103 | |||
104 | /* Save original virtual mode rr[0] and rr[4] */ | ||
105 | psr = ia64_clear_ic(); | ||
106 | ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0); | ||
107 | ia64_srlz_d(); | ||
108 | ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4); | ||
109 | ia64_srlz_d(); | ||
110 | |||
111 | ia64_set_psr(psr); | ||
112 | return; | ||
113 | } | ||
114 | |||
115 | void switch_to_virtual_rid(struct kvm_vcpu *vcpu) | ||
116 | { | ||
117 | unsigned long psr; | ||
118 | |||
119 | psr = ia64_clear_ic(); | ||
120 | ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0); | ||
121 | ia64_srlz_d(); | ||
122 | ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4); | ||
123 | ia64_srlz_d(); | ||
124 | ia64_set_psr(psr); | ||
125 | return; | ||
126 | } | ||
127 | |||
128 | static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr) | ||
129 | { | ||
130 | return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)]; | ||
131 | } | ||
132 | |||
133 | void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, | ||
134 | struct ia64_psr new_psr) | ||
135 | { | ||
136 | int act; | ||
137 | act = mm_switch_action(old_psr, new_psr); | ||
138 | switch (act) { | ||
139 | case SW_V2P: | ||
140 | /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n", | ||
141 | old_psr.val, new_psr.val);*/ | ||
142 | switch_to_physical_rid(vcpu); | ||
143 | /* | ||
144 | * Set the RSE to enforced lazy mode, to prevent active RSE | ||
145 | * save/restore while the guest is in physical mode. | ||
146 | */ | ||
147 | vcpu->arch.mode_flags |= GUEST_IN_PHY; | ||
148 | break; | ||
149 | case SW_P2V: | ||
150 | switch_to_virtual_rid(vcpu); | ||
151 | /* | ||
152 | * Recover the old mode, which was saved when entering | ||
153 | * guest physical mode. | ||
154 | */ | ||
155 | vcpu->arch.mode_flags &= ~GUEST_IN_PHY; | ||
156 | break; | ||
157 | case SW_SELF: | ||
158 | break; | ||
159 | case SW_NOP: | ||
160 | break; | ||
161 | default: | ||
162 | /* Sanity check */ | ||
163 | break; | ||
164 | } | ||
165 | return; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * In physical mode, inserting a tc/tr entry for region 0 or 4 uses | ||
170 | * RID[0] or RID[4], which are reserved for physical mode emulation. | ||
171 | * However, what those inserted tc/tr entries want is the rid for | ||
172 | * virtual mode, so the original virtual rid needs to be restored | ||
173 | * before the insert. | ||
174 | * | ||
175 | * Operations which require such a switch include: | ||
176 | * - insertions (itc.*, itr.*) | ||
177 | * - purges (ptc.* and ptr.*) | ||
178 | * - tpa | ||
179 | * - tak | ||
180 | * - thash?, ttag? | ||
181 | * All of the above need the actual virtual rid for the destination entry. | ||
182 | */ | ||
183 | |||
184 | void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, | ||
185 | struct ia64_psr new_psr) | ||
186 | { | ||
187 | |||
188 | if ((old_psr.dt != new_psr.dt) | ||
189 | || (old_psr.it != new_psr.it) | ||
190 | || (old_psr.rt != new_psr.rt)) | ||
191 | switch_mm_mode(vcpu, old_psr, new_psr); | ||
192 | |||
193 | return; | ||
194 | } | ||
195 | |||
213 | void prepare_if_physical_mode(struct kvm_vcpu *vcpu) | ||
214 | { | ||
215 | if (is_physical_mode(vcpu)) { | ||
216 | vcpu->arch.mode_flags |= GUEST_PHY_EMUL; | ||
217 | switch_to_virtual_rid(vcpu); | ||
218 | } | ||
219 | return; | ||
220 | } | ||
221 | |||
222 | /* Recover always follows prepare */ | ||
223 | void recover_if_physical_mode(struct kvm_vcpu *vcpu) | ||
224 | { | ||
225 | if (is_physical_mode(vcpu)) | ||
226 | switch_to_physical_rid(vcpu); | ||
227 | vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL; | ||
228 | return; | ||
229 | } | ||
230 | |||
231 | #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x) | ||
232 | |||
233 | static u16 gr_info[32] = { | ||
234 | 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */ | ||
235 | RPT(r1), RPT(r2), RPT(r3), | ||
236 | RPT(r4), RPT(r5), RPT(r6), RPT(r7), | ||
237 | RPT(r8), RPT(r9), RPT(r10), RPT(r11), | ||
238 | RPT(r12), RPT(r13), RPT(r14), RPT(r15), | ||
239 | RPT(r16), RPT(r17), RPT(r18), RPT(r19), | ||
240 | RPT(r20), RPT(r21), RPT(r22), RPT(r23), | ||
241 | RPT(r24), RPT(r25), RPT(r26), RPT(r27), | ||
242 | RPT(r28), RPT(r29), RPT(r30), RPT(r31) | ||
243 | }; | ||
244 | |||
245 | #define IA64_FIRST_STACKED_GR 32 | ||
246 | #define IA64_FIRST_ROTATING_FR 32 | ||
247 | |||
248 | static inline unsigned long | ||
249 | rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg) | ||
250 | { | ||
251 | reg += rrb; | ||
252 | if (reg >= sor) | ||
253 | reg -= sor; | ||
254 | return reg; | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Return the (rotated) index of floating point register | ||
259 | * REGNUM (REGNUM must be in the range 32-127; the | ||
260 | * result is in the range 0-95). | ||
261 | */ | ||
262 | static inline unsigned long fph_index(struct kvm_pt_regs *regs, | ||
263 | long regnum) | ||
264 | { | ||
265 | unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f; | ||
266 | return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); | ||
267 | } | ||
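A worked example of the rotation: the bits at cr.ifs{31:25} hold rrb.fr, the rotation base for the 96 rotating FP registers f32-f127. With rrb.fr = 2, a reference to f33 resolves to rotated index rotate_reg(96, 2, 1) = 3, i.e. the value physically lives in f35, and references near the top wrap around. A standalone check, duplicating rotate_reg() from above:

    #include <stdio.h>

    static unsigned long rotate_reg(unsigned long sor, unsigned long rrb,
                                    unsigned long reg)
    {
            reg += rrb;
            if (reg >= sor)
                    reg -= sor;
            return reg;
    }

    int main(void)
    {
            /* f33 with rrb.fr = 2 lands on rotated index 3, i.e. f35 */
            printf("%lu\n", 32 + rotate_reg(96, 2, 33 - 32));       /* 35 */
            /* ...and f127 wraps back around into f33 */
            printf("%lu\n", 32 + rotate_reg(96, 2, 127 - 32));      /* 33 */
            return 0;
    }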
268 | |||
269 | /* | ||
270 | * The inverse of the above: given bspstore and the number of | ||
271 | * registers, calculate ar.bsp. | ||
272 | */ | ||
273 | static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr, | ||
274 | long num_regs) | ||
275 | { | ||
276 | long delta = ia64_rse_slot_num(addr) + num_regs; | ||
277 | int i = 0; | ||
278 | |||
279 | if (num_regs < 0) | ||
280 | delta -= 0x3e; | ||
281 | if (delta < 0) { | ||
282 | while (delta <= -0x3f) { | ||
283 | i--; | ||
284 | delta += 0x3f; | ||
285 | } | ||
286 | } else { | ||
287 | while (delta >= 0x3f) { | ||
288 | i++; | ||
289 | delta -= 0x3f; | ||
290 | } | ||
291 | } | ||
292 | |||
293 | return addr + num_regs + i; | ||
294 | } | ||
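The 0x3f adjustments in kvm_rse_skip_regs() account for RSE NaT collections: the register backing store dedicates every 64th doubleword slot to a NaT collection word, so stepping over N stacked registers crosses one extra slot per 63-register group boundary. The same arithmetic on raw byte addresses, as a standalone check (ia64_rse_slot_num() is just address bits 8:3):

    #include <stdio.h>
    #include <stdint.h>

    /* Slot number within the current 64-slot group: address bits 8:3. */
    static long rse_slot_num(uint64_t addr)
    {
            return (addr >> 3) & 0x3f;
    }

    /* Same adjustment as kvm_rse_skip_regs(), on byte addresses. */
    static uint64_t rse_skip_regs(uint64_t addr, long num_regs)
    {
            long delta = rse_slot_num(addr) + num_regs;
            long i = 0;

            if (num_regs < 0)
                    delta -= 0x3e;
            if (delta < 0)
                    while (delta <= -0x3f) { i--; delta += 0x3f; }
            else
                    while (delta >= 0x3f) { i++; delta -= 0x3f; }

            return addr + 8 * (num_regs + i);  /* i extra NaT-collection slots */
    }

    int main(void)
    {
            /* from slot 0, skipping 63 registers crosses one NaT slot */
            printf("%llu slots\n",
                   (unsigned long long)(rse_skip_regs(0, 63) / 8));  /* 64 */
            return 0;
    }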
295 | |||
296 | static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, | ||
297 | unsigned long *val, int *nat) | ||
298 | { | ||
299 | unsigned long *bsp, *addr, *rnat_addr, *bspstore; | ||
300 | unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; | ||
301 | unsigned long nat_mask; | ||
302 | unsigned long old_rsc, new_rsc; | ||
303 | long sof = (regs->cr_ifs) & 0x7f; | ||
304 | long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); | ||
305 | long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; | ||
306 | long ridx = r1 - 32; | ||
307 | |||
308 | if (ridx < sor) | ||
309 | ridx = rotate_reg(sor, rrb_gr, ridx); | ||
310 | |||
311 | old_rsc = ia64_getreg(_IA64_REG_AR_RSC); | ||
312 | new_rsc = old_rsc&(~(0x3)); | ||
313 | ia64_setreg(_IA64_REG_AR_RSC, new_rsc); | ||
314 | |||
315 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | ||
316 | bsp = kbs + (regs->loadrs >> 19); | ||
317 | |||
318 | addr = kvm_rse_skip_regs(bsp, -sof + ridx); | ||
319 | nat_mask = 1UL << ia64_rse_slot_num(addr); | ||
320 | rnat_addr = ia64_rse_rnat_addr(addr); | ||
321 | |||
322 | if (addr >= bspstore) { | ||
323 | ia64_flushrs(); | ||
324 | ia64_mf(); | ||
325 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | ||
326 | } | ||
327 | *val = *addr; | ||
328 | if (nat) { | ||
329 | if (bspstore < rnat_addr) | ||
330 | *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT) | ||
331 | & nat_mask); | ||
332 | else | ||
333 | *nat = (int)!!((*rnat_addr) & nat_mask); | ||
334 | ia64_setreg(_IA64_REG_AR_RSC, old_rsc); | ||
335 | } | ||
336 | } | ||
337 | |||
338 | void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, | ||
339 | unsigned long val, unsigned long nat) | ||
340 | { | ||
341 | unsigned long *bsp, *bspstore, *addr, *rnat_addr; | ||
342 | unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; | ||
343 | unsigned long nat_mask; | ||
344 | unsigned long old_rsc, new_rsc, psr; | ||
345 | unsigned long rnat; | ||
346 | long sof = (regs->cr_ifs) & 0x7f; | ||
347 | long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); | ||
348 | long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; | ||
349 | long ridx = r1 - 32; | ||
350 | |||
351 | if (ridx < sor) | ||
352 | ridx = rotate_reg(sor, rrb_gr, ridx); | ||
353 | |||
354 | old_rsc = ia64_getreg(_IA64_REG_AR_RSC); | ||
355 | /* put the RSC into lazy mode, and set loadrs to 0 */ | ||
356 | new_rsc = old_rsc & (~0x3fff0003); | ||
357 | ia64_setreg(_IA64_REG_AR_RSC, new_rsc); | ||
358 | bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */ | ||
359 | |||
360 | addr = kvm_rse_skip_regs(bsp, -sof + ridx); | ||
361 | nat_mask = 1UL << ia64_rse_slot_num(addr); | ||
362 | rnat_addr = ia64_rse_rnat_addr(addr); | ||
363 | |||
364 | local_irq_save(psr); | ||
365 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | ||
366 | if (addr >= bspstore) { | ||
367 | |||
368 | ia64_flushrs(); | ||
369 | ia64_mf(); | ||
370 | *addr = val; | ||
371 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | ||
372 | rnat = ia64_getreg(_IA64_REG_AR_RNAT); | ||
373 | if (bspstore < rnat_addr) | ||
374 | rnat = rnat & (~nat_mask); | ||
375 | else | ||
376 | *rnat_addr = (*rnat_addr)&(~nat_mask); | ||
377 | |||
378 | ia64_mf(); | ||
379 | ia64_loadrs(); | ||
380 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); | ||
381 | } else { | ||
382 | rnat = ia64_getreg(_IA64_REG_AR_RNAT); | ||
383 | *addr = val; | ||
384 | if (bspstore < rnat_addr) | ||
385 | rnat = rnat&(~nat_mask); | ||
386 | else | ||
387 | *rnat_addr = (*rnat_addr) & (~nat_mask); | ||
388 | |||
389 | ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); | ||
390 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); | ||
391 | } | ||
392 | local_irq_restore(psr); | ||
393 | ia64_setreg(_IA64_REG_AR_RSC, old_rsc); | ||
394 | } | ||
395 | |||
396 | void getreg(unsigned long regnum, unsigned long *val, | ||
397 | int *nat, struct kvm_pt_regs *regs) | ||
398 | { | ||
399 | unsigned long addr, *unat; | ||
400 | if (regnum >= IA64_FIRST_STACKED_GR) { | ||
401 | get_rse_reg(regs, regnum, val, nat); | ||
402 | return; | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | * Now look at registers in [0-31] range and init correct UNAT | ||
407 | */ | ||
408 | addr = (unsigned long)regs; | ||
409 | unat = &regs->eml_unat; | ||
410 | |||
411 | addr += gr_info[regnum]; | ||
412 | |||
413 | *val = *(unsigned long *)addr; | ||
414 | /* | ||
415 | * do it only when requested | ||
416 | */ | ||
417 | if (nat) | ||
418 | *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL; | ||
419 | } | ||
420 | |||
421 | void setreg(unsigned long regnum, unsigned long val, | ||
422 | int nat, struct kvm_pt_regs *regs) | ||
423 | { | ||
424 | unsigned long addr; | ||
425 | unsigned long bitmask; | ||
426 | unsigned long *unat; | ||
427 | |||
428 | /* | ||
429 | * First takes care of stacked registers | ||
430 | */ | ||
431 | if (regnum >= IA64_FIRST_STACKED_GR) { | ||
432 | set_rse_reg(regs, regnum, val, nat); | ||
433 | return; | ||
434 | } | ||
435 | |||
436 | /* | ||
437 | * Now look at registers in [0-31] range and init correct UNAT | ||
438 | */ | ||
439 | addr = (unsigned long)regs; | ||
440 | unat = &regs->eml_unat; | ||
441 | /* | ||
442 | * add the offset from the base of the struct | ||
443 | * and do the store | ||
444 | */ | ||
445 | addr += gr_info[regnum]; | ||
446 | |||
447 | *(unsigned long *)addr = val; | ||
448 | |||
449 | /* | ||
450 | * We need to clear the corresponding UNAT bit to fully emulate the load | ||
451 | * UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4 | ||
452 | */ | ||
453 | bitmask = 1UL << ((addr >> 3) & 0x3f); | ||
454 | if (nat) | ||
455 | *unat |= bitmask; | ||
456 | else | ||
457 | *unat &= ~bitmask; | ||
458 | |||
459 | } | ||
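/*
 * Illustrative sketch (hypothetical helpers): the UNAT bit guarding an
 * 8-byte spill slot is selected by address bits {8:3}, exactly the
 * "(addr >> 3) & 0x3f" computation used by getreg()/setreg() above.
 */
static inline unsigned long unat_bit(unsigned long spill_addr)
{
	return 1UL << ((spill_addr >> 3) & 0x3f);
}

static inline void unat_update(unsigned long *unat,
			       unsigned long spill_addr, int nat)
{
	if (nat)
		*unat |= unat_bit(spill_addr);		/* mark NaT */
	else
		*unat &= ~unat_bit(spill_addr);		/* clear NaT */
}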
460 | |||
461 | u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) | ||
462 | { | ||
463 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
464 | unsigned long val; | ||
465 | |||
466 | if (!reg) | ||
467 | return 0; | ||
468 | getreg(reg, &val, 0, regs); | ||
469 | return val; | ||
470 | } | ||
471 | |||
472 | void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat) | ||
473 | { | ||
474 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
475 | long sof = (regs->cr_ifs) & 0x7f; | ||
476 | |||
477 | if (!reg) | ||
478 | return; | ||
479 | if (reg >= sof + 32) | ||
480 | return; | ||
481 | setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/ | ||
482 | } | ||
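/*
 * Illustrative sketch (hypothetical helper): the "reg >= sof + 32"
 * check in vcpu_set_gr() above rejects writes outside the current
 * register frame, which spans r32 .. r(31 + sof) with sof taken from
 * CR.IFS bits {6:0}.
 */
static inline int gr_valid(unsigned long reg, unsigned long cr_ifs)
{
	unsigned long sof = cr_ifs & 0x7f;	/* size of frame */

	return reg < sof + 32;	/* r0-r31 plus the current frame */
}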
483 | |||
484 | void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval, | ||
485 | struct kvm_pt_regs *regs) | ||
486 | { | ||
487 | /* Take floating-point register rotation into consideration */ | ||
488 | if (regnum >= IA64_FIRST_ROTATING_FR) | ||
489 | regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); | ||
490 | #define CASE_FIXED_FP(reg) \ | ||
491 | case (reg) : \ | ||
492 | ia64_stf_spill(fpval, reg); \ | ||
493 | break | ||
494 | |||
495 | switch (regnum) { | ||
496 | CASE_FIXED_FP(0); | ||
497 | CASE_FIXED_FP(1); | ||
498 | CASE_FIXED_FP(2); | ||
499 | CASE_FIXED_FP(3); | ||
500 | CASE_FIXED_FP(4); | ||
501 | CASE_FIXED_FP(5); | ||
502 | |||
503 | CASE_FIXED_FP(6); | ||
504 | CASE_FIXED_FP(7); | ||
505 | CASE_FIXED_FP(8); | ||
506 | CASE_FIXED_FP(9); | ||
507 | CASE_FIXED_FP(10); | ||
508 | CASE_FIXED_FP(11); | ||
509 | |||
510 | CASE_FIXED_FP(12); | ||
511 | CASE_FIXED_FP(13); | ||
512 | CASE_FIXED_FP(14); | ||
513 | CASE_FIXED_FP(15); | ||
514 | CASE_FIXED_FP(16); | ||
515 | CASE_FIXED_FP(17); | ||
516 | CASE_FIXED_FP(18); | ||
517 | CASE_FIXED_FP(19); | ||
518 | CASE_FIXED_FP(20); | ||
519 | CASE_FIXED_FP(21); | ||
520 | CASE_FIXED_FP(22); | ||
521 | CASE_FIXED_FP(23); | ||
522 | CASE_FIXED_FP(24); | ||
523 | CASE_FIXED_FP(25); | ||
524 | CASE_FIXED_FP(26); | ||
525 | CASE_FIXED_FP(27); | ||
526 | CASE_FIXED_FP(28); | ||
527 | CASE_FIXED_FP(29); | ||
528 | CASE_FIXED_FP(30); | ||
529 | CASE_FIXED_FP(31); | ||
530 | CASE_FIXED_FP(32); | ||
531 | CASE_FIXED_FP(33); | ||
532 | CASE_FIXED_FP(34); | ||
533 | CASE_FIXED_FP(35); | ||
534 | CASE_FIXED_FP(36); | ||
535 | CASE_FIXED_FP(37); | ||
536 | CASE_FIXED_FP(38); | ||
537 | CASE_FIXED_FP(39); | ||
538 | CASE_FIXED_FP(40); | ||
539 | CASE_FIXED_FP(41); | ||
540 | CASE_FIXED_FP(42); | ||
541 | CASE_FIXED_FP(43); | ||
542 | CASE_FIXED_FP(44); | ||
543 | CASE_FIXED_FP(45); | ||
544 | CASE_FIXED_FP(46); | ||
545 | CASE_FIXED_FP(47); | ||
546 | CASE_FIXED_FP(48); | ||
547 | CASE_FIXED_FP(49); | ||
548 | CASE_FIXED_FP(50); | ||
549 | CASE_FIXED_FP(51); | ||
550 | CASE_FIXED_FP(52); | ||
551 | CASE_FIXED_FP(53); | ||
552 | CASE_FIXED_FP(54); | ||
553 | CASE_FIXED_FP(55); | ||
554 | CASE_FIXED_FP(56); | ||
555 | CASE_FIXED_FP(57); | ||
556 | CASE_FIXED_FP(58); | ||
557 | CASE_FIXED_FP(59); | ||
558 | CASE_FIXED_FP(60); | ||
559 | CASE_FIXED_FP(61); | ||
560 | CASE_FIXED_FP(62); | ||
561 | CASE_FIXED_FP(63); | ||
562 | CASE_FIXED_FP(64); | ||
563 | CASE_FIXED_FP(65); | ||
564 | CASE_FIXED_FP(66); | ||
565 | CASE_FIXED_FP(67); | ||
566 | CASE_FIXED_FP(68); | ||
567 | CASE_FIXED_FP(69); | ||
568 | CASE_FIXED_FP(70); | ||
569 | CASE_FIXED_FP(71); | ||
570 | CASE_FIXED_FP(72); | ||
571 | CASE_FIXED_FP(73); | ||
572 | CASE_FIXED_FP(74); | ||
573 | CASE_FIXED_FP(75); | ||
574 | CASE_FIXED_FP(76); | ||
575 | CASE_FIXED_FP(77); | ||
576 | CASE_FIXED_FP(78); | ||
577 | CASE_FIXED_FP(79); | ||
578 | CASE_FIXED_FP(80); | ||
579 | CASE_FIXED_FP(81); | ||
580 | CASE_FIXED_FP(82); | ||
581 | CASE_FIXED_FP(83); | ||
582 | CASE_FIXED_FP(84); | ||
583 | CASE_FIXED_FP(85); | ||
584 | CASE_FIXED_FP(86); | ||
585 | CASE_FIXED_FP(87); | ||
586 | CASE_FIXED_FP(88); | ||
587 | CASE_FIXED_FP(89); | ||
588 | CASE_FIXED_FP(90); | ||
589 | CASE_FIXED_FP(91); | ||
590 | CASE_FIXED_FP(92); | ||
591 | CASE_FIXED_FP(93); | ||
592 | CASE_FIXED_FP(94); | ||
593 | CASE_FIXED_FP(95); | ||
594 | CASE_FIXED_FP(96); | ||
595 | CASE_FIXED_FP(97); | ||
596 | CASE_FIXED_FP(98); | ||
597 | CASE_FIXED_FP(99); | ||
598 | CASE_FIXED_FP(100); | ||
599 | CASE_FIXED_FP(101); | ||
600 | CASE_FIXED_FP(102); | ||
601 | CASE_FIXED_FP(103); | ||
602 | CASE_FIXED_FP(104); | ||
603 | CASE_FIXED_FP(105); | ||
604 | CASE_FIXED_FP(106); | ||
605 | CASE_FIXED_FP(107); | ||
606 | CASE_FIXED_FP(108); | ||
607 | CASE_FIXED_FP(109); | ||
608 | CASE_FIXED_FP(110); | ||
609 | CASE_FIXED_FP(111); | ||
610 | CASE_FIXED_FP(112); | ||
611 | CASE_FIXED_FP(113); | ||
612 | CASE_FIXED_FP(114); | ||
613 | CASE_FIXED_FP(115); | ||
614 | CASE_FIXED_FP(116); | ||
615 | CASE_FIXED_FP(117); | ||
616 | CASE_FIXED_FP(118); | ||
617 | CASE_FIXED_FP(119); | ||
618 | CASE_FIXED_FP(120); | ||
619 | CASE_FIXED_FP(121); | ||
620 | CASE_FIXED_FP(122); | ||
621 | CASE_FIXED_FP(123); | ||
622 | CASE_FIXED_FP(124); | ||
623 | CASE_FIXED_FP(125); | ||
624 | CASE_FIXED_FP(126); | ||
625 | CASE_FIXED_FP(127); | ||
626 | } | ||
627 | #undef CASE_FIXED_FP | ||
628 | } | ||
629 | |||
630 | void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, | ||
631 | struct kvm_pt_regs *regs) | ||
632 | { | ||
633 | /* Take floating-point register rotation into consideration */ | ||
634 | if (regnum >= IA64_FIRST_ROTATING_FR) | ||
635 | regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); | ||
636 | |||
637 | #define CASE_FIXED_FP(reg) \ | ||
638 | case (reg) : \ | ||
639 | ia64_ldf_fill(reg, fpval); \ | ||
640 | break | ||
641 | |||
642 | switch (regnum) { | ||
643 | CASE_FIXED_FP(2); | ||
644 | CASE_FIXED_FP(3); | ||
645 | CASE_FIXED_FP(4); | ||
646 | CASE_FIXED_FP(5); | ||
647 | |||
648 | CASE_FIXED_FP(6); | ||
649 | CASE_FIXED_FP(7); | ||
650 | CASE_FIXED_FP(8); | ||
651 | CASE_FIXED_FP(9); | ||
652 | CASE_FIXED_FP(10); | ||
653 | CASE_FIXED_FP(11); | ||
654 | |||
655 | CASE_FIXED_FP(12); | ||
656 | CASE_FIXED_FP(13); | ||
657 | CASE_FIXED_FP(14); | ||
658 | CASE_FIXED_FP(15); | ||
659 | CASE_FIXED_FP(16); | ||
660 | CASE_FIXED_FP(17); | ||
661 | CASE_FIXED_FP(18); | ||
662 | CASE_FIXED_FP(19); | ||
663 | CASE_FIXED_FP(20); | ||
664 | CASE_FIXED_FP(21); | ||
665 | CASE_FIXED_FP(22); | ||
666 | CASE_FIXED_FP(23); | ||
667 | CASE_FIXED_FP(24); | ||
668 | CASE_FIXED_FP(25); | ||
669 | CASE_FIXED_FP(26); | ||
670 | CASE_FIXED_FP(27); | ||
671 | CASE_FIXED_FP(28); | ||
672 | CASE_FIXED_FP(29); | ||
673 | CASE_FIXED_FP(30); | ||
674 | CASE_FIXED_FP(31); | ||
675 | CASE_FIXED_FP(32); | ||
676 | CASE_FIXED_FP(33); | ||
677 | CASE_FIXED_FP(34); | ||
678 | CASE_FIXED_FP(35); | ||
679 | CASE_FIXED_FP(36); | ||
680 | CASE_FIXED_FP(37); | ||
681 | CASE_FIXED_FP(38); | ||
682 | CASE_FIXED_FP(39); | ||
683 | CASE_FIXED_FP(40); | ||
684 | CASE_FIXED_FP(41); | ||
685 | CASE_FIXED_FP(42); | ||
686 | CASE_FIXED_FP(43); | ||
687 | CASE_FIXED_FP(44); | ||
688 | CASE_FIXED_FP(45); | ||
689 | CASE_FIXED_FP(46); | ||
690 | CASE_FIXED_FP(47); | ||
691 | CASE_FIXED_FP(48); | ||
692 | CASE_FIXED_FP(49); | ||
693 | CASE_FIXED_FP(50); | ||
694 | CASE_FIXED_FP(51); | ||
695 | CASE_FIXED_FP(52); | ||
696 | CASE_FIXED_FP(53); | ||
697 | CASE_FIXED_FP(54); | ||
698 | CASE_FIXED_FP(55); | ||
699 | CASE_FIXED_FP(56); | ||
700 | CASE_FIXED_FP(57); | ||
701 | CASE_FIXED_FP(58); | ||
702 | CASE_FIXED_FP(59); | ||
703 | CASE_FIXED_FP(60); | ||
704 | CASE_FIXED_FP(61); | ||
705 | CASE_FIXED_FP(62); | ||
706 | CASE_FIXED_FP(63); | ||
707 | CASE_FIXED_FP(64); | ||
708 | CASE_FIXED_FP(65); | ||
709 | CASE_FIXED_FP(66); | ||
710 | CASE_FIXED_FP(67); | ||
711 | CASE_FIXED_FP(68); | ||
712 | CASE_FIXED_FP(69); | ||
713 | CASE_FIXED_FP(70); | ||
714 | CASE_FIXED_FP(71); | ||
715 | CASE_FIXED_FP(72); | ||
716 | CASE_FIXED_FP(73); | ||
717 | CASE_FIXED_FP(74); | ||
718 | CASE_FIXED_FP(75); | ||
719 | CASE_FIXED_FP(76); | ||
720 | CASE_FIXED_FP(77); | ||
721 | CASE_FIXED_FP(78); | ||
722 | CASE_FIXED_FP(79); | ||
723 | CASE_FIXED_FP(80); | ||
724 | CASE_FIXED_FP(81); | ||
725 | CASE_FIXED_FP(82); | ||
726 | CASE_FIXED_FP(83); | ||
727 | CASE_FIXED_FP(84); | ||
728 | CASE_FIXED_FP(85); | ||
729 | CASE_FIXED_FP(86); | ||
730 | CASE_FIXED_FP(87); | ||
731 | CASE_FIXED_FP(88); | ||
732 | CASE_FIXED_FP(89); | ||
733 | CASE_FIXED_FP(90); | ||
734 | CASE_FIXED_FP(91); | ||
735 | CASE_FIXED_FP(92); | ||
736 | CASE_FIXED_FP(93); | ||
737 | CASE_FIXED_FP(94); | ||
738 | CASE_FIXED_FP(95); | ||
739 | CASE_FIXED_FP(96); | ||
740 | CASE_FIXED_FP(97); | ||
741 | CASE_FIXED_FP(98); | ||
742 | CASE_FIXED_FP(99); | ||
743 | CASE_FIXED_FP(100); | ||
744 | CASE_FIXED_FP(101); | ||
745 | CASE_FIXED_FP(102); | ||
746 | CASE_FIXED_FP(103); | ||
747 | CASE_FIXED_FP(104); | ||
748 | CASE_FIXED_FP(105); | ||
749 | CASE_FIXED_FP(106); | ||
750 | CASE_FIXED_FP(107); | ||
751 | CASE_FIXED_FP(108); | ||
752 | CASE_FIXED_FP(109); | ||
753 | CASE_FIXED_FP(110); | ||
754 | CASE_FIXED_FP(111); | ||
755 | CASE_FIXED_FP(112); | ||
756 | CASE_FIXED_FP(113); | ||
757 | CASE_FIXED_FP(114); | ||
758 | CASE_FIXED_FP(115); | ||
759 | CASE_FIXED_FP(116); | ||
760 | CASE_FIXED_FP(117); | ||
761 | CASE_FIXED_FP(118); | ||
762 | CASE_FIXED_FP(119); | ||
763 | CASE_FIXED_FP(120); | ||
764 | CASE_FIXED_FP(121); | ||
765 | CASE_FIXED_FP(122); | ||
766 | CASE_FIXED_FP(123); | ||
767 | CASE_FIXED_FP(124); | ||
768 | CASE_FIXED_FP(125); | ||
769 | CASE_FIXED_FP(126); | ||
770 | CASE_FIXED_FP(127); | ||
771 | } | ||
772 | } | ||
773 | |||
774 | void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, | ||
775 | struct ia64_fpreg *val) | ||
776 | { | ||
777 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
778 | |||
779 | getfpreg(reg, val, regs); /* FIXME: handle NATs later*/ | ||
780 | } | ||
781 | |||
782 | void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, | ||
783 | struct ia64_fpreg *val) | ||
784 | { | ||
785 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
786 | |||
787 | if (reg > 1) | ||
788 | setfpreg(reg, val, regs); /* FIXME: handle NATs later*/ | ||
789 | } | ||
790 | |||
791 | /* | ||
792 | * The Altix RTC is mapped specially here for the vmm module | ||
793 | */ | ||
794 | #define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT)) | ||
795 | static long kvm_get_itc(struct kvm_vcpu *vcpu) | ||
796 | { | ||
797 | #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) | ||
798 | struct kvm *kvm = (struct kvm *)KVM_VM_BASE; | ||
799 | |||
800 | if (kvm->arch.is_sn2) | ||
801 | return (*SN_RTC_BASE); | ||
802 | else | ||
803 | #endif | ||
804 | return ia64_getreg(_IA64_REG_AR_ITC); | ||
805 | } | ||
806 | |||
807 | /************************************************************************ | ||
808 | * lsapic timer | ||
809 | ***********************************************************************/ | ||
810 | u64 vcpu_get_itc(struct kvm_vcpu *vcpu) | ||
811 | { | ||
812 | unsigned long guest_itc; | ||
813 | guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu); | ||
814 | |||
815 | if (guest_itc >= VMX(vcpu, last_itc)) { | ||
816 | VMX(vcpu, last_itc) = guest_itc; | ||
817 | return guest_itc; | ||
818 | } else | ||
819 | return VMX(vcpu, last_itc); | ||
820 | } | ||
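/*
 * Illustrative sketch (hypothetical standalone form): the guest ITC is
 * the host ITC plus a per-VM offset, clamped so that successive reads
 * never go backwards, mirroring the last_itc bookkeeping above.
 */
static unsigned long last_itc;

static unsigned long guest_itc_read(unsigned long host_itc, long offset)
{
	unsigned long itc = host_itc + offset;

	if (itc >= last_itc)
		last_itc = itc;		/* advance the high-water mark */
	return last_itc;		/* never step backwards */
}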
821 | |||
822 | static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); | ||
823 | static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) | ||
824 | { | ||
825 | struct kvm_vcpu *v; | ||
826 | struct kvm *kvm; | ||
827 | int i; | ||
828 | long itc_offset = val - kvm_get_itc(vcpu); | ||
829 | unsigned long vitv = VCPU(vcpu, itv); | ||
830 | |||
831 | kvm = (struct kvm *)KVM_VM_BASE; | ||
832 | |||
833 | if (kvm_vcpu_is_bsp(vcpu)) { | ||
834 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { | ||
835 | v = (struct kvm_vcpu *)((char *)vcpu + | ||
836 | sizeof(struct kvm_vcpu_data) * i); | ||
837 | VMX(v, itc_offset) = itc_offset; | ||
838 | VMX(v, last_itc) = 0; | ||
839 | } | ||
840 | } | ||
841 | VMX(vcpu, last_itc) = 0; | ||
842 | if (VCPU(vcpu, itm) <= val) { | ||
843 | VMX(vcpu, itc_check) = 0; | ||
844 | vcpu_unpend_interrupt(vcpu, vitv); | ||
845 | } else { | ||
846 | VMX(vcpu, itc_check) = 1; | ||
847 | vcpu_set_itm(vcpu, VCPU(vcpu, itm)); | ||
848 | } | ||
849 | |||
850 | } | ||
851 | |||
852 | static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu) | ||
853 | { | ||
854 | return ((u64)VCPU(vcpu, itm)); | ||
855 | } | ||
856 | |||
857 | static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val) | ||
858 | { | ||
859 | unsigned long vitv = VCPU(vcpu, itv); | ||
860 | VCPU(vcpu, itm) = val; | ||
861 | |||
862 | if (val > vcpu_get_itc(vcpu)) { | ||
863 | VMX(vcpu, itc_check) = 1; | ||
864 | vcpu_unpend_interrupt(vcpu, vitv); | ||
865 | VMX(vcpu, timer_pending) = 0; | ||
866 | } else | ||
867 | VMX(vcpu, itc_check) = 0; | ||
868 | } | ||
869 | |||
870 | #define ITV_VECTOR(itv) ((itv) & 0xff) | ||
871 | #define ITV_IRQ_MASK(itv) ((itv) & (1 << 16)) | ||
872 | |||
873 | static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val) | ||
874 | { | ||
875 | VCPU(vcpu, itv) = val; | ||
876 | if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) { | ||
877 | vcpu_pend_interrupt(vcpu, ITV_VECTOR(val)); | ||
878 | vcpu->arch.timer_pending = 0; | ||
879 | } | ||
880 | } | ||
881 | |||
882 | static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val) | ||
883 | { | ||
884 | int vec; | ||
885 | |||
886 | vec = highest_inservice_irq(vcpu); | ||
887 | if (vec == NULL_VECTOR) | ||
888 | return; | ||
889 | VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63)); | ||
890 | VCPU(vcpu, eoi) = 0; | ||
891 | vcpu->arch.irq_new_pending = 1; | ||
892 | |||
893 | } | ||
894 | |||
895 | /* See Table 5-8 in SDM vol2 for the definition */ | ||
896 | int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice) | ||
897 | { | ||
898 | union ia64_tpr vtpr; | ||
899 | |||
900 | vtpr.val = VCPU(vcpu, tpr); | ||
901 | |||
902 | if (h_inservice == NMI_VECTOR) | ||
903 | return IRQ_MASKED_BY_INSVC; | ||
904 | |||
905 | if (h_pending == NMI_VECTOR) { | ||
906 | /* Non-Maskable Interrupt */ | ||
907 | return IRQ_NO_MASKED; | ||
908 | } | ||
909 | |||
910 | if (h_inservice == ExtINT_VECTOR) | ||
911 | return IRQ_MASKED_BY_INSVC; | ||
912 | |||
913 | if (h_pending == ExtINT_VECTOR) { | ||
914 | if (vtpr.mmi) { | ||
915 | /* mask all external IRQ */ | ||
916 | return IRQ_MASKED_BY_VTPR; | ||
917 | } else | ||
918 | return IRQ_NO_MASKED; | ||
919 | } | ||
920 | |||
921 | if (is_higher_irq(h_pending, h_inservice)) { | ||
922 | if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4))) | ||
923 | return IRQ_NO_MASKED; | ||
924 | else | ||
925 | return IRQ_MASKED_BY_VTPR; | ||
926 | } else { | ||
927 | return IRQ_MASKED_BY_INSVC; | ||
928 | } | ||
929 | } | ||
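/*
 * Illustrative sketch (hypothetical helper, assuming is_higher_class()
 * compares priority classes, i.e. vec >> 4): with 16 vectors per class,
 * an external interrupt is deliverable only when TPR.mmi is clear and
 * its class exceeds TPR.mic.  The "mic + (mmi << 4)" expression above
 * folds both tests into one comparison, since a set mmi raises the
 * threshold past the highest class (15).
 */
static inline int tpr_allows(int vec, unsigned int mic, unsigned int mmi)
{
	return (unsigned int)(vec >> 4) > mic + (mmi << 4);
}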
930 | |||
931 | void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec) | ||
932 | { | ||
933 | long spsr; | ||
934 | int ret; | ||
935 | |||
936 | local_irq_save(spsr); | ||
937 | ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0])); | ||
938 | local_irq_restore(spsr); | ||
939 | |||
940 | vcpu->arch.irq_new_pending = 1; | ||
941 | } | ||
942 | |||
943 | void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec) | ||
944 | { | ||
945 | long spsr; | ||
946 | int ret; | ||
947 | |||
948 | local_irq_save(spsr); | ||
949 | ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0])); | ||
950 | local_irq_restore(spsr); | ||
951 | if (ret) { | ||
952 | vcpu->arch.irq_new_pending = 1; | ||
953 | wmb(); | ||
954 | } | ||
955 | } | ||
956 | |||
957 | void update_vhpi(struct kvm_vcpu *vcpu, int vec) | ||
958 | { | ||
959 | u64 vhpi; | ||
960 | |||
961 | if (vec == NULL_VECTOR) | ||
962 | vhpi = 0; | ||
963 | else if (vec == NMI_VECTOR) | ||
964 | vhpi = 32; | ||
965 | else if (vec == ExtINT_VECTOR) | ||
966 | vhpi = 16; | ||
967 | else | ||
968 | vhpi = vec >> 4; | ||
969 | |||
970 | VCPU(vcpu, vhpi) = vhpi; | ||
971 | if (VCPU(vcpu, vac).a_int) | ||
972 | ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, | ||
973 | (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0); | ||
974 | } | ||
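/*
 * Illustrative sketch (hypothetical helper): the VHPI value handed to
 * the VPD encodes the highest pending priority, not the raw vector --
 * 0 for none, 32 for NMI, 16 for ExtINT, and the priority class
 * (vec >> 4) for ordinary vectors, as in update_vhpi() above.
 */
static inline unsigned long vhpi_encode(int vec)
{
	if (vec == NULL_VECTOR)
		return 0;
	if (vec == NMI_VECTOR)
		return 32;
	if (vec == ExtINT_VECTOR)
		return 16;
	return (unsigned long)vec >> 4;
}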
975 | |||
976 | u64 vcpu_get_ivr(struct kvm_vcpu *vcpu) | ||
977 | { | ||
978 | int vec, h_inservice, mask; | ||
979 | |||
980 | vec = highest_pending_irq(vcpu); | ||
981 | h_inservice = highest_inservice_irq(vcpu); | ||
982 | mask = irq_masked(vcpu, vec, h_inservice); | ||
983 | if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) { | ||
984 | if (VCPU(vcpu, vhpi)) | ||
985 | update_vhpi(vcpu, NULL_VECTOR); | ||
986 | return IA64_SPURIOUS_INT_VECTOR; | ||
987 | } | ||
988 | if (mask == IRQ_MASKED_BY_VTPR) { | ||
989 | update_vhpi(vcpu, vec); | ||
990 | return IA64_SPURIOUS_INT_VECTOR; | ||
991 | } | ||
992 | VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63)); | ||
993 | vcpu_unpend_interrupt(vcpu, vec); | ||
994 | return (u64)vec; | ||
995 | } | ||
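/*
 * Illustrative sketch (hypothetical helpers): irr/insvc track the 256
 * interrupt vectors as four 64-bit words, so "vec >> 6" selects the
 * word and "vec & 63" the bit within it, as in the insvc update above.
 */
static inline void vec_bitmap_set(unsigned long map[4], unsigned int vec)
{
	map[vec >> 6] |= 1UL << (vec & 63);
}

static inline int vec_bitmap_test(unsigned long map[4], unsigned int vec)
{
	return (map[vec >> 6] >> (vec & 63)) & 1;
}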
996 | |||
997 | /************************************************************************** | ||
998 | Privileged operation emulation routines | ||
999 | **************************************************************************/ | ||
1000 | u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr) | ||
1001 | { | ||
1002 | union ia64_pta vpta; | ||
1003 | union ia64_rr vrr; | ||
1004 | u64 pval; | ||
1005 | u64 vhpt_offset; | ||
1006 | |||
1007 | vpta.val = vcpu_get_pta(vcpu); | ||
1008 | vrr.val = vcpu_get_rr(vcpu, vadr); | ||
1009 | vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1); | ||
1010 | if (vpta.vf) { | ||
1011 | pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val, | ||
1012 | vpta.val, 0, 0, 0, 0); | ||
1013 | } else { | ||
1014 | pval = (vadr & VRN_MASK) | vhpt_offset | | ||
1015 | (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)); | ||
1016 | } | ||
1017 | return pval; | ||
1018 | } | ||
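/*
 * Illustrative sketch (hypothetical helper) of the short-format hash
 * above, with the region-bit merge omitted: the VPN (vadr >> ps)
 * indexes 8-byte VHPT entries, wrapped to the table size of 2^pta.size
 * bytes, and the table base is PTA with its low pta.size bits cleared.
 */
static inline unsigned long short_format_thash(unsigned long vadr,
					       unsigned int ps,
					       unsigned long pta_val,
					       unsigned int pta_size)
{
	unsigned long off  = ((vadr >> ps) << 3) & ((1UL << pta_size) - 1);
	unsigned long base = (pta_val >> pta_size) << pta_size;

	return base | off;
}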
1019 | |||
1020 | u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr) | ||
1021 | { | ||
1022 | union ia64_rr vrr; | ||
1023 | union ia64_pta vpta; | ||
1024 | u64 pval; | ||
1025 | |||
1026 | vpta.val = vcpu_get_pta(vcpu); | ||
1027 | vrr.val = vcpu_get_rr(vcpu, vadr); | ||
1028 | if (vpta.vf) { | ||
1029 | pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val, | ||
1030 | 0, 0, 0, 0, 0); | ||
1031 | } else | ||
1032 | pval = 1; | ||
1033 | |||
1034 | return pval; | ||
1035 | } | ||
1036 | |||
1037 | u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr) | ||
1038 | { | ||
1039 | struct thash_data *data; | ||
1040 | union ia64_pta vpta; | ||
1041 | u64 key; | ||
1042 | |||
1043 | vpta.val = vcpu_get_pta(vcpu); | ||
1044 | if (vpta.vf == 0) { | ||
1045 | key = 1; | ||
1046 | return key; | ||
1047 | } | ||
1048 | data = vtlb_lookup(vcpu, vadr, D_TLB); | ||
1049 | if (!data || !data->p) | ||
1050 | key = 1; | ||
1051 | else | ||
1052 | key = data->key; | ||
1053 | |||
1054 | return key; | ||
1055 | } | ||
1056 | |||
1057 | void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) | ||
1058 | { | ||
1059 | unsigned long thash, vadr; | ||
1060 | |||
1061 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1062 | thash = vcpu_thash(vcpu, vadr); | ||
1063 | vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); | ||
1064 | } | ||
1065 | |||
1066 | void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) | ||
1067 | { | ||
1068 | unsigned long tag, vadr; | ||
1069 | |||
1070 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1071 | tag = vcpu_ttag(vcpu, vadr); | ||
1072 | vcpu_set_gr(vcpu, inst.M46.r1, tag, 0); | ||
1073 | } | ||
1074 | |||
1075 | int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr) | ||
1076 | { | ||
1077 | struct thash_data *data; | ||
1078 | union ia64_isr visr, pt_isr; | ||
1079 | struct kvm_pt_regs *regs; | ||
1080 | struct ia64_psr vpsr; | ||
1081 | |||
1082 | regs = vcpu_regs(vcpu); | ||
1083 | pt_isr.val = VMX(vcpu, cr_isr); | ||
1084 | visr.val = 0; | ||
1085 | visr.ei = pt_isr.ei; | ||
1086 | visr.ir = pt_isr.ir; | ||
1087 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
1088 | visr.na = 1; | ||
1089 | |||
1090 | data = vhpt_lookup(vadr); | ||
1091 | if (data) { | ||
1092 | if (data->p == 0) { | ||
1093 | vcpu_set_isr(vcpu, visr.val); | ||
1094 | data_page_not_present(vcpu, vadr); | ||
1095 | return IA64_FAULT; | ||
1096 | } else if (data->ma == VA_MATTR_NATPAGE) { | ||
1097 | vcpu_set_isr(vcpu, visr.val); | ||
1098 | dnat_page_consumption(vcpu, vadr); | ||
1099 | return IA64_FAULT; | ||
1100 | } else { | ||
1101 | *padr = (data->gpaddr >> data->ps << data->ps) | | ||
1102 | (vadr & (PSIZE(data->ps) - 1)); | ||
1103 | return IA64_NO_FAULT; | ||
1104 | } | ||
1105 | } | ||
1106 | |||
1107 | data = vtlb_lookup(vcpu, vadr, D_TLB); | ||
1108 | if (data) { | ||
1109 | if (data->p == 0) { | ||
1110 | vcpu_set_isr(vcpu, visr.val); | ||
1111 | data_page_not_present(vcpu, vadr); | ||
1112 | return IA64_FAULT; | ||
1113 | } else if (data->ma == VA_MATTR_NATPAGE) { | ||
1114 | vcpu_set_isr(vcpu, visr.val); | ||
1115 | dnat_page_consumption(vcpu, vadr); | ||
1116 | return IA64_FAULT; | ||
1117 | } else { | ||
1118 | *padr = ((data->ppn >> (data->ps - 12)) << data->ps) | ||
1119 | | (vadr & (PSIZE(data->ps) - 1)); | ||
1120 | return IA64_NO_FAULT; | ||
1121 | } | ||
1122 | } | ||
1123 | if (!vhpt_enabled(vcpu, vadr, NA_REF)) { | ||
1124 | if (vpsr.ic) { | ||
1125 | vcpu_set_isr(vcpu, visr.val); | ||
1126 | alt_dtlb(vcpu, vadr); | ||
1127 | return IA64_FAULT; | ||
1128 | } else { | ||
1129 | nested_dtlb(vcpu); | ||
1130 | return IA64_FAULT; | ||
1131 | } | ||
1132 | } else { | ||
1133 | if (vpsr.ic) { | ||
1134 | vcpu_set_isr(vcpu, visr.val); | ||
1135 | dvhpt_fault(vcpu, vadr); | ||
1136 | return IA64_FAULT; | ||
1137 | } else { | ||
1138 | nested_dtlb(vcpu); | ||
1139 | return IA64_FAULT; | ||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | return IA64_NO_FAULT; | ||
1144 | } | ||
1145 | |||
1146 | int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) | ||
1147 | { | ||
1148 | unsigned long r1, r3; | ||
1149 | |||
1150 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1151 | |||
1152 | if (vcpu_tpa(vcpu, r3, &r1)) | ||
1153 | return IA64_FAULT; | ||
1154 | |||
1155 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); | ||
1156 | return(IA64_NO_FAULT); | ||
1157 | } | ||
1158 | |||
1159 | void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst) | ||
1160 | { | ||
1161 | unsigned long r1, r3; | ||
1162 | |||
1163 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1164 | r1 = vcpu_tak(vcpu, r3); | ||
1165 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); | ||
1166 | } | ||
1167 | |||
1168 | /************************************ | ||
1169 | * Insert/Purge translation register/cache | ||
1170 | ************************************/ | ||
1171 | void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) | ||
1172 | { | ||
1173 | thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB); | ||
1174 | } | ||
1175 | |||
1176 | void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) | ||
1177 | { | ||
1178 | thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB); | ||
1179 | } | ||
1180 | |||
1181 | void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) | ||
1182 | { | ||
1183 | u64 ps, va, rid; | ||
1184 | struct thash_data *p_itr; | ||
1185 | |||
1186 | ps = itir_ps(itir); | ||
1187 | va = PAGEALIGN(ifa, ps); | ||
1188 | pte &= ~PAGE_FLAGS_RV_MASK; | ||
1189 | rid = vcpu_get_rr(vcpu, ifa); | ||
1190 | rid = rid & RR_RID_MASK; | ||
1191 | p_itr = (struct thash_data *)&vcpu->arch.itrs[slot]; | ||
1192 | vcpu_set_tr(p_itr, pte, itir, va, rid); | ||
1193 | vcpu_quick_region_set(VMX(vcpu, itr_regions), va); | ||
1194 | } | ||
1195 | |||
1196 | |||
1197 | void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) | ||
1198 | { | ||
1199 | u64 gpfn; | ||
1200 | u64 ps, va, rid; | ||
1201 | struct thash_data *p_dtr; | ||
1202 | |||
1203 | ps = itir_ps(itir); | ||
1204 | va = PAGEALIGN(ifa, ps); | ||
1205 | pte &= ~PAGE_FLAGS_RV_MASK; | ||
1206 | |||
1207 | if (ps != _PAGE_SIZE_16M) | ||
1208 | thash_purge_entries(vcpu, va, ps); | ||
1209 | gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; | ||
1210 | if (__gpfn_is_io(gpfn)) | ||
1211 | pte |= VTLB_PTE_IO; | ||
1212 | rid = vcpu_get_rr(vcpu, va); | ||
1213 | rid = rid & RR_RID_MASK; | ||
1214 | p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot]; | ||
1215 | vcpu_set_tr(p_dtr, pte, itir, va, rid); | ||
1217 | vcpu_quick_region_set(VMX(vcpu, dtr_regions), va); | ||
1218 | } | ||
1219 | |||
1220 | void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) | ||
1221 | { | ||
1222 | int index; | ||
1223 | u64 va; | ||
1224 | |||
1225 | va = PAGEALIGN(ifa, ps); | ||
1226 | while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0) | ||
1227 | vcpu->arch.dtrs[index].page_flags = 0; | ||
1228 | |||
1229 | thash_purge_entries(vcpu, va, ps); | ||
1230 | } | ||
1231 | |||
1232 | void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) | ||
1233 | { | ||
1234 | int index; | ||
1235 | u64 va; | ||
1236 | |||
1237 | va = PAGEALIGN(ifa, ps); | ||
1238 | while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0) | ||
1239 | vcpu->arch.itrs[index].page_flags = 0; | ||
1240 | |||
1241 | thash_purge_entries(vcpu, va, ps); | ||
1242 | } | ||
1243 | |||
1244 | void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps) | ||
1245 | { | ||
1246 | va = PAGEALIGN(va, ps); | ||
1247 | thash_purge_entries(vcpu, va, ps); | ||
1248 | } | ||
1249 | |||
1250 | void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va) | ||
1251 | { | ||
1252 | thash_purge_all(vcpu); | ||
1253 | } | ||
1254 | |||
1255 | void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps) | ||
1256 | { | ||
1257 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
1258 | long psr; | ||
1259 | local_irq_save(psr); | ||
1260 | p->exit_reason = EXIT_REASON_PTC_G; | ||
1261 | |||
1262 | p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va); | ||
1263 | p->u.ptc_g_data.vaddr = va; | ||
1264 | p->u.ptc_g_data.ps = ps; | ||
1265 | vmm_transition(vcpu); | ||
1266 | /* Do local purge here */ | ||
1267 | vcpu_ptc_l(vcpu, va, ps); | ||
1268 | local_irq_restore(psr); | ||
1269 | } | ||
1270 | |||
1271 | |||
1272 | void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps) | ||
1273 | { | ||
1274 | vcpu_ptc_ga(vcpu, va, ps); | ||
1275 | } | ||
1276 | |||
1277 | void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst) | ||
1278 | { | ||
1279 | unsigned long ifa; | ||
1280 | |||
1281 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1282 | vcpu_ptc_e(vcpu, ifa); | ||
1283 | } | ||
1284 | |||
1285 | void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst) | ||
1286 | { | ||
1287 | unsigned long ifa, itir; | ||
1288 | |||
1289 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1290 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1291 | vcpu_ptc_g(vcpu, ifa, itir_ps(itir)); | ||
1292 | } | ||
1293 | |||
1294 | void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst) | ||
1295 | { | ||
1296 | unsigned long ifa, itir; | ||
1297 | |||
1298 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1299 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1300 | vcpu_ptc_ga(vcpu, ifa, itir_ps(itir)); | ||
1301 | } | ||
1302 | |||
1303 | void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst) | ||
1304 | { | ||
1305 | unsigned long ifa, itir; | ||
1306 | |||
1307 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1308 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1309 | vcpu_ptc_l(vcpu, ifa, itir_ps(itir)); | ||
1310 | } | ||
1311 | |||
1312 | void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst) | ||
1313 | { | ||
1314 | unsigned long ifa, itir; | ||
1315 | |||
1316 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1317 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1318 | vcpu_ptr_d(vcpu, ifa, itir_ps(itir)); | ||
1319 | } | ||
1320 | |||
1321 | void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst) | ||
1322 | { | ||
1323 | unsigned long ifa, itir; | ||
1324 | |||
1325 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1326 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1327 | vcpu_ptr_i(vcpu, ifa, itir_ps(itir)); | ||
1328 | } | ||
1329 | |||
1330 | void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst) | ||
1331 | { | ||
1332 | unsigned long itir, ifa, pte, slot; | ||
1333 | |||
1334 | slot = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1335 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1336 | itir = vcpu_get_itir(vcpu); | ||
1337 | ifa = vcpu_get_ifa(vcpu); | ||
1338 | vcpu_itr_d(vcpu, slot, pte, itir, ifa); | ||
1339 | } | ||
1340 | |||
1341 | |||
1342 | |||
1343 | void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst) | ||
1344 | { | ||
1345 | unsigned long itir, ifa, pte, slot; | ||
1346 | |||
1347 | slot = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1348 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1349 | itir = vcpu_get_itir(vcpu); | ||
1350 | ifa = vcpu_get_ifa(vcpu); | ||
1351 | vcpu_itr_i(vcpu, slot, pte, itir, ifa); | ||
1352 | } | ||
1353 | |||
1354 | void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst) | ||
1355 | { | ||
1356 | unsigned long itir, ifa, pte; | ||
1357 | |||
1358 | itir = vcpu_get_itir(vcpu); | ||
1359 | ifa = vcpu_get_ifa(vcpu); | ||
1360 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1361 | vcpu_itc_d(vcpu, pte, itir, ifa); | ||
1362 | } | ||
1363 | |||
1364 | void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst) | ||
1365 | { | ||
1366 | unsigned long itir, ifa, pte; | ||
1367 | |||
1368 | itir = vcpu_get_itir(vcpu); | ||
1369 | ifa = vcpu_get_ifa(vcpu); | ||
1370 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1371 | vcpu_itc_i(vcpu, pte, itir, ifa); | ||
1372 | } | ||
1373 | |||
1374 | /************************************* | ||
1375 | * Moves to semi-privileged registers | ||
1376 | *************************************/ | ||
1377 | |||
1378 | void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst) | ||
1379 | { | ||
1380 | unsigned long imm; | ||
1381 | |||
1382 | if (inst.M30.s) | ||
1383 | imm = -inst.M30.imm; | ||
1384 | else | ||
1385 | imm = inst.M30.imm; | ||
1386 | |||
1387 | vcpu_set_itc(vcpu, imm); | ||
1388 | } | ||
1389 | |||
1390 | void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | ||
1391 | { | ||
1392 | unsigned long r2; | ||
1393 | |||
1394 | r2 = vcpu_get_gr(vcpu, inst.M29.r2); | ||
1395 | vcpu_set_itc(vcpu, r2); | ||
1396 | } | ||
1397 | |||
1398 | void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | ||
1399 | { | ||
1400 | unsigned long r1; | ||
1401 | |||
1402 | r1 = vcpu_get_itc(vcpu); | ||
1403 | vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); | ||
1404 | } | ||
1405 | |||
1406 | /************************************************************************** | ||
1407 | struct kvm_vcpu protection key register access routines | ||
1408 | **************************************************************************/ | ||
1409 | |||
1410 | unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) | ||
1411 | { | ||
1412 | return ((unsigned long)ia64_get_pkr(reg)); | ||
1413 | } | ||
1414 | |||
1415 | void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val) | ||
1416 | { | ||
1417 | ia64_set_pkr(reg, val); | ||
1418 | } | ||
1419 | |||
1420 | /******************************** | ||
1421 | * Moves to privileged registers | ||
1422 | ********************************/ | ||
1423 | unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg, | ||
1424 | unsigned long val) | ||
1425 | { | ||
1426 | union ia64_rr oldrr, newrr; | ||
1427 | unsigned long rrval; | ||
1428 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
1429 | unsigned long psr; | ||
1430 | |||
1431 | oldrr.val = vcpu_get_rr(vcpu, reg); | ||
1432 | newrr.val = val; | ||
1433 | vcpu->arch.vrr[reg >> VRN_SHIFT] = val; | ||
1434 | |||
1435 | switch ((unsigned long)(reg >> VRN_SHIFT)) { | ||
1436 | case VRN6: | ||
1437 | vcpu->arch.vmm_rr = vrrtomrr(val); | ||
1438 | local_irq_save(psr); | ||
1439 | p->exit_reason = EXIT_REASON_SWITCH_RR6; | ||
1440 | vmm_transition(vcpu); | ||
1441 | local_irq_restore(psr); | ||
1442 | break; | ||
1443 | case VRN4: | ||
1444 | rrval = vrrtomrr(val); | ||
1445 | vcpu->arch.metaphysical_saved_rr4 = rrval; | ||
1446 | if (!is_physical_mode(vcpu)) | ||
1447 | ia64_set_rr(reg, rrval); | ||
1448 | break; | ||
1449 | case VRN0: | ||
1450 | rrval = vrrtomrr(val); | ||
1451 | vcpu->arch.metaphysical_saved_rr0 = rrval; | ||
1452 | if (!is_physical_mode(vcpu)) | ||
1453 | ia64_set_rr(reg, rrval); | ||
1454 | break; | ||
1455 | default: | ||
1456 | ia64_set_rr(reg, vrrtomrr(val)); | ||
1457 | break; | ||
1458 | } | ||
1459 | |||
1460 | return (IA64_NO_FAULT); | ||
1461 | } | ||
1462 | |||
1463 | void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1464 | { | ||
1465 | unsigned long r3, r2; | ||
1466 | |||
1467 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1468 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1469 | vcpu_set_rr(vcpu, r3, r2); | ||
1470 | } | ||
1471 | |||
1472 | void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1473 | { | ||
1474 | } | ||
1475 | |||
1476 | void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1477 | { | ||
1478 | } | ||
1479 | |||
1480 | void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst) | ||
1481 | { | ||
1482 | unsigned long r3, r2; | ||
1483 | |||
1484 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1485 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1486 | vcpu_set_pmc(vcpu, r3, r2); | ||
1487 | } | ||
1488 | |||
1489 | void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst) | ||
1490 | { | ||
1491 | unsigned long r3, r2; | ||
1492 | |||
1493 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1494 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1495 | vcpu_set_pmd(vcpu, r3, r2); | ||
1496 | } | ||
1497 | |||
1498 | void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1499 | { | ||
1500 | u64 r3, r2; | ||
1501 | |||
1502 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1503 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1504 | vcpu_set_pkr(vcpu, r3, r2); | ||
1505 | } | ||
1506 | |||
1507 | void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1508 | { | ||
1509 | unsigned long r3, r1; | ||
1510 | |||
1511 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1512 | r1 = vcpu_get_rr(vcpu, r3); | ||
1513 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1514 | } | ||
1515 | |||
1516 | void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1517 | { | ||
1518 | unsigned long r3, r1; | ||
1519 | |||
1520 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1521 | r1 = vcpu_get_pkr(vcpu, r3); | ||
1522 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1523 | } | ||
1524 | |||
1525 | void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1526 | { | ||
1527 | unsigned long r3, r1; | ||
1528 | |||
1529 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1530 | r1 = vcpu_get_dbr(vcpu, r3); | ||
1531 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1532 | } | ||
1533 | |||
1534 | void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1535 | { | ||
1536 | unsigned long r3, r1; | ||
1537 | |||
1538 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1539 | r1 = vcpu_get_ibr(vcpu, r3); | ||
1540 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1541 | } | ||
1542 | |||
1543 | void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst) | ||
1544 | { | ||
1545 | unsigned long r3, r1; | ||
1546 | |||
1547 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1548 | r1 = vcpu_get_pmc(vcpu, r3); | ||
1549 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1550 | } | ||
1551 | |||
1552 | unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) | ||
1553 | { | ||
1554 | /* FIXME: This could get called as a result of a rsvd-reg fault */ | ||
1555 | if (reg > (ia64_get_cpuid(3) & 0xff)) | ||
1556 | return 0; | ||
1557 | else | ||
1558 | return ia64_get_cpuid(reg); | ||
1559 | } | ||
1560 | |||
1561 | void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst) | ||
1562 | { | ||
1563 | unsigned long r3, r1; | ||
1564 | |||
1565 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1566 | r1 = vcpu_get_cpuid(vcpu, r3); | ||
1567 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1568 | } | ||
1569 | |||
1570 | void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val) | ||
1571 | { | ||
1572 | VCPU(vcpu, tpr) = val; | ||
1573 | vcpu->arch.irq_check = 1; | ||
1574 | } | ||
1575 | |||
1576 | unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1577 | { | ||
1578 | unsigned long r2; | ||
1579 | |||
1580 | r2 = vcpu_get_gr(vcpu, inst.M32.r2); | ||
1581 | VCPU(vcpu, vcr[inst.M32.cr3]) = r2; | ||
1582 | |||
1583 | switch (inst.M32.cr3) { | ||
1584 | case 0: | ||
1585 | vcpu_set_dcr(vcpu, r2); | ||
1586 | break; | ||
1587 | case 1: | ||
1588 | vcpu_set_itm(vcpu, r2); | ||
1589 | break; | ||
1590 | case 66: | ||
1591 | vcpu_set_tpr(vcpu, r2); | ||
1592 | break; | ||
1593 | case 67: | ||
1594 | vcpu_set_eoi(vcpu, r2); | ||
1595 | break; | ||
1596 | default: | ||
1597 | break; | ||
1598 | } | ||
1599 | |||
1600 | return 0; | ||
1601 | } | ||
1602 | |||
1603 | unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1604 | { | ||
1605 | unsigned long tgt = inst.M33.r1; | ||
1606 | unsigned long val; | ||
1607 | |||
1608 | switch (inst.M33.cr3) { | ||
1609 | case 65: | ||
1610 | val = vcpu_get_ivr(vcpu); | ||
1611 | vcpu_set_gr(vcpu, tgt, val, 0); | ||
1612 | break; | ||
1613 | |||
1614 | case 67: | ||
1615 | vcpu_set_gr(vcpu, tgt, 0L, 0); | ||
1616 | break; | ||
1617 | default: | ||
1618 | val = VCPU(vcpu, vcr[inst.M33.cr3]); | ||
1619 | vcpu_set_gr(vcpu, tgt, val, 0); | ||
1620 | break; | ||
1621 | } | ||
1622 | |||
1623 | return 0; | ||
1624 | } | ||
1625 | |||
1626 | void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) | ||
1627 | { | ||
1628 | |||
1629 | unsigned long mask; | ||
1630 | struct kvm_pt_regs *regs; | ||
1631 | struct ia64_psr old_psr, new_psr; | ||
1632 | |||
1633 | old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
1634 | |||
1635 | regs = vcpu_regs(vcpu); | ||
1636 | /* We only support guests with: | ||
1637 | * vpsr.pk = 0 | ||
1638 | * vpsr.is = 0 | ||
1639 | * Otherwise, panic. | ||
1640 | */ | ||
1641 | if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) | ||
1642 | panic_vm(vcpu, "Only support guests with vpsr.pk =0 " | ||
1643 | "& vpsr.is=0\n"); | ||
1644 | |||
1645 | /* | ||
1646 | * For the IA64_PSR bits id/da/dd/ss/ed/ia: | ||
1647 | * since these bits become 0 after successful execution of each | ||
1648 | * instruction, we mask them out here and leave them to mIA64_PSR | ||
1649 | */ | ||
1650 | VCPU(vcpu, vpsr) = val | ||
1651 | & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | | ||
1652 | IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)); | ||
1653 | |||
1654 | if (!old_psr.i && (val & IA64_PSR_I)) { | ||
1655 | /* vpsr.i 0->1 */ | ||
1656 | vcpu->arch.irq_check = 1; | ||
1657 | } | ||
1658 | new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
1659 | |||
1660 | /* | ||
1661 | * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr), | ||
1662 | * except for the following bits: | ||
1663 | * ic/i/dt/si/rt/mc/it/bn/vm | ||
1664 | */ | ||
1665 | mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI + | ||
1666 | IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN + | ||
1667 | IA64_PSR_VM; | ||
1668 | |||
1669 | regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask)); | ||
1670 | |||
1671 | check_mm_mode_switch(vcpu, old_psr, new_psr); | ||
1672 | |||
1673 | return; | ||
1674 | } | ||
1675 | |||
1676 | unsigned long vcpu_cover(struct kvm_vcpu *vcpu) | ||
1677 | { | ||
1678 | struct ia64_psr vpsr; | ||
1679 | |||
1680 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1681 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
1682 | |||
1683 | if (!vpsr.ic) | ||
1684 | VCPU(vcpu, ifs) = regs->cr_ifs; | ||
1685 | regs->cr_ifs = IA64_IFS_V; | ||
1686 | return (IA64_NO_FAULT); | ||
1687 | } | ||
1688 | |||
1689 | |||
1690 | |||
1691 | /************************************************************************** | ||
1692 | VCPU banked general register access routines | ||
1693 | **************************************************************************/ | ||
1694 | #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ | ||
1695 | do { \ | ||
1696 | __asm__ __volatile__ ( \ | ||
1697 | ";;extr.u %0 = %3,%6,16;;\n" \ | ||
1698 | "dep %1 = %0, %1, 0, 16;;\n" \ | ||
1699 | "st8 [%4] = %1\n" \ | ||
1700 | "extr.u %0 = %2, 16, 16;;\n" \ | ||
1701 | "dep %3 = %0, %3, %6, 16;;\n" \ | ||
1702 | "st8 [%5] = %3\n" \ | ||
1703 | ::"r"(i), "r"(*b1unat), "r"(*b0unat), \ | ||
1704 | "r"(*runat), "r"(b1unat), "r"(runat), \ | ||
1705 | "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ | ||
1706 | } while (0) | ||
1707 | |||
1708 | void vcpu_bsw0(struct kvm_vcpu *vcpu) | ||
1709 | { | ||
1710 | unsigned long i; | ||
1711 | |||
1712 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1713 | unsigned long *r = ®s->r16; | ||
1714 | unsigned long *b0 = &VCPU(vcpu, vbgr[0]); | ||
1715 | unsigned long *b1 = &VCPU(vcpu, vgr[0]); | ||
1716 | unsigned long *runat = ®s->eml_unat; | ||
1717 | unsigned long *b0unat = &VCPU(vcpu, vbnat); | ||
1718 | unsigned long *b1unat = &VCPU(vcpu, vnat); | ||
1719 | |||
1720 | |||
1721 | if (VCPU(vcpu, vpsr) & IA64_PSR_BN) { | ||
1722 | for (i = 0; i < 16; i++) { | ||
1723 | *b1++ = *r; | ||
1724 | *r++ = *b0++; | ||
1725 | } | ||
1726 | vcpu_bsw0_unat(i, b0unat, b1unat, runat, | ||
1727 | VMM_PT_REGS_R16_SLOT); | ||
1728 | VCPU(vcpu, vpsr) &= ~IA64_PSR_BN; | ||
1729 | } | ||
1730 | } | ||
1731 | |||
1732 | #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ | ||
1733 | do { \ | ||
1734 | __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \ | ||
1735 | "dep %1 = %0, %1, 16, 16;;\n" \ | ||
1736 | "st8 [%4] = %1\n" \ | ||
1737 | "extr.u %0 = %2, 0, 16;;\n" \ | ||
1738 | "dep %3 = %0, %3, %6, 16;;\n" \ | ||
1739 | "st8 [%5] = %3\n" \ | ||
1740 | ::"r"(i), "r"(*b0unat), "r"(*b1unat), \ | ||
1741 | "r"(*runat), "r"(b0unat), "r"(runat), \ | ||
1742 | "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ | ||
1743 | } while (0) | ||
1744 | |||
1745 | void vcpu_bsw1(struct kvm_vcpu *vcpu) | ||
1746 | { | ||
1747 | unsigned long i; | ||
1748 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1749 | unsigned long *r = ®s->r16; | ||
1750 | unsigned long *b0 = &VCPU(vcpu, vbgr[0]); | ||
1751 | unsigned long *b1 = &VCPU(vcpu, vgr[0]); | ||
1752 | unsigned long *runat = ®s->eml_unat; | ||
1753 | unsigned long *b0unat = &VCPU(vcpu, vbnat); | ||
1754 | unsigned long *b1unat = &VCPU(vcpu, vnat); | ||
1755 | |||
1756 | if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) { | ||
1757 | for (i = 0; i < 16; i++) { | ||
1758 | *b0++ = *r; | ||
1759 | *r++ = *b1++; | ||
1760 | } | ||
1761 | vcpu_bsw1_unat(i, b0unat, b1unat, runat, | ||
1762 | VMM_PT_REGS_R16_SLOT); | ||
1763 | VCPU(vcpu, vpsr) |= IA64_PSR_BN; | ||
1764 | } | ||
1765 | } | ||
1766 | |||
1767 | void vcpu_rfi(struct kvm_vcpu *vcpu) | ||
1768 | { | ||
1769 | unsigned long ifs, psr; | ||
1770 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1771 | |||
1772 | psr = VCPU(vcpu, ipsr); | ||
1773 | if (psr & IA64_PSR_BN) | ||
1774 | vcpu_bsw1(vcpu); | ||
1775 | else | ||
1776 | vcpu_bsw0(vcpu); | ||
1777 | vcpu_set_psr(vcpu, psr); | ||
1778 | ifs = VCPU(vcpu, ifs); | ||
1779 | if (ifs >> 63) | ||
1780 | regs->cr_ifs = ifs; | ||
1781 | regs->cr_iip = VCPU(vcpu, iip); | ||
1782 | } | ||
1783 | |||
1784 | /* | ||
1785 | * VPSR cannot track the guest PSR bits listed below, | ||
1786 | * so this function reconstructs the full guest PSR. | ||
1787 | */ | ||
1788 | |||
1789 | unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu) | ||
1790 | { | ||
1791 | unsigned long mask; | ||
1792 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1793 | |||
1794 | mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | | ||
1795 | IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI; | ||
1796 | return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask); | ||
1797 | } | ||
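/*
 * Illustrative sketch (hypothetical helper): vcpu_get_psr() is a masked
 * merge -- the mask bits come from the machine IPSR, everything else
 * from VPSR.
 */
static inline unsigned long masked_merge(unsigned long vpsr,
					 unsigned long ipsr,
					 unsigned long mask)
{
	return (vpsr & ~mask) | (ipsr & mask);
}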
1798 | |||
1799 | void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst) | ||
1800 | { | ||
1801 | unsigned long vpsr; | ||
1802 | unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | ||
1803 | | inst.M44.imm; | ||
1804 | |||
1805 | vpsr = vcpu_get_psr(vcpu); | ||
1806 | vpsr &= (~imm24); | ||
1807 | vcpu_set_psr(vcpu, vpsr); | ||
1808 | } | ||
1809 | |||
1810 | void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst) | ||
1811 | { | ||
1812 | unsigned long vpsr; | ||
1813 | unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | ||
1814 | | inst.M44.imm; | ||
1815 | |||
1816 | vpsr = vcpu_get_psr(vcpu); | ||
1817 | vpsr |= imm24; | ||
1818 | vcpu_set_psr(vcpu, vpsr); | ||
1819 | } | ||
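/*
 * Illustrative sketch (hypothetical helper): rsm/ssm encode their
 * 24-bit PSR mask across three M44 fields (i:1, i2:2, imm:21), which
 * kvm_rsm()/kvm_ssm() above reassemble as i<<23 | i2<<21 | imm.
 */
static inline unsigned long m44_imm24(unsigned int i, unsigned int i2,
				      unsigned long imm21)
{
	return ((unsigned long)i << 23) | ((unsigned long)i2 << 21) | imm21;
}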
1820 | |||
1821 | /* Generate Mask | ||
1822 | * Parameter: | ||
1823 | * bit -- starting bit | ||
1824 | * len -- how many bits | ||
1825 | */ | ||
1826 | #define MASK(bit,len) \ | ||
1827 | ({ \ | ||
1828 | __u64 ret; \ | ||
1829 | \ | ||
1830 | __asm __volatile("dep %0=-1, r0, %1, %2"\ | ||
1831 | : "=r" (ret): \ | ||
1832 | "M" (bit), \ | ||
1833 | "M" (len)); \ | ||
1834 | ret; \ | ||
1835 | }) | ||
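/*
 * Illustrative sketch (hypothetical helper): a portable C equivalent of
 * the dep-based MASK() above, valid for 0 < len <= 63; e.g. MASK(0, 32)
 * selects the low PSR word and MASK(32, 32) the high word, as used by
 * vcpu_set_psr_l() below.
 */
static inline unsigned long c_mask(unsigned int bit, unsigned int len)
{
	return ((1UL << len) - 1) << bit;
}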
1836 | |||
1837 | void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val) | ||
1838 | { | ||
1839 | val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32)); | ||
1840 | vcpu_set_psr(vcpu, val); | ||
1841 | } | ||
1842 | |||
1843 | void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1844 | { | ||
1845 | unsigned long val; | ||
1846 | |||
1847 | val = vcpu_get_gr(vcpu, inst.M35.r2); | ||
1848 | vcpu_set_psr_l(vcpu, val); | ||
1849 | } | ||
1850 | |||
1851 | void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1852 | { | ||
1853 | unsigned long val; | ||
1854 | |||
1855 | val = vcpu_get_psr(vcpu); | ||
1856 | val = (val & MASK(0, 32)) | (val & MASK(35, 2)); | ||
1857 | vcpu_set_gr(vcpu, inst.M33.r1, val, 0); | ||
1858 | } | ||
1859 | |||
1860 | void vcpu_increment_iip(struct kvm_vcpu *vcpu) | ||
1861 | { | ||
1862 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1863 | struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; | ||
1864 | if (ipsr->ri == 2) { | ||
1865 | ipsr->ri = 0; | ||
1866 | regs->cr_iip += 16; | ||
1867 | } else | ||
1868 | ipsr->ri++; | ||
1869 | } | ||
1870 | |||
1871 | void vcpu_decrement_iip(struct kvm_vcpu *vcpu) | ||
1872 | { | ||
1873 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1874 | struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; | ||
1875 | |||
1876 | if (ipsr->ri == 0) { | ||
1877 | ipsr->ri = 2; | ||
1878 | regs->cr_iip -= 16; | ||
1879 | } else | ||
1880 | ipsr->ri--; | ||
1881 | } | ||
1882 | |||
1883 | /** Emulate a privileged operation. | ||
1884 | * | ||
1885 | * | ||
1886 | * @param vcpu virtual cpu | ||
1887 | * @cause the cause of the virtualization fault | ||
1888 | * @opcode the instruction code that caused the virtualization fault | ||
1889 | */ | ||
1890 | |||
1891 | void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs) | ||
1892 | { | ||
1893 | unsigned long status, cause, opcode; | ||
1894 | INST64 inst; | ||
1895 | |||
1896 | status = IA64_NO_FAULT; | ||
1897 | cause = VMX(vcpu, cause); | ||
1898 | opcode = VMX(vcpu, opcode); | ||
1899 | inst.inst = opcode; | ||
1900 | /* | ||
1901 | * Switch to actual virtual rid in rr0 and rr4, | ||
1902 | * which is required by some TLB-related instructions. | ||
1903 | */ | ||
1904 | prepare_if_physical_mode(vcpu); | ||
1905 | |||
1906 | switch (cause) { | ||
1907 | case EVENT_RSM: | ||
1908 | kvm_rsm(vcpu, inst); | ||
1909 | break; | ||
1910 | case EVENT_SSM: | ||
1911 | kvm_ssm(vcpu, inst); | ||
1912 | break; | ||
1913 | case EVENT_MOV_TO_PSR: | ||
1914 | kvm_mov_to_psr(vcpu, inst); | ||
1915 | break; | ||
1916 | case EVENT_MOV_FROM_PSR: | ||
1917 | kvm_mov_from_psr(vcpu, inst); | ||
1918 | break; | ||
1919 | case EVENT_MOV_FROM_CR: | ||
1920 | kvm_mov_from_cr(vcpu, inst); | ||
1921 | break; | ||
1922 | case EVENT_MOV_TO_CR: | ||
1923 | kvm_mov_to_cr(vcpu, inst); | ||
1924 | break; | ||
1925 | case EVENT_BSW_0: | ||
1926 | vcpu_bsw0(vcpu); | ||
1927 | break; | ||
1928 | case EVENT_BSW_1: | ||
1929 | vcpu_bsw1(vcpu); | ||
1930 | break; | ||
1931 | case EVENT_COVER: | ||
1932 | vcpu_cover(vcpu); | ||
1933 | break; | ||
1934 | case EVENT_RFI: | ||
1935 | vcpu_rfi(vcpu); | ||
1936 | break; | ||
1937 | case EVENT_ITR_D: | ||
1938 | kvm_itr_d(vcpu, inst); | ||
1939 | break; | ||
1940 | case EVENT_ITR_I: | ||
1941 | kvm_itr_i(vcpu, inst); | ||
1942 | break; | ||
1943 | case EVENT_PTR_D: | ||
1944 | kvm_ptr_d(vcpu, inst); | ||
1945 | break; | ||
1946 | case EVENT_PTR_I: | ||
1947 | kvm_ptr_i(vcpu, inst); | ||
1948 | break; | ||
1949 | case EVENT_ITC_D: | ||
1950 | kvm_itc_d(vcpu, inst); | ||
1951 | break; | ||
1952 | case EVENT_ITC_I: | ||
1953 | kvm_itc_i(vcpu, inst); | ||
1954 | break; | ||
1955 | case EVENT_PTC_L: | ||
1956 | kvm_ptc_l(vcpu, inst); | ||
1957 | break; | ||
1958 | case EVENT_PTC_G: | ||
1959 | kvm_ptc_g(vcpu, inst); | ||
1960 | break; | ||
1961 | case EVENT_PTC_GA: | ||
1962 | kvm_ptc_ga(vcpu, inst); | ||
1963 | break; | ||
1964 | case EVENT_PTC_E: | ||
1965 | kvm_ptc_e(vcpu, inst); | ||
1966 | break; | ||
1967 | case EVENT_MOV_TO_RR: | ||
1968 | kvm_mov_to_rr(vcpu, inst); | ||
1969 | break; | ||
1970 | case EVENT_MOV_FROM_RR: | ||
1971 | kvm_mov_from_rr(vcpu, inst); | ||
1972 | break; | ||
1973 | case EVENT_THASH: | ||
1974 | kvm_thash(vcpu, inst); | ||
1975 | break; | ||
1976 | case EVENT_TTAG: | ||
1977 | kvm_ttag(vcpu, inst); | ||
1978 | break; | ||
1979 | case EVENT_TPA: | ||
1980 | status = kvm_tpa(vcpu, inst); | ||
1981 | break; | ||
1982 | case EVENT_TAK: | ||
1983 | kvm_tak(vcpu, inst); | ||
1984 | break; | ||
1985 | case EVENT_MOV_TO_AR_IMM: | ||
1986 | kvm_mov_to_ar_imm(vcpu, inst); | ||
1987 | break; | ||
1988 | case EVENT_MOV_TO_AR: | ||
1989 | kvm_mov_to_ar_reg(vcpu, inst); | ||
1990 | break; | ||
1991 | case EVENT_MOV_FROM_AR: | ||
1992 | kvm_mov_from_ar_reg(vcpu, inst); | ||
1993 | break; | ||
1994 | case EVENT_MOV_TO_DBR: | ||
1995 | kvm_mov_to_dbr(vcpu, inst); | ||
1996 | break; | ||
1997 | case EVENT_MOV_TO_IBR: | ||
1998 | kvm_mov_to_ibr(vcpu, inst); | ||
1999 | break; | ||
2000 | case EVENT_MOV_TO_PMC: | ||
2001 | kvm_mov_to_pmc(vcpu, inst); | ||
2002 | break; | ||
2003 | case EVENT_MOV_TO_PMD: | ||
2004 | kvm_mov_to_pmd(vcpu, inst); | ||
2005 | break; | ||
2006 | case EVENT_MOV_TO_PKR: | ||
2007 | kvm_mov_to_pkr(vcpu, inst); | ||
2008 | break; | ||
2009 | case EVENT_MOV_FROM_DBR: | ||
2010 | kvm_mov_from_dbr(vcpu, inst); | ||
2011 | break; | ||
2012 | case EVENT_MOV_FROM_IBR: | ||
2013 | kvm_mov_from_ibr(vcpu, inst); | ||
2014 | break; | ||
2015 | case EVENT_MOV_FROM_PMC: | ||
2016 | kvm_mov_from_pmc(vcpu, inst); | ||
2017 | break; | ||
2018 | case EVENT_MOV_FROM_PKR: | ||
2019 | kvm_mov_from_pkr(vcpu, inst); | ||
2020 | break; | ||
2021 | case EVENT_MOV_FROM_CPUID: | ||
2022 | kvm_mov_from_cpuid(vcpu, inst); | ||
2023 | break; | ||
2024 | case EVENT_VMSW: | ||
2025 | status = IA64_FAULT; | ||
2026 | break; | ||
2027 | default: | ||
2028 | break; | ||
2029 | }; | ||
2030 | /* Assume all statuses are NO_FAULT? */ | ||
2031 | if (status == IA64_NO_FAULT && cause != EVENT_RFI) | ||
2032 | vcpu_increment_iip(vcpu); | ||
2033 | |||
2034 | recover_if_physical_mode(vcpu); | ||
2035 | } | ||
2036 | |||
2037 | void init_vcpu(struct kvm_vcpu *vcpu) | ||
2038 | { | ||
2039 | int i; | ||
2040 | |||
2041 | vcpu->arch.mode_flags = GUEST_IN_PHY; | ||
2042 | VMX(vcpu, vrr[0]) = 0x38; | ||
2043 | VMX(vcpu, vrr[1]) = 0x38; | ||
2044 | VMX(vcpu, vrr[2]) = 0x38; | ||
2045 | VMX(vcpu, vrr[3]) = 0x38; | ||
2046 | VMX(vcpu, vrr[4]) = 0x38; | ||
2047 | VMX(vcpu, vrr[5]) = 0x38; | ||
2048 | VMX(vcpu, vrr[6]) = 0x38; | ||
2049 | VMX(vcpu, vrr[7]) = 0x38; | ||
2050 | VCPU(vcpu, vpsr) = IA64_PSR_BN; | ||
2051 | VCPU(vcpu, dcr) = 0; | ||
2052 | /* pta.size must not be 0. The minimum is 15 (32k) */ | ||
2053 | VCPU(vcpu, pta) = 15 << 2; | ||
2054 | VCPU(vcpu, itv) = 0x10000; | ||
2055 | VCPU(vcpu, itm) = 0; | ||
2056 | VMX(vcpu, last_itc) = 0; | ||
2057 | |||
2058 | VCPU(vcpu, lid) = VCPU_LID(vcpu); | ||
2059 | VCPU(vcpu, ivr) = 0; | ||
2060 | VCPU(vcpu, tpr) = 0x10000; | ||
2061 | VCPU(vcpu, eoi) = 0; | ||
2062 | VCPU(vcpu, irr[0]) = 0; | ||
2063 | VCPU(vcpu, irr[1]) = 0; | ||
2064 | VCPU(vcpu, irr[2]) = 0; | ||
2065 | VCPU(vcpu, irr[3]) = 0; | ||
2066 | VCPU(vcpu, pmv) = 0x10000; | ||
2067 | VCPU(vcpu, cmcv) = 0x10000; | ||
2068 | VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */ | ||
2069 | VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */ | ||
2070 | update_vhpi(vcpu, NULL_VECTOR); | ||
2071 | VLSAPIC_XTP(vcpu) = 0x80; /* disabled */ | ||
2072 | |||
2073 | for (i = 0; i < 4; i++) | ||
2074 | VLSAPIC_INSVC(vcpu, i) = 0; | ||
2075 | } | ||
2076 | |||
2077 | void kvm_init_all_rr(struct kvm_vcpu *vcpu) | ||
2078 | { | ||
2079 | unsigned long psr; | ||
2080 | |||
2081 | local_irq_save(psr); | ||
2082 | |||
2083 | /* WARNING: virtual mode and physical mode must not coexist | ||
2084 | * in the same region | ||
2085 | */ | ||
2086 | |||
2087 | vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0])); | ||
2088 | vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4])); | ||
2089 | |||
2090 | if (is_physical_mode(vcpu)) { | ||
2091 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) | ||
2092 | panic_vm(vcpu, "Machine Status conflicts!\n"); | ||
2093 | |||
2094 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); | ||
2095 | ia64_dv_serialize_data(); | ||
2096 | ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4); | ||
2097 | ia64_dv_serialize_data(); | ||
2098 | } else { | ||
2099 | ia64_set_rr((VRN0 << VRN_SHIFT), | ||
2100 | vcpu->arch.metaphysical_saved_rr0); | ||
2101 | ia64_dv_serialize_data(); | ||
2102 | ia64_set_rr((VRN4 << VRN_SHIFT), | ||
2103 | vcpu->arch.metaphysical_saved_rr4); | ||
2104 | ia64_dv_serialize_data(); | ||
2105 | } | ||
2106 | ia64_set_rr((VRN1 << VRN_SHIFT), | ||
2107 | vrrtomrr(VMX(vcpu, vrr[VRN1]))); | ||
2108 | ia64_dv_serialize_data(); | ||
2109 | ia64_set_rr((VRN2 << VRN_SHIFT), | ||
2110 | vrrtomrr(VMX(vcpu, vrr[VRN2]))); | ||
2111 | ia64_dv_serialize_data(); | ||
2112 | ia64_set_rr((VRN3 << VRN_SHIFT), | ||
2113 | vrrtomrr(VMX(vcpu, vrr[VRN3]))); | ||
2114 | ia64_dv_serialize_data(); | ||
2115 | ia64_set_rr((VRN5 << VRN_SHIFT), | ||
2116 | vrrtomrr(VMX(vcpu, vrr[VRN5]))); | ||
2117 | ia64_dv_serialize_data(); | ||
2118 | ia64_set_rr((VRN7 << VRN_SHIFT), | ||
2119 | vrrtomrr(VMX(vcpu, vrr[VRN7]))); | ||
2120 | ia64_dv_serialize_data(); | ||
2121 | ia64_srlz_d(); | ||
2122 | ia64_set_psr(psr); | ||
2123 | } | ||
2124 | |||
2125 | int vmm_entry(void) | ||
2126 | { | ||
2127 | struct kvm_vcpu *v; | ||
2128 | v = current_vcpu; | ||
2129 | |||
2130 | ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd, | ||
2131 | 0, 0, 0, 0, 0, 0); | ||
2132 | kvm_init_vtlb(v); | ||
2133 | kvm_init_vhpt(v); | ||
2134 | init_vcpu(v); | ||
2135 | kvm_init_all_rr(v); | ||
2136 | vmm_reset_entry(); | ||
2137 | |||
2138 | return 0; | ||
2139 | } | ||
2140 | |||
2141 | static void kvm_show_registers(struct kvm_pt_regs *regs) | ||
2142 | { | ||
2143 | unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; | ||
2144 | |||
2145 | struct kvm_vcpu *vcpu = current_vcpu; | ||
2146 | if (vcpu != NULL) | ||
2147 | printk("vcpu 0x%p vcpu %d\n", | ||
2148 | vcpu, vcpu->vcpu_id); | ||
2149 | |||
2150 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n", | ||
2151 | regs->cr_ipsr, regs->cr_ifs, ip); | ||
2152 | |||
2153 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", | ||
2154 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); | ||
2155 | printk("rnat: %016lx bspstore: %016lx pr : %016lx\n", | ||
2156 | regs->ar_rnat, regs->ar_bspstore, regs->pr); | ||
2157 | printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", | ||
2158 | regs->loadrs, regs->ar_ccv, regs->ar_fpsr); | ||
2159 | printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); | ||
2160 | printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, | ||
2161 | regs->b6, regs->b7); | ||
2162 | printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", | ||
2163 | regs->f6.u.bits[1], regs->f6.u.bits[0], | ||
2164 | regs->f7.u.bits[1], regs->f7.u.bits[0]); | ||
2165 | printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", | ||
2166 | regs->f8.u.bits[1], regs->f8.u.bits[0], | ||
2167 | regs->f9.u.bits[1], regs->f9.u.bits[0]); | ||
2168 | printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", | ||
2169 | regs->f10.u.bits[1], regs->f10.u.bits[0], | ||
2170 | regs->f11.u.bits[1], regs->f11.u.bits[0]); | ||
2171 | |||
2172 | printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, | ||
2173 | regs->r2, regs->r3); | ||
2174 | printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, | ||
2175 | regs->r9, regs->r10); | ||
2176 | printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, | ||
2177 | regs->r12, regs->r13); | ||
2178 | printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, | ||
2179 | regs->r15, regs->r16); | ||
2180 | printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, | ||
2181 | regs->r18, regs->r19); | ||
2182 | printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, | ||
2183 | regs->r21, regs->r22); | ||
2184 | printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, | ||
2185 | regs->r24, regs->r25); | ||
2186 | printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, | ||
2187 | regs->r27, regs->r28); | ||
2188 | printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, | ||
2189 | regs->r30, regs->r31); | ||
2190 | |||
2191 | } | ||
2192 | |||
2193 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...) | ||
2194 | { | ||
2195 | va_list args; | ||
2196 | char buf[256]; | ||
2197 | |||
2198 | struct kvm_pt_regs *regs = vcpu_regs(v); | ||
2199 | struct exit_ctl_data *p = &v->arch.exit_data; | ||
2200 | va_start(args, fmt); | ||
2201 | vsnprintf(buf, sizeof(buf), fmt, args); | ||
2202 | va_end(args); | ||
2203 | printk("%s", buf); | ||
2204 | kvm_show_registers(regs); | ||
2205 | p->exit_reason = EXIT_REASON_VM_PANIC; | ||
2206 | vmm_transition(v); | ||
2207 | /* Never returns */ | ||
2208 | while (1); | ||
2209 | } | ||
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h deleted file mode 100644 index 988911b4cc7a..000000000000 --- a/arch/ia64/kvm/vcpu.h +++ /dev/null | |||
@@ -1,752 +0,0 @@ | |||
1 | /* | ||
2 | * vcpu.h: vcpu routines | ||
3 | * Copyright (c) 2005, Intel Corporation. | ||
4 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
5 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) | ||
6 | * | ||
7 | * Copyright (c) 2007, Intel Corporation. | ||
8 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
9 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms and conditions of the GNU General Public License, | ||
13 | * version 2, as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
22 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | |||
27 | #ifndef __KVM_VCPU_H__ | ||
28 | #define __KVM_VCPU_H__ | ||
29 | |||
30 | #include <asm/types.h> | ||
31 | #include <asm/fpu.h> | ||
32 | #include <asm/processor.h> | ||
33 | |||
34 | #ifndef __ASSEMBLY__ | ||
35 | #include "vti.h" | ||
36 | |||
37 | #include <linux/kvm_host.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | |||
40 | typedef unsigned long IA64_INST; | ||
41 | |||
42 | typedef union U_IA64_BUNDLE { | ||
43 | unsigned long i64[2]; | ||
44 | struct { unsigned long template:5, slot0:41, slot1a:18, | ||
45 | slot1b:23, slot2:41; }; | ||
46 | /* NOTE: following doesn't work because bitfields can't cross natural | ||
47 | size boundaries | ||
48 | struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */ | ||
49 | } IA64_BUNDLE; | ||
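The NOTE above is the reason slot 1 is split in two: a C bitfield cannot cross the 64-bit word boundary, so slot1a carries the low 18 bits of the 41-bit slot and slot1b the high 23. A minimal sketch of reassembling it, where ia64_bundle_slot1() is a hypothetical helper and not part of this header:

static inline unsigned long ia64_bundle_slot1(IA64_BUNDLE b)
{
	/* low 18 bits come from word 0, high 23 bits from word 1 */
	return b.slot1a | ((unsigned long)b.slot1b << 18);
}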
50 | |||
51 | typedef union U_INST64_A5 { | ||
52 | IA64_INST inst; | ||
53 | struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5, | ||
54 | imm9d:9, s:1, major:4; }; | ||
55 | } INST64_A5; | ||
56 | |||
57 | typedef union U_INST64_B4 { | ||
58 | IA64_INST inst; | ||
59 | struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, | ||
60 | wh:2, d:1, un1:1, major:4; }; | ||
61 | } INST64_B4; | ||
62 | |||
63 | typedef union U_INST64_B8 { | ||
64 | IA64_INST inst; | ||
65 | struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; }; | ||
66 | } INST64_B8; | ||
67 | |||
68 | typedef union U_INST64_B9 { | ||
69 | IA64_INST inst; | ||
70 | struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; }; | ||
71 | } INST64_B9; | ||
72 | |||
73 | typedef union U_INST64_I19 { | ||
74 | IA64_INST inst; | ||
75 | struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; }; | ||
76 | } INST64_I19; | ||
77 | |||
78 | typedef union U_INST64_I26 { | ||
79 | IA64_INST inst; | ||
80 | struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
81 | } INST64_I26; | ||
82 | |||
83 | typedef union U_INST64_I27 { | ||
84 | IA64_INST inst; | ||
85 | struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; }; | ||
86 | } INST64_I27; | ||
87 | |||
88 | typedef union U_INST64_I28 { /* not privileged (mov from AR) */ | ||
89 | IA64_INST inst; | ||
90 | struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
91 | } INST64_I28; | ||
92 | |||
93 | typedef union U_INST64_M28 { | ||
94 | IA64_INST inst; | ||
95 | struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; }; | ||
96 | } INST64_M28; | ||
97 | |||
98 | typedef union U_INST64_M29 { | ||
99 | IA64_INST inst; | ||
100 | struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
101 | } INST64_M29; | ||
102 | |||
103 | typedef union U_INST64_M30 { | ||
104 | IA64_INST inst; | ||
105 | struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2, | ||
106 | x3:3, s:1, major:4; }; | ||
107 | } INST64_M30; | ||
108 | |||
109 | typedef union U_INST64_M31 { | ||
110 | IA64_INST inst; | ||
111 | struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
112 | } INST64_M31; | ||
113 | |||
114 | typedef union U_INST64_M32 { | ||
115 | IA64_INST inst; | ||
116 | struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; }; | ||
117 | } INST64_M32; | ||
118 | |||
119 | typedef union U_INST64_M33 { | ||
120 | IA64_INST inst; | ||
121 | struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; }; | ||
122 | } INST64_M33; | ||
123 | |||
124 | typedef union U_INST64_M35 { | ||
125 | IA64_INST inst; | ||
126 | struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; }; | ||
127 | |||
128 | } INST64_M35; | ||
129 | |||
130 | typedef union U_INST64_M36 { | ||
131 | IA64_INST inst; | ||
132 | struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; }; | ||
133 | } INST64_M36; | ||
134 | |||
135 | typedef union U_INST64_M37 { | ||
136 | IA64_INST inst; | ||
137 | struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3, | ||
138 | i:1, major:4; }; | ||
139 | } INST64_M37; | ||
140 | |||
141 | typedef union U_INST64_M41 { | ||
142 | IA64_INST inst; | ||
143 | struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; }; | ||
144 | } INST64_M41; | ||
145 | |||
146 | typedef union U_INST64_M42 { | ||
147 | IA64_INST inst; | ||
148 | struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; }; | ||
149 | } INST64_M42; | ||
150 | |||
151 | typedef union U_INST64_M43 { | ||
152 | IA64_INST inst; | ||
153 | struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; }; | ||
154 | } INST64_M43; | ||
155 | |||
156 | typedef union U_INST64_M44 { | ||
157 | IA64_INST inst; | ||
158 | struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; }; | ||
159 | } INST64_M44; | ||
160 | |||
161 | typedef union U_INST64_M45 { | ||
162 | IA64_INST inst; | ||
163 | struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; }; | ||
164 | } INST64_M45; | ||
165 | |||
166 | typedef union U_INST64_M46 { | ||
167 | IA64_INST inst; | ||
168 | struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6, | ||
169 | x3:3, un1:1, major:4; }; | ||
170 | } INST64_M46; | ||
171 | |||
172 | typedef union U_INST64_M47 { | ||
173 | IA64_INST inst; | ||
174 | struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; }; | ||
175 | } INST64_M47; | ||
176 | |||
177 | typedef union U_INST64_M1{ | ||
178 | IA64_INST inst; | ||
179 | struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, | ||
180 | x6:6, m:1, major:4; }; | ||
181 | } INST64_M1; | ||
182 | |||
183 | typedef union U_INST64_M2{ | ||
184 | IA64_INST inst; | ||
185 | struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, | ||
186 | x6:6, m:1, major:4; }; | ||
187 | } INST64_M2; | ||
188 | |||
189 | typedef union U_INST64_M3{ | ||
190 | IA64_INST inst; | ||
191 | struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, | ||
192 | x6:6, s:1, major:4; }; | ||
193 | } INST64_M3; | ||
194 | |||
195 | typedef union U_INST64_M4 { | ||
196 | IA64_INST inst; | ||
197 | struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, | ||
198 | x6:6, m:1, major:4; }; | ||
199 | } INST64_M4; | ||
200 | |||
201 | typedef union U_INST64_M5 { | ||
202 | IA64_INST inst; | ||
203 | struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, | ||
204 | x6:6, s:1, major:4; }; | ||
205 | } INST64_M5; | ||
206 | |||
207 | typedef union U_INST64_M6 { | ||
208 | IA64_INST inst; | ||
209 | struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, | ||
210 | x6:6, m:1, major:4; }; | ||
211 | } INST64_M6; | ||
212 | |||
213 | typedef union U_INST64_M9 { | ||
214 | IA64_INST inst; | ||
215 | struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2, | ||
216 | x6:6, m:1, major:4; }; | ||
217 | } INST64_M9; | ||
218 | |||
219 | typedef union U_INST64_M10 { | ||
220 | IA64_INST inst; | ||
221 | struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2, | ||
222 | x6:6, s:1, major:4; }; | ||
223 | } INST64_M10; | ||
224 | |||
225 | typedef union U_INST64_M12 { | ||
226 | IA64_INST inst; | ||
227 | struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2, | ||
228 | x6:6, m:1, major:4; }; | ||
229 | } INST64_M12; | ||
230 | |||
231 | typedef union U_INST64_M15 { | ||
232 | IA64_INST inst; | ||
233 | struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2, | ||
234 | x6:6, s:1, major:4; }; | ||
235 | } INST64_M15; | ||
236 | |||
237 | typedef union U_INST64 { | ||
238 | IA64_INST inst; | ||
239 | struct { unsigned long :37, major:4; } generic; | ||
240 | INST64_A5 A5; /* used in build_hypercall_bundle only */ | ||
241 | INST64_B4 B4; /* used in build_hypercall_bundle only */ | ||
242 | INST64_B8 B8; /* rfi, bsw.[01] */ | ||
243 | INST64_B9 B9; /* break.b */ | ||
244 | INST64_I19 I19; /* used in build_hypercall_bundle only */ | ||
245 | INST64_I26 I26; /* mov register to ar (I unit) */ | ||
246 | INST64_I27 I27; /* mov immediate to ar (I unit) */ | ||
247 | INST64_I28 I28; /* mov from ar (I unit) */ | ||
248 | INST64_M1 M1; /* ld integer */ | ||
249 | INST64_M2 M2; | ||
250 | INST64_M3 M3; | ||
251 | INST64_M4 M4; /* st integer */ | ||
252 | INST64_M5 M5; | ||
253 | INST64_M6 M6; /* ldfd floating point */ | ||
254 | INST64_M9 M9; /* stfd floating point */ | ||
255 | INST64_M10 M10; /* stfd floating point */ | ||
256 | INST64_M12 M12; /* ldfd pair floating point */ | ||
257 | INST64_M15 M15; /* lfetch + imm update */ | ||
258 | INST64_M28 M28; /* purge translation cache entry */ | ||
259 | INST64_M29 M29; /* mov register to ar (M unit) */ | ||
260 | INST64_M30 M30; /* mov immediate to ar (M unit) */ | ||
261 | INST64_M31 M31; /* mov from ar (M unit) */ | ||
262 | INST64_M32 M32; /* mov reg to cr */ | ||
263 | INST64_M33 M33; /* mov from cr */ | ||
264 | INST64_M35 M35; /* mov to psr */ | ||
265 | INST64_M36 M36; /* mov from psr */ | ||
266 | INST64_M37 M37; /* break.m */ | ||
267 | INST64_M41 M41; /* translation cache insert */ | ||
268 | INST64_M42 M42; /* mov to indirect reg/translation reg insert*/ | ||
269 | INST64_M43 M43; /* mov from indirect reg */ | ||
270 | INST64_M44 M44; /* set/reset system mask */ | ||
271 | INST64_M45 M45; /* translation purge */ | ||
272 | INST64_M46 M46; /* translation access (tpa,tak) */ | ||
273 | INST64_M47 M47; /* purge translation entry */ | ||
274 | } INST64; | ||
275 | |||
276 | #define MASK_41 ((unsigned long)0x1ffffffffff) | ||
277 | |||
278 | /* Virtual address memory attributes encoding */ | ||
279 | #define VA_MATTR_WB 0x0 | ||
280 | #define VA_MATTR_UC 0x4 | ||
281 | #define VA_MATTR_UCE 0x5 | ||
282 | #define VA_MATTR_WC 0x6 | ||
283 | #define VA_MATTR_NATPAGE 0x7 | ||
284 | |||
285 | #define PMASK(size) (~((size) - 1)) | ||
286 | #define PSIZE(size) (1UL<<(size)) | ||
287 | #define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits)) | ||
288 | #define PAGEALIGN(va, ps) CLEARLSB(va, ps) | ||
289 | #define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53)) | ||
290 | #define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */ | ||
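A quick worked example of the page macros above, with made-up values: for ps = 14 (a 16KB page), PSIZE(14) is 0x4000 and PMASK(PSIZE(14)) is ~0x3fff, so PAGEALIGN() just drops the in-page offset:

	/* illustrative values only:
	 * PAGEALIGN(0xe00000000000abcdUL, 14) == 0xe000000000008000 */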
291 | |||
292 | #define ARCH_PAGE_SHIFT 12 | ||
293 | |||
294 | #define INVALID_TI_TAG (1UL << 63) | ||
295 | |||
296 | #define VTLB_PTE_P_BIT 0 | ||
297 | #define VTLB_PTE_IO_BIT 60 | ||
298 | #define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT) | ||
299 | #define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT) | ||
300 | |||
301 | #define vcpu_quick_region_check(_tr_regions,_ifa) \ | ||
302 | (_tr_regions & (1 << ((unsigned long)_ifa >> 61))) | ||
303 | |||
304 | #define vcpu_quick_region_set(_tr_regions,_ifa) \ | ||
305 | do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0) | ||
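The two macros above keep a one-bit-per-region summary (bits 63:61 of an address select one of the eight virtual regions), so a TR lookup can skip regions that hold no entries at all. A hedged usage sketch with made-up addresses:

	unsigned long tr_regions = 0;
	/* 0xa000000000000000 >> 61 == 5, so this sets bit 5 */
	vcpu_quick_region_set(tr_regions, 0xa000000000000000UL);
	/* any other address in region 5 now tests nonzero */
	if (vcpu_quick_region_check(tr_regions, 0xa000000000001234UL))
		; /* fall through to the full TR search */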
306 | |||
307 | static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir, | ||
308 | u64 va, u64 rid) | ||
309 | { | ||
310 | trp->page_flags = pte; | ||
311 | trp->itir = itir; | ||
312 | trp->vadr = va; | ||
313 | trp->rid = rid; | ||
314 | } | ||
315 | |||
316 | extern u64 kvm_get_mpt_entry(u64 gpfn); | ||
317 | |||
318 | /* Return the I/O type bits if gpfn is an I/O page (other than physical MMIO), 0 otherwise */ | ||
319 | static inline u64 __gpfn_is_io(u64 gpfn) | ||
320 | { | ||
321 | u64 pte; | ||
322 | pte = kvm_get_mpt_entry(gpfn); | ||
323 | if (!(pte & GPFN_INV_MASK)) { | ||
324 | pte = pte & GPFN_IO_MASK; | ||
325 | if (pte != GPFN_PHYS_MMIO) | ||
326 | return pte; | ||
327 | } | ||
328 | return 0; | ||
329 | } | ||
330 | #endif | ||
331 | #define IA64_NO_FAULT 0 | ||
332 | #define IA64_FAULT 1 | ||
333 | |||
334 | #define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15) | ||
335 | |||
336 | #define SW_BAD 0 /* Bad mode transition */ | ||
337 | #define SW_V2P 1 /* Physical emulation is activated */ | ||
338 | #define SW_P2V 2 /* Exit physical mode emulation */ | ||
339 | #define SW_SELF 3 /* No mode transition */ | ||
340 | #define SW_NOP 4 /* Mode transition, but without action required */ | ||
341 | |||
342 | #define GUEST_IN_PHY 0x1 | ||
343 | #define GUEST_PHY_EMUL 0x2 | ||
344 | |||
345 | #define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP)) | ||
346 | |||
347 | #define VRN_SHIFT 61 | ||
348 | #define VRN_MASK 0xe000000000000000 | ||
349 | #define VRN0 0x0UL | ||
350 | #define VRN1 0x1UL | ||
351 | #define VRN2 0x2UL | ||
352 | #define VRN3 0x3UL | ||
353 | #define VRN4 0x4UL | ||
354 | #define VRN5 0x5UL | ||
355 | #define VRN6 0x6UL | ||
356 | #define VRN7 0x7UL | ||
357 | |||
358 | #define IRQ_NO_MASKED 0 | ||
359 | #define IRQ_MASKED_BY_VTPR 1 | ||
360 | #define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */ | ||
361 | |||
362 | #define PTA_BASE_SHIFT 15 | ||
363 | |||
364 | #define IA64_PSR_VM_BIT 46 | ||
365 | #define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT) | ||
366 | |||
367 | /* Interruption Function State */ | ||
368 | #define IA64_IFS_V_BIT 63 | ||
369 | #define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT) | ||
370 | |||
371 | #define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX) | ||
372 | #define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX) | ||
373 | |||
374 | #ifndef __ASSEMBLY__ | ||
375 | |||
376 | #include <asm/gcc_intrin.h> | ||
377 | |||
378 | #define is_physical_mode(v) \ | ||
379 | ((v->arch.mode_flags) & GUEST_IN_PHY) | ||
380 | |||
381 | #define is_virtual_mode(v) \ | ||
382 | (!is_physical_mode(v)) | ||
383 | |||
384 | #define MODE_IND(psr) \ | ||
385 | (((psr).it << 2) + ((psr).dt << 1) + (psr).rt) | ||
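MODE_IND() folds the three translation bits of the psr into a mode index from 0 (fully physical) to 7 (fully virtual). A worked value, purely for illustration: it = 1, dt = 0, rt = 1 gives (1 << 2) + (0 << 1) + 1 = 5.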
386 | |||
387 | #ifndef CONFIG_SMP | ||
388 | #define _vmm_raw_spin_lock(x) do {}while(0) | ||
389 | #define _vmm_raw_spin_unlock(x) do {}while(0) | ||
390 | #else | ||
391 | typedef struct { | ||
392 | volatile unsigned int lock; | ||
393 | } vmm_spinlock_t; | ||
394 | #define _vmm_raw_spin_lock(x) \ | ||
395 | do { \ | ||
396 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ | ||
397 | __u64 ia64_spinlock_val; \ | ||
398 | ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\ | ||
399 | if (unlikely(ia64_spinlock_val)) { \ | ||
400 | do { \ | ||
401 | while (*ia64_spinlock_ptr) \ | ||
402 | ia64_barrier(); \ | ||
403 | ia64_spinlock_val = \ | ||
404 | ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\ | ||
405 | } while (ia64_spinlock_val); \ | ||
406 | } \ | ||
407 | } while (0) | ||
408 | |||
409 | #define _vmm_raw_spin_unlock(x) \ | ||
410 | do { barrier(); \ | ||
411 | ((vmm_spinlock_t *)x)->lock = 0; } \ | ||
412 | while (0) | ||
413 | #endif | ||
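The SMP lock above is a try/spin/retry loop: one cmpxchg-acquire attempt, then a read-only spin until the lock looks free, then another attempt. The same pattern in portable C, as an illustrative sketch built on a GCC builtin rather than the kernel's primitives:

static inline void demo_spin_lock(volatile unsigned int *lock)
{
	/* swap 0 -> 1; a nonzero old value means the lock is held */
	while (__sync_val_compare_and_swap(lock, 0, 1))
		while (*lock)
			; /* spin on plain reads, then retry the swap */
}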
414 | |||
415 | void vmm_spin_lock(vmm_spinlock_t *lock); | ||
416 | void vmm_spin_unlock(vmm_spinlock_t *lock); | ||
417 | enum { | ||
418 | I_TLB = 1, | ||
419 | D_TLB = 2 | ||
420 | }; | ||
421 | |||
422 | union kvm_va { | ||
423 | struct { | ||
424 | unsigned long off : 60; /* intra-region offset */ | ||
425 | unsigned long reg : 4; /* region number */ | ||
426 | } f; | ||
427 | unsigned long l; | ||
428 | void *p; | ||
429 | }; | ||
430 | |||
431 | #define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \ | ||
432 | _v.f.reg = 0; _v.l; }) | ||
433 | #define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \ | ||
434 | _v.f.reg = -1; _v.p; }) | ||
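Both conversions touch only the 4-bit region field: __kvm_pa() zeroes it and __kvm_va() forces it to all ones. An illustrative round trip with a hypothetical address:

	/* __kvm_pa(0xf000000000001000UL) == 0x0000000000001000 */
	/* __kvm_va(0x0000000000001000UL) == (void *)0xf000000000001000 */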
435 | |||
436 | #define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \ | ||
437 | _v.rid; }) | ||
438 | #define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \ | ||
439 | _v.ps; }) | ||
440 | #define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \ | ||
441 | _v.ve; }) | ||
442 | |||
443 | enum vhpt_ref{ DATA_REF, NA_REF, INST_REF, RSE_REF }; | ||
444 | enum tlb_miss_type { INSTRUCTION, DATA, REGISTER }; | ||
445 | |||
446 | #define VCPU(_v, _x) ((_v)->arch.vpd->_x) | ||
447 | #define VMX(_v, _x) ((_v)->arch._x) | ||
448 | |||
449 | #define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i]) | ||
450 | #define VLSAPIC_XTP(_v) VMX(_v, xtp) | ||
451 | |||
452 | static inline unsigned long itir_ps(unsigned long itir) | ||
453 | { | ||
454 | return ((itir >> 2) & 0x3f); | ||
455 | } | ||
456 | |||
457 | |||
458 | /************************************************************************** | ||
459 | VCPU control register access routines | ||
460 | **************************************************************************/ | ||
461 | |||
462 | static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu) | ||
463 | { | ||
464 | return ((u64)VCPU(vcpu, itir)); | ||
465 | } | ||
466 | |||
467 | static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val) | ||
468 | { | ||
469 | VCPU(vcpu, itir) = val; | ||
470 | } | ||
471 | |||
472 | static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu) | ||
473 | { | ||
474 | return ((u64)VCPU(vcpu, ifa)); | ||
475 | } | ||
476 | |||
477 | static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val) | ||
478 | { | ||
479 | VCPU(vcpu, ifa) = val; | ||
480 | } | ||
481 | |||
482 | static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu) | ||
483 | { | ||
484 | return ((u64)VCPU(vcpu, iva)); | ||
485 | } | ||
486 | |||
487 | static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu) | ||
488 | { | ||
489 | return ((u64)VCPU(vcpu, pta)); | ||
490 | } | ||
491 | |||
492 | static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu) | ||
493 | { | ||
494 | return ((u64)VCPU(vcpu, lid)); | ||
495 | } | ||
496 | |||
497 | static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu) | ||
498 | { | ||
499 | return ((u64)VCPU(vcpu, tpr)); | ||
500 | } | ||
501 | |||
502 | static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu) | ||
503 | { | ||
504 | return (0UL); /*reads of eoi always return 0 */ | ||
505 | } | ||
506 | |||
507 | static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu) | ||
508 | { | ||
509 | return ((u64)VCPU(vcpu, irr[0])); | ||
510 | } | ||
511 | |||
512 | static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu) | ||
513 | { | ||
514 | return ((u64)VCPU(vcpu, irr[1])); | ||
515 | } | ||
516 | |||
517 | static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu) | ||
518 | { | ||
519 | return ((u64)VCPU(vcpu, irr[2])); | ||
520 | } | ||
521 | |||
522 | static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu) | ||
523 | { | ||
524 | return ((u64)VCPU(vcpu, irr[3])); | ||
525 | } | ||
526 | |||
527 | static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val) | ||
528 | { | ||
529 | ia64_setreg(_IA64_REG_CR_DCR, val); | ||
530 | } | ||
531 | |||
532 | static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val) | ||
533 | { | ||
534 | VCPU(vcpu, isr) = val; | ||
535 | } | ||
536 | |||
537 | static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val) | ||
538 | { | ||
539 | VCPU(vcpu, lid) = val; | ||
540 | } | ||
541 | |||
542 | static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val) | ||
543 | { | ||
544 | VCPU(vcpu, ipsr) = val; | ||
545 | } | ||
546 | |||
547 | static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val) | ||
548 | { | ||
549 | VCPU(vcpu, iip) = val; | ||
550 | } | ||
551 | |||
552 | static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val) | ||
553 | { | ||
554 | VCPU(vcpu, ifs) = val; | ||
555 | } | ||
556 | |||
557 | static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val) | ||
558 | { | ||
559 | VCPU(vcpu, iipa) = val; | ||
560 | } | ||
561 | |||
562 | static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val) | ||
563 | { | ||
564 | VCPU(vcpu, iha) = val; | ||
565 | } | ||
566 | |||
567 | |||
568 | static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg) | ||
569 | { | ||
570 | return vcpu->arch.vrr[reg>>61]; | ||
571 | } | ||
572 | |||
573 | /************************************************************************** | ||
574 | VCPU debug breakpoint register access routines | ||
575 | **************************************************************************/ | ||
576 | |||
577 | static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
578 | { | ||
579 | __ia64_set_dbr(reg, val); | ||
580 | } | ||
581 | |||
582 | static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
583 | { | ||
584 | ia64_set_ibr(reg, val); | ||
585 | } | ||
586 | |||
587 | static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg) | ||
588 | { | ||
589 | return ((u64)__ia64_get_dbr(reg)); | ||
590 | } | ||
591 | |||
592 | static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg) | ||
593 | { | ||
594 | return ((u64)ia64_get_ibr(reg)); | ||
595 | } | ||
596 | |||
597 | /************************************************************************** | ||
598 | VCPU performance monitor register access routines | ||
599 | **************************************************************************/ | ||
600 | static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
601 | { | ||
602 | /* NOTE: Writes to unimplemented PMC registers are discarded */ | ||
603 | ia64_set_pmc(reg, val); | ||
604 | } | ||
605 | |||
606 | static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
607 | { | ||
608 | /* NOTE: Writes to unimplemented PMD registers are discarded */ | ||
609 | ia64_set_pmd(reg, val); | ||
610 | } | ||
611 | |||
612 | static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg) | ||
613 | { | ||
614 | /* NOTE: Reads from unimplemented PMC registers return zero */ | ||
615 | return ((u64)ia64_get_pmc(reg)); | ||
616 | } | ||
617 | |||
618 | static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg) | ||
619 | { | ||
620 | /* NOTE: Reads from unimplemented PMD registers return zero */ | ||
621 | return ((u64)ia64_get_pmd(reg)); | ||
622 | } | ||
623 | |||
624 | static inline unsigned long vrrtomrr(unsigned long val) | ||
625 | { | ||
626 | union ia64_rr rr; | ||
627 | rr.val = val; | ||
628 | rr.rid = (rr.rid << 4) | 0xe; | ||
629 | if (rr.ps > PAGE_SHIFT) | ||
630 | rr.ps = PAGE_SHIFT; | ||
631 | rr.ve = 1; | ||
632 | return rr.val; | ||
633 | } | ||
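vrrtomrr() derives the machine region register from the guest's: the guest rid is shifted left four bits and tagged with 0xe (presumably to keep guest rids disjoint from the host's), the preferred page size is clamped to the host PAGE_SHIFT, and the VHPT walker bit is set. Worked values, made up for illustration: a guest rr with rid 0x10 and ps 26 on a host with PAGE_SHIFT 14 comes back with rid (0x10 << 4) | 0xe = 0x10e, ps 14, and ve 1.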
634 | |||
635 | |||
636 | static inline int highest_bits(int *dat) | ||
637 | { | ||
638 | u32 bits, bitnum; | ||
639 | int i; | ||
640 | |||
641 | /* loop for all 256 bits */ | ||
642 | for (i = 7; i >= 0 ; i--) { | ||
643 | bits = dat[i]; | ||
644 | if (bits) { | ||
645 | bitnum = fls(bits); | ||
646 | return i * 32 + bitnum - 1; | ||
647 | } | ||
648 | } | ||
649 | return NULL_VECTOR; | ||
650 | } | ||
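highest_bits() scans the 256-bit vector as eight 32-bit words from the top down, so vector v lives in word v / 32 at bit v % 32 (fls() is 1-based, hence the - 1). The inverse lookup, as a hypothetical companion sketch that is not in the original:

static inline int vector_is_set(const int *dat, int v)
{
	/* word v >> 5, bit v & 31: mirrors the indexing in highest_bits() */
	return (dat[v >> 5] >> (v & 31)) & 1;
}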
651 | |||
652 | /* | ||
653 | * True if the pending irq has higher priority than the in-service one. | ||
654 | * | ||
655 | */ | ||
656 | static inline int is_higher_irq(int pending, int inservice) | ||
657 | { | ||
658 | return ((pending > inservice) | ||
659 | || ((pending != NULL_VECTOR) | ||
660 | && (inservice == NULL_VECTOR))); | ||
661 | } | ||
662 | |||
663 | static inline int is_higher_class(int pending, int mic) | ||
664 | { | ||
665 | return ((pending >> 4) > mic); | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * Return 0-255 for pending irq. | ||
670 | * NULL_VECTOR when none is pending. | ||
671 | */ | ||
672 | static inline int highest_pending_irq(struct kvm_vcpu *vcpu) | ||
673 | { | ||
674 | if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR)) | ||
675 | return NMI_VECTOR; | ||
676 | if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR)) | ||
677 | return ExtINT_VECTOR; | ||
678 | |||
679 | return highest_bits((int *)&VCPU(vcpu, irr[0])); | ||
680 | } | ||
681 | |||
682 | static inline int highest_inservice_irq(struct kvm_vcpu *vcpu) | ||
683 | { | ||
684 | if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR)) | ||
685 | return NMI_VECTOR; | ||
686 | if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR)) | ||
687 | return ExtINT_VECTOR; | ||
688 | |||
689 | return highest_bits((int *)&(VMX(vcpu, insvc[0]))); | ||
690 | } | ||
691 | |||
692 | extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, | ||
693 | struct ia64_fpreg *val); | ||
694 | extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, | ||
695 | struct ia64_fpreg *val); | ||
696 | extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg); | ||
697 | extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, | ||
698 | u64 val, int nat); | ||
699 | extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu); | ||
700 | extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val); | ||
701 | extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr); | ||
702 | extern void vcpu_bsw0(struct kvm_vcpu *vcpu); | ||
703 | extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, | ||
704 | u64 itir, u64 va, int type); | ||
705 | extern struct thash_data *vhpt_lookup(u64 va); | ||
706 | extern u64 guest_vhpt_lookup(u64 iha, u64 *pte); | ||
707 | extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps); | ||
708 | extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps); | ||
709 | extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va); | ||
710 | extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, | ||
711 | u64 itir, u64 ifa, int type); | ||
712 | extern void thash_purge_all(struct kvm_vcpu *v); | ||
713 | extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v, | ||
714 | u64 va, int is_data); | ||
715 | extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, | ||
716 | u64 ps, int is_data); | ||
717 | |||
718 | extern void vcpu_increment_iip(struct kvm_vcpu *v); | ||
719 | extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu); | ||
720 | extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec); | ||
721 | extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec); | ||
722 | extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr); | ||
723 | extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr); | ||
724 | extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr); | ||
725 | extern void nested_dtlb(struct kvm_vcpu *vcpu); | ||
726 | extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr); | ||
727 | extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref); | ||
728 | |||
729 | extern void update_vhpi(struct kvm_vcpu *vcpu, int vec); | ||
730 | extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice); | ||
731 | |||
732 | extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle); | ||
733 | extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma); | ||
734 | extern void vmm_transition(struct kvm_vcpu *vcpu); | ||
735 | extern void vmm_trampoline(union context *from, union context *to); | ||
736 | extern int vmm_entry(void); | ||
737 | extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu); | ||
738 | |||
739 | extern void vmm_reset_entry(void); | ||
740 | void kvm_init_vtlb(struct kvm_vcpu *v); | ||
741 | void kvm_init_vhpt(struct kvm_vcpu *v); | ||
742 | void thash_init(struct thash_cb *hcb, u64 sz); | ||
743 | |||
744 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); | ||
745 | u64 kvm_gpa_to_mpa(u64 gpa); | ||
746 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, | ||
747 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); | ||
748 | |||
749 | extern long vmm_sanity; | ||
750 | |||
751 | #endif | ||
752 | #endif /* __KVM_VCPU_H__ */ | ||
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c deleted file mode 100644 index 176a12cd56de..000000000000 --- a/arch/ia64/kvm/vmm.c +++ /dev/null | |||
@@ -1,99 +0,0 @@ | |||
1 | /* | ||
2 | * vmm.c: vmm module interface with kvm module | ||
3 | * | ||
4 | * Copyright (c) 2007, Intel Corporation. | ||
5 | * | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | */ | ||
21 | |||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <asm/fpswa.h> | ||
26 | |||
27 | #include "vcpu.h" | ||
28 | |||
29 | MODULE_AUTHOR("Intel"); | ||
30 | MODULE_LICENSE("GPL"); | ||
31 | |||
32 | extern char kvm_ia64_ivt; | ||
33 | extern char kvm_asm_mov_from_ar; | ||
34 | extern char kvm_asm_mov_from_ar_sn2; | ||
35 | extern fpswa_interface_t *vmm_fpswa_interface; | ||
36 | |||
37 | long vmm_sanity = 1; | ||
38 | |||
39 | struct kvm_vmm_info vmm_info = { | ||
40 | .module = THIS_MODULE, | ||
41 | .vmm_entry = vmm_entry, | ||
42 | .tramp_entry = vmm_trampoline, | ||
43 | .vmm_ivt = (unsigned long)&kvm_ia64_ivt, | ||
44 | .patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar, | ||
45 | .patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2, | ||
46 | }; | ||
47 | |||
48 | static int __init kvm_vmm_init(void) | ||
49 | { | ||
50 | |||
51 | vmm_fpswa_interface = fpswa_interface; | ||
52 | |||
53 | /*Register vmm data to kvm side*/ | ||
54 | return kvm_init(&vmm_info, 1024, 0, THIS_MODULE); | ||
55 | } | ||
56 | |||
57 | static void __exit kvm_vmm_exit(void) | ||
58 | { | ||
59 | kvm_exit(); | ||
60 | return; | ||
61 | } | ||
62 | |||
63 | void vmm_spin_lock(vmm_spinlock_t *lock) | ||
64 | { | ||
65 | _vmm_raw_spin_lock(lock); | ||
66 | } | ||
67 | |||
68 | void vmm_spin_unlock(vmm_spinlock_t *lock) | ||
69 | { | ||
70 | _vmm_raw_spin_unlock(lock); | ||
71 | } | ||
72 | |||
73 | static void vcpu_debug_exit(struct kvm_vcpu *vcpu) | ||
74 | { | ||
75 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
76 | long psr; | ||
77 | |||
78 | local_irq_save(psr); | ||
79 | p->exit_reason = EXIT_REASON_DEBUG; | ||
80 | vmm_transition(vcpu); | ||
81 | local_irq_restore(psr); | ||
82 | } | ||
83 | |||
84 | asmlinkage int printk(const char *fmt, ...) | ||
85 | { | ||
86 | struct kvm_vcpu *vcpu = current_vcpu; | ||
87 | va_list args; | ||
88 | int r; | ||
89 | |||
90 | memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN); | ||
91 | va_start(args, fmt); | ||
92 | r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args); | ||
93 | va_end(args); | ||
94 | vcpu_debug_exit(vcpu); | ||
95 | return r; | ||
96 | } | ||
97 | |||
98 | module_init(kvm_vmm_init) | ||
99 | module_exit(kvm_vmm_exit) | ||
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S deleted file mode 100644 index 397e34a63e18..000000000000 --- a/arch/ia64/kvm/vmm_ivt.S +++ /dev/null | |||
@@ -1,1392 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ia64/kvm/vmm_ivt.S | ||
3 | * | ||
4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co | ||
5 | * Stephane Eranian <eranian@hpl.hp.com> | ||
6 | * David Mosberger <davidm@hpl.hp.com> | ||
7 | * Copyright (C) 2000, 2002-2003 Intel Co | ||
8 | * Asit Mallick <asit.k.mallick@intel.com> | ||
9 | * Suresh Siddha <suresh.b.siddha@intel.com> | ||
10 | * Kenneth Chen <kenneth.w.chen@intel.com> | ||
11 | * Fenghua Yu <fenghua.yu@intel.com> | ||
12 | * | ||
13 | * | ||
14 | * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling | ||
15 | * for SMP | ||
16 | * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB | ||
17 | * handler now uses virtual PT. | ||
18 | * | ||
19 | * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) | ||
20 | * Supporting Intel virtualization architecture | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file defines the interruption vector table used by the CPU. | ||
26 | * It does not include one entry per possible cause of interruption. | ||
27 | * | ||
28 | * The first 20 entries of the table contain 64 bundles each while the | ||
29 | * remaining 48 entries contain only 16 bundles each. | ||
30 | * | ||
31 | * The 64 bundles are used to allow inlining the whole handler for | ||
32 | * critical interruptions like TLB misses. | ||
34 | * | ||
35 | * For each entry, the comment is as follows: | ||
36 | * | ||
37 | * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | ||
38 | * | ||
39 | * where 0x1c00 is the entry offset, 7 the entry number, "size 64 | ||
40 | * bundles" the size of the entry, "Data Key Miss" the vector name, | ||
41 | * and (12,51) the interruptions triggering this vector. | ||
49 | * | ||
50 | * The table is 32KB in size and must be aligned on 32KB | ||
51 | * boundary. | ||
52 | * (The CPU ignores the 15 lower bits of the address) | ||
53 | * | ||
54 | * Table is based upon EAS2.6 (Oct 1999) | ||
55 | */ | ||
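The geometry described above fixes every entry's offset: twenty 64-bundle entries (0x400 bytes each, a bundle being 16 bytes) fill the first 0x5000 bytes, and forty-eight 16-bundle entries (0x100 bytes each) complete the 32KB table. A small illustrative helper, not part of this file, that reproduces the .org offsets used below:

static unsigned long ivt_entry_offset(unsigned int n)
{
	/* e.g. entry 7 -> 0x1c00, entry 20 -> 0x5000, entry 37 -> 0x6100 */
	return (n < 20) ? n * 0x400UL : 0x5000UL + (n - 20) * 0x100UL;
}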
56 | |||
57 | |||
58 | #include <asm/asmmacro.h> | ||
59 | #include <asm/cache.h> | ||
60 | #include <asm/pgtable.h> | ||
61 | |||
62 | #include "asm-offsets.h" | ||
63 | #include "vcpu.h" | ||
64 | #include "kvm_minstate.h" | ||
65 | #include "vti.h" | ||
66 | |||
67 | #if 0 | ||
68 | # define PSR_DEFAULT_BITS psr.ac | ||
69 | #else | ||
70 | # define PSR_DEFAULT_BITS 0 | ||
71 | #endif | ||
72 | |||
73 | #define KVM_FAULT(n) \ | ||
74 | kvm_fault_##n:; \ | ||
75 | mov r19=n;; \ | ||
76 | br.sptk.many kvm_vmm_panic; \ | ||
77 | ;; \ | ||
78 | |||
79 | #define KVM_REFLECT(n) \ | ||
80 | mov r31=pr; \ | ||
81 | mov r19=n; /* prepare to save predicates */ \ | ||
82 | mov r29=cr.ipsr; \ | ||
83 | ;; \ | ||
84 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ | ||
85 | (p7) br.sptk.many kvm_dispatch_reflection; \ | ||
86 | br.sptk.many kvm_vmm_panic; \ | ||
87 | |||
88 | GLOBAL_ENTRY(kvm_vmm_panic) | ||
89 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
90 | alloc r14=ar.pfs,0,0,1,0 | ||
91 | mov out0=r15 | ||
92 | adds r3=8,r2 // set up second base pointer | ||
93 | ;; | ||
94 | ssm psr.ic | ||
95 | ;; | ||
96 | srlz.i // guarantee that interruption collection is on | ||
97 | ;; | ||
98 | (p15) ssm psr.i // restore psr.i | ||
99 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
100 | ;; | ||
101 | KVM_SAVE_REST | ||
102 | mov rp=r14 | ||
103 | ;; | ||
104 | br.call.sptk.many b6=vmm_panic_handler; | ||
105 | END(kvm_vmm_panic) | ||
106 | |||
107 | .section .text..ivt,"ax" | ||
108 | |||
109 | .align 32768 // align on 32KB boundary | ||
110 | .global kvm_ia64_ivt | ||
111 | kvm_ia64_ivt: | ||
112 | /////////////////////////////////////////////////////////////// | ||
113 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) | ||
114 | ENTRY(kvm_vhpt_miss) | ||
115 | KVM_FAULT(0) | ||
116 | END(kvm_vhpt_miss) | ||
117 | |||
118 | .org kvm_ia64_ivt+0x400 | ||
119 | //////////////////////////////////////////////////////////////// | ||
120 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) | ||
121 | ENTRY(kvm_itlb_miss) | ||
122 | mov r31 = pr | ||
123 | mov r29=cr.ipsr; | ||
124 | ;; | ||
125 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | ||
126 | (p6) br.sptk kvm_alt_itlb_miss | ||
127 | mov r19 = 1 | ||
128 | br.sptk kvm_itlb_miss_dispatch | ||
129 | KVM_FAULT(1); | ||
130 | END(kvm_itlb_miss) | ||
131 | |||
132 | .org kvm_ia64_ivt+0x0800 | ||
133 | ////////////////////////////////////////////////////////////////// | ||
134 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) | ||
135 | ENTRY(kvm_dtlb_miss) | ||
136 | mov r31 = pr | ||
137 | mov r29=cr.ipsr; | ||
138 | ;; | ||
139 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | ||
140 | (p6) br.sptk kvm_alt_dtlb_miss | ||
141 | br.sptk kvm_dtlb_miss_dispatch | ||
142 | END(kvm_dtlb_miss) | ||
143 | |||
144 | .org kvm_ia64_ivt+0x0c00 | ||
145 | //////////////////////////////////////////////////////////////////// | ||
146 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) | ||
147 | ENTRY(kvm_alt_itlb_miss) | ||
148 | mov r16=cr.ifa // get address that caused the TLB miss | ||
149 | ;; | ||
150 | movl r17=PAGE_KERNEL | ||
151 | mov r24=cr.ipsr | ||
152 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | ||
153 | ;; | ||
154 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | ||
155 | ;; | ||
156 | or r19=r17,r19 // insert PTE control bits into r19 | ||
157 | ;; | ||
158 | movl r20=IA64_GRANULE_SHIFT<<2 | ||
159 | ;; | ||
160 | mov cr.itir=r20 | ||
161 | ;; | ||
162 | itc.i r19 // insert the TLB entry | ||
163 | mov pr=r31,-1 | ||
164 | rfi | ||
165 | END(kvm_alt_itlb_miss) | ||
166 | |||
167 | .org kvm_ia64_ivt+0x1000 | ||
168 | ///////////////////////////////////////////////////////////////////// | ||
169 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) | ||
170 | ENTRY(kvm_alt_dtlb_miss) | ||
171 | mov r16=cr.ifa // get address that caused the TLB miss | ||
172 | ;; | ||
173 | movl r17=PAGE_KERNEL | ||
174 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | ||
175 | mov r24=cr.ipsr | ||
176 | ;; | ||
177 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | ||
178 | ;; | ||
179 | or r19=r19,r17 // insert PTE control bits into r19 | ||
180 | ;; | ||
181 | movl r20=IA64_GRANULE_SHIFT<<2 | ||
182 | ;; | ||
183 | mov cr.itir=r20 | ||
184 | ;; | ||
185 | itc.d r19 // insert the TLB entry | ||
186 | mov pr=r31,-1 | ||
187 | rfi | ||
188 | END(kvm_alt_dtlb_miss) | ||
189 | |||
190 | .org kvm_ia64_ivt+0x1400 | ||
191 | ////////////////////////////////////////////////////////////////////// | ||
192 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) | ||
193 | ENTRY(kvm_nested_dtlb_miss) | ||
194 | KVM_FAULT(5) | ||
195 | END(kvm_nested_dtlb_miss) | ||
196 | |||
197 | .org kvm_ia64_ivt+0x1800 | ||
198 | ///////////////////////////////////////////////////////////////////// | ||
199 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) | ||
200 | ENTRY(kvm_ikey_miss) | ||
201 | KVM_REFLECT(6) | ||
202 | END(kvm_ikey_miss) | ||
203 | |||
204 | .org kvm_ia64_ivt+0x1c00 | ||
205 | ///////////////////////////////////////////////////////////////////// | ||
206 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | ||
207 | ENTRY(kvm_dkey_miss) | ||
208 | KVM_REFLECT(7) | ||
209 | END(kvm_dkey_miss) | ||
210 | |||
211 | .org kvm_ia64_ivt+0x2000 | ||
212 | //////////////////////////////////////////////////////////////////// | ||
213 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) | ||
214 | ENTRY(kvm_dirty_bit) | ||
215 | KVM_REFLECT(8) | ||
216 | END(kvm_dirty_bit) | ||
217 | |||
218 | .org kvm_ia64_ivt+0x2400 | ||
219 | //////////////////////////////////////////////////////////////////// | ||
220 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) | ||
221 | ENTRY(kvm_iaccess_bit) | ||
222 | KVM_REFLECT(9) | ||
223 | END(kvm_iaccess_bit) | ||
224 | |||
225 | .org kvm_ia64_ivt+0x2800 | ||
226 | /////////////////////////////////////////////////////////////////// | ||
227 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) | ||
228 | ENTRY(kvm_daccess_bit) | ||
229 | KVM_REFLECT(10) | ||
230 | END(kvm_daccess_bit) | ||
231 | |||
232 | .org kvm_ia64_ivt+0x2c00 | ||
233 | ///////////////////////////////////////////////////////////////// | ||
234 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) | ||
235 | ENTRY(kvm_break_fault) | ||
236 | mov r31=pr | ||
237 | mov r19=11 | ||
238 | mov r29=cr.ipsr | ||
239 | ;; | ||
240 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
241 | ;; | ||
242 | alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!) | ||
243 | mov out0=cr.ifa | ||
244 | mov out2=cr.isr // FIXME: pity to make this slow access twice | ||
245 | mov out3=cr.iim // FIXME: pity to make this slow access twice | ||
246 | adds r3=8,r2 // set up second base pointer | ||
247 | ;; | ||
248 | ssm psr.ic | ||
249 | ;; | ||
250 | srlz.i // guarantee that interruption collection is on | ||
251 | ;; | ||
252 | (p15)ssm psr.i // restore psr.i | ||
253 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
254 | ;; | ||
255 | KVM_SAVE_REST | ||
256 | mov rp=r14 | ||
257 | ;; | ||
258 | adds out1=16,sp | ||
259 | br.call.sptk.many b6=kvm_ia64_handle_break | ||
260 | ;; | ||
261 | END(kvm_break_fault) | ||
262 | |||
263 | .org kvm_ia64_ivt+0x3000 | ||
264 | ///////////////////////////////////////////////////////////////// | ||
265 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) | ||
266 | ENTRY(kvm_interrupt) | ||
267 | mov r31=pr // prepare to save predicates | ||
268 | mov r19=12 | ||
269 | mov r29=cr.ipsr | ||
270 | ;; | ||
271 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT | ||
272 | tbit.z p0,p15=r29,IA64_PSR_I_BIT | ||
273 | ;; | ||
274 | (p7) br.sptk kvm_dispatch_interrupt | ||
275 | ;; | ||
276 | mov r27=ar.rsc /* M */ | ||
277 | mov r20=r1 /* A */ | ||
278 | mov r25=ar.unat /* M */ | ||
279 | mov r26=ar.pfs /* I */ | ||
280 | mov r28=cr.iip /* M */ | ||
281 | cover /* B (or nothing) */ | ||
282 | ;; | ||
283 | mov r1=sp | ||
284 | ;; | ||
285 | invala /* M */ | ||
286 | mov r30=cr.ifs | ||
287 | ;; | ||
288 | addl r1=-VMM_PT_REGS_SIZE,r1 | ||
289 | ;; | ||
290 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ | ||
291 | adds r16=PT(CR_IPSR),r1 | ||
292 | ;; | ||
293 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES | ||
294 | st8 [r16]=r29 /* save cr.ipsr */ | ||
295 | ;; | ||
296 | lfetch.fault.excl.nt1 [r17] | ||
297 | mov r29=b0 | ||
298 | ;; | ||
299 | adds r16=PT(R8),r1 /* initialize first base pointer */ | ||
300 | adds r17=PT(R9),r1 /* initialize second base pointer */ | ||
301 | mov r18=r0 /* make sure r18 isn't NaT */ | ||
302 | ;; | ||
303 | .mem.offset 0,0; st8.spill [r16]=r8,16 | ||
304 | .mem.offset 8,0; st8.spill [r17]=r9,16 | ||
305 | ;; | ||
306 | .mem.offset 0,0; st8.spill [r16]=r10,24 | ||
307 | .mem.offset 8,0; st8.spill [r17]=r11,24 | ||
308 | ;; | ||
309 | st8 [r16]=r28,16 /* save cr.iip */ | ||
310 | st8 [r17]=r30,16 /* save cr.ifs */ | ||
311 | mov r8=ar.fpsr /* M */ | ||
312 | mov r9=ar.csd | ||
313 | mov r10=ar.ssd | ||
314 | movl r11=FPSR_DEFAULT /* L-unit */ | ||
315 | ;; | ||
316 | st8 [r16]=r25,16 /* save ar.unat */ | ||
317 | st8 [r17]=r26,16 /* save ar.pfs */ | ||
318 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ | ||
319 | ;; | ||
320 | st8 [r16]=r27,16 /* save ar.rsc */ | ||
321 | adds r17=16,r17 /* skip over ar_rnat field */ | ||
322 | ;; | ||
323 | st8 [r17]=r31,16 /* save predicates */ | ||
324 | adds r16=16,r16 /* skip over ar_bspstore field */ | ||
325 | ;; | ||
326 | st8 [r16]=r29,16 /* save b0 */ | ||
327 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ | ||
328 | ;; | ||
329 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ | ||
330 | .mem.offset 8,0; st8.spill [r17]=r12,16 | ||
331 | adds r12=-16,r1 | ||
332 | /* switch to kernel memory stack (with 16 bytes of scratch) */ | ||
333 | ;; | ||
334 | .mem.offset 0,0; st8.spill [r16]=r13,16 | ||
335 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ | ||
336 | ;; | ||
337 | .mem.offset 0,0; st8.spill [r16]=r15,16 | ||
338 | .mem.offset 8,0; st8.spill [r17]=r14,16 | ||
339 | dep r14=-1,r0,60,4 | ||
340 | ;; | ||
341 | .mem.offset 0,0; st8.spill [r16]=r2,16 | ||
342 | .mem.offset 8,0; st8.spill [r17]=r3,16 | ||
343 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 | ||
344 | adds r14 = VMM_VCPU_GP_OFFSET,r13 | ||
345 | ;; | ||
346 | mov r8=ar.ccv | ||
347 | ld8 r14 = [r14] | ||
348 | ;; | ||
349 | mov r1=r14 /* establish kernel global pointer */ | ||
350 | ;; | ||
351 | bsw.1 | ||
352 | ;; | ||
353 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | ||
354 | mov out0=r13 | ||
355 | ;; | ||
356 | ssm psr.ic | ||
357 | ;; | ||
358 | srlz.i | ||
359 | ;; | ||
360 | //(p15) ssm psr.i | ||
361 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
362 | srlz.i // ensure everybody knows psr.ic is back on | ||
363 | ;; | ||
364 | .mem.offset 0,0; st8.spill [r2]=r16,16 | ||
365 | .mem.offset 8,0; st8.spill [r3]=r17,16 | ||
366 | ;; | ||
367 | .mem.offset 0,0; st8.spill [r2]=r18,16 | ||
368 | .mem.offset 8,0; st8.spill [r3]=r19,16 | ||
369 | ;; | ||
370 | .mem.offset 0,0; st8.spill [r2]=r20,16 | ||
371 | .mem.offset 8,0; st8.spill [r3]=r21,16 | ||
372 | mov r18=b6 | ||
373 | ;; | ||
374 | .mem.offset 0,0; st8.spill [r2]=r22,16 | ||
375 | .mem.offset 8,0; st8.spill [r3]=r23,16 | ||
376 | mov r19=b7 | ||
377 | ;; | ||
378 | .mem.offset 0,0; st8.spill [r2]=r24,16 | ||
379 | .mem.offset 8,0; st8.spill [r3]=r25,16 | ||
380 | ;; | ||
381 | .mem.offset 0,0; st8.spill [r2]=r26,16 | ||
382 | .mem.offset 8,0; st8.spill [r3]=r27,16 | ||
383 | ;; | ||
384 | .mem.offset 0,0; st8.spill [r2]=r28,16 | ||
385 | .mem.offset 8,0; st8.spill [r3]=r29,16 | ||
386 | ;; | ||
387 | .mem.offset 0,0; st8.spill [r2]=r30,16 | ||
388 | .mem.offset 8,0; st8.spill [r3]=r31,32 | ||
389 | ;; | ||
390 | mov ar.fpsr=r11 /* M-unit */ | ||
391 | st8 [r2]=r8,8 /* ar.ccv */ | ||
392 | adds r24=PT(B6)-PT(F7),r3 | ||
393 | ;; | ||
394 | stf.spill [r2]=f6,32 | ||
395 | stf.spill [r3]=f7,32 | ||
396 | ;; | ||
397 | stf.spill [r2]=f8,32 | ||
398 | stf.spill [r3]=f9,32 | ||
399 | ;; | ||
400 | stf.spill [r2]=f10 | ||
401 | stf.spill [r3]=f11 | ||
402 | adds r25=PT(B7)-PT(F11),r3 | ||
403 | ;; | ||
404 | st8 [r24]=r18,16 /* b6 */ | ||
405 | st8 [r25]=r19,16 /* b7 */ | ||
406 | ;; | ||
407 | st8 [r24]=r9 /* ar.csd */ | ||
408 | st8 [r25]=r10 /* ar.ssd */ | ||
409 | ;; | ||
410 | srlz.d // make sure we see the effect of cr.ivr | ||
411 | addl r14=@gprel(ia64_leave_nested),gp | ||
412 | ;; | ||
413 | mov rp=r14 | ||
414 | br.call.sptk.many b6=kvm_ia64_handle_irq | ||
415 | ;; | ||
416 | END(kvm_interrupt) | ||
417 | |||
418 | .global kvm_dispatch_vexirq | ||
419 | .org kvm_ia64_ivt+0x3400 | ||
420 | ////////////////////////////////////////////////////////////////////// | ||
421 | // 0x3400 Entry 13 (size 64 bundles) Reserved | ||
422 | ENTRY(kvm_virtual_exirq) | ||
423 | mov r31=pr | ||
424 | mov r19=13 | ||
425 | mov r30 =r0 | ||
426 | ;; | ||
427 | kvm_dispatch_vexirq: | ||
428 | cmp.eq p6,p0 = 1,r30 | ||
429 | ;; | ||
430 | (p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 | ||
431 | ;; | ||
432 | (p6) ld8 r1 = [r29] | ||
433 | ;; | ||
434 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
435 | alloc r14=ar.pfs,0,0,1,0 | ||
436 | mov out0=r13 | ||
437 | |||
438 | ssm psr.ic | ||
439 | ;; | ||
440 | srlz.i // guarantee that interruption collection is on | ||
441 | ;; | ||
442 | (p15) ssm psr.i // restore psr.i | ||
443 | adds r3=8,r2 // set up second base pointer | ||
444 | ;; | ||
445 | KVM_SAVE_REST | ||
446 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
447 | ;; | ||
448 | mov rp=r14 | ||
449 | br.call.sptk.many b6=kvm_vexirq | ||
450 | END(kvm_virtual_exirq) | ||
451 | |||
452 | .org kvm_ia64_ivt+0x3800 | ||
453 | ///////////////////////////////////////////////////////////////////// | ||
454 | // 0x3800 Entry 14 (size 64 bundles) Reserved | ||
455 | KVM_FAULT(14) | ||
456 | // this code segment is from 2.6.16.13 | ||
457 | |||
458 | .org kvm_ia64_ivt+0x3c00 | ||
459 | /////////////////////////////////////////////////////////////////////// | ||
460 | // 0x3c00 Entry 15 (size 64 bundles) Reserved | ||
461 | KVM_FAULT(15) | ||
462 | |||
463 | .org kvm_ia64_ivt+0x4000 | ||
464 | /////////////////////////////////////////////////////////////////////// | ||
465 | // 0x4000 Entry 16 (size 64 bundles) Reserved | ||
466 | KVM_FAULT(16) | ||
467 | |||
468 | .org kvm_ia64_ivt+0x4400 | ||
469 | ////////////////////////////////////////////////////////////////////// | ||
470 | // 0x4400 Entry 17 (size 64 bundles) Reserved | ||
471 | KVM_FAULT(17) | ||
472 | |||
473 | .org kvm_ia64_ivt+0x4800 | ||
474 | ////////////////////////////////////////////////////////////////////// | ||
475 | // 0x4800 Entry 18 (size 64 bundles) Reserved | ||
476 | KVM_FAULT(18) | ||
477 | |||
478 | .org kvm_ia64_ivt+0x4c00 | ||
479 | ////////////////////////////////////////////////////////////////////// | ||
480 | // 0x4c00 Entry 19 (size 64 bundles) Reserved | ||
481 | KVM_FAULT(19) | ||
482 | |||
483 | .org kvm_ia64_ivt+0x5000 | ||
484 | ////////////////////////////////////////////////////////////////////// | ||
485 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present | ||
486 | ENTRY(kvm_page_not_present) | ||
487 | KVM_REFLECT(20) | ||
488 | END(kvm_page_not_present) | ||
489 | |||
490 | .org kvm_ia64_ivt+0x5100 | ||
491 | /////////////////////////////////////////////////////////////////////// | ||
492 | // 0x5100 Entry 21 (size 16 bundles) Key Permission vector | ||
493 | ENTRY(kvm_key_permission) | ||
494 | KVM_REFLECT(21) | ||
495 | END(kvm_key_permission) | ||
496 | |||
497 | .org kvm_ia64_ivt+0x5200 | ||
498 | ////////////////////////////////////////////////////////////////////// | ||
499 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) | ||
500 | ENTRY(kvm_iaccess_rights) | ||
501 | KVM_REFLECT(22) | ||
502 | END(kvm_iaccess_rights) | ||
503 | |||
504 | .org kvm_ia64_ivt+0x5300 | ||
505 | ////////////////////////////////////////////////////////////////////// | ||
506 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) | ||
507 | ENTRY(kvm_daccess_rights) | ||
508 | KVM_REFLECT(23) | ||
509 | END(kvm_daccess_rights) | ||
510 | |||
511 | .org kvm_ia64_ivt+0x5400 | ||
512 | ///////////////////////////////////////////////////////////////////// | ||
513 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) | ||
514 | ENTRY(kvm_general_exception) | ||
515 | KVM_REFLECT(24) | ||
516 | KVM_FAULT(24) | ||
517 | END(kvm_general_exception) | ||
518 | |||
519 | .org kvm_ia64_ivt+0x5500 | ||
520 | ////////////////////////////////////////////////////////////////////// | ||
521 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) | ||
522 | ENTRY(kvm_disabled_fp_reg) | ||
523 | KVM_REFLECT(25) | ||
524 | END(kvm_disabled_fp_reg) | ||
525 | |||
526 | .org kvm_ia64_ivt+0x5600 | ||
527 | //////////////////////////////////////////////////////////////////// | ||
528 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) | ||
529 | ENTRY(kvm_nat_consumption) | ||
530 | KVM_REFLECT(26) | ||
531 | END(kvm_nat_consumption) | ||
532 | |||
533 | .org kvm_ia64_ivt+0x5700 | ||
534 | ///////////////////////////////////////////////////////////////////// | ||
535 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) | ||
536 | ENTRY(kvm_speculation_vector) | ||
537 | KVM_REFLECT(27) | ||
538 | END(kvm_speculation_vector) | ||
539 | |||
540 | .org kvm_ia64_ivt+0x5800 | ||
541 | ///////////////////////////////////////////////////////////////////// | ||
542 | // 0x5800 Entry 28 (size 16 bundles) Reserved | ||
543 | KVM_FAULT(28) | ||
544 | |||
545 | .org kvm_ia64_ivt+0x5900 | ||
546 | /////////////////////////////////////////////////////////////////// | ||
547 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) | ||
548 | ENTRY(kvm_debug_vector) | ||
549 | KVM_FAULT(29) | ||
550 | END(kvm_debug_vector) | ||
551 | |||
552 | .org kvm_ia64_ivt+0x5a00 | ||
553 | /////////////////////////////////////////////////////////////// | ||
554 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) | ||
555 | ENTRY(kvm_unaligned_access) | ||
556 | KVM_REFLECT(30) | ||
557 | END(kvm_unaligned_access) | ||
558 | |||
559 | .org kvm_ia64_ivt+0x5b00 | ||
560 | ////////////////////////////////////////////////////////////////////// | ||
561 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) | ||
562 | ENTRY(kvm_unsupported_data_reference) | ||
563 | KVM_REFLECT(31) | ||
564 | END(kvm_unsupported_data_reference) | ||
565 | |||
566 | .org kvm_ia64_ivt+0x5c00 | ||
567 | //////////////////////////////////////////////////////////////////// | ||
568 | // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) | ||
569 | ENTRY(kvm_floating_point_fault) | ||
570 | KVM_REFLECT(32) | ||
571 | END(kvm_floating_point_fault) | ||
572 | |||
573 | .org kvm_ia64_ivt+0x5d00 | ||
574 | ///////////////////////////////////////////////////////////////////// | ||
575 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) | ||
576 | ENTRY(kvm_floating_point_trap) | ||
577 | KVM_REFLECT(33) | ||
578 | END(kvm_floating_point_trap) | ||
579 | |||
580 | .org kvm_ia64_ivt+0x5e00 | ||
581 | ////////////////////////////////////////////////////////////////////// | ||
582 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) | ||
583 | ENTRY(kvm_lower_privilege_trap) | ||
584 | KVM_REFLECT(34) | ||
585 | END(kvm_lower_privilege_trap) | ||
586 | |||
587 | .org kvm_ia64_ivt+0x5f00 | ||
588 | ////////////////////////////////////////////////////////////////////// | ||
589 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) | ||
590 | ENTRY(kvm_taken_branch_trap) | ||
591 | KVM_REFLECT(35) | ||
592 | END(kvm_taken_branch_trap) | ||
593 | |||
594 | .org kvm_ia64_ivt+0x6000 | ||
595 | //////////////////////////////////////////////////////////////////// | ||
596 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) | ||
597 | ENTRY(kvm_single_step_trap) | ||
598 | KVM_REFLECT(36) | ||
599 | END(kvm_single_step_trap) | ||
600 | .global kvm_virtualization_fault_back | ||
601 | .org kvm_ia64_ivt+0x6100 | ||
602 | ///////////////////////////////////////////////////////////////////// | ||
603 | // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault | ||
604 | ENTRY(kvm_virtualization_fault) | ||
605 | mov r31=pr | ||
606 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | ||
607 | ;; | ||
608 | st8 [r16] = r1 | ||
609 | adds r17 = VMM_VCPU_GP_OFFSET, r21 | ||
610 | ;; | ||
611 | ld8 r1 = [r17] | ||
612 | cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 | ||
613 | cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 | ||
614 | cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 | ||
615 | cmp.eq p9,p0=EVENT_RSM,r24 | ||
616 | cmp.eq p10,p0=EVENT_SSM,r24 | ||
617 | cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 | ||
618 | cmp.eq p12,p0=EVENT_THASH,r24 | ||
619 | (p6) br.dptk.many kvm_asm_mov_from_ar | ||
620 | (p7) br.dptk.many kvm_asm_mov_from_rr | ||
621 | (p8) br.dptk.many kvm_asm_mov_to_rr | ||
622 | (p9) br.dptk.many kvm_asm_rsm | ||
623 | (p10) br.dptk.many kvm_asm_ssm | ||
624 | (p11) br.dptk.many kvm_asm_mov_to_psr | ||
625 | (p12) br.dptk.many kvm_asm_thash | ||
626 | ;; | ||
627 | kvm_virtualization_fault_back: | ||
628 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | ||
629 | ;; | ||
630 | ld8 r1 = [r16] | ||
631 | ;; | ||
632 | mov r19=37 | ||
633 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | ||
634 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | ||
635 | ;; | ||
636 | st8 [r16] = r24 | ||
637 | st8 [r17] = r25 | ||
638 | ;; | ||
639 | cmp.ne p6,p0=EVENT_RFI, r24 | ||
640 | (p6) br.sptk kvm_dispatch_virtualization_fault | ||
641 | ;; | ||
642 | adds r18=VMM_VPD_BASE_OFFSET,r21 | ||
643 | ;; | ||
644 | ld8 r18=[r18] | ||
645 | ;; | ||
646 | adds r18=VMM_VPD_VIFS_OFFSET,r18 | ||
647 | ;; | ||
648 | ld8 r18=[r18] | ||
649 | ;; | ||
650 | tbit.z p6,p0=r18,63 | ||
651 | (p6) br.sptk kvm_dispatch_virtualization_fault | ||
652 | ;; | ||
653 | //if vifs.v=1 desert current register frame | ||
654 | alloc r18=ar.pfs,0,0,0,0 | ||
655 | br.sptk kvm_dispatch_virtualization_fault | ||
656 | END(kvm_virtualization_fault) | ||
657 | |||
658 | .org kvm_ia64_ivt+0x6200 | ||
659 | ////////////////////////////////////////////////////////////// | ||
660 | // 0x6200 Entry 38 (size 16 bundles) Reserved | ||
661 | KVM_FAULT(38) | ||
662 | |||
663 | .org kvm_ia64_ivt+0x6300 | ||
664 | ///////////////////////////////////////////////////////////////// | ||
665 | // 0x6300 Entry 39 (size 16 bundles) Reserved | ||
666 | KVM_FAULT(39) | ||
667 | |||
668 | .org kvm_ia64_ivt+0x6400 | ||
669 | ///////////////////////////////////////////////////////////////// | ||
670 | // 0x6400 Entry 40 (size 16 bundles) Reserved | ||
671 | KVM_FAULT(40) | ||
672 | |||
673 | .org kvm_ia64_ivt+0x6500 | ||
674 | ////////////////////////////////////////////////////////////////// | ||
675 | // 0x6500 Entry 41 (size 16 bundles) Reserved | ||
676 | KVM_FAULT(41) | ||
677 | |||
678 | .org kvm_ia64_ivt+0x6600 | ||
679 | ////////////////////////////////////////////////////////////////// | ||
680 | // 0x6600 Entry 42 (size 16 bundles) Reserved | ||
681 | KVM_FAULT(42) | ||
682 | |||
683 | .org kvm_ia64_ivt+0x6700 | ||
684 | ////////////////////////////////////////////////////////////////// | ||
685 | // 0x6700 Entry 43 (size 16 bundles) Reserved | ||
686 | KVM_FAULT(43) | ||
687 | |||
688 | .org kvm_ia64_ivt+0x6800 | ||
689 | ////////////////////////////////////////////////////////////////// | ||
690 | // 0x6800 Entry 44 (size 16 bundles) Reserved | ||
691 | KVM_FAULT(44) | ||
692 | |||
693 | .org kvm_ia64_ivt+0x6900 | ||
694 | /////////////////////////////////////////////////////////////////// | ||
695 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception | ||
696 | //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) | ||
697 | ENTRY(kvm_ia32_exception) | ||
698 | KVM_FAULT(45) | ||
699 | END(kvm_ia32_exception) | ||
700 | |||
701 | .org kvm_ia64_ivt+0x6a00 | ||
702 | //////////////////////////////////////////////////////////////////// | ||
703 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) | ||
704 | ENTRY(kvm_ia32_intercept) | ||
705 | KVM_FAULT(46) | ||
706 | END(kvm_ia32_intercept) | ||
707 | |||
708 | .org kvm_ia64_ivt+0x6c00 | ||
709 | ///////////////////////////////////////////////////////////////////// | ||
710 | // 0x6c00 Entry 48 (size 16 bundles) Reserved | ||
711 | KVM_FAULT(48) | ||
712 | |||
713 | .org kvm_ia64_ivt+0x6d00 | ||
714 | ////////////////////////////////////////////////////////////////////// | ||
715 | // 0x6d00 Entry 49 (size 16 bundles) Reserved | ||
716 | KVM_FAULT(49) | ||
717 | |||
718 | .org kvm_ia64_ivt+0x6e00 | ||
719 | ////////////////////////////////////////////////////////////////////// | ||
720 | // 0x6e00 Entry 50 (size 16 bundles) Reserved | ||
721 | KVM_FAULT(50) | ||
722 | |||
723 | .org kvm_ia64_ivt+0x6f00 | ||
724 | ///////////////////////////////////////////////////////////////////// | ||
725 | // 0x6f00 Entry 51 (size 16 bundles) Reserved | ||
726 | KVM_FAULT(51) | ||
727 | |||
728 | .org kvm_ia64_ivt+0x7100 | ||
729 | //////////////////////////////////////////////////////////////////// | ||
730 | // 0x7100 Entry 53 (size 16 bundles) Reserved | ||
731 | KVM_FAULT(53) | ||
732 | |||
733 | .org kvm_ia64_ivt+0x7200 | ||
734 | ///////////////////////////////////////////////////////////////////// | ||
735 | // 0x7200 Entry 54 (size 16 bundles) Reserved | ||
736 | KVM_FAULT(54) | ||
737 | |||
738 | .org kvm_ia64_ivt+0x7300 | ||
739 | //////////////////////////////////////////////////////////////////// | ||
740 | // 0x7300 Entry 55 (size 16 bundles) Reserved | ||
741 | KVM_FAULT(55) | ||
742 | |||
743 | .org kvm_ia64_ivt+0x7400 | ||
744 | //////////////////////////////////////////////////////////////////// | ||
745 | // 0x7400 Entry 56 (size 16 bundles) Reserved | ||
746 | KVM_FAULT(56) | ||
747 | |||
748 | .org kvm_ia64_ivt+0x7500 | ||
749 | ///////////////////////////////////////////////////////////////////// | ||
750 | // 0x7500 Entry 57 (size 16 bundles) Reserved | ||
751 | KVM_FAULT(57) | ||
752 | |||
753 | .org kvm_ia64_ivt+0x7600 | ||
754 | ///////////////////////////////////////////////////////////////////// | ||
755 | // 0x7600 Entry 58 (size 16 bundles) Reserved | ||
756 | KVM_FAULT(58) | ||
757 | |||
758 | .org kvm_ia64_ivt+0x7700 | ||
759 | //////////////////////////////////////////////////////////////////// | ||
760 | // 0x7700 Entry 59 (size 16 bundles) Reserved | ||
761 | KVM_FAULT(59) | ||
762 | |||
763 | .org kvm_ia64_ivt+0x7800 | ||
764 | //////////////////////////////////////////////////////////////////// | ||
765 | // 0x7800 Entry 60 (size 16 bundles) Reserved | ||
766 | KVM_FAULT(60) | ||
767 | |||
768 | .org kvm_ia64_ivt+0x7900 | ||
769 | ///////////////////////////////////////////////////////////////////// | ||
770 | // 0x7900 Entry 61 (size 16 bundles) Reserved | ||
771 | KVM_FAULT(61) | ||
772 | |||
773 | .org kvm_ia64_ivt+0x7a00 | ||
774 | ///////////////////////////////////////////////////////////////////// | ||
775 | // 0x7a00 Entry 62 (size 16 bundles) Reserved | ||
776 | KVM_FAULT(62) | ||
777 | |||
778 | .org kvm_ia64_ivt+0x7b00 | ||
779 | ///////////////////////////////////////////////////////////////////// | ||
780 | // 0x7b00 Entry 63 (size 16 bundles) Reserved | ||
781 | KVM_FAULT(63) | ||
782 | |||
783 | .org kvm_ia64_ivt+0x7c00 | ||
784 | //////////////////////////////////////////////////////////////////// | ||
785 | // 0x7c00 Entry 64 (size 16 bundles) Reserved | ||
786 | KVM_FAULT(64) | ||
787 | |||
788 | .org kvm_ia64_ivt+0x7d00 | ||
789 | ///////////////////////////////////////////////////////////////////// | ||
790 | // 0x7d00 Entry 65 (size 16 bundles) Reserved | ||
791 | KVM_FAULT(65) | ||
792 | |||
793 | .org kvm_ia64_ivt+0x7e00 | ||
794 | ///////////////////////////////////////////////////////////////////// | ||
795 | // 0x7e00 Entry 66 (size 16 bundles) Reserved | ||
796 | KVM_FAULT(66) | ||
797 | |||
798 | .org kvm_ia64_ivt+0x7f00 | ||
799 | //////////////////////////////////////////////////////////////////// | ||
800 | // 0x7f00 Entry 67 (size 16 bundles) Reserved | ||
801 | KVM_FAULT(67) | ||
802 | |||
803 | .org kvm_ia64_ivt+0x8000 | ||
804 | // There is no particular reason for this code to be here, other than that | ||
805 | // there happens to be space here that would go unused otherwise. If this | ||
806 | // fault ever gets "unreserved", simply move the following code to a more | ||
807 | // suitable spot... | ||
808 | |||
809 | |||
810 | ENTRY(kvm_dtlb_miss_dispatch) | ||
811 | mov r19 = 2 | ||
812 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
813 | alloc r14=ar.pfs,0,0,3,0 | ||
814 | mov out0=cr.ifa | ||
815 | mov out1=r15 | ||
816 | adds r3=8,r2 // set up second base pointer | ||
817 | ;; | ||
818 | ssm psr.ic | ||
819 | ;; | ||
820 | srlz.i // guarantee that interruption collection is on | ||
821 | ;; | ||
822 | (p15) ssm psr.i // restore psr.i | ||
823 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | ||
824 | ;; | ||
825 | KVM_SAVE_REST | ||
826 | KVM_SAVE_EXTRA | ||
827 | mov rp=r14 | ||
828 | ;; | ||
829 | adds out2=16,r12 | ||
830 | br.call.sptk.many b6=kvm_page_fault | ||
831 | END(kvm_dtlb_miss_dispatch) | ||
832 | |||
833 | ENTRY(kvm_itlb_miss_dispatch) | ||
834 | |||
835 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
836 | alloc r14=ar.pfs,0,0,3,0 | ||
837 | mov out0=cr.ifa | ||
838 | mov out1=r15 | ||
839 | adds r3=8,r2 // set up second base pointer | ||
840 | ;; | ||
841 | ssm psr.ic | ||
842 | ;; | ||
843 | srlz.i // guarantee that interruption collection is on | ||
844 | ;; | ||
845 | (p15) ssm psr.i // restore psr.i | ||
846 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
847 | ;; | ||
848 | KVM_SAVE_REST | ||
849 | mov rp=r14 | ||
850 | ;; | ||
851 | adds out2=16,r12 | ||
852 | br.call.sptk.many b6=kvm_page_fault | ||
853 | END(kvm_itlb_miss_dispatch) | ||
854 | |||
855 | ENTRY(kvm_dispatch_reflection) | ||
856 | /* | ||
857 | * Input: | ||
858 | * psr.ic: off | ||
859 | * r19: intr type (offset into ivt, see ia64_int.h) | ||
860 | * r31: contains saved predicates (pr) | ||
861 | */ | ||
862 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
863 | alloc r14=ar.pfs,0,0,5,0 | ||
864 | mov out0=cr.ifa | ||
865 | mov out1=cr.isr | ||
866 | mov out2=cr.iim | ||
867 | mov out3=r15 | ||
868 | adds r3=8,r2 // set up second base pointer | ||
869 | ;; | ||
870 | ssm psr.ic | ||
871 | ;; | ||
872 | srlz.i // guarantee that interruption collection is on | ||
873 | ;; | ||
874 | (p15) ssm psr.i // restore psr.i | ||
875 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
876 | ;; | ||
877 | KVM_SAVE_REST | ||
878 | mov rp=r14 | ||
879 | ;; | ||
880 | adds out4=16,r12 | ||
881 | br.call.sptk.many b6=reflect_interruption | ||
882 | END(kvm_dispatch_reflection) | ||
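The out0..out4 registers staged above become the arguments of the C-level handler in process.c. As a hedged prototype (parameter names are illustrative, reconstructed from the register setup rather than quoted from process.c):

    /* out0 = cr.ifa (faulting address), out1 = cr.isr (interruption status),
     * out2 = cr.iim (immediate field),  out3 = r15 (offset into the IVT),
     * out4 = sp + 16 (pointer to the saved register frame). */
    void reflect_interruption(unsigned long ifa, unsigned long isr,
                              unsigned long iim, unsigned long vec,
                              struct kvm_pt_regs *regs);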
883 | |||
884 | ENTRY(kvm_dispatch_virtualization_fault) | ||
885 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | ||
886 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | ||
887 | ;; | ||
888 | st8 [r16] = r24 | ||
889 | st8 [r17] = r25 | ||
890 | ;; | ||
891 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
892 | ;; | ||
893 | alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!) | ||
894 | mov out0=r13 //vcpu | ||
895 | adds r3=8,r2 // set up second base pointer | ||
896 | ;; | ||
897 | ssm psr.ic | ||
898 | ;; | ||
899 | srlz.i // guarantee that interruption collection is on | ||
900 | ;; | ||
901 | (p15) ssm psr.i // restore psr.i | ||
902 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | ||
903 | ;; | ||
904 | KVM_SAVE_REST | ||
905 | KVM_SAVE_EXTRA | ||
906 | mov rp=r14 | ||
907 | ;; | ||
908 | adds out1=16,sp //regs | ||
909 | br.call.sptk.many b6=kvm_emulate | ||
910 | END(kvm_dispatch_virtualization_fault) | ||
911 | |||
912 | |||
913 | ENTRY(kvm_dispatch_interrupt) | ||
914 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 | ||
915 | ;; | ||
916 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | ||
917 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
918 | ;; | ||
919 | ssm psr.ic | ||
920 | ;; | ||
921 | srlz.i | ||
922 | ;; | ||
923 | (p15) ssm psr.i | ||
924 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
925 | ;; | ||
926 | KVM_SAVE_REST | ||
927 | mov rp=r14 | ||
928 | ;; | ||
929 | mov out0=r13 // pass pointer to pt_regs as second arg | ||
930 | br.call.sptk.many b6=kvm_ia64_handle_irq | ||
931 | END(kvm_dispatch_interrupt) | ||
932 | |||
933 | GLOBAL_ENTRY(ia64_leave_nested) | ||
934 | rsm psr.i | ||
935 | ;; | ||
936 | adds r21=PT(PR)+16,r12 | ||
937 | ;; | ||
938 | lfetch [r21],PT(CR_IPSR)-PT(PR) | ||
939 | adds r2=PT(B6)+16,r12 | ||
940 | adds r3=PT(R16)+16,r12 | ||
941 | ;; | ||
942 | lfetch [r21] | ||
943 | ld8 r28=[r2],8 // load b6 | ||
944 | adds r29=PT(R24)+16,r12 | ||
945 | |||
946 | ld8.fill r16=[r3] | ||
947 | adds r3=PT(AR_CSD)-PT(R16),r3 | ||
948 | adds r30=PT(AR_CCV)+16,r12 | ||
949 | ;; | ||
950 | ld8.fill r24=[r29] | ||
951 | ld8 r15=[r30] // load ar.ccv | ||
952 | ;; | ||
953 | ld8 r29=[r2],16 // load b7 | ||
954 | ld8 r30=[r3],16 // load ar.csd | ||
955 | ;; | ||
956 | ld8 r31=[r2],16 // load ar.ssd | ||
957 | ld8.fill r8=[r3],16 | ||
958 | ;; | ||
959 | ld8.fill r9=[r2],16 | ||
960 | ld8.fill r10=[r3],PT(R17)-PT(R10) | ||
961 | ;; | ||
962 | ld8.fill r11=[r2],PT(R18)-PT(R11) | ||
963 | ld8.fill r17=[r3],16 | ||
964 | ;; | ||
965 | ld8.fill r18=[r2],16 | ||
966 | ld8.fill r19=[r3],16 | ||
967 | ;; | ||
968 | ld8.fill r20=[r2],16 | ||
969 | ld8.fill r21=[r3],16 | ||
970 | mov ar.csd=r30 | ||
971 | mov ar.ssd=r31 | ||
972 | ;; | ||
973 | rsm psr.i | psr.ic | ||
974 | // initiate turning off of interrupt and interruption collection | ||
975 | invala // invalidate ALAT | ||
976 | ;; | ||
977 | srlz.i | ||
978 | ;; | ||
979 | ld8.fill r22=[r2],24 | ||
980 | ld8.fill r23=[r3],24 | ||
981 | mov b6=r28 | ||
982 | ;; | ||
983 | ld8.fill r25=[r2],16 | ||
984 | ld8.fill r26=[r3],16 | ||
985 | mov b7=r29 | ||
986 | ;; | ||
987 | ld8.fill r27=[r2],16 | ||
988 | ld8.fill r28=[r3],16 | ||
989 | ;; | ||
990 | ld8.fill r29=[r2],16 | ||
991 | ld8.fill r30=[r3],24 | ||
992 | ;; | ||
993 | ld8.fill r31=[r2],PT(F9)-PT(R31) | ||
994 | adds r3=PT(F10)-PT(F6),r3 | ||
995 | ;; | ||
996 | ldf.fill f9=[r2],PT(F6)-PT(F9) | ||
997 | ldf.fill f10=[r3],PT(F8)-PT(F10) | ||
998 | ;; | ||
999 | ldf.fill f6=[r2],PT(F7)-PT(F6) | ||
1000 | ;; | ||
1001 | ldf.fill f7=[r2],PT(F11)-PT(F7) | ||
1002 | ldf.fill f8=[r3],32 | ||
1003 | ;; | ||
1004 | srlz.i // ensure interruption collection is off | ||
1005 | mov ar.ccv=r15 | ||
1006 | ;; | ||
1007 | bsw.0 // switch back to bank 0 (no stop bit required beforehand...) | ||
1008 | ;; | ||
1009 | ldf.fill f11=[r2] | ||
1010 | // mov r18=r13 | ||
1011 | // mov r21=r13 | ||
1012 | adds r16=PT(CR_IPSR)+16,r12 | ||
1013 | adds r17=PT(CR_IIP)+16,r12 | ||
1014 | ;; | ||
1015 | ld8 r29=[r16],16 // load cr.ipsr | ||
1016 | ld8 r28=[r17],16 // load cr.iip | ||
1017 | ;; | ||
1018 | ld8 r30=[r16],16 // load cr.ifs | ||
1019 | ld8 r25=[r17],16 // load ar.unat | ||
1020 | ;; | ||
1021 | ld8 r26=[r16],16 // load ar.pfs | ||
1022 | ld8 r27=[r17],16 // load ar.rsc | ||
1023 | cmp.eq p9,p0=r0,r0 | ||
1024 | // set p9 to indicate that we should restore cr.ifs | ||
1025 | ;; | ||
1026 | ld8 r24=[r16],16 // load ar.rnat (may be garbage) | ||
1027 | ld8 r23=[r17],16// load ar.bspstore (may be garbage) | ||
1028 | ;; | ||
1029 | ld8 r31=[r16],16 // load predicates | ||
1030 | ld8 r22=[r17],16 // load b0 | ||
1031 | ;; | ||
1032 | ld8 r19=[r16],16 // load ar.rsc value for "loadrs" | ||
1033 | ld8.fill r1=[r17],16 // load r1 | ||
1034 | ;; | ||
1035 | ld8.fill r12=[r16],16 | ||
1036 | ld8.fill r13=[r17],16 | ||
1037 | ;; | ||
1038 | ld8 r20=[r16],16 // ar.fpsr | ||
1039 | ld8.fill r15=[r17],16 | ||
1040 | ;; | ||
1041 | ld8.fill r14=[r16],16 | ||
1042 | ld8.fill r2=[r17] | ||
1043 | ;; | ||
1044 | ld8.fill r3=[r16] | ||
1045 | ;; | ||
1046 | mov r16=ar.bsp // get existing backing store pointer | ||
1047 | ;; | ||
1048 | mov b0=r22 | ||
1049 | mov ar.pfs=r26 | ||
1050 | mov cr.ifs=r30 | ||
1051 | mov cr.ipsr=r29 | ||
1052 | mov ar.fpsr=r20 | ||
1053 | mov cr.iip=r28 | ||
1054 | ;; | ||
1055 | mov ar.rsc=r27 | ||
1056 | mov ar.unat=r25 | ||
1057 | mov pr=r31,-1 | ||
1058 | rfi | ||
1059 | END(ia64_leave_nested) | ||
1060 | |||
1061 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) | ||
1062 | /* | ||
1063 | * work.need_resched etc. mustn't get changed | ||
1064 | * by this CPU before it returns to | ||
1065 | * user- or fsys-mode, hence we disable interrupts early on: | ||
1066 | */ | ||
1067 | adds r2 = PT(R4)+16,r12 | ||
1068 | adds r3 = PT(R5)+16,r12 | ||
1069 | adds r8 = PT(EML_UNAT)+16,r12 | ||
1070 | ;; | ||
1071 | ld8 r8 = [r8] | ||
1072 | ;; | ||
1073 | mov ar.unat=r8 | ||
1074 | ;; | ||
1075 | ld8.fill r4=[r2],16 //load r4 | ||
1076 | ld8.fill r5=[r3],16 //load r5 | ||
1077 | ;; | ||
1078 | ld8.fill r6=[r2] //load r6 | ||
1079 | ld8.fill r7=[r3] //load r7 | ||
1080 | ;; | ||
1081 | END(ia64_leave_hypervisor_prepare) | ||
1082 | //fall through | ||
1083 | GLOBAL_ENTRY(ia64_leave_hypervisor) | ||
1084 | rsm psr.i | ||
1085 | ;; | ||
1086 | br.call.sptk.many b0=leave_hypervisor_tail | ||
1087 | ;; | ||
1088 | adds r20=PT(PR)+16,r12 | ||
1089 | adds r8=PT(EML_UNAT)+16,r12 | ||
1090 | ;; | ||
1091 | ld8 r8=[r8] | ||
1092 | ;; | ||
1093 | mov ar.unat=r8 | ||
1094 | ;; | ||
1095 | lfetch [r20],PT(CR_IPSR)-PT(PR) | ||
1096 | adds r2 = PT(B6)+16,r12 | ||
1097 | adds r3 = PT(B7)+16,r12 | ||
1098 | ;; | ||
1099 | lfetch [r20] | ||
1100 | ;; | ||
1101 | ld8 r24=[r2],16 /* B6 */ | ||
1102 | ld8 r25=[r3],16 /* B7 */ | ||
1103 | ;; | ||
1104 | ld8 r26=[r2],16 /* ar_csd */ | ||
1105 | ld8 r27=[r3],16 /* ar_ssd */ | ||
1106 | mov b6 = r24 | ||
1107 | ;; | ||
1108 | ld8.fill r8=[r2],16 | ||
1109 | ld8.fill r9=[r3],16 | ||
1110 | mov b7 = r25 | ||
1111 | ;; | ||
1112 | mov ar.csd = r26 | ||
1113 | mov ar.ssd = r27 | ||
1114 | ;; | ||
1115 | ld8.fill r10=[r2],PT(R15)-PT(R10) | ||
1116 | ld8.fill r11=[r3],PT(R14)-PT(R11) | ||
1117 | ;; | ||
1118 | ld8.fill r15=[r2],PT(R16)-PT(R15) | ||
1119 | ld8.fill r14=[r3],PT(R17)-PT(R14) | ||
1120 | ;; | ||
1121 | ld8.fill r16=[r2],16 | ||
1122 | ld8.fill r17=[r3],16 | ||
1123 | ;; | ||
1124 | ld8.fill r18=[r2],16 | ||
1125 | ld8.fill r19=[r3],16 | ||
1126 | ;; | ||
1127 | ld8.fill r20=[r2],16 | ||
1128 | ld8.fill r21=[r3],16 | ||
1129 | ;; | ||
1130 | ld8.fill r22=[r2],16 | ||
1131 | ld8.fill r23=[r3],16 | ||
1132 | ;; | ||
1133 | ld8.fill r24=[r2],16 | ||
1134 | ld8.fill r25=[r3],16 | ||
1135 | ;; | ||
1136 | ld8.fill r26=[r2],16 | ||
1137 | ld8.fill r27=[r3],16 | ||
1138 | ;; | ||
1139 | ld8.fill r28=[r2],16 | ||
1140 | ld8.fill r29=[r3],16 | ||
1141 | ;; | ||
1142 | ld8.fill r30=[r2],PT(F6)-PT(R30) | ||
1143 | ld8.fill r31=[r3],PT(F7)-PT(R31) | ||
1144 | ;; | ||
1145 | rsm psr.i | psr.ic | ||
1146 | // initiate turning off of interrupt and interruption collection | ||
1147 | invala // invalidate ALAT | ||
1148 | ;; | ||
1149 | srlz.i // ensure interruption collection is off | ||
1150 | ;; | ||
1151 | bsw.0 | ||
1152 | ;; | ||
1153 | adds r16 = PT(CR_IPSR)+16,r12 | ||
1154 | adds r17 = PT(CR_IIP)+16,r12 | ||
1155 | mov r21=r13 // get current | ||
1156 | ;; | ||
1157 | ld8 r31=[r16],16 // load cr.ipsr | ||
1158 | ld8 r30=[r17],16 // load cr.iip | ||
1159 | ;; | ||
1160 | ld8 r29=[r16],16 // load cr.ifs | ||
1161 | ld8 r28=[r17],16 // load ar.unat | ||
1162 | ;; | ||
1163 | ld8 r27=[r16],16 // load ar.pfs | ||
1164 | ld8 r26=[r17],16 // load ar.rsc | ||
1165 | ;; | ||
1166 | ld8 r25=[r16],16 // load ar.rnat | ||
1167 | ld8 r24=[r17],16 // load ar.bspstore | ||
1168 | ;; | ||
1169 | ld8 r23=[r16],16 // load predicates | ||
1170 | ld8 r22=[r17],16 // load b0 | ||
1171 | ;; | ||
1172 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" | ||
1173 | ld8.fill r1=[r17],16 //load r1 | ||
1174 | ;; | ||
1175 | ld8.fill r12=[r16],16 //load r12 | ||
1176 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 | ||
1177 | ;; | ||
1178 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr | ||
1179 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 | ||
1180 | ;; | ||
1181 | ld8.fill r3=[r16] //load r3 | ||
1182 | ld8 r18=[r17] //load ar_ccv | ||
1183 | ;; | ||
1184 | mov ar.fpsr=r19 | ||
1185 | mov ar.ccv=r18 | ||
1186 | shr.u r18=r20,16 | ||
1187 | ;; | ||
1188 | kvm_rbs_switch: | ||
1189 | mov r19=96 | ||
1190 | |||
1191 | kvm_dont_preserve_current_frame: | ||
1192 | /* | ||
1193 | * To prevent leaking bits between the hypervisor and guest domain, | ||
1194 | * we must clear the stacked registers in the "invalid" partition | ||
1195 | * here (this clears up to 5 registers/cycle on McKinley). | ||
1196 | */ | ||
1197 | # define pRecurse p6 | ||
1198 | # define pReturn p7 | ||
1199 | # define Nregs 14 | ||
1200 | |||
1201 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | ||
1202 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) | ||
1203 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize | ||
1204 | ;; | ||
1205 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" | ||
1206 | shladd in0=loc1,3,r19 | ||
1207 | mov in1=0 | ||
1208 | ;; | ||
1209 | TEXT_ALIGN(32) | ||
1210 | kvm_rse_clear_invalid: | ||
1211 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | ||
1212 | cmp.lt pRecurse,p0=Nregs*8,in0 | ||
1213 | // if more than Nregs regs left to clear, (re)curse | ||
1214 | add out0=-Nregs*8,in0 | ||
1215 | add out1=1,in1 // increment recursion count | ||
1216 | mov loc1=0 | ||
1217 | mov loc2=0 | ||
1218 | ;; | ||
1219 | mov loc3=0 | ||
1220 | mov loc4=0 | ||
1221 | mov loc5=0 | ||
1222 | mov loc6=0 | ||
1223 | mov loc7=0 | ||
1224 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid | ||
1225 | ;; | ||
1226 | mov loc8=0 | ||
1227 | mov loc9=0 | ||
1228 | cmp.ne pReturn,p0=r0,in1 | ||
1229 | // if recursion count != 0, we need to do a br.ret | ||
1230 | mov loc10=0 | ||
1231 | mov loc11=0 | ||
1232 | (pReturn) br.ret.dptk.many b0 | ||
1233 | |||
1234 | # undef pRecurse | ||
1235 | # undef pReturn | ||
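The recursion here is compact but subtle: each activation of kvm_rse_clear_invalid allocates a fresh frame of Nregs (14) stacked registers and zeroes its locals, recursing while more than Nregs*8 bytes of the invalid partition remain; in1 counts the depth so that only nested activations execute the br.ret. A C sketch of the same shape (assuming, as the asm does, that entering a call is what makes a new frame available to clear):

    #define NREGS 14

    /* bytes_left mirrors in0, depth mirrors in1 */
    static void rse_clear_invalid(long bytes_left, long depth)
    {
            /* the "mov locN=0" instructions zero this activation's frame */
            if (bytes_left > NREGS * 8)     /* cmp.lt pRecurse,p0=Nregs*8,in0 */
                    rse_clear_invalid(bytes_left - NREGS * 8, depth + 1);
            (void)depth;  /* (pReturn) br.ret fires for every depth > 0 */
    }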
1236 | |||
1237 | // loadrs has already been shifted | ||
1238 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame | ||
1239 | ;; | ||
1240 | loadrs | ||
1241 | ;; | ||
1242 | mov ar.bspstore=r24 | ||
1243 | ;; | ||
1244 | mov ar.unat=r28 | ||
1245 | mov ar.rnat=r25 | ||
1246 | mov ar.rsc=r26 | ||
1247 | ;; | ||
1248 | mov cr.ipsr=r31 | ||
1249 | mov cr.iip=r30 | ||
1250 | mov cr.ifs=r29 | ||
1251 | mov ar.pfs=r27 | ||
1252 | adds r18=VMM_VPD_BASE_OFFSET,r21 | ||
1253 | ;; | ||
1254 | ld8 r18=[r18] //vpd | ||
1255 | adds r17=VMM_VCPU_ISR_OFFSET,r21 | ||
1256 | ;; | ||
1257 | ld8 r17=[r17] | ||
1258 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | ||
1259 | ;; | ||
1260 | ld8 r19=[r19] //vpsr | ||
1261 | mov r25=r18 | ||
1262 | adds r16= VMM_VCPU_GP_OFFSET,r21 | ||
1263 | ;; | ||
1264 | ld8 r16= [r16] // load gp; used below to form the ia64_vmm_entry address in r24 | ||
1265 | movl r24=@gprel(ia64_vmm_entry) // calculate return address | ||
1266 | ;; | ||
1267 | add r24=r24,r16 | ||
1268 | ;; | ||
1269 | br.sptk.many kvm_vps_sync_write // call the service | ||
1270 | ;; | ||
1271 | END(ia64_leave_hypervisor) | ||
1272 | // fall through | ||
1273 | GLOBAL_ENTRY(ia64_vmm_entry) | ||
1274 | /* | ||
1275 | * must be at bank 0 | ||
1276 | * parameter: | ||
1277 | * r17:cr.isr | ||
1278 | * r18:vpd | ||
1279 | * r19:vpsr | ||
1280 | * r22:b0 | ||
1281 | * r23:predicate | ||
1282 | */ | ||
1283 | mov r24=r22 | ||
1284 | mov r25=r18 | ||
1285 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic | ||
1286 | (p1) br.cond.sptk.few kvm_vps_resume_normal | ||
1287 | (p2) br.cond.sptk.many kvm_vps_resume_handler | ||
1288 | ;; | ||
1289 | END(ia64_vmm_entry) | ||
1290 | |||
1291 | /* | ||
1292 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, | ||
1293 | * u64 arg3, u64 arg4, u64 arg5, | ||
1294 | * u64 arg6, u64 arg7); | ||
1295 | * | ||
1296 | * XXX: The currently defined services use at most 4 args. The | ||
1297 | * rest are not consumed. | ||
1298 | */ | ||
1299 | GLOBAL_ENTRY(ia64_call_vsa) | ||
1300 | .regstk 4,4,0,0 | ||
1301 | |||
1302 | rpsave = loc0 | ||
1303 | pfssave = loc1 | ||
1304 | psrsave = loc2 | ||
1305 | entry = loc3 | ||
1306 | hostret = r24 | ||
1307 | |||
1308 | alloc pfssave=ar.pfs,4,4,0,0 | ||
1309 | mov rpsave=rp | ||
1310 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 | ||
1311 | ;; | ||
1312 | ld8 entry=[entry] | ||
1313 | 1: mov hostret=ip | ||
1314 | mov r25=in1 // copy arguments | ||
1315 | mov r26=in2 | ||
1316 | mov r27=in3 | ||
1317 | mov psrsave=psr | ||
1318 | ;; | ||
1319 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I | ||
1320 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC | ||
1321 | ;; | ||
1322 | add hostret=2f-1b,hostret // calculate return address | ||
1323 | add entry=entry,in0 | ||
1324 | ;; | ||
1325 | rsm psr.i | psr.ic | ||
1326 | ;; | ||
1327 | srlz.i | ||
1328 | mov b6=entry | ||
1329 | br.cond.sptk b6 // call the service | ||
1330 | 2: | ||
1331 | // Architectural sequence for enabling interrupts if necessary | ||
1332 | (p7) ssm psr.ic | ||
1333 | ;; | ||
1334 | (p7) srlz.i | ||
1335 | ;; | ||
1336 | (p6) ssm psr.i | ||
1337 | ;; | ||
1338 | mov rp=rpsave | ||
1339 | mov ar.pfs=pfssave | ||
1340 | mov r8=r31 | ||
1341 | ;; | ||
1342 | srlz.d | ||
1343 | br.ret.sptk rp | ||
1344 | |||
1345 | END(ia64_call_vsa) | ||
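The first argument is a byte offset into the PAL virtual-service table (the PAL_VPS_* constants in vti.h): the stub adds it to the per-vcpu vsa_base and branches there, with in1..in3 copied to r25-r27. A hedged usage sketch (the argument meaning for the sync-write service is illustrative, not documented in this file):

    /* Hypothetical call of the VPS "sync write" service; per the comment
     * above, the trailing arguments are accepted but not consumed. */
    u64 ret = ia64_call_vsa(PAL_VPS_SYNC_WRITE, /* proc: offset into VSA  */
                            (u64)vpd, 0, 0,     /* arg1..arg3 -> r25..r27 */
                            0, 0, 0, 0);        /* arg4..arg7: ignored    */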
1346 | |||
1347 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) | ||
1348 | |||
1349 | GLOBAL_ENTRY(vmm_reset_entry) | ||
1350 | //set up ipsr, iip, vpd.vpsr, dcr | ||
1351 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 | ||
1352 | // For DCR: all bits 0 | ||
1353 | bsw.0 | ||
1354 | ;; | ||
1355 | mov r21 =r13 | ||
1356 | adds r14=-VMM_PT_REGS_SIZE, r12 | ||
1357 | ;; | ||
1358 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 | ||
1359 | movl r10=0x8000000000000000 | ||
1360 | adds r16=PT(CR_IIP), r14 | ||
1361 | adds r20=PT(R1), r14 | ||
1362 | ;; | ||
1363 | rsm psr.ic | psr.i | ||
1364 | ;; | ||
1365 | srlz.i | ||
1366 | ;; | ||
1367 | mov ar.rsc = 0 | ||
1368 | ;; | ||
1369 | flushrs | ||
1370 | ;; | ||
1371 | mov ar.bspstore = 0 | ||
1372 | // clear BSPSTORE | ||
1373 | ;; | ||
1374 | mov cr.ipsr=r6 | ||
1375 | mov cr.ifs=r10 | ||
1376 | ld8 r4 = [r16] // Set init iip for first run. | ||
1377 | ld8 r1 = [r20] | ||
1378 | ;; | ||
1379 | mov cr.iip=r4 | ||
1380 | adds r16=VMM_VPD_BASE_OFFSET,r13 | ||
1381 | ;; | ||
1382 | ld8 r18=[r16] | ||
1383 | ;; | ||
1384 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | ||
1385 | ;; | ||
1386 | ld8 r19=[r19] | ||
1387 | mov r17=r0 | ||
1388 | mov r22=r0 | ||
1389 | mov r23=r0 | ||
1390 | br.cond.sptk ia64_vmm_entry | ||
1391 | br.ret.sptk b0 | ||
1392 | END(vmm_reset_entry) | ||
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h deleted file mode 100644 index b214b5b0432d..000000000000 --- a/arch/ia64/kvm/vti.h +++ /dev/null | |||
@@ -1,290 +0,0 @@ | |||
1 | /* | ||
2 | * vti.h: prototypes for the general VT-related interfaces | ||
3 | * Copyright (c) 2004, Intel Corporation. | ||
4 | * | ||
5 | * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) | ||
6 | * Fred Yang (fred.yang@intel.com) | ||
7 | * Kun Tian (Kevin Tian) (kevin.tian@intel.com) | ||
8 | * | ||
9 | * Copyright (c) 2007, Intel Corporation. | ||
10 | * Zhang xiantao <xiantao.zhang@intel.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms and conditions of the GNU General Public License, | ||
14 | * version 2, as published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
19 | * more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License along with | ||
22 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
23 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
24 | */ | ||
25 | #ifndef _KVM_VT_I_H | ||
26 | #define _KVM_VT_I_H | ||
27 | |||
28 | #ifndef __ASSEMBLY__ | ||
29 | #include <asm/page.h> | ||
30 | |||
31 | #include <linux/kvm_host.h> | ||
32 | |||
33 | /* define itr.i and itr.d in ia64_itr function */ | ||
34 | #define ITR 0x01 | ||
35 | #define DTR 0x02 | ||
36 | #define IaDTR 0x03 | ||
37 | |||
38 | #define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/ | ||
39 | #define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/ | ||
40 | |||
41 | #define RR6 (6UL<<61) | ||
42 | #define RR7 (7UL<<61) | ||
43 | |||
44 | |||
45 | /* config_options in pal_vp_init_env */ | ||
46 | #define VP_INITIALIZE 1UL | ||
47 | #define VP_FR_PMC (1UL<<1) | ||
48 | #define VP_OPCODE (1UL<<8) | ||
49 | #define VP_CAUSE (1UL<<9) | ||
50 | #define VP_FW_ACC (1UL<<63) | ||
51 | |||
52 | /* init vp env with initializing vm_buffer */ | ||
53 | #define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\ | ||
54 | VP_OPCODE | VP_CAUSE | VP_FW_ACC) | ||
55 | /* init vp env without initializing vm_buffer */ | ||
56 | #define VP_INIT_ENV (VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC) | ||
57 | |||
58 | #define PAL_VP_CREATE 265 | ||
59 | /* Stacked Virt. Initializes a new VPD for the operation of | ||
60 | * a new virtual processor in the virtual environment. | ||
61 | */ | ||
62 | #define PAL_VP_ENV_INFO 266 | ||
63 | /*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/ | ||
64 | #define PAL_VP_EXIT_ENV 267 | ||
65 | /*Stacked Virt. Allows a logical processor to exit a virtual environment.*/ | ||
66 | #define PAL_VP_INIT_ENV 268 | ||
67 | /*Stacked Virt. Allows a logical processor to enter a virtual environment.*/ | ||
68 | #define PAL_VP_REGISTER 269 | ||
69 | /*Stacked Virt. Register a different host IVT for the virtual processor.*/ | ||
70 | #define PAL_VP_RESUME 270 | ||
71 | /* Renamed from PAL_VP_RESUME */ | ||
72 | #define PAL_VP_RESTORE 270 | ||
73 | /*Stacked Virt. Resumes virtual processor operation on the logical processor.*/ | ||
74 | #define PAL_VP_SUSPEND 271 | ||
75 | /* Renamed from PAL_VP_SUSPEND */ | ||
76 | #define PAL_VP_SAVE 271 | ||
77 | /* Stacked Virt. Suspends operation for the specified virtual processor on | ||
78 | * the logical processor. | ||
79 | */ | ||
80 | #define PAL_VP_TERMINATE 272 | ||
81 | /* Stacked Virt. Terminates operation for the specified virtual processor.*/ | ||
82 | |||
83 | union vac { | ||
84 | unsigned long value; | ||
85 | struct { | ||
86 | unsigned int a_int:1; | ||
87 | unsigned int a_from_int_cr:1; | ||
88 | unsigned int a_to_int_cr:1; | ||
89 | unsigned int a_from_psr:1; | ||
90 | unsigned int a_from_cpuid:1; | ||
91 | unsigned int a_cover:1; | ||
92 | unsigned int a_bsw:1; | ||
93 | long reserved:57; | ||
94 | }; | ||
95 | }; | ||
96 | |||
97 | union vdc { | ||
98 | unsigned long value; | ||
99 | struct { | ||
100 | unsigned int d_vmsw:1; | ||
101 | unsigned int d_extint:1; | ||
102 | unsigned int d_ibr_dbr:1; | ||
103 | unsigned int d_pmc:1; | ||
104 | unsigned int d_to_pmd:1; | ||
105 | unsigned int d_itm:1; | ||
106 | long reserved:58; | ||
107 | }; | ||
108 | }; | ||
109 | |||
110 | struct vpd { | ||
111 | union vac vac; | ||
112 | union vdc vdc; | ||
113 | unsigned long virt_env_vaddr; | ||
114 | unsigned long reserved1[29]; | ||
115 | unsigned long vhpi; | ||
116 | unsigned long reserved2[95]; | ||
117 | unsigned long vgr[16]; | ||
118 | unsigned long vbgr[16]; | ||
119 | unsigned long vnat; | ||
120 | unsigned long vbnat; | ||
121 | unsigned long vcpuid[5]; | ||
122 | unsigned long reserved3[11]; | ||
123 | unsigned long vpsr; | ||
124 | unsigned long vpr; | ||
125 | unsigned long reserved4[76]; | ||
126 | union { | ||
127 | unsigned long vcr[128]; | ||
128 | struct { | ||
129 | unsigned long dcr; | ||
130 | unsigned long itm; | ||
131 | unsigned long iva; | ||
132 | unsigned long rsv1[5]; | ||
133 | unsigned long pta; | ||
134 | unsigned long rsv2[7]; | ||
135 | unsigned long ipsr; | ||
136 | unsigned long isr; | ||
137 | unsigned long rsv3; | ||
138 | unsigned long iip; | ||
139 | unsigned long ifa; | ||
140 | unsigned long itir; | ||
141 | unsigned long iipa; | ||
142 | unsigned long ifs; | ||
143 | unsigned long iim; | ||
144 | unsigned long iha; | ||
145 | unsigned long rsv4[38]; | ||
146 | unsigned long lid; | ||
147 | unsigned long ivr; | ||
148 | unsigned long tpr; | ||
149 | unsigned long eoi; | ||
150 | unsigned long irr[4]; | ||
151 | unsigned long itv; | ||
152 | unsigned long pmv; | ||
153 | unsigned long cmcv; | ||
154 | unsigned long rsv5[5]; | ||
155 | unsigned long lrr0; | ||
156 | unsigned long lrr1; | ||
157 | unsigned long rsv6[46]; | ||
158 | }; | ||
159 | }; | ||
160 | unsigned long reserved5[128]; | ||
161 | unsigned long reserved6[3456]; | ||
162 | unsigned long vmm_avail[128]; | ||
163 | unsigned long reserved7[4096]; | ||
164 | }; | ||
165 | |||
166 | #define PAL_PROC_VM_BIT (1UL << 40) | ||
167 | #define PAL_PROC_VMSW_BIT (1UL << 54) | ||
168 | |||
169 | static inline s64 ia64_pal_vp_env_info(u64 *buffer_size, | ||
170 | u64 *vp_env_info) | ||
171 | { | ||
172 | struct ia64_pal_retval iprv; | ||
173 | PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0); | ||
174 | *buffer_size = iprv.v0; | ||
175 | *vp_env_info = iprv.v1; | ||
176 | return iprv.status; | ||
177 | } | ||
178 | |||
179 | static inline s64 ia64_pal_vp_exit_env(u64 iva) | ||
180 | { | ||
181 | struct ia64_pal_retval iprv; | ||
182 | |||
183 | PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0); | ||
184 | return iprv.status; | ||
185 | } | ||
186 | |||
187 | static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr, | ||
188 | u64 vbase_addr, u64 *vsa_base) | ||
189 | { | ||
190 | struct ia64_pal_retval iprv; | ||
191 | |||
192 | PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr, | ||
193 | vbase_addr); | ||
194 | *vsa_base = iprv.v0; | ||
195 | |||
196 | return iprv.status; | ||
197 | } | ||
198 | |||
199 | static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector) | ||
200 | { | ||
201 | struct ia64_pal_retval iprv; | ||
202 | |||
203 | PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0); | ||
204 | |||
205 | return iprv.status; | ||
206 | } | ||
207 | |||
208 | static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector) | ||
209 | { | ||
210 | struct ia64_pal_retval iprv; | ||
211 | |||
212 | PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0); | ||
213 | |||
214 | return iprv.status; | ||
215 | } | ||
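All of these wrappers follow the usual ia64 stacked PAL-call convention: the call status comes back in iprv.status and any results in iprv.v0/iprv.v1. A minimal usage sketch, assuming a pre-allocated vm_buffer (a hypothetical name here) and eliding error handling:

    u64 buf_size, vp_env_info, vsa_base;
    s64 status;

    /* ask PAL how large its per-environment buffer must be ... */
    status = ia64_pal_vp_env_info(&buf_size, &vp_env_info);
    if (status == 0)
            /* ... then initialize the virtual environment with one */
            status = ia64_pal_vp_init_env(VP_INIT_ENV_INITALIZE,
                                          __pa(vm_buffer), (u64)vm_buffer,
                                          &vsa_base);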
216 | |||
217 | #endif | ||
218 | |||
219 | /* VPD field offsets */ | ||
220 | #define VPD_VAC_START_OFFSET 0 | ||
221 | #define VPD_VDC_START_OFFSET 8 | ||
222 | #define VPD_VHPI_START_OFFSET 256 | ||
223 | #define VPD_VGR_START_OFFSET 1024 | ||
224 | #define VPD_VBGR_START_OFFSET 1152 | ||
225 | #define VPD_VNAT_START_OFFSET 1280 | ||
226 | #define VPD_VBNAT_START_OFFSET 1288 | ||
227 | #define VPD_VCPUID_START_OFFSET 1296 | ||
228 | #define VPD_VPSR_START_OFFSET 1424 | ||
229 | #define VPD_VPR_START_OFFSET 1432 | ||
230 | #define VPD_VRSE_CFLE_START_OFFSET 1440 | ||
231 | #define VPD_VCR_START_OFFSET 2048 | ||
232 | #define VPD_VTPR_START_OFFSET 2576 | ||
233 | #define VPD_VRR_START_OFFSET 3072 | ||
234 | #define VPD_VMM_VAIL_START_OFFSET 31744 | ||
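These constants are byte offsets into struct vpd above, kept in sync by hand for the assembly side. They can be verified arithmetically: vhpi sits at vac(8) + vdc(8) + virt_env_vaddr(8) + reserved1(29*8) = 256, and vpsr at a further vhpi(8) + reserved2(95*8) + vgr(128) + vbgr(128) + vnat(8) + vbnat(8) + vcpuid(40) + reserved3(88) = 1424. A compile-time sanity-check sketch (C11; not in the original source):

    #include <stddef.h>

    /* Tie the hand-maintained offsets to the actual struct layout. */
    _Static_assert(offsetof(struct vpd, vhpi) == VPD_VHPI_START_OFFSET, "vhpi");
    _Static_assert(offsetof(struct vpd, vgr)  == VPD_VGR_START_OFFSET,  "vgr");
    _Static_assert(offsetof(struct vpd, vpsr) == VPD_VPSR_START_OFFSET, "vpsr");
    _Static_assert(offsetof(struct vpd, vcr)  == VPD_VCR_START_OFFSET,  "vcr");
    _Static_assert(offsetof(struct vpd, vmm_avail) ==
                   VPD_VMM_VAIL_START_OFFSET, "vmm_avail");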
235 | |||
236 | /*Virtualization faults*/ | ||
237 | |||
238 | #define EVENT_MOV_TO_AR 1 | ||
239 | #define EVENT_MOV_TO_AR_IMM 2 | ||
240 | #define EVENT_MOV_FROM_AR 3 | ||
241 | #define EVENT_MOV_TO_CR 4 | ||
242 | #define EVENT_MOV_FROM_CR 5 | ||
243 | #define EVENT_MOV_TO_PSR 6 | ||
244 | #define EVENT_MOV_FROM_PSR 7 | ||
245 | #define EVENT_ITC_D 8 | ||
246 | #define EVENT_ITC_I 9 | ||
247 | #define EVENT_MOV_TO_RR 10 | ||
248 | #define EVENT_MOV_TO_DBR 11 | ||
249 | #define EVENT_MOV_TO_IBR 12 | ||
250 | #define EVENT_MOV_TO_PKR 13 | ||
251 | #define EVENT_MOV_TO_PMC 14 | ||
252 | #define EVENT_MOV_TO_PMD 15 | ||
253 | #define EVENT_ITR_D 16 | ||
254 | #define EVENT_ITR_I 17 | ||
255 | #define EVENT_MOV_FROM_RR 18 | ||
256 | #define EVENT_MOV_FROM_DBR 19 | ||
257 | #define EVENT_MOV_FROM_IBR 20 | ||
258 | #define EVENT_MOV_FROM_PKR 21 | ||
259 | #define EVENT_MOV_FROM_PMC 22 | ||
260 | #define EVENT_MOV_FROM_CPUID 23 | ||
261 | #define EVENT_SSM 24 | ||
262 | #define EVENT_RSM 25 | ||
263 | #define EVENT_PTC_L 26 | ||
264 | #define EVENT_PTC_G 27 | ||
265 | #define EVENT_PTC_GA 28 | ||
266 | #define EVENT_PTR_D 29 | ||
267 | #define EVENT_PTR_I 30 | ||
268 | #define EVENT_THASH 31 | ||
269 | #define EVENT_TTAG 32 | ||
270 | #define EVENT_TPA 33 | ||
271 | #define EVENT_TAK 34 | ||
272 | #define EVENT_PTC_E 35 | ||
273 | #define EVENT_COVER 36 | ||
274 | #define EVENT_RFI 37 | ||
275 | #define EVENT_BSW_0 38 | ||
276 | #define EVENT_BSW_1 39 | ||
277 | #define EVENT_VMSW 40 | ||
278 | |||
279 | /* PAL virtual services offsets */ | ||
280 | #define PAL_VPS_RESUME_NORMAL 0x0000 | ||
281 | #define PAL_VPS_RESUME_HANDLER 0x0400 | ||
282 | #define PAL_VPS_SYNC_READ 0x0800 | ||
283 | #define PAL_VPS_SYNC_WRITE 0x0c00 | ||
284 | #define PAL_VPS_SET_PENDING_INTERRUPT 0x1000 | ||
285 | #define PAL_VPS_THASH 0x1400 | ||
286 | #define PAL_VPS_TTAG 0x1800 | ||
287 | #define PAL_VPS_RESTORE 0x1c00 | ||
288 | #define PAL_VPS_SAVE 0x2000 | ||
289 | |||
290 | #endif /* _KVM_VT_I_H */ | ||
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c deleted file mode 100644 index a7869f8f49a6..000000000000 --- a/arch/ia64/kvm/vtlb.c +++ /dev/null | |||
@@ -1,640 +0,0 @@ | |||
1 | /* | ||
2 | * vtlb.c: guest virtual tlb handling module. | ||
3 | * Copyright (c) 2004, Intel Corporation. | ||
4 | * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com> | ||
5 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
6 | * | ||
7 | * Copyright (c) 2007, Intel Corporation. | ||
8 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
9 | * Xiantao Zhang <xiantao.zhang@intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms and conditions of the GNU General Public License, | ||
13 | * version 2, as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
22 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "vcpu.h" | ||
27 | |||
28 | #include <linux/rwsem.h> | ||
29 | |||
30 | #include <asm/tlb.h> | ||
31 | |||
32 | /* | ||
33 | * Check to see if the address rid:va is translated by the TLB | ||
34 | */ | ||
35 | |||
36 | static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va) | ||
37 | { | ||
38 | return ((trp->p) && (trp->rid == rid) | ||
39 | && ((va-trp->vadr) < PSIZE(trp->ps))); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * Only for GUEST TR format. | ||
44 | */ | ||
45 | static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva) | ||
46 | { | ||
47 | u64 sa1, ea1; | ||
48 | |||
49 | if (!trp->p || trp->rid != rid) | ||
50 | return 0; | ||
51 | |||
52 | sa1 = trp->vadr; | ||
53 | ea1 = sa1 + PSIZE(trp->ps) - 1; | ||
54 | eva -= 1; | ||
55 | if ((sva > ea1) || (sa1 > eva)) | ||
56 | return 0; | ||
57 | else | ||
58 | return 1; | ||
59 | |||
60 | } | ||
61 | |||
62 | void machine_tlb_purge(u64 va, u64 ps) | ||
63 | { | ||
64 | ia64_ptcl(va, ps << 2); | ||
65 | } | ||
66 | |||
67 | void local_flush_tlb_all(void) | ||
68 | { | ||
69 | int i, j; | ||
70 | unsigned long flags, count0, count1; | ||
71 | unsigned long stride0, stride1, addr; | ||
72 | |||
73 | addr = current_vcpu->arch.ptce_base; | ||
74 | count0 = current_vcpu->arch.ptce_count[0]; | ||
75 | count1 = current_vcpu->arch.ptce_count[1]; | ||
76 | stride0 = current_vcpu->arch.ptce_stride[0]; | ||
77 | stride1 = current_vcpu->arch.ptce_stride[1]; | ||
78 | |||
79 | local_irq_save(flags); | ||
80 | for (i = 0; i < count0; ++i) { | ||
81 | for (j = 0; j < count1; ++j) { | ||
82 | ia64_ptce(addr); | ||
83 | addr += stride1; | ||
84 | } | ||
85 | addr += stride0; | ||
86 | } | ||
87 | local_irq_restore(flags); | ||
88 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | ||
89 | } | ||
90 | |||
91 | int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref) | ||
92 | { | ||
93 | union ia64_rr vrr; | ||
94 | union ia64_pta vpta; | ||
95 | struct ia64_psr vpsr; | ||
96 | |||
97 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
98 | vrr.val = vcpu_get_rr(vcpu, vadr); | ||
99 | vpta.val = vcpu_get_pta(vcpu); | ||
100 | |||
101 | if (vrr.ve & vpta.ve) { | ||
102 | switch (ref) { | ||
103 | case DATA_REF: | ||
104 | case NA_REF: | ||
105 | return vpsr.dt; | ||
106 | case INST_REF: | ||
107 | return vpsr.dt && vpsr.it && vpsr.ic; | ||
108 | case RSE_REF: | ||
109 | return vpsr.dt && vpsr.rt; | ||
110 | |||
111 | } | ||
112 | } | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag) | ||
117 | { | ||
118 | u64 index, pfn, rid, pfn_bits; | ||
119 | |||
120 | pfn_bits = vpta.size - 5 - 8; | ||
121 | pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr); | ||
122 | rid = _REGION_ID(vrr); | ||
123 | index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1)); | ||
124 | *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16); | ||
125 | |||
126 | return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) + | ||
127 | (index << 5)); | ||
128 | } | ||
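The magic numbers follow from the layout: each struct thash_data entry is 32 bytes (hence the '<< 5' and the '-5' in pfn_bits), and eight index bits are reserved for the low byte of the rid (the '-8'). A worked example with a hypothetical vpta.size of 24:

    u64 size = 24;
    u64 pfn_bits = size - 5 - 8;             /* 11 pfn bits in the index */
    u64 entries  = 1UL << (pfn_bits + 8);    /* 2^19 hash entries        */
    u64 bytes    = entries << 5;             /* x 32 bytes each = 16 MB  */

The rid and pfn bits that do not fit in the index go into the tag, so index plus tag together identify rid:va.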
129 | |||
130 | struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type) | ||
131 | { | ||
132 | |||
133 | struct thash_data *trp; | ||
134 | int i; | ||
135 | u64 rid; | ||
136 | |||
137 | rid = vcpu_get_rr(vcpu, va); | ||
138 | rid = rid & RR_RID_MASK; | ||
139 | if (type == D_TLB) { | ||
140 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { | ||
141 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; | ||
142 | i < NDTRS; i++, trp++) { | ||
143 | if (__is_tr_translated(trp, rid, va)) | ||
144 | return trp; | ||
145 | } | ||
146 | } | ||
147 | } else { | ||
148 | if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { | ||
149 | for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; | ||
150 | i < NITRS; i++, trp++) { | ||
151 | if (__is_tr_translated(trp, rid, va)) | ||
152 | return trp; | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | |||
157 | return NULL; | ||
158 | } | ||
159 | |||
160 | static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte) | ||
161 | { | ||
162 | union ia64_rr rr; | ||
163 | struct thash_data *head; | ||
164 | unsigned long ps, gpaddr; | ||
165 | |||
166 | ps = itir_ps(itir); | ||
167 | rr.val = ia64_get_rr(ifa); | ||
168 | |||
169 | gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | | ||
170 | (ifa & ((1UL << ps) - 1)); | ||
171 | |||
172 | head = (struct thash_data *)ia64_thash(ifa); | ||
173 | head->etag = INVALID_TI_TAG; | ||
174 | ia64_mf(); | ||
175 | head->page_flags = pte & ~PAGE_FLAGS_RV_MASK; | ||
176 | head->itir = rr.ps << 2; | ||
177 | head->etag = ia64_ttag(ifa); | ||
178 | head->gpaddr = gpaddr; | ||
179 | } | ||
180 | |||
181 | void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps) | ||
182 | { | ||
183 | u64 i, dirty_pages = 1; | ||
184 | u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; | ||
185 | vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); | ||
186 | void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; | ||
187 | |||
188 | dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; | ||
189 | |||
190 | vmm_spin_lock(lock); | ||
191 | for (i = 0; i < dirty_pages; i++) { | ||
192 | /* avoid RMW */ | ||
193 | if (!test_bit(base_gfn + i, dirty_bitmap)) | ||
194 | set_bit(base_gfn + i , dirty_bitmap); | ||
195 | } | ||
196 | vmm_spin_unlock(lock); | ||
197 | } | ||
198 | |||
199 | void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) | ||
200 | { | ||
201 | u64 phy_pte, psr; | ||
202 | union ia64_rr mrr; | ||
203 | |||
204 | mrr.val = ia64_get_rr(va); | ||
205 | phy_pte = translate_phy_pte(&pte, itir, va); | ||
206 | |||
207 | if (itir_ps(itir) >= mrr.ps) { | ||
208 | vhpt_insert(phy_pte, itir, va, pte); | ||
209 | } else { | ||
210 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | ||
211 | psr = ia64_clear_ic(); | ||
212 | ia64_itc(type, va, phy_pte, itir_ps(itir)); | ||
213 | paravirt_dv_serialize_data(); | ||
214 | ia64_set_psr(psr); | ||
215 | } | ||
216 | |||
217 | if (!(pte&VTLB_PTE_IO)) | ||
218 | mark_pages_dirty(v, pte, itir_ps(itir)); | ||
219 | } | ||
220 | |||
221 | /* | ||
222 | * vhpt lookup | ||
223 | */ | ||
224 | struct thash_data *vhpt_lookup(u64 va) | ||
225 | { | ||
226 | struct thash_data *head; | ||
227 | u64 tag; | ||
228 | |||
229 | head = (struct thash_data *)ia64_thash(va); | ||
230 | tag = ia64_ttag(va); | ||
231 | if (head->etag == tag) | ||
232 | return head; | ||
233 | return NULL; | ||
234 | } | ||
235 | |||
236 | u64 guest_vhpt_lookup(u64 iha, u64 *pte) | ||
237 | { | ||
238 | u64 ret; | ||
239 | struct thash_data *data; | ||
240 | |||
241 | data = __vtr_lookup(current_vcpu, iha, D_TLB); | ||
242 | if (data != NULL) | ||
243 | thash_vhpt_insert(current_vcpu, data->page_flags, | ||
244 | data->itir, iha, D_TLB); | ||
245 | |||
246 | asm volatile ("rsm psr.ic|psr.i;;" | ||
247 | "srlz.d;;" | ||
248 | "ld8.s r9=[%1];;" | ||
249 | "tnat.nz p6,p7=r9;;" | ||
250 | "(p6) mov %0=1;" | ||
251 | "(p6) mov r9=r0;" | ||
252 | "(p7) extr.u r9=r9,0,53;;" | ||
253 | "(p7) mov %0=r0;" | ||
254 | "(p7) st8 [%2]=r9;;" | ||
255 | "ssm psr.ic;;" | ||
256 | "srlz.d;;" | ||
257 | "ssm psr.i;;" | ||
258 | "srlz.d;;" | ||
259 | : "=&r"(ret) : "r"(iha), "r"(pte) : "memory"); | ||
260 | |||
261 | return ret; | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * purge software guest tlb | ||
266 | */ | ||
267 | |||
268 | static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps) | ||
269 | { | ||
270 | struct thash_data *cur; | ||
271 | u64 start, curadr, size, psbits, tag, rr_ps, num; | ||
272 | union ia64_rr vrr; | ||
273 | struct thash_cb *hcb = &v->arch.vtlb; | ||
274 | |||
275 | vrr.val = vcpu_get_rr(v, va); | ||
276 | psbits = VMX(v, psbits[(va >> 61)]); | ||
277 | start = va & ~((1UL << ps) - 1); | ||
278 | while (psbits) { | ||
279 | curadr = start; | ||
280 | rr_ps = __ffs(psbits); | ||
281 | psbits &= ~(1UL << rr_ps); | ||
282 | num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps)); | ||
283 | size = PSIZE(rr_ps); | ||
284 | vrr.ps = rr_ps; | ||
285 | while (num) { | ||
286 | cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag); | ||
287 | if (cur->etag == tag && cur->ps == rr_ps) | ||
288 | cur->etag = INVALID_TI_TAG; | ||
289 | curadr += size; | ||
290 | num--; | ||
291 | } | ||
292 | } | ||
293 | } | ||
294 | |||
295 | |||
296 | /* | ||
297 | * purge VHPT and machine TLB | ||
298 | */ | ||
299 | static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps) | ||
300 | { | ||
301 | struct thash_data *cur; | ||
302 | u64 start, size, tag, num; | ||
303 | union ia64_rr rr; | ||
304 | |||
305 | start = va & ~((1UL << ps) - 1); | ||
306 | rr.val = ia64_get_rr(va); | ||
307 | size = PSIZE(rr.ps); | ||
308 | num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps)); | ||
309 | while (num) { | ||
310 | cur = (struct thash_data *)ia64_thash(start); | ||
311 | tag = ia64_ttag(start); | ||
312 | if (cur->etag == tag) | ||
313 | cur->etag = INVALID_TI_TAG; | ||
314 | start += size; | ||
315 | num--; | ||
316 | } | ||
317 | machine_tlb_purge(va, ps); | ||
318 | } | ||
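Both purge loops walk the purged range one machine hash slot at a time: size is the machine page size 2^rr.ps, and num is 2^(ps - rr.ps) slots when the purged region of 2^ps bytes spans several machine pages (one slot otherwise). A worked example with hypothetical sizes, purging 1 MB backed by 16 KB machine pages:

    unsigned long ps = 20, rr_ps = 14;                /* 1 MB, 16 KB pages */
    unsigned long size = 1UL << rr_ps;                /* PSIZE(rr.ps)      */
    unsigned long num  = 1UL << ((ps < rr_ps) ? 0 : ps - rr_ps);  /* 64    */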
319 | |||
320 | /* | ||
321 | * Insert an entry into hash TLB or VHPT. | ||
322 | * NOTES: | ||
323 | * 1: When inserting a VHPT entry into thash, "va" must be an | ||
324 | * address covered by the inserted machine VHPT entry. | ||
325 | * 2: The entry format is always the TLB format. | ||
326 | * 3: The caller needs to make sure the new entry does not | ||
327 | * overlap any existing entry. | ||
328 | */ | ||
329 | void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va) | ||
330 | { | ||
331 | struct thash_data *head; | ||
332 | union ia64_rr vrr; | ||
333 | u64 tag; | ||
334 | struct thash_cb *hcb = &v->arch.vtlb; | ||
335 | |||
336 | vrr.val = vcpu_get_rr(v, va); | ||
337 | vrr.ps = itir_ps(itir); | ||
338 | VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps); | ||
339 | head = vsa_thash(hcb->pta, va, vrr.val, &tag); | ||
340 | head->page_flags = pte; | ||
341 | head->itir = itir; | ||
342 | head->etag = tag; | ||
343 | } | ||
344 | |||
345 | int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type) | ||
346 | { | ||
347 | struct thash_data *trp; | ||
348 | int i; | ||
349 | u64 end, rid; | ||
350 | |||
351 | rid = vcpu_get_rr(vcpu, va); | ||
352 | rid = rid & RR_RID_MASK; | ||
353 | end = va + PSIZE(ps); | ||
354 | if (type == D_TLB) { | ||
355 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { | ||
356 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; | ||
357 | i < NDTRS; i++, trp++) { | ||
358 | if (__is_tr_overlap(trp, rid, va, end)) | ||
359 | return i; | ||
360 | } | ||
361 | } | ||
362 | } else { | ||
363 | if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { | ||
364 | for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; | ||
365 | i < NITRS; i++, trp++) { | ||
366 | if (__is_tr_overlap(trp, rid, va, end)) | ||
367 | return i; | ||
368 | } | ||
369 | } | ||
370 | } | ||
371 | return -1; | ||
372 | } | ||
373 | |||
374 | /* | ||
375 | * Purge entries in VTLB and VHPT | ||
376 | */ | ||
377 | void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps) | ||
378 | { | ||
379 | if (vcpu_quick_region_check(v->arch.tc_regions, va)) | ||
380 | vtlb_purge(v, va, ps); | ||
381 | vhpt_purge(v, va, ps); | ||
382 | } | ||
383 | |||
384 | void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps) | ||
385 | { | ||
386 | u64 old_va = va; | ||
387 | va = REGION_OFFSET(va); | ||
388 | if (vcpu_quick_region_check(v->arch.tc_regions, old_va)) | ||
389 | vtlb_purge(v, va, ps); | ||
390 | vhpt_purge(v, va, ps); | ||
391 | } | ||
392 | |||
393 | u64 translate_phy_pte(u64 *pte, u64 itir, u64 va) | ||
394 | { | ||
395 | u64 ps, ps_mask, paddr, maddr, io_mask; | ||
396 | union pte_flags phy_pte; | ||
397 | |||
398 | ps = itir_ps(itir); | ||
399 | ps_mask = ~((1UL << ps) - 1); | ||
400 | phy_pte.val = *pte; | ||
401 | paddr = *pte; | ||
402 | paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask); | ||
403 | maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT); | ||
404 | io_mask = maddr & GPFN_IO_MASK; | ||
405 | if (io_mask && (io_mask != GPFN_PHYS_MMIO)) { | ||
406 | *pte |= VTLB_PTE_IO; | ||
407 | return -1; | ||
408 | } | ||
409 | maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | | ||
410 | (paddr & ~PAGE_MASK); | ||
411 | phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT; | ||
412 | return phy_pte.val; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * Purge overlap TCs and then insert the new entry to emulate itc ops. | ||
417 | * Notes: Only TC entries can be purged and inserted this way. | ||
418 | */ | ||
419 | void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, | ||
420 | u64 ifa, int type) | ||
421 | { | ||
422 | u64 ps; | ||
423 | u64 phy_pte, io_mask, index; | ||
424 | union ia64_rr vrr, mrr; | ||
425 | |||
426 | ps = itir_ps(itir); | ||
427 | vrr.val = vcpu_get_rr(v, ifa); | ||
428 | mrr.val = ia64_get_rr(ifa); | ||
429 | |||
430 | index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; | ||
431 | io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK; | ||
432 | phy_pte = translate_phy_pte(&pte, itir, ifa); | ||
433 | |||
434 | /* Ensure WB attribute if pte is related to a normal mem page, | ||
435 | * which is required by vga acceleration since qemu maps shared | ||
436 | * vram buffer with WB. | ||
437 | */ | ||
438 | if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) && | ||
439 | io_mask != GPFN_PHYS_MMIO) { | ||
440 | pte &= ~_PAGE_MA_MASK; | ||
441 | phy_pte &= ~_PAGE_MA_MASK; | ||
442 | } | ||
443 | |||
444 | vtlb_purge(v, ifa, ps); | ||
445 | vhpt_purge(v, ifa, ps); | ||
446 | |||
447 | if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) { | ||
448 | vtlb_insert(v, pte, itir, ifa); | ||
449 | vcpu_quick_region_set(VMX(v, tc_regions), ifa); | ||
450 | } | ||
451 | if (pte & VTLB_PTE_IO) | ||
452 | return; | ||
453 | |||
454 | if (ps >= mrr.ps) | ||
455 | vhpt_insert(phy_pte, itir, ifa, pte); | ||
456 | else { | ||
457 | u64 psr; | ||
458 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | ||
459 | psr = ia64_clear_ic(); | ||
460 | ia64_itc(type, ifa, phy_pte, ps); | ||
461 | paravirt_dv_serialize_data(); | ||
462 | ia64_set_psr(psr); | ||
463 | } | ||
464 | if (!(pte&VTLB_PTE_IO)) | ||
465 | mark_pages_dirty(v, pte, ps); | ||
466 | |||
467 | } | ||
468 | |||
469 | /* | ||
470 | * Purge all TCs or VHPT entries including those in Hash table. | ||
471 | * | ||
472 | */ | ||
473 | |||
474 | void thash_purge_all(struct kvm_vcpu *v) | ||
475 | { | ||
476 | int i; | ||
477 | struct thash_data *head; | ||
478 | struct thash_cb *vtlb, *vhpt; | ||
479 | vtlb = &v->arch.vtlb; | ||
480 | vhpt = &v->arch.vhpt; | ||
481 | |||
482 | for (i = 0; i < 8; i++) | ||
483 | VMX(v, psbits[i]) = 0; | ||
484 | |||
485 | head = vtlb->hash; | ||
486 | for (i = 0; i < vtlb->num; i++) { | ||
487 | head->page_flags = 0; | ||
488 | head->etag = INVALID_TI_TAG; | ||
489 | head->itir = 0; | ||
490 | head->next = 0; | ||
491 | head++; | ||
492 | } | ||
493 | |||
494 | head = vhpt->hash; | ||
495 | for (i = 0; i < vhpt->num; i++) { | ||
496 | head->page_flags = 0; | ||
497 | head->etag = INVALID_TI_TAG; | ||
498 | head->itir = 0; | ||
499 | head->next = 0; | ||
500 | head++; | ||
501 | } | ||
502 | |||
503 | local_flush_tlb_all(); | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * Lookup the hash table and its collision chain to find an entry | ||
508 | * covering this address rid:va. | ||
509 | * | ||
510 | * INPUT: | ||
511 | * in: TLB format for both VHPT & TLB. | ||
512 | */ | ||
513 | struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) | ||
514 | { | ||
515 | struct thash_data *cch; | ||
516 | u64 psbits, ps, tag; | ||
517 | union ia64_rr vrr; | ||
518 | |||
519 | struct thash_cb *hcb = &v->arch.vtlb; | ||
520 | |||
521 | cch = __vtr_lookup(v, va, is_data); | ||
522 | if (cch) | ||
523 | return cch; | ||
524 | |||
525 | if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0) | ||
526 | return NULL; | ||
527 | |||
528 | psbits = VMX(v, psbits[(va >> 61)]); | ||
529 | vrr.val = vcpu_get_rr(v, va); | ||
530 | while (psbits) { | ||
531 | ps = __ffs(psbits); | ||
532 | psbits &= ~(1UL << ps); | ||
533 | vrr.ps = ps; | ||
534 | cch = vsa_thash(hcb->pta, va, vrr.val, &tag); | ||
535 | if (cch->etag == tag && cch->ps == ps) | ||
536 | return cch; | ||
537 | } | ||
538 | |||
539 | return NULL; | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * Initialize internal control data before service. | ||
544 | */ | ||
545 | void thash_init(struct thash_cb *hcb, u64 sz) | ||
546 | { | ||
547 | int i; | ||
548 | struct thash_data *head; | ||
549 | |||
550 | hcb->pta.val = (unsigned long)hcb->hash; | ||
551 | hcb->pta.vf = 1; | ||
552 | hcb->pta.ve = 1; | ||
553 | hcb->pta.size = sz; | ||
554 | head = hcb->hash; | ||
555 | for (i = 0; i < hcb->num; i++) { | ||
556 | head->page_flags = 0; | ||
557 | head->itir = 0; | ||
558 | head->etag = INVALID_TI_TAG; | ||
559 | head->next = 0; | ||
560 | head++; | ||
561 | } | ||
562 | } | ||
563 | |||
564 | u64 kvm_get_mpt_entry(u64 gpfn) | ||
565 | { | ||
566 | u64 *base = (u64 *) KVM_P2M_BASE; | ||
567 | |||
568 | if (gpfn >= (KVM_P2M_SIZE >> 3)) | ||
569 | panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn); | ||
570 | |||
571 | return *(base + gpfn); | ||
572 | } | ||
573 | |||
574 | u64 kvm_lookup_mpa(u64 gpfn) | ||
575 | { | ||
576 | u64 maddr; | ||
577 | maddr = kvm_get_mpt_entry(gpfn); | ||
578 | return maddr&_PAGE_PPN_MASK; | ||
579 | } | ||
580 | |||
581 | u64 kvm_gpa_to_mpa(u64 gpa) | ||
582 | { | ||
583 | u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT); | ||
584 | return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK); | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * Fetch guest bundle code. | ||
589 | * INPUT: | ||
590 | * gip: guest ip | ||
591 | * pbundle: used to return fetched bundle. | ||
592 | */ | ||
593 | int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle) | ||
594 | { | ||
595 | u64 gpip = 0; /* guest physical IP*/ | ||
596 | u64 *vpa; | ||
597 | struct thash_data *tlb; | ||
598 | u64 maddr; | ||
599 | |||
600 | if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) { | ||
601 | /* I-side physical mode */ | ||
602 | gpip = gip; | ||
603 | } else { | ||
604 | tlb = vtlb_lookup(vcpu, gip, I_TLB); | ||
605 | if (tlb) | ||
606 | gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | | ||
607 | (gip & (PSIZE(tlb->ps) - 1)); | ||
608 | } | ||
609 | if (gpip) { | ||
610 | maddr = kvm_gpa_to_mpa(gpip); | ||
611 | } else { | ||
612 | tlb = vhpt_lookup(gip); | ||
613 | if (tlb == NULL) { | ||
614 | ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2); | ||
615 | return IA64_FAULT; | ||
616 | } | ||
617 | maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | ||
618 | | (gip & (PSIZE(tlb->ps) - 1)); | ||
619 | } | ||
620 | vpa = (u64 *)__kvm_va(maddr); | ||
621 | |||
622 | pbundle->i64[0] = *vpa++; | ||
623 | pbundle->i64[1] = *vpa; | ||
624 | |||
625 | return IA64_NO_FAULT; | ||
626 | } | ||
627 | |||
628 | void kvm_init_vhpt(struct kvm_vcpu *v) | ||
629 | { | ||
630 | v->arch.vhpt.num = VHPT_NUM_ENTRIES; | ||
631 | thash_init(&v->arch.vhpt, VHPT_SHIFT); | ||
632 | ia64_set_pta(v->arch.vhpt.pta.val); | ||
633 | /*Enable VHPT here?*/ | ||
634 | } | ||
635 | |||
636 | void kvm_init_vtlb(struct kvm_vcpu *v) | ||
637 | { | ||
638 | v->arch.vtlb.num = VTLB_NUM_ENTRIES; | ||
639 | thash_init(&v->arch.vtlb, VTLB_SHIFT); | ||
640 | } | ||