diff options
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r-- | arch/ia64/kvm/Kconfig | 49 | ||||
-rw-r--r-- | arch/ia64/kvm/Makefile | 61 | ||||
-rw-r--r-- | arch/ia64/kvm/asm-offsets.c | 251 | ||||
-rw-r--r-- | arch/ia64/kvm/kvm-ia64.c | 1806 | ||||
-rw-r--r-- | arch/ia64/kvm/kvm_fw.c | 500 | ||||
-rw-r--r-- | arch/ia64/kvm/kvm_minstate.h | 273 | ||||
-rw-r--r-- | arch/ia64/kvm/lapic.h | 25 | ||||
-rw-r--r-- | arch/ia64/kvm/misc.h | 93 | ||||
-rw-r--r-- | arch/ia64/kvm/mmio.c | 341 | ||||
-rw-r--r-- | arch/ia64/kvm/optvfault.S | 918 | ||||
-rw-r--r-- | arch/ia64/kvm/process.c | 970 | ||||
-rw-r--r-- | arch/ia64/kvm/trampoline.S | 1038 | ||||
-rw-r--r-- | arch/ia64/kvm/vcpu.c | 2163 | ||||
-rw-r--r-- | arch/ia64/kvm/vcpu.h | 740 | ||||
-rw-r--r-- | arch/ia64/kvm/vmm.c | 66 | ||||
-rw-r--r-- | arch/ia64/kvm/vmm_ivt.S | 1424 | ||||
-rw-r--r-- | arch/ia64/kvm/vti.h | 290 | ||||
-rw-r--r-- | arch/ia64/kvm/vtlb.c | 636 |
18 files changed, 11644 insertions, 0 deletions
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig new file mode 100644 index 000000000000..7914e4828504 --- /dev/null +++ b/arch/ia64/kvm/Kconfig | |||
@@ -0,0 +1,49 @@ | |||
1 | # | ||
2 | # KVM configuration | ||
3 | # | ||
4 | config HAVE_KVM | ||
5 | bool | ||
6 | |||
7 | menuconfig VIRTUALIZATION | ||
8 | bool "Virtualization" | ||
9 | depends on HAVE_KVM || IA64 | ||
10 | default y | ||
11 | ---help--- | ||
12 | Say Y here to get to see options for using your Linux host to run other | ||
13 | operating systems inside virtual machines (guests). | ||
14 | This option alone does not add any kernel code. | ||
15 | |||
16 | If you say N, all options in this submenu will be skipped and disabled. | ||
17 | |||
18 | if VIRTUALIZATION | ||
19 | |||
20 | config KVM | ||
21 | tristate "Kernel-based Virtual Machine (KVM) support" | ||
22 | depends on HAVE_KVM && EXPERIMENTAL | ||
23 | select PREEMPT_NOTIFIERS | ||
24 | select ANON_INODES | ||
25 | ---help--- | ||
26 | Support hosting fully virtualized guest machines using hardware | ||
27 | virtualization extensions. You will need a fairly recent | ||
28 | processor equipped with virtualization extensions. You will also | ||
29 | need to select one or more of the processor modules below. | ||
30 | |||
31 | This module provides access to the hardware capabilities through | ||
32 | a character device node named /dev/kvm. | ||
33 | |||
34 | To compile this as a module, choose M here: the module | ||
35 | will be called kvm. | ||
36 | |||
37 | If unsure, say N. | ||
38 | |||
39 | config KVM_INTEL | ||
40 | tristate "KVM for Intel Itanium 2 processors support" | ||
41 | depends on KVM && m | ||
42 | ---help--- | ||
43 | Provides support for KVM on Itanium 2 processors equipped with the VT | ||
44 | extensions. | ||
45 | |||
46 | config KVM_TRACE | ||
47 | bool | ||
48 | |||
49 | endif # VIRTUALIZATION | ||
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile new file mode 100644 index 000000000000..41b034ffa73b --- /dev/null +++ b/arch/ia64/kvm/Makefile | |||
@@ -0,0 +1,61 @@ | |||
1 | #This Make file is to generate asm-offsets.h and build source. | ||
2 | # | ||
3 | |||
4 | #Generate asm-offsets.h for vmm module build | ||
5 | offsets-file := asm-offsets.h | ||
6 | |||
7 | always := $(offsets-file) | ||
8 | targets := $(offsets-file) | ||
9 | targets += arch/ia64/kvm/asm-offsets.s | ||
10 | clean-files := $(addprefix $(objtree)/,$(targets) $(obj)/memcpy.S $(obj)/memset.S) | ||
11 | |||
12 | # Default sed regexp - multiline due to syntax constraints | ||
13 | define sed-y | ||
14 | "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}" | ||
15 | endef | ||
16 | |||
17 | quiet_cmd_offsets = GEN $@ | ||
18 | define cmd_offsets | ||
19 | (set -e; \ | ||
20 | echo "#ifndef __ASM_KVM_OFFSETS_H__"; \ | ||
21 | echo "#define __ASM_KVM_OFFSETS_H__"; \ | ||
22 | echo "/*"; \ | ||
23 | echo " * DO NOT MODIFY."; \ | ||
24 | echo " *"; \ | ||
25 | echo " * This file was generated by Makefile"; \ | ||
26 | echo " *"; \ | ||
27 | echo " */"; \ | ||
28 | echo ""; \ | ||
29 | sed -ne $(sed-y) $<; \ | ||
30 | echo ""; \ | ||
31 | echo "#endif" ) > $@ | ||
32 | endef | ||
33 | # We use internal rules to avoid the "is up to date" message from make | ||
34 | arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c | ||
35 | $(call if_changed_dep,cc_s_c) | ||
36 | |||
37 | $(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s | ||
38 | $(call cmd,offsets) | ||
39 | |||
40 | # | ||
41 | # Makefile for Kernel-based Virtual Machine module | ||
42 | # | ||
43 | |||
44 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ | ||
45 | |||
46 | $(addprefix $(objtree)/,$(obj)/memcpy.S $(obj)/memset.S): | ||
47 | $(shell ln -snf ../lib/memcpy.S $(src)/memcpy.S) | ||
48 | $(shell ln -snf ../lib/memset.S $(src)/memset.S) | ||
49 | |||
50 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o) | ||
51 | |||
52 | kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o | ||
53 | obj-$(CONFIG_KVM) += kvm.o | ||
54 | |||
55 | FORCE : $(obj)/$(offsets-file) | ||
56 | EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127 | ||
57 | kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \ | ||
58 | vtlb.o process.o | ||
59 | #Add link memcpy and memset to avoid possible structure assignment error | ||
60 | kvm-intel-objs += memset.o memcpy.o | ||
61 | obj-$(CONFIG_KVM_INTEL) += kvm-intel.o | ||
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c new file mode 100644 index 000000000000..4e3dc13a619c --- /dev/null +++ b/arch/ia64/kvm/asm-offsets.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * asm-offsets.c Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed | ||
4 | * to extract and format the required data. | ||
5 | * | ||
6 | * Anthony Xu <anthony.xu@intel.com> | ||
7 | * Xiantao Zhang <xiantao.zhang@intel.com> | ||
8 | * Copyright (c) 2007 Intel Corporation KVM support. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms and conditions of the GNU General Public License, | ||
12 | * version 2, as published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
21 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/autoconf.h> | ||
26 | #include <linux/kvm_host.h> | ||
27 | |||
28 | #include "vcpu.h" | ||
29 | |||
30 | #define task_struct kvm_vcpu | ||
31 | |||
32 | #define DEFINE(sym, val) \ | ||
33 | asm volatile("\n->" #sym " (%0) " #val : : "i" (val)) | ||
34 | |||
35 | #define BLANK() asm volatile("\n->" : :) | ||
36 | |||
37 | #define OFFSET(_sym, _str, _mem) \ | ||
38 | DEFINE(_sym, offsetof(_str, _mem)); | ||
39 | |||
40 | void foo(void) | ||
41 | { | ||
42 | DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu)); | ||
43 | DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs)); | ||
44 | |||
45 | BLANK(); | ||
46 | |||
47 | DEFINE(VMM_VCPU_META_RR0_OFFSET, | ||
48 | offsetof(struct kvm_vcpu, arch.metaphysical_rr0)); | ||
49 | DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET, | ||
50 | offsetof(struct kvm_vcpu, | ||
51 | arch.metaphysical_saved_rr0)); | ||
52 | DEFINE(VMM_VCPU_VRR0_OFFSET, | ||
53 | offsetof(struct kvm_vcpu, arch.vrr[0])); | ||
54 | DEFINE(VMM_VPD_IRR0_OFFSET, | ||
55 | offsetof(struct vpd, irr[0])); | ||
56 | DEFINE(VMM_VCPU_ITC_CHECK_OFFSET, | ||
57 | offsetof(struct kvm_vcpu, arch.itc_check)); | ||
58 | DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET, | ||
59 | offsetof(struct kvm_vcpu, arch.irq_check)); | ||
60 | DEFINE(VMM_VPD_VHPI_OFFSET, | ||
61 | offsetof(struct vpd, vhpi)); | ||
62 | DEFINE(VMM_VCPU_VSA_BASE_OFFSET, | ||
63 | offsetof(struct kvm_vcpu, arch.vsa_base)); | ||
64 | DEFINE(VMM_VCPU_VPD_OFFSET, | ||
65 | offsetof(struct kvm_vcpu, arch.vpd)); | ||
66 | DEFINE(VMM_VCPU_IRQ_CHECK, | ||
67 | offsetof(struct kvm_vcpu, arch.irq_check)); | ||
68 | DEFINE(VMM_VCPU_TIMER_PENDING, | ||
69 | offsetof(struct kvm_vcpu, arch.timer_pending)); | ||
70 | DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET, | ||
71 | offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0)); | ||
72 | DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET, | ||
73 | offsetof(struct kvm_vcpu, arch.mode_flags)); | ||
74 | DEFINE(VMM_VCPU_ITC_OFS_OFFSET, | ||
75 | offsetof(struct kvm_vcpu, arch.itc_offset)); | ||
76 | DEFINE(VMM_VCPU_LAST_ITC_OFFSET, | ||
77 | offsetof(struct kvm_vcpu, arch.last_itc)); | ||
78 | DEFINE(VMM_VCPU_SAVED_GP_OFFSET, | ||
79 | offsetof(struct kvm_vcpu, arch.saved_gp)); | ||
80 | |||
81 | BLANK(); | ||
82 | |||
83 | DEFINE(VMM_PT_REGS_B6_OFFSET, | ||
84 | offsetof(struct kvm_pt_regs, b6)); | ||
85 | DEFINE(VMM_PT_REGS_B7_OFFSET, | ||
86 | offsetof(struct kvm_pt_regs, b7)); | ||
87 | DEFINE(VMM_PT_REGS_AR_CSD_OFFSET, | ||
88 | offsetof(struct kvm_pt_regs, ar_csd)); | ||
89 | DEFINE(VMM_PT_REGS_AR_SSD_OFFSET, | ||
90 | offsetof(struct kvm_pt_regs, ar_ssd)); | ||
91 | DEFINE(VMM_PT_REGS_R8_OFFSET, | ||
92 | offsetof(struct kvm_pt_regs, r8)); | ||
93 | DEFINE(VMM_PT_REGS_R9_OFFSET, | ||
94 | offsetof(struct kvm_pt_regs, r9)); | ||
95 | DEFINE(VMM_PT_REGS_R10_OFFSET, | ||
96 | offsetof(struct kvm_pt_regs, r10)); | ||
97 | DEFINE(VMM_PT_REGS_R11_OFFSET, | ||
98 | offsetof(struct kvm_pt_regs, r11)); | ||
99 | DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET, | ||
100 | offsetof(struct kvm_pt_regs, cr_ipsr)); | ||
101 | DEFINE(VMM_PT_REGS_CR_IIP_OFFSET, | ||
102 | offsetof(struct kvm_pt_regs, cr_iip)); | ||
103 | DEFINE(VMM_PT_REGS_CR_IFS_OFFSET, | ||
104 | offsetof(struct kvm_pt_regs, cr_ifs)); | ||
105 | DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET, | ||
106 | offsetof(struct kvm_pt_regs, ar_unat)); | ||
107 | DEFINE(VMM_PT_REGS_AR_PFS_OFFSET, | ||
108 | offsetof(struct kvm_pt_regs, ar_pfs)); | ||
109 | DEFINE(VMM_PT_REGS_AR_RSC_OFFSET, | ||
110 | offsetof(struct kvm_pt_regs, ar_rsc)); | ||
111 | DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET, | ||
112 | offsetof(struct kvm_pt_regs, ar_rnat)); | ||
113 | |||
114 | DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET, | ||
115 | offsetof(struct kvm_pt_regs, ar_bspstore)); | ||
116 | DEFINE(VMM_PT_REGS_PR_OFFSET, | ||
117 | offsetof(struct kvm_pt_regs, pr)); | ||
118 | DEFINE(VMM_PT_REGS_B0_OFFSET, | ||
119 | offsetof(struct kvm_pt_regs, b0)); | ||
120 | DEFINE(VMM_PT_REGS_LOADRS_OFFSET, | ||
121 | offsetof(struct kvm_pt_regs, loadrs)); | ||
122 | DEFINE(VMM_PT_REGS_R1_OFFSET, | ||
123 | offsetof(struct kvm_pt_regs, r1)); | ||
124 | DEFINE(VMM_PT_REGS_R12_OFFSET, | ||
125 | offsetof(struct kvm_pt_regs, r12)); | ||
126 | DEFINE(VMM_PT_REGS_R13_OFFSET, | ||
127 | offsetof(struct kvm_pt_regs, r13)); | ||
128 | DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET, | ||
129 | offsetof(struct kvm_pt_regs, ar_fpsr)); | ||
130 | DEFINE(VMM_PT_REGS_R15_OFFSET, | ||
131 | offsetof(struct kvm_pt_regs, r15)); | ||
132 | DEFINE(VMM_PT_REGS_R14_OFFSET, | ||
133 | offsetof(struct kvm_pt_regs, r14)); | ||
134 | DEFINE(VMM_PT_REGS_R2_OFFSET, | ||
135 | offsetof(struct kvm_pt_regs, r2)); | ||
136 | DEFINE(VMM_PT_REGS_R3_OFFSET, | ||
137 | offsetof(struct kvm_pt_regs, r3)); | ||
138 | DEFINE(VMM_PT_REGS_R16_OFFSET, | ||
139 | offsetof(struct kvm_pt_regs, r16)); | ||
140 | DEFINE(VMM_PT_REGS_R17_OFFSET, | ||
141 | offsetof(struct kvm_pt_regs, r17)); | ||
142 | DEFINE(VMM_PT_REGS_R18_OFFSET, | ||
143 | offsetof(struct kvm_pt_regs, r18)); | ||
144 | DEFINE(VMM_PT_REGS_R19_OFFSET, | ||
145 | offsetof(struct kvm_pt_regs, r19)); | ||
146 | DEFINE(VMM_PT_REGS_R20_OFFSET, | ||
147 | offsetof(struct kvm_pt_regs, r20)); | ||
148 | DEFINE(VMM_PT_REGS_R21_OFFSET, | ||
149 | offsetof(struct kvm_pt_regs, r21)); | ||
150 | DEFINE(VMM_PT_REGS_R22_OFFSET, | ||
151 | offsetof(struct kvm_pt_regs, r22)); | ||
152 | DEFINE(VMM_PT_REGS_R23_OFFSET, | ||
153 | offsetof(struct kvm_pt_regs, r23)); | ||
154 | DEFINE(VMM_PT_REGS_R24_OFFSET, | ||
155 | offsetof(struct kvm_pt_regs, r24)); | ||
156 | DEFINE(VMM_PT_REGS_R25_OFFSET, | ||
157 | offsetof(struct kvm_pt_regs, r25)); | ||
158 | DEFINE(VMM_PT_REGS_R26_OFFSET, | ||
159 | offsetof(struct kvm_pt_regs, r26)); | ||
160 | DEFINE(VMM_PT_REGS_R27_OFFSET, | ||
161 | offsetof(struct kvm_pt_regs, r27)); | ||
162 | DEFINE(VMM_PT_REGS_R28_OFFSET, | ||
163 | offsetof(struct kvm_pt_regs, r28)); | ||
164 | DEFINE(VMM_PT_REGS_R29_OFFSET, | ||
165 | offsetof(struct kvm_pt_regs, r29)); | ||
166 | DEFINE(VMM_PT_REGS_R30_OFFSET, | ||
167 | offsetof(struct kvm_pt_regs, r30)); | ||
168 | DEFINE(VMM_PT_REGS_R31_OFFSET, | ||
169 | offsetof(struct kvm_pt_regs, r31)); | ||
170 | DEFINE(VMM_PT_REGS_AR_CCV_OFFSET, | ||
171 | offsetof(struct kvm_pt_regs, ar_ccv)); | ||
172 | DEFINE(VMM_PT_REGS_F6_OFFSET, | ||
173 | offsetof(struct kvm_pt_regs, f6)); | ||
174 | DEFINE(VMM_PT_REGS_F7_OFFSET, | ||
175 | offsetof(struct kvm_pt_regs, f7)); | ||
176 | DEFINE(VMM_PT_REGS_F8_OFFSET, | ||
177 | offsetof(struct kvm_pt_regs, f8)); | ||
178 | DEFINE(VMM_PT_REGS_F9_OFFSET, | ||
179 | offsetof(struct kvm_pt_regs, f9)); | ||
180 | DEFINE(VMM_PT_REGS_F10_OFFSET, | ||
181 | offsetof(struct kvm_pt_regs, f10)); | ||
182 | DEFINE(VMM_PT_REGS_F11_OFFSET, | ||
183 | offsetof(struct kvm_pt_regs, f11)); | ||
184 | DEFINE(VMM_PT_REGS_R4_OFFSET, | ||
185 | offsetof(struct kvm_pt_regs, r4)); | ||
186 | DEFINE(VMM_PT_REGS_R5_OFFSET, | ||
187 | offsetof(struct kvm_pt_regs, r5)); | ||
188 | DEFINE(VMM_PT_REGS_R6_OFFSET, | ||
189 | offsetof(struct kvm_pt_regs, r6)); | ||
190 | DEFINE(VMM_PT_REGS_R7_OFFSET, | ||
191 | offsetof(struct kvm_pt_regs, r7)); | ||
192 | DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET, | ||
193 | offsetof(struct kvm_pt_regs, eml_unat)); | ||
194 | DEFINE(VMM_VCPU_IIPA_OFFSET, | ||
195 | offsetof(struct kvm_vcpu, arch.cr_iipa)); | ||
196 | DEFINE(VMM_VCPU_OPCODE_OFFSET, | ||
197 | offsetof(struct kvm_vcpu, arch.opcode)); | ||
198 | DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause)); | ||
199 | DEFINE(VMM_VCPU_ISR_OFFSET, | ||
200 | offsetof(struct kvm_vcpu, arch.cr_isr)); | ||
201 | DEFINE(VMM_PT_REGS_R16_SLOT, | ||
202 | (((offsetof(struct kvm_pt_regs, r16) | ||
203 | - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f)); | ||
204 | DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET, | ||
205 | offsetof(struct kvm_vcpu, arch.mode_flags)); | ||
206 | DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp)); | ||
207 | BLANK(); | ||
208 | |||
209 | DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd)); | ||
210 | DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs)); | ||
211 | DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET, | ||
212 | offsetof(struct kvm_vcpu, arch.insvc[0])); | ||
213 | DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta)); | ||
214 | DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr)); | ||
215 | |||
216 | DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4])); | ||
217 | DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5])); | ||
218 | DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12])); | ||
219 | DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13])); | ||
220 | DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0])); | ||
221 | DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1])); | ||
222 | DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0])); | ||
223 | DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1])); | ||
224 | DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2])); | ||
225 | DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0])); | ||
226 | DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16])); | ||
227 | DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18])); | ||
228 | DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19])); | ||
229 | DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21])); | ||
230 | DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24])); | ||
231 | DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27])); | ||
232 | DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28])); | ||
233 | DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29])); | ||
234 | DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30])); | ||
235 | DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36])); | ||
236 | DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40])); | ||
237 | DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64])); | ||
238 | DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65])); | ||
239 | DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0])); | ||
240 | DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2])); | ||
241 | DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8])); | ||
242 | DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0])); | ||
243 | DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0])); | ||
244 | DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2])); | ||
245 | DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3])); | ||
246 | DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32])); | ||
247 | DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33])); | ||
248 | DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0])); | ||
249 | DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr)); | ||
250 | BLANK(); | ||
251 | } | ||
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c new file mode 100644 index 000000000000..6df073240135 --- /dev/null +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -0,0 +1,1806 @@ | |||
1 | |||
2 | /* | ||
3 | * kvm_ia64.c: Basic KVM suppport On Itanium series processors | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007, Intel Corporation. | ||
7 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License, | ||
11 | * version 2, as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
20 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/percpu.h> | ||
27 | #include <linux/gfp.h> | ||
28 | #include <linux/fs.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/kvm_host.h> | ||
31 | #include <linux/kvm.h> | ||
32 | #include <linux/bitops.h> | ||
33 | #include <linux/hrtimer.h> | ||
34 | #include <linux/uaccess.h> | ||
35 | |||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/gcc_intrin.h> | ||
38 | #include <asm/pal.h> | ||
39 | #include <asm/cacheflush.h> | ||
40 | #include <asm/div64.h> | ||
41 | #include <asm/tlb.h> | ||
42 | |||
43 | #include "misc.h" | ||
44 | #include "vti.h" | ||
45 | #include "iodev.h" | ||
46 | #include "ioapic.h" | ||
47 | #include "lapic.h" | ||
48 | |||
49 | static unsigned long kvm_vmm_base; | ||
50 | static unsigned long kvm_vsa_base; | ||
51 | static unsigned long kvm_vm_buffer; | ||
52 | static unsigned long kvm_vm_buffer_size; | ||
53 | unsigned long kvm_vmm_gp; | ||
54 | |||
55 | static long vp_env_info; | ||
56 | |||
57 | static struct kvm_vmm_info *kvm_vmm_info; | ||
58 | |||
59 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); | ||
60 | |||
61 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
62 | { NULL } | ||
63 | }; | ||
64 | |||
65 | |||
66 | struct fdesc{ | ||
67 | unsigned long ip; | ||
68 | unsigned long gp; | ||
69 | }; | ||
70 | |||
71 | static void kvm_flush_icache(unsigned long start, unsigned long len) | ||
72 | { | ||
73 | int l; | ||
74 | |||
75 | for (l = 0; l < (len + 32); l += 32) | ||
76 | ia64_fc(start + l); | ||
77 | |||
78 | ia64_sync_i(); | ||
79 | ia64_srlz_i(); | ||
80 | } | ||
81 | |||
82 | static void kvm_flush_tlb_all(void) | ||
83 | { | ||
84 | unsigned long i, j, count0, count1, stride0, stride1, addr; | ||
85 | long flags; | ||
86 | |||
87 | addr = local_cpu_data->ptce_base; | ||
88 | count0 = local_cpu_data->ptce_count[0]; | ||
89 | count1 = local_cpu_data->ptce_count[1]; | ||
90 | stride0 = local_cpu_data->ptce_stride[0]; | ||
91 | stride1 = local_cpu_data->ptce_stride[1]; | ||
92 | |||
93 | local_irq_save(flags); | ||
94 | for (i = 0; i < count0; ++i) { | ||
95 | for (j = 0; j < count1; ++j) { | ||
96 | ia64_ptce(addr); | ||
97 | addr += stride1; | ||
98 | } | ||
99 | addr += stride0; | ||
100 | } | ||
101 | local_irq_restore(flags); | ||
102 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | ||
103 | } | ||
104 | |||
105 | long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | ||
106 | { | ||
107 | struct ia64_pal_retval iprv; | ||
108 | |||
109 | PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, | ||
110 | (u64)opt_handler); | ||
111 | |||
112 | return iprv.status; | ||
113 | } | ||
114 | |||
115 | static DEFINE_SPINLOCK(vp_lock); | ||
116 | |||
117 | void kvm_arch_hardware_enable(void *garbage) | ||
118 | { | ||
119 | long status; | ||
120 | long tmp_base; | ||
121 | unsigned long pte; | ||
122 | unsigned long saved_psr; | ||
123 | int slot; | ||
124 | |||
125 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | ||
126 | PAGE_KERNEL)); | ||
127 | local_irq_save(saved_psr); | ||
128 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
129 | if (slot < 0) | ||
130 | return; | ||
131 | local_irq_restore(saved_psr); | ||
132 | |||
133 | spin_lock(&vp_lock); | ||
134 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | ||
135 | VP_INIT_ENV : VP_INIT_ENV_INITALIZE, | ||
136 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | ||
137 | if (status != 0) { | ||
138 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); | ||
139 | return ; | ||
140 | } | ||
141 | |||
142 | if (!kvm_vsa_base) { | ||
143 | kvm_vsa_base = tmp_base; | ||
144 | printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); | ||
145 | } | ||
146 | spin_unlock(&vp_lock); | ||
147 | ia64_ptr_entry(0x3, slot); | ||
148 | } | ||
149 | |||
150 | void kvm_arch_hardware_disable(void *garbage) | ||
151 | { | ||
152 | |||
153 | long status; | ||
154 | int slot; | ||
155 | unsigned long pte; | ||
156 | unsigned long saved_psr; | ||
157 | unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); | ||
158 | |||
159 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | ||
160 | PAGE_KERNEL)); | ||
161 | |||
162 | local_irq_save(saved_psr); | ||
163 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | ||
164 | if (slot < 0) | ||
165 | return; | ||
166 | local_irq_restore(saved_psr); | ||
167 | |||
168 | status = ia64_pal_vp_exit_env(host_iva); | ||
169 | if (status) | ||
170 | printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", | ||
171 | status); | ||
172 | ia64_ptr_entry(0x3, slot); | ||
173 | } | ||
174 | |||
175 | void kvm_arch_check_processor_compat(void *rtn) | ||
176 | { | ||
177 | *(int *)rtn = 0; | ||
178 | } | ||
179 | |||
180 | int kvm_dev_ioctl_check_extension(long ext) | ||
181 | { | ||
182 | |||
183 | int r; | ||
184 | |||
185 | switch (ext) { | ||
186 | case KVM_CAP_IRQCHIP: | ||
187 | case KVM_CAP_USER_MEMORY: | ||
188 | |||
189 | r = 1; | ||
190 | break; | ||
191 | default: | ||
192 | r = 0; | ||
193 | } | ||
194 | return r; | ||
195 | |||
196 | } | ||
197 | |||
198 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, | ||
199 | gpa_t addr) | ||
200 | { | ||
201 | struct kvm_io_device *dev; | ||
202 | |||
203 | dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); | ||
204 | |||
205 | return dev; | ||
206 | } | ||
207 | |||
208 | static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
209 | { | ||
210 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
211 | kvm_run->hw.hardware_exit_reason = 1; | ||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
216 | { | ||
217 | struct kvm_mmio_req *p; | ||
218 | struct kvm_io_device *mmio_dev; | ||
219 | |||
220 | p = kvm_get_vcpu_ioreq(vcpu); | ||
221 | |||
222 | if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) | ||
223 | goto mmio; | ||
224 | vcpu->mmio_needed = 1; | ||
225 | vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; | ||
226 | vcpu->mmio_size = kvm_run->mmio.len = p->size; | ||
227 | vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; | ||
228 | |||
229 | if (vcpu->mmio_is_write) | ||
230 | memcpy(vcpu->mmio_data, &p->data, p->size); | ||
231 | memcpy(kvm_run->mmio.data, &p->data, p->size); | ||
232 | kvm_run->exit_reason = KVM_EXIT_MMIO; | ||
233 | return 0; | ||
234 | mmio: | ||
235 | mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr); | ||
236 | if (mmio_dev) { | ||
237 | if (!p->dir) | ||
238 | kvm_iodevice_write(mmio_dev, p->addr, p->size, | ||
239 | &p->data); | ||
240 | else | ||
241 | kvm_iodevice_read(mmio_dev, p->addr, p->size, | ||
242 | &p->data); | ||
243 | |||
244 | } else | ||
245 | printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); | ||
246 | p->state = STATE_IORESP_READY; | ||
247 | |||
248 | return 1; | ||
249 | } | ||
250 | |||
251 | static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
252 | { | ||
253 | struct exit_ctl_data *p; | ||
254 | |||
255 | p = kvm_get_exit_data(vcpu); | ||
256 | |||
257 | if (p->exit_reason == EXIT_REASON_PAL_CALL) | ||
258 | return kvm_pal_emul(vcpu, kvm_run); | ||
259 | else { | ||
260 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
261 | kvm_run->hw.hardware_exit_reason = 2; | ||
262 | return 0; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
267 | { | ||
268 | struct exit_ctl_data *p; | ||
269 | |||
270 | p = kvm_get_exit_data(vcpu); | ||
271 | |||
272 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
273 | kvm_sal_emul(vcpu); | ||
274 | return 1; | ||
275 | } else { | ||
276 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
277 | kvm_run->hw.hardware_exit_reason = 3; | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | } | ||
282 | |||
283 | /* | ||
284 | * offset: address offset to IPI space. | ||
285 | * value: deliver value. | ||
286 | */ | ||
287 | static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, | ||
288 | uint64_t vector) | ||
289 | { | ||
290 | switch (dm) { | ||
291 | case SAPIC_FIXED: | ||
292 | kvm_apic_set_irq(vcpu, vector, 0); | ||
293 | break; | ||
294 | case SAPIC_NMI: | ||
295 | kvm_apic_set_irq(vcpu, 2, 0); | ||
296 | break; | ||
297 | case SAPIC_EXTINT: | ||
298 | kvm_apic_set_irq(vcpu, 0, 0); | ||
299 | break; | ||
300 | case SAPIC_INIT: | ||
301 | case SAPIC_PMI: | ||
302 | default: | ||
303 | printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); | ||
304 | break; | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, | ||
309 | unsigned long eid) | ||
310 | { | ||
311 | union ia64_lid lid; | ||
312 | int i; | ||
313 | |||
314 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | ||
315 | if (kvm->vcpus[i]) { | ||
316 | lid.val = VCPU_LID(kvm->vcpus[i]); | ||
317 | if (lid.id == id && lid.eid == eid) | ||
318 | return kvm->vcpus[i]; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | return NULL; | ||
323 | } | ||
324 | |||
325 | static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
326 | { | ||
327 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | ||
328 | struct kvm_vcpu *target_vcpu; | ||
329 | struct kvm_pt_regs *regs; | ||
330 | union ia64_ipi_a addr = p->u.ipi_data.addr; | ||
331 | union ia64_ipi_d data = p->u.ipi_data.data; | ||
332 | |||
333 | target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); | ||
334 | if (!target_vcpu) | ||
335 | return handle_vm_error(vcpu, kvm_run); | ||
336 | |||
337 | if (!target_vcpu->arch.launched) { | ||
338 | regs = vcpu_regs(target_vcpu); | ||
339 | |||
340 | regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; | ||
341 | regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; | ||
342 | |||
343 | target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | ||
344 | if (waitqueue_active(&target_vcpu->wq)) | ||
345 | wake_up_interruptible(&target_vcpu->wq); | ||
346 | } else { | ||
347 | vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); | ||
348 | if (target_vcpu != vcpu) | ||
349 | kvm_vcpu_kick(target_vcpu); | ||
350 | } | ||
351 | |||
352 | return 1; | ||
353 | } | ||
354 | |||
355 | struct call_data { | ||
356 | struct kvm_ptc_g ptc_g_data; | ||
357 | struct kvm_vcpu *vcpu; | ||
358 | }; | ||
359 | |||
360 | static void vcpu_global_purge(void *info) | ||
361 | { | ||
362 | struct call_data *p = (struct call_data *)info; | ||
363 | struct kvm_vcpu *vcpu = p->vcpu; | ||
364 | |||
365 | if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | ||
366 | return; | ||
367 | |||
368 | set_bit(KVM_REQ_PTC_G, &vcpu->requests); | ||
369 | if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { | ||
370 | vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = | ||
371 | p->ptc_g_data; | ||
372 | } else { | ||
373 | clear_bit(KVM_REQ_PTC_G, &vcpu->requests); | ||
374 | vcpu->arch.ptc_g_count = 0; | ||
375 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); | ||
376 | } | ||
377 | } | ||
378 | |||
379 | static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
380 | { | ||
381 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | ||
382 | struct kvm *kvm = vcpu->kvm; | ||
383 | struct call_data call_data; | ||
384 | int i; | ||
385 | call_data.ptc_g_data = p->u.ptc_g_data; | ||
386 | |||
387 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | ||
388 | if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == | ||
389 | KVM_MP_STATE_UNINITIALIZED || | ||
390 | vcpu == kvm->vcpus[i]) | ||
391 | continue; | ||
392 | |||
393 | if (waitqueue_active(&kvm->vcpus[i]->wq)) | ||
394 | wake_up_interruptible(&kvm->vcpus[i]->wq); | ||
395 | |||
396 | if (kvm->vcpus[i]->cpu != -1) { | ||
397 | call_data.vcpu = kvm->vcpus[i]; | ||
398 | smp_call_function_single(kvm->vcpus[i]->cpu, | ||
399 | vcpu_global_purge, &call_data, 0, 1); | ||
400 | } else | ||
401 | printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); | ||
402 | |||
403 | } | ||
404 | return 1; | ||
405 | } | ||
406 | |||
407 | static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
408 | { | ||
409 | return 1; | ||
410 | } | ||
411 | |||
/*
 * Emulate a guest halt: block the vcpu until its next virtual timer
 * match (vpd->itm) or until it is woken, driven by a host hrtimer.
 * Returns 1 to resume the guest, -EINTR if woken while not runnable,
 * 0 when the irqchip lives in userspace (unsupported here).
 */
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{

	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;

	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	/* Guest-visible ITC = host ITC + per-vcpu offset. */
	vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;

	if (time_after(vcpu_now_itc, vpd->itm)) {
		/* Timer already due: let the guest take the interrupt now. */
		vcpu->arch.timer_check = 1;
		return 1;
	}
	itc_diff = vpd->itm - vcpu_now_itc;
	if (itc_diff < 0)
		itc_diff = -itc_diff;

	/* Convert ITC cycles to usecs, then to a ns ktime for the hrtimer. */
	expires = div64_64(itc_diff, cyc_per_usec);
	kt = ktime_set(0, 1000 * expires);
	vcpu->arch.ht_active = 1;
	/*
	 * NOTE(review): kt is a relative interval but the timer is armed
	 * with HRTIMER_MODE_ABS — confirm this is intentional.
	 */
	hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		/* Woken for some reason other than becoming runnable. */
		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!");
		return 0;
	}
}
453 | |||
/* Guest requested VM destruction: hand the shutdown to userspace. */
static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
	struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
460 | |||
/* Host interrupt during guest mode was already serviced; just resume. */
static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}
466 | |||
467 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, | ||
468 | struct kvm_run *kvm_run) = { | ||
469 | [EXIT_REASON_VM_PANIC] = handle_vm_error, | ||
470 | [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, | ||
471 | [EXIT_REASON_PAL_CALL] = handle_pal_call, | ||
472 | [EXIT_REASON_SAL_CALL] = handle_sal_call, | ||
473 | [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, | ||
474 | [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, | ||
475 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | ||
476 | [EXIT_REASON_IPI] = handle_ipi, | ||
477 | [EXIT_REASON_PTC_G] = handle_global_purge, | ||
478 | |||
479 | }; | ||
480 | |||
481 | static const int kvm_vti_max_exit_handlers = | ||
482 | sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); | ||
483 | |||
/* No per-switch preparation is needed on ia64; kept for symmetry. */
static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
487 | |||
488 | static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) | ||
489 | { | ||
490 | struct exit_ctl_data *p_exit_data; | ||
491 | |||
492 | p_exit_data = kvm_get_exit_data(vcpu); | ||
493 | return p_exit_data->exit_reason; | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * The guest has exited. See if we can fix it or if we need userspace | ||
498 | * assistance. | ||
499 | */ | ||
500 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||
501 | { | ||
502 | u32 exit_reason = kvm_get_exit_reason(vcpu); | ||
503 | vcpu->arch.last_exit = exit_reason; | ||
504 | |||
505 | if (exit_reason < kvm_vti_max_exit_handlers | ||
506 | && kvm_vti_exit_handlers[exit_reason]) | ||
507 | return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); | ||
508 | else { | ||
509 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
510 | kvm_run->hw.hardware_exit_reason = exit_reason; | ||
511 | } | ||
512 | return 0; | ||
513 | } | ||
514 | |||
/* Program region register 6 and serialize the instruction stream. */
static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}
520 | |||
/*
 * Pin the VMM text and this VM's data area with itr/dtr translation
 * registers so they stay mapped while running in the VMM context.
 * Returns 0 on success or a negative error from ia64_itr_entry().
 */
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/*Insert a pair of tr to map vmm*/
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	/* Remember the slot so kvm_purge_vmm_mapping() can drop it. */
	vcpu->arch.vmm_tr_slot = r;
	/*Insert a pairt of tr to map data of vm*/
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
					pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;
	r = 0;
out:
	return r;

}
545 | |||
/* Drop the translation registers installed by kvm_insert_vmm_mapping(). */
static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{

	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);

}
553 | |||
/*
 * Prepare this physical cpu to enter the VMM: flush the TLB if a
 * different vcpu ran here last, save host RR6, load the VMM's RR6,
 * and pin the VMM mappings.  Returns kvm_insert_vmm_mapping()'s result.
 */
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	/* Stale translations from another vcpu would be visible otherwise. */
	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	return kvm_insert_vmm_mapping(vcpu);
}
/* Undo kvm_vcpu_pre_transition(): unpin mappings, restore host RR6. */
static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}
574 | |||
/*
 * Switch into the VMM via the trampoline and run the guest until the
 * next exit.  Returns 0 after a normal exit, or the negative error
 * from kvm_vcpu_pre_transition() if entry setup failed.
 */
static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r;

	/*Get host and guest context with guest address space.*/
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto out;
	/* Does not return until the guest exits back to the host. */
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
	kvm_vcpu_post_transition(vcpu);
	r = 0;
out:
	return r;
}
593 | |||
/*
 * Main run loop: enter the guest, handle the exit, and re-enter while
 * the exit handler returns > 0.  Returns 0 when userspace must act,
 * or a negative error (-EINTR on pending signal, entry failure).
 * The irq-disable / preempt-disable ordering below is deliberate.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

again:
	preempt_disable();

	kvm_prepare_guest_switch(vcpu);
	local_irq_disable();

	/* A pending signal must reach userspace before guest entry. */
	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		local_irq_enable();
		preempt_enable();
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		goto out;
	}

	vcpu->arch.launched = 1;
	vcpu->guest_mode = 0;
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);

	/* Handler wants another round and no resched is pending: go. */
	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	/* r > 0 here means a resched was due; yield, then re-enter. */
	if (r > 0) {
		kvm_resched(vcpu);
		goto again;
	}

	return r;
}
654 | |||
655 | static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) | ||
656 | { | ||
657 | struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); | ||
658 | |||
659 | if (!vcpu->mmio_is_write) | ||
660 | memcpy(&p->data, vcpu->mmio_data, 8); | ||
661 | p->state = STATE_IORESP_READY; | ||
662 | } | ||
663 | |||
/*
 * KVM_RUN ioctl: finish any MMIO completed by userspace, install the
 * vcpu's signal mask, and run the guest loop.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	/* An AP that has not received its startup IPI yet just blocks. */
	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* Userspace finished an MMIO access: feed the result to the VMM. */
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
694 | |||
695 | /* | ||
696 | * Allocate 16M memory for every vm to hold its specific data. | ||
697 | * Its memory map is defined in kvm_host.h. | ||
698 | */ | ||
699 | static struct kvm *kvm_alloc_kvm(void) | ||
700 | { | ||
701 | |||
702 | struct kvm *kvm; | ||
703 | uint64_t vm_base; | ||
704 | |||
705 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); | ||
706 | |||
707 | if (!vm_base) | ||
708 | return ERR_PTR(-ENOMEM); | ||
709 | printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base); | ||
710 | |||
711 | /* Zero all pages before use! */ | ||
712 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | ||
713 | |||
714 | kvm = (struct kvm *)(vm_base + KVM_VM_OFS); | ||
715 | kvm->arch.vm_base = vm_base; | ||
716 | |||
717 | return kvm; | ||
718 | } | ||
719 | |||
/* A guest-physical I/O range and the GPFN attribute for its pages. */
struct kvm_io_range {
	unsigned long start;	/* guest-physical base address */
	unsigned long size;	/* length of the range in bytes */
	unsigned long type;	/* GPFN_* attribute for the P2M entries */
};

/* Well-known platform I/O ranges marked in the P2M by kvm_build_io_pmt(). */
static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};
733 | |||
734 | static void kvm_build_io_pmt(struct kvm *kvm) | ||
735 | { | ||
736 | unsigned long i, j; | ||
737 | |||
738 | /* Mark I/O ranges */ | ||
739 | for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); | ||
740 | i++) { | ||
741 | for (j = io_ranges[i].start; | ||
742 | j < io_ranges[i].start + io_ranges[i].size; | ||
743 | j += PAGE_SIZE) | ||
744 | kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, | ||
745 | io_ranges[i].type, 0); | ||
746 | } | ||
747 | |||
748 | } | ||
749 | |||
750 | /*Use unused rids to virtualize guest rid.*/ | ||
751 | #define GUEST_PHYSICAL_RR0 0x1739 | ||
752 | #define GUEST_PHYSICAL_RR4 0x2739 | ||
753 | #define VMM_INIT_RR 0x1660 | ||
754 | |||
755 | static void kvm_init_vm(struct kvm *kvm) | ||
756 | { | ||
757 | long vm_base; | ||
758 | |||
759 | BUG_ON(!kvm); | ||
760 | |||
761 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; | ||
762 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; | ||
763 | kvm->arch.vmm_init_rr = VMM_INIT_RR; | ||
764 | |||
765 | vm_base = kvm->arch.vm_base; | ||
766 | if (vm_base) { | ||
767 | kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS; | ||
768 | kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS; | ||
769 | kvm->arch.vpd_base = vm_base + KVM_VPD_OFS; | ||
770 | } | ||
771 | |||
772 | /* | ||
773 | *Fill P2M entries for MMIO/IO ranges | ||
774 | */ | ||
775 | kvm_build_io_pmt(kvm); | ||
776 | |||
777 | } | ||
778 | |||
/* Allocate and initialize a new VM; returns an ERR_PTR on failure. */
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	/*
	 * Propagate the allocator's ERR_PTR unchanged instead of
	 * rewriting every failure to -ENOMEM.
	 */
	if (IS_ERR(kvm))
		return kvm;

	kvm_init_vm(kvm);

	return kvm;
}
790 | |||
791 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | ||
792 | struct kvm_irqchip *chip) | ||
793 | { | ||
794 | int r; | ||
795 | |||
796 | r = 0; | ||
797 | switch (chip->chip_id) { | ||
798 | case KVM_IRQCHIP_IOAPIC: | ||
799 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), | ||
800 | sizeof(struct kvm_ioapic_state)); | ||
801 | break; | ||
802 | default: | ||
803 | r = -EINVAL; | ||
804 | break; | ||
805 | } | ||
806 | return r; | ||
807 | } | ||
808 | |||
809 | static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | ||
810 | { | ||
811 | int r; | ||
812 | |||
813 | r = 0; | ||
814 | switch (chip->chip_id) { | ||
815 | case KVM_IRQCHIP_IOAPIC: | ||
816 | memcpy(ioapic_irqchip(kvm), | ||
817 | &chip->chip.ioapic, | ||
818 | sizeof(struct kvm_ioapic_state)); | ||
819 | break; | ||
820 | default: | ||
821 | r = -EINVAL; | ||
822 | break; | ||
823 | } | ||
824 | return r; | ||
825 | } | ||
826 | |||
827 | #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x | ||
828 | |||
829 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
830 | { | ||
831 | int i; | ||
832 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
833 | int r; | ||
834 | |||
835 | vcpu_load(vcpu); | ||
836 | |||
837 | for (i = 0; i < 16; i++) { | ||
838 | vpd->vgr[i] = regs->vpd.vgr[i]; | ||
839 | vpd->vbgr[i] = regs->vpd.vbgr[i]; | ||
840 | } | ||
841 | for (i = 0; i < 128; i++) | ||
842 | vpd->vcr[i] = regs->vpd.vcr[i]; | ||
843 | vpd->vhpi = regs->vpd.vhpi; | ||
844 | vpd->vnat = regs->vpd.vnat; | ||
845 | vpd->vbnat = regs->vpd.vbnat; | ||
846 | vpd->vpsr = regs->vpd.vpsr; | ||
847 | |||
848 | vpd->vpr = regs->vpd.vpr; | ||
849 | |||
850 | r = -EFAULT; | ||
851 | r = copy_from_user(&vcpu->arch.guest, regs->saved_guest, | ||
852 | sizeof(union context)); | ||
853 | if (r) | ||
854 | goto out; | ||
855 | r = copy_from_user(vcpu + 1, regs->saved_stack + | ||
856 | sizeof(struct kvm_vcpu), | ||
857 | IA64_STK_OFFSET - sizeof(struct kvm_vcpu)); | ||
858 | if (r) | ||
859 | goto out; | ||
860 | vcpu->arch.exit_data = | ||
861 | ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data; | ||
862 | |||
863 | RESTORE_REGS(mp_state); | ||
864 | RESTORE_REGS(vmm_rr); | ||
865 | memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); | ||
866 | memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); | ||
867 | RESTORE_REGS(itr_regions); | ||
868 | RESTORE_REGS(dtr_regions); | ||
869 | RESTORE_REGS(tc_regions); | ||
870 | RESTORE_REGS(irq_check); | ||
871 | RESTORE_REGS(itc_check); | ||
872 | RESTORE_REGS(timer_check); | ||
873 | RESTORE_REGS(timer_pending); | ||
874 | RESTORE_REGS(last_itc); | ||
875 | for (i = 0; i < 8; i++) { | ||
876 | vcpu->arch.vrr[i] = regs->vrr[i]; | ||
877 | vcpu->arch.ibr[i] = regs->ibr[i]; | ||
878 | vcpu->arch.dbr[i] = regs->dbr[i]; | ||
879 | } | ||
880 | for (i = 0; i < 4; i++) | ||
881 | vcpu->arch.insvc[i] = regs->insvc[i]; | ||
882 | RESTORE_REGS(xtp); | ||
883 | RESTORE_REGS(metaphysical_rr0); | ||
884 | RESTORE_REGS(metaphysical_rr4); | ||
885 | RESTORE_REGS(metaphysical_saved_rr0); | ||
886 | RESTORE_REGS(metaphysical_saved_rr4); | ||
887 | RESTORE_REGS(fp_psr); | ||
888 | RESTORE_REGS(saved_gp); | ||
889 | |||
890 | vcpu->arch.irq_new_pending = 1; | ||
891 | vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC); | ||
892 | set_bit(KVM_REQ_RESUME, &vcpu->requests); | ||
893 | |||
894 | vcpu_put(vcpu); | ||
895 | r = 0; | ||
896 | out: | ||
897 | return r; | ||
898 | } | ||
899 | |||
900 | long kvm_arch_vm_ioctl(struct file *filp, | ||
901 | unsigned int ioctl, unsigned long arg) | ||
902 | { | ||
903 | struct kvm *kvm = filp->private_data; | ||
904 | void __user *argp = (void __user *)arg; | ||
905 | int r = -EINVAL; | ||
906 | |||
907 | switch (ioctl) { | ||
908 | case KVM_SET_MEMORY_REGION: { | ||
909 | struct kvm_memory_region kvm_mem; | ||
910 | struct kvm_userspace_memory_region kvm_userspace_mem; | ||
911 | |||
912 | r = -EFAULT; | ||
913 | if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) | ||
914 | goto out; | ||
915 | kvm_userspace_mem.slot = kvm_mem.slot; | ||
916 | kvm_userspace_mem.flags = kvm_mem.flags; | ||
917 | kvm_userspace_mem.guest_phys_addr = | ||
918 | kvm_mem.guest_phys_addr; | ||
919 | kvm_userspace_mem.memory_size = kvm_mem.memory_size; | ||
920 | r = kvm_vm_ioctl_set_memory_region(kvm, | ||
921 | &kvm_userspace_mem, 0); | ||
922 | if (r) | ||
923 | goto out; | ||
924 | break; | ||
925 | } | ||
926 | case KVM_CREATE_IRQCHIP: | ||
927 | r = -EFAULT; | ||
928 | r = kvm_ioapic_init(kvm); | ||
929 | if (r) | ||
930 | goto out; | ||
931 | break; | ||
932 | case KVM_IRQ_LINE: { | ||
933 | struct kvm_irq_level irq_event; | ||
934 | |||
935 | r = -EFAULT; | ||
936 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | ||
937 | goto out; | ||
938 | if (irqchip_in_kernel(kvm)) { | ||
939 | mutex_lock(&kvm->lock); | ||
940 | kvm_ioapic_set_irq(kvm->arch.vioapic, | ||
941 | irq_event.irq, | ||
942 | irq_event.level); | ||
943 | mutex_unlock(&kvm->lock); | ||
944 | r = 0; | ||
945 | } | ||
946 | break; | ||
947 | } | ||
948 | case KVM_GET_IRQCHIP: { | ||
949 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | ||
950 | struct kvm_irqchip chip; | ||
951 | |||
952 | r = -EFAULT; | ||
953 | if (copy_from_user(&chip, argp, sizeof chip)) | ||
954 | goto out; | ||
955 | r = -ENXIO; | ||
956 | if (!irqchip_in_kernel(kvm)) | ||
957 | goto out; | ||
958 | r = kvm_vm_ioctl_get_irqchip(kvm, &chip); | ||
959 | if (r) | ||
960 | goto out; | ||
961 | r = -EFAULT; | ||
962 | if (copy_to_user(argp, &chip, sizeof chip)) | ||
963 | goto out; | ||
964 | r = 0; | ||
965 | break; | ||
966 | } | ||
967 | case KVM_SET_IRQCHIP: { | ||
968 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | ||
969 | struct kvm_irqchip chip; | ||
970 | |||
971 | r = -EFAULT; | ||
972 | if (copy_from_user(&chip, argp, sizeof chip)) | ||
973 | goto out; | ||
974 | r = -ENXIO; | ||
975 | if (!irqchip_in_kernel(kvm)) | ||
976 | goto out; | ||
977 | r = kvm_vm_ioctl_set_irqchip(kvm, &chip); | ||
978 | if (r) | ||
979 | goto out; | ||
980 | r = 0; | ||
981 | break; | ||
982 | } | ||
983 | default: | ||
984 | ; | ||
985 | } | ||
986 | out: | ||
987 | return r; | ||
988 | } | ||
989 | |||
/* sregs are not used by the ia64 port; the ioctl is unsupported. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}
995 | |||
/* sregs are not used by the ia64 port; the ioctl is unsupported. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;

}
/* Address translation via ioctl is not implemented on ia64. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr)
{

	return -EINVAL;
}
1008 | |||
/*
 * Allocate the global VMM area (module text/data plus the PAL VM
 * buffer) once, at module init.  Returns 0 on success or if already
 * allocated, -ENOMEM on allocation failure.
 */
static int kvm_alloc_vmm_area(void)
{
	/*
	 * NOTE(review): allocation is skipped unless the PAL-reported
	 * buffer size fits in KVM_VM_BUFFER_SIZE — confirm callers treat
	 * the skip as failure elsewhere.
	 */
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		/* The PAL VM buffer follows the VMM image in the area. */
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}
1026 | |||
/* Release the global VMM area and reset the module-wide pointers. */
static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/*Zero this area before free to avoid bits leak!!*/
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}
1038 | |||
/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it. Leave it as blank for IA64.
 */
void decache_vcpus_on_cpu(int cpu)
{
}
1046 | |||
/* No per-cpu load work is needed for VT-i vcpus; placeholder. */
static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}
1050 | |||
/*
 * Initialize this vcpu's VPD: mirror host CPUID (capped at 5 entries),
 * enable the VMM acceleration bits, and point at the PAL VM buffer.
 * Returns 0 on success or the error encoded in the VPD pointer.
 */
static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4; /* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/*Set vac and vdc fields*/
	/* Acceleration flags: let the VMM handle these ops directly. */
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

	/*Set virtual buffer*/
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}
1084 | |||
1085 | static int vti_create_vp(struct kvm_vcpu *vcpu) | ||
1086 | { | ||
1087 | long ret; | ||
1088 | struct vpd *vpd = vcpu->arch.vpd; | ||
1089 | unsigned long vmm_ivt; | ||
1090 | |||
1091 | vmm_ivt = kvm_vmm_info->vmm_ivt; | ||
1092 | |||
1093 | printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); | ||
1094 | |||
1095 | ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); | ||
1096 | |||
1097 | if (ret) { | ||
1098 | printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); | ||
1099 | return -EINVAL; | ||
1100 | } | ||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | static void init_ptce_info(struct kvm_vcpu *vcpu) | ||
1105 | { | ||
1106 | ia64_ptce_info_t ptce = {0}; | ||
1107 | |||
1108 | ia64_get_ptce(&ptce); | ||
1109 | vcpu->arch.ptce_base = ptce.base; | ||
1110 | vcpu->arch.ptce_count[0] = ptce.count[0]; | ||
1111 | vcpu->arch.ptce_count[1] = ptce.count[1]; | ||
1112 | vcpu->arch.ptce_stride[0] = ptce.stride[0]; | ||
1113 | vcpu->arch.ptce_stride[1] = ptce.stride[1]; | ||
1114 | } | ||
1115 | |||
/*
 * Re-arm the halt timer on the cpu this vcpu migrated to, keeping the
 * original absolute expiry.  No-op if the timer was not active.
 */
static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
}
1123 | |||
/*
 * Halt-timer expiry: wake a halted vcpu so it can take its timer
 * interrupt, and flag timer_check for the run loop.
 */
static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	/* Only a HALTED vcpu should be made runnable by the timer. */
	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	q = &vcpu->wq;
	if (waitqueue_active(q)) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		wake_up_interruptible(q);
	}
out:
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}
1142 | |||
#define PALE_RESET_ENTRY    0x80000000ffffffb0UL

/*
 * Arch-specific vcpu init: set up the initial VMM-side context
 * (registers, region registers, halt timer, per-vcpu table pointers).
 * The BSP (vcpu 0) starts runnable at the PAL reset entry and seeds
 * the ITC offset for all vcpus; APs start UNINITIALIZED.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/*Init vcpu context for first run.*/
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (vcpu->vcpu_id == 0) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/*Set entry address for first run.*/
		regs->cr_iip = PALE_RESET_ENTRY;

		/*Initilize itc offset for vcpus*/
		itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
		for (i = 0; i < MAX_VCPU_NUM; i++) {
			/* vcpu structs are laid out contiguously in the VM area. */
			v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	/*
	 * Initial VMM context: stack pointer/thread pointer into the
	 * guest-mapped vcpu area, plus PSR/ar/cr values required to
	 * enter the VMM.  (Numeric ar/cr indices per the inline markers.)
	 */
	p_ctx->gr[1] = 0;
	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0; /*unat*/
	p_ctx->ar[19] = 0x0; /*rnat*/
	/* RSE backing store base: just past the 16-byte-aligned vcpu. */
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
			((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0; /*pfs*/
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/*Initilize region register*/
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/*Initilize branch register 0*/
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	/* Per-vcpu VPD/VHPT/VTLB live at fixed per-id addresses. */
	vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}
1231 | |||
/*
 * One-time vcpu setup: with irqs off and the VMM temporarily mapped,
 * run generic init, build the VPD, and create the PAL virtual
 * processor.  Returns 0 on success or a negative error.
 */
static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	unsigned long psr;
	int r;

	/* The VMM mapping must be in place while PAL/VPD setup runs. */
	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;
	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);
	local_irq_restore(psr);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	return r;
}
1264 | |||
/*
 * Create vcpu 'id': its struct lives at a fixed per-id offset inside
 * the preallocated VM data area (no separate allocation).  Returns the
 * vcpu or an ERR_PTR.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
		unsigned int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long vm_base = kvm->arch.vm_base;
	int r;
	int cpu;

	r = -ENOMEM;
	if (!vm_base) {
		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
		goto fail;
	}
	vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
	vcpu->kvm = kvm;

	/* Setup must run pinned to one cpu (TR insertion is per-cpu). */
	cpu = get_cpu();
	vti_vcpu_load(vcpu, cpu);
	r = vti_vcpu_setup(vcpu, id);
	put_cpu();

	if (r) {
		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
		goto fail;
	}

	return vcpu;
fail:
	return ERR_PTR(r);
}
1295 | |||
/* All setup happens in kvm_arch_vcpu_create(); nothing further here. */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
1300 | |||
/* FPU state get is not supported by the ia64 port. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}
1305 | |||
/* FPU state set is not supported by the ia64 port. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}
1310 | |||
/* Guest debugging is not supported by the ia64 port. */
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
		struct kvm_debug_guest *dbg)
{
	return -EINVAL;
}
1316 | |||
1317 | static void free_kvm(struct kvm *kvm) | ||
1318 | { | ||
1319 | unsigned long vm_base = kvm->arch.vm_base; | ||
1320 | |||
1321 | if (vm_base) { | ||
1322 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | ||
1323 | free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); | ||
1324 | } | ||
1325 | |||
1326 | } | ||
1327 | |||
1328 | static void kvm_release_vm_pages(struct kvm *kvm) | ||
1329 | { | ||
1330 | struct kvm_memory_slot *memslot; | ||
1331 | int i, j; | ||
1332 | unsigned long base_gfn; | ||
1333 | |||
1334 | for (i = 0; i < kvm->nmemslots; i++) { | ||
1335 | memslot = &kvm->memslots[i]; | ||
1336 | base_gfn = memslot->base_gfn; | ||
1337 | |||
1338 | for (j = 0; j < memslot->npages; j++) { | ||
1339 | if (memslot->rmap[j]) | ||
1340 | put_page((struct page *)memslot->rmap[j]); | ||
1341 | } | ||
1342 | } | ||
1343 | } | ||
1344 | |||
/* Tear down a VM: irqchip, pinned guest pages, memslots, VM area. */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	free_kvm(kvm);
}
1352 | |||
/* Nothing to save when a vcpu is descheduled on ia64. */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}
1356 | |||
/* On migration to a new cpu, move an active halt timer along. */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}
1365 | |||
1366 | #define SAVE_REGS(_x) regs->_x = vcpu->arch._x | ||
1367 | |||
1368 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
1369 | { | ||
1370 | int i; | ||
1371 | int r; | ||
1372 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1373 | vcpu_load(vcpu); | ||
1374 | |||
1375 | for (i = 0; i < 16; i++) { | ||
1376 | regs->vpd.vgr[i] = vpd->vgr[i]; | ||
1377 | regs->vpd.vbgr[i] = vpd->vbgr[i]; | ||
1378 | } | ||
1379 | for (i = 0; i < 128; i++) | ||
1380 | regs->vpd.vcr[i] = vpd->vcr[i]; | ||
1381 | regs->vpd.vhpi = vpd->vhpi; | ||
1382 | regs->vpd.vnat = vpd->vnat; | ||
1383 | regs->vpd.vbnat = vpd->vbnat; | ||
1384 | regs->vpd.vpsr = vpd->vpsr; | ||
1385 | regs->vpd.vpr = vpd->vpr; | ||
1386 | |||
1387 | r = -EFAULT; | ||
1388 | r = copy_to_user(regs->saved_guest, &vcpu->arch.guest, | ||
1389 | sizeof(union context)); | ||
1390 | if (r) | ||
1391 | goto out; | ||
1392 | r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET); | ||
1393 | if (r) | ||
1394 | goto out; | ||
1395 | SAVE_REGS(mp_state); | ||
1396 | SAVE_REGS(vmm_rr); | ||
1397 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); | ||
1398 | memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); | ||
1399 | SAVE_REGS(itr_regions); | ||
1400 | SAVE_REGS(dtr_regions); | ||
1401 | SAVE_REGS(tc_regions); | ||
1402 | SAVE_REGS(irq_check); | ||
1403 | SAVE_REGS(itc_check); | ||
1404 | SAVE_REGS(timer_check); | ||
1405 | SAVE_REGS(timer_pending); | ||
1406 | SAVE_REGS(last_itc); | ||
1407 | for (i = 0; i < 8; i++) { | ||
1408 | regs->vrr[i] = vcpu->arch.vrr[i]; | ||
1409 | regs->ibr[i] = vcpu->arch.ibr[i]; | ||
1410 | regs->dbr[i] = vcpu->arch.dbr[i]; | ||
1411 | } | ||
1412 | for (i = 0; i < 4; i++) | ||
1413 | regs->insvc[i] = vcpu->arch.insvc[i]; | ||
1414 | regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC); | ||
1415 | SAVE_REGS(xtp); | ||
1416 | SAVE_REGS(metaphysical_rr0); | ||
1417 | SAVE_REGS(metaphysical_rr4); | ||
1418 | SAVE_REGS(metaphysical_saved_rr0); | ||
1419 | SAVE_REGS(metaphysical_saved_rr4); | ||
1420 | SAVE_REGS(fp_psr); | ||
1421 | SAVE_REGS(saved_gp); | ||
1422 | vcpu_put(vcpu); | ||
1423 | r = 0; | ||
1424 | out: | ||
1425 | return r; | ||
1426 | } | ||
1427 | |||
/* Arch-side vcpu teardown: stop the halt timer, free the local APIC. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{

	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}
1434 | |||
1435 | |||
1436 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
1437 | unsigned int ioctl, unsigned long arg) | ||
1438 | { | ||
1439 | return -EINVAL; | ||
1440 | } | ||
1441 | |||
1442 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
1443 | struct kvm_userspace_memory_region *mem, | ||
1444 | struct kvm_memory_slot old, | ||
1445 | int user_alloc) | ||
1446 | { | ||
1447 | unsigned long i; | ||
1448 | struct page *page; | ||
1449 | int npages = mem->memory_size >> PAGE_SHIFT; | ||
1450 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; | ||
1451 | unsigned long base_gfn = memslot->base_gfn; | ||
1452 | |||
1453 | for (i = 0; i < npages; i++) { | ||
1454 | page = gfn_to_page(kvm, base_gfn + i); | ||
1455 | kvm_set_pmt_entry(kvm, base_gfn + i, | ||
1456 | page_to_pfn(page) << PAGE_SHIFT, | ||
1457 | _PAGE_AR_RWX|_PAGE_MA_WB); | ||
1458 | memslot->rmap[i] = (unsigned long)page; | ||
1459 | } | ||
1460 | |||
1461 | return 0; | ||
1462 | } | ||
1463 | |||
1464 | |||
1465 | long kvm_arch_dev_ioctl(struct file *filp, | ||
1466 | unsigned int ioctl, unsigned long arg) | ||
1467 | { | ||
1468 | return -EINVAL; | ||
1469 | } | ||
1470 | |||
1471 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
1472 | { | ||
1473 | kvm_vcpu_uninit(vcpu); | ||
1474 | } | ||
1475 | |||
1476 | static int vti_cpu_has_kvm_support(void) | ||
1477 | { | ||
1478 | long avail = 1, status = 1, control = 1; | ||
1479 | long ret; | ||
1480 | |||
1481 | ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); | ||
1482 | if (ret) | ||
1483 | goto out; | ||
1484 | |||
1485 | if (!(avail & PAL_PROC_VM_BIT)) | ||
1486 | goto out; | ||
1487 | |||
1488 | printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); | ||
1489 | |||
1490 | ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); | ||
1491 | if (ret) | ||
1492 | goto out; | ||
1493 | printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); | ||
1494 | |||
1495 | if (!(vp_env_info & VP_OPCODE)) { | ||
1496 | printk(KERN_WARNING"kvm: No opcode ability on hardware, " | ||
1497 | "vm_env_info:0x%lx\n", vp_env_info); | ||
1498 | } | ||
1499 | |||
1500 | return 1; | ||
1501 | out: | ||
1502 | return 0; | ||
1503 | } | ||
1504 | |||
1505 | static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, | ||
1506 | struct module *module) | ||
1507 | { | ||
1508 | unsigned long module_base; | ||
1509 | unsigned long vmm_size; | ||
1510 | |||
1511 | unsigned long vmm_offset, func_offset, fdesc_offset; | ||
1512 | struct fdesc *p_fdesc; | ||
1513 | |||
1514 | BUG_ON(!module); | ||
1515 | |||
1516 | if (!kvm_vmm_base) { | ||
1517 | printk("kvm: kvm area hasn't been initilized yet!!\n"); | ||
1518 | return -EFAULT; | ||
1519 | } | ||
1520 | |||
1521 | /*Calculate new position of relocated vmm module.*/ | ||
1522 | module_base = (unsigned long)module->module_core; | ||
1523 | vmm_size = module->core_size; | ||
1524 | if (unlikely(vmm_size > KVM_VMM_SIZE)) | ||
1525 | return -EFAULT; | ||
1526 | |||
1527 | memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); | ||
1528 | kvm_flush_icache(kvm_vmm_base, vmm_size); | ||
1529 | |||
1530 | /*Recalculate kvm_vmm_info based on new VMM*/ | ||
1531 | vmm_offset = vmm_info->vmm_ivt - module_base; | ||
1532 | kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; | ||
1533 | printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", | ||
1534 | kvm_vmm_info->vmm_ivt); | ||
1535 | |||
1536 | fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; | ||
1537 | kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + | ||
1538 | fdesc_offset); | ||
1539 | func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; | ||
1540 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | ||
1541 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | ||
1542 | p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); | ||
1543 | |||
1544 | printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", | ||
1545 | KVM_VMM_BASE+func_offset); | ||
1546 | |||
1547 | fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; | ||
1548 | kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + | ||
1549 | fdesc_offset); | ||
1550 | func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; | ||
1551 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | ||
1552 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | ||
1553 | p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); | ||
1554 | |||
1555 | kvm_vmm_gp = p_fdesc->gp; | ||
1556 | |||
1557 | printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", | ||
1558 | kvm_vmm_info->vmm_entry); | ||
1559 | printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", | ||
1560 | KVM_VMM_BASE + func_offset); | ||
1561 | |||
1562 | return 0; | ||
1563 | } | ||
1564 | |||
1565 | int kvm_arch_init(void *opaque) | ||
1566 | { | ||
1567 | int r; | ||
1568 | struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; | ||
1569 | |||
1570 | if (!vti_cpu_has_kvm_support()) { | ||
1571 | printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); | ||
1572 | r = -EOPNOTSUPP; | ||
1573 | goto out; | ||
1574 | } | ||
1575 | |||
1576 | if (kvm_vmm_info) { | ||
1577 | printk(KERN_ERR "kvm: Already loaded VMM module!\n"); | ||
1578 | r = -EEXIST; | ||
1579 | goto out; | ||
1580 | } | ||
1581 | |||
1582 | r = -ENOMEM; | ||
1583 | kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); | ||
1584 | if (!kvm_vmm_info) | ||
1585 | goto out; | ||
1586 | |||
1587 | if (kvm_alloc_vmm_area()) | ||
1588 | goto out_free0; | ||
1589 | |||
1590 | r = kvm_relocate_vmm(vmm_info, vmm_info->module); | ||
1591 | if (r) | ||
1592 | goto out_free1; | ||
1593 | |||
1594 | return 0; | ||
1595 | |||
1596 | out_free1: | ||
1597 | kvm_free_vmm_area(); | ||
1598 | out_free0: | ||
1599 | kfree(kvm_vmm_info); | ||
1600 | out: | ||
1601 | return r; | ||
1602 | } | ||
1603 | |||
1604 | void kvm_arch_exit(void) | ||
1605 | { | ||
1606 | kvm_free_vmm_area(); | ||
1607 | kfree(kvm_vmm_info); | ||
1608 | kvm_vmm_info = NULL; | ||
1609 | } | ||
1610 | |||
1611 | static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | ||
1612 | struct kvm_dirty_log *log) | ||
1613 | { | ||
1614 | struct kvm_memory_slot *memslot; | ||
1615 | int r, i; | ||
1616 | long n, base; | ||
1617 | unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS | ||
1618 | + KVM_MEM_DIRTY_LOG_OFS); | ||
1619 | |||
1620 | r = -EINVAL; | ||
1621 | if (log->slot >= KVM_MEMORY_SLOTS) | ||
1622 | goto out; | ||
1623 | |||
1624 | memslot = &kvm->memslots[log->slot]; | ||
1625 | r = -ENOENT; | ||
1626 | if (!memslot->dirty_bitmap) | ||
1627 | goto out; | ||
1628 | |||
1629 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
1630 | base = memslot->base_gfn / BITS_PER_LONG; | ||
1631 | |||
1632 | for (i = 0; i < n/sizeof(long); ++i) { | ||
1633 | memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; | ||
1634 | dirty_bitmap[base + i] = 0; | ||
1635 | } | ||
1636 | r = 0; | ||
1637 | out: | ||
1638 | return r; | ||
1639 | } | ||
1640 | |||
1641 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | ||
1642 | struct kvm_dirty_log *log) | ||
1643 | { | ||
1644 | int r; | ||
1645 | int n; | ||
1646 | struct kvm_memory_slot *memslot; | ||
1647 | int is_dirty = 0; | ||
1648 | |||
1649 | spin_lock(&kvm->arch.dirty_log_lock); | ||
1650 | |||
1651 | r = kvm_ia64_sync_dirty_log(kvm, log); | ||
1652 | if (r) | ||
1653 | goto out; | ||
1654 | |||
1655 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | ||
1656 | if (r) | ||
1657 | goto out; | ||
1658 | |||
1659 | /* If nothing is dirty, don't bother messing with page tables. */ | ||
1660 | if (is_dirty) { | ||
1661 | kvm_flush_remote_tlbs(kvm); | ||
1662 | memslot = &kvm->memslots[log->slot]; | ||
1663 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | ||
1664 | memset(memslot->dirty_bitmap, 0, n); | ||
1665 | } | ||
1666 | r = 0; | ||
1667 | out: | ||
1668 | spin_unlock(&kvm->arch.dirty_log_lock); | ||
1669 | return r; | ||
1670 | } | ||
1671 | |||
/* No per-host hardware configuration is needed on ia64. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* IPI target: the interrupt itself forces the vcpu out of guest mode. */
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
#endif
}
1688 | |||
1689 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | ||
1690 | { | ||
1691 | int ipi_pcpu = vcpu->cpu; | ||
1692 | |||
1693 | if (waitqueue_active(&vcpu->wq)) | ||
1694 | wake_up_interruptible(&vcpu->wq); | ||
1695 | |||
1696 | if (vcpu->guest_mode) | ||
1697 | smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); | ||
1698 | } | ||
1699 | |||
1700 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig) | ||
1701 | { | ||
1702 | |||
1703 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1704 | |||
1705 | if (!test_and_set_bit(vec, &vpd->irr[0])) { | ||
1706 | vcpu->arch.irq_new_pending = 1; | ||
1707 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) | ||
1708 | kvm_vcpu_kick(vcpu); | ||
1709 | else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) { | ||
1710 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | ||
1711 | if (waitqueue_active(&vcpu->wq)) | ||
1712 | wake_up_interruptible(&vcpu->wq); | ||
1713 | } | ||
1714 | return 1; | ||
1715 | } | ||
1716 | return 0; | ||
1717 | } | ||
1718 | |||
1719 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) | ||
1720 | { | ||
1721 | return apic->vcpu->vcpu_id == dest; | ||
1722 | } | ||
1723 | |||
1724 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) | ||
1725 | { | ||
1726 | return 0; | ||
1727 | } | ||
1728 | |||
1729 | struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, | ||
1730 | unsigned long bitmap) | ||
1731 | { | ||
1732 | struct kvm_vcpu *lvcpu = kvm->vcpus[0]; | ||
1733 | int i; | ||
1734 | |||
1735 | for (i = 1; i < KVM_MAX_VCPUS; i++) { | ||
1736 | if (!kvm->vcpus[i]) | ||
1737 | continue; | ||
1738 | if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) | ||
1739 | lvcpu = kvm->vcpus[i]; | ||
1740 | } | ||
1741 | |||
1742 | return lvcpu; | ||
1743 | } | ||
1744 | |||
1745 | static int find_highest_bits(int *dat) | ||
1746 | { | ||
1747 | u32 bits, bitnum; | ||
1748 | int i; | ||
1749 | |||
1750 | /* loop for all 256 bits */ | ||
1751 | for (i = 7; i >= 0 ; i--) { | ||
1752 | bits = dat[i]; | ||
1753 | if (bits) { | ||
1754 | bitnum = fls(bits); | ||
1755 | return i * 32 + bitnum - 1; | ||
1756 | } | ||
1757 | } | ||
1758 | |||
1759 | return -1; | ||
1760 | } | ||
1761 | |||
1762 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) | ||
1763 | { | ||
1764 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | ||
1765 | |||
1766 | if (vpd->irr[0] & (1UL << NMI_VECTOR)) | ||
1767 | return NMI_VECTOR; | ||
1768 | if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) | ||
1769 | return ExtINT_VECTOR; | ||
1770 | |||
1771 | return find_highest_bits((int *)&vpd->irr[0]); | ||
1772 | } | ||
1773 | |||
1774 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | ||
1775 | { | ||
1776 | if (kvm_highest_pending_irq(vcpu) != -1) | ||
1777 | return 1; | ||
1778 | return 0; | ||
1779 | } | ||
1780 | |||
1781 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | ||
1782 | { | ||
1783 | return 0; | ||
1784 | } | ||
1785 | |||
1786 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
1787 | { | ||
1788 | return gfn; | ||
1789 | } | ||
1790 | |||
1791 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | ||
1792 | { | ||
1793 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE; | ||
1794 | } | ||
1795 | |||
1796 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | ||
1797 | struct kvm_mp_state *mp_state) | ||
1798 | { | ||
1799 | return -EINVAL; | ||
1800 | } | ||
1801 | |||
1802 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | ||
1803 | struct kvm_mp_state *mp_state) | ||
1804 | { | ||
1805 | return -EINVAL; | ||
1806 | } | ||
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c new file mode 100644 index 000000000000..091f936c4485 --- /dev/null +++ b/arch/ia64/kvm/kvm_fw.c | |||
@@ -0,0 +1,500 @@ | |||
1 | /* | ||
2 | * PAL/SAL call delegation | ||
3 | * | ||
4 | * Copyright (c) 2004 Li Susie <susie.li@intel.com> | ||
5 | * Copyright (c) 2005 Yu Ke <ke.yu@intel.com> | ||
6 | * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kvm_host.h> | ||
23 | #include <linux/smp.h> | ||
24 | |||
25 | #include "vti.h" | ||
26 | #include "misc.h" | ||
27 | |||
28 | #include <asm/pal.h> | ||
29 | #include <asm/sal.h> | ||
30 | #include <asm/tlb.h> | ||
31 | |||
/*
 * Handy macros to make sure that the PAL return values start out
 * as something meaningful.
 *
 * Wrapped in do { } while (0) and with the argument parenthesized so
 * the macros expand safely inside unbraced if/else arms (the original
 * bare-brace form would break `if (x) MACRO(y); else ...`).
 */
#define INIT_PAL_STATUS_UNIMPLEMENTED(x)			\
	do {							\
		(x).status = PAL_STATUS_UNIMPLEMENTED;		\
		(x).v0 = 0;					\
		(x).v1 = 0;					\
		(x).v2 = 0;					\
	} while (0)

#define INIT_PAL_STATUS_SUCCESS(x)				\
	do {							\
		(x).status = PAL_STATUS_SUCCESS;		\
		(x).v0 = 0;					\
		(x).v1 = 0;					\
		(x).v2 = 0;					\
	} while (0)
51 | |||
52 | static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu, | ||
53 | u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) { | ||
54 | struct exit_ctl_data *p; | ||
55 | |||
56 | if (vcpu) { | ||
57 | p = &vcpu->arch.exit_data; | ||
58 | if (p->exit_reason == EXIT_REASON_PAL_CALL) { | ||
59 | *gr28 = p->u.pal_data.gr28; | ||
60 | *gr29 = p->u.pal_data.gr29; | ||
61 | *gr30 = p->u.pal_data.gr30; | ||
62 | *gr31 = p->u.pal_data.gr31; | ||
63 | return ; | ||
64 | } | ||
65 | } | ||
66 | printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n"); | ||
67 | } | ||
68 | |||
69 | static void set_pal_result(struct kvm_vcpu *vcpu, | ||
70 | struct ia64_pal_retval result) { | ||
71 | |||
72 | struct exit_ctl_data *p; | ||
73 | |||
74 | p = kvm_get_exit_data(vcpu); | ||
75 | if (p && p->exit_reason == EXIT_REASON_PAL_CALL) { | ||
76 | p->u.pal_data.ret = result; | ||
77 | return ; | ||
78 | } | ||
79 | INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret); | ||
80 | } | ||
81 | |||
82 | static void set_sal_result(struct kvm_vcpu *vcpu, | ||
83 | struct sal_ret_values result) { | ||
84 | struct exit_ctl_data *p; | ||
85 | |||
86 | p = kvm_get_exit_data(vcpu); | ||
87 | if (p && p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
88 | p->u.sal_data.ret = result; | ||
89 | return ; | ||
90 | } | ||
91 | printk(KERN_WARNING"Failed to set sal result!!\n"); | ||
92 | } | ||
93 | |||
/* Arguments/result for the cross-cpu PAL_CACHE_FLUSH helper. */
struct cache_flush_args {
	u64 cache_type;		/* PAL cache_type argument (guest gr29) */
	u64 operation;		/* PAL operation argument (guest gr30) */
	u64 progress;		/* resume point passed back to PAL */
	long status;		/* last non-zero PAL status, if any */
};

cpumask_t cpu_cache_coherent_map;
102 | |||
103 | static void remote_pal_cache_flush(void *data) | ||
104 | { | ||
105 | struct cache_flush_args *args = data; | ||
106 | long status; | ||
107 | u64 progress = args->progress; | ||
108 | |||
109 | status = ia64_pal_cache_flush(args->cache_type, args->operation, | ||
110 | &progress, NULL); | ||
111 | if (status != 0) | ||
112 | args->status = status; | ||
113 | } | ||
114 | |||
115 | static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu) | ||
116 | { | ||
117 | u64 gr28, gr29, gr30, gr31; | ||
118 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
119 | struct cache_flush_args args = {0, 0, 0, 0}; | ||
120 | long psr; | ||
121 | |||
122 | gr28 = gr29 = gr30 = gr31 = 0; | ||
123 | kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31); | ||
124 | |||
125 | if (gr31 != 0) | ||
126 | printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu); | ||
127 | |||
128 | /* Always call Host Pal in int=1 */ | ||
129 | gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS; | ||
130 | args.cache_type = gr29; | ||
131 | args.operation = gr30; | ||
132 | smp_call_function(remote_pal_cache_flush, | ||
133 | (void *)&args, 1, 1); | ||
134 | if (args.status != 0) | ||
135 | printk(KERN_ERR"pal_cache_flush error!," | ||
136 | "status:0x%lx\n", args.status); | ||
137 | /* | ||
138 | * Call Host PAL cache flush | ||
139 | * Clear psr.ic when call PAL_CACHE_FLUSH | ||
140 | */ | ||
141 | local_irq_save(psr); | ||
142 | result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1, | ||
143 | &result.v0); | ||
144 | local_irq_restore(psr); | ||
145 | if (result.status != 0) | ||
146 | printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld" | ||
147 | "in1:%lx,in2:%lx\n", | ||
148 | vcpu, result.status, gr29, gr30); | ||
149 | |||
150 | #if 0 | ||
151 | if (gr29 == PAL_CACHE_TYPE_COHERENT) { | ||
152 | cpus_setall(vcpu->arch.cache_coherent_map); | ||
153 | cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map); | ||
154 | cpus_setall(cpu_cache_coherent_map); | ||
155 | cpu_clear(vcpu->cpu, cpu_cache_coherent_map); | ||
156 | } | ||
157 | #endif | ||
158 | return result; | ||
159 | } | ||
160 | |||
161 | struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu) | ||
162 | { | ||
163 | |||
164 | struct ia64_pal_retval result; | ||
165 | |||
166 | PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0); | ||
167 | return result; | ||
168 | } | ||
169 | |||
170 | static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu) | ||
171 | { | ||
172 | |||
173 | struct ia64_pal_retval result; | ||
174 | |||
175 | PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0); | ||
176 | |||
177 | /* | ||
178 | * PAL_FREQ_BASE may not be implemented in some platforms, | ||
179 | * call SAL instead. | ||
180 | */ | ||
181 | if (result.v0 == 0) { | ||
182 | result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, | ||
183 | &result.v0, | ||
184 | &result.v1); | ||
185 | result.v2 = 0; | ||
186 | } | ||
187 | |||
188 | return result; | ||
189 | } | ||
190 | |||
191 | static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu) | ||
192 | { | ||
193 | |||
194 | struct ia64_pal_retval result; | ||
195 | |||
196 | PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0); | ||
197 | return result; | ||
198 | } | ||
199 | |||
200 | static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu) | ||
201 | { | ||
202 | struct ia64_pal_retval result; | ||
203 | |||
204 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | ||
205 | return result; | ||
206 | } | ||
207 | |||
208 | static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu) | ||
209 | { | ||
210 | |||
211 | struct ia64_pal_retval result; | ||
212 | |||
213 | INIT_PAL_STATUS_SUCCESS(result); | ||
214 | return result; | ||
215 | } | ||
216 | |||
217 | static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu) | ||
218 | { | ||
219 | |||
220 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
221 | long in0, in1, in2, in3; | ||
222 | |||
223 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
224 | result.status = ia64_pal_proc_get_features(&result.v0, &result.v1, | ||
225 | &result.v2, in2); | ||
226 | |||
227 | return result; | ||
228 | } | ||
229 | |||
230 | static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) | ||
231 | { | ||
232 | |||
233 | pal_cache_config_info_t ci; | ||
234 | long status; | ||
235 | unsigned long in0, in1, in2, in3, r9, r10; | ||
236 | |||
237 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
238 | status = ia64_pal_cache_config_info(in1, in2, &ci); | ||
239 | r9 = ci.pcci_info_1.pcci1_data; | ||
240 | r10 = ci.pcci_info_2.pcci2_data; | ||
241 | return ((struct ia64_pal_retval){status, r9, r10, 0}); | ||
242 | } | ||
243 | |||
244 | #define GUEST_IMPL_VA_MSB 59 | ||
245 | #define GUEST_RID_BITS 18 | ||
246 | |||
247 | static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu) | ||
248 | { | ||
249 | |||
250 | pal_vm_info_1_u_t vminfo1; | ||
251 | pal_vm_info_2_u_t vminfo2; | ||
252 | struct ia64_pal_retval result; | ||
253 | |||
254 | PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0); | ||
255 | if (!result.status) { | ||
256 | vminfo1.pvi1_val = result.v0; | ||
257 | vminfo1.pal_vm_info_1_s.max_itr_entry = 8; | ||
258 | vminfo1.pal_vm_info_1_s.max_dtr_entry = 8; | ||
259 | result.v0 = vminfo1.pvi1_val; | ||
260 | vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB; | ||
261 | vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS; | ||
262 | result.v1 = vminfo2.pvi2_val; | ||
263 | } | ||
264 | |||
265 | return result; | ||
266 | } | ||
267 | |||
268 | static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) | ||
269 | { | ||
270 | struct ia64_pal_retval result; | ||
271 | |||
272 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | ||
273 | |||
274 | return result; | ||
275 | } | ||
276 | |||
277 | static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu) | ||
278 | { | ||
279 | u64 index = 0; | ||
280 | struct exit_ctl_data *p; | ||
281 | |||
282 | p = kvm_get_exit_data(vcpu); | ||
283 | if (p && (p->exit_reason == EXIT_REASON_PAL_CALL)) | ||
284 | index = p->u.pal_data.gr28; | ||
285 | |||
286 | return index; | ||
287 | } | ||
288 | |||
289 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
290 | { | ||
291 | |||
292 | u64 gr28; | ||
293 | struct ia64_pal_retval result; | ||
294 | int ret = 1; | ||
295 | |||
296 | gr28 = kvm_get_pal_call_index(vcpu); | ||
297 | /*printk("pal_call index:%lx\n",gr28);*/ | ||
298 | switch (gr28) { | ||
299 | case PAL_CACHE_FLUSH: | ||
300 | result = pal_cache_flush(vcpu); | ||
301 | break; | ||
302 | case PAL_CACHE_SUMMARY: | ||
303 | result = pal_cache_summary(vcpu); | ||
304 | break; | ||
305 | case PAL_HALT_LIGHT: | ||
306 | { | ||
307 | vcpu->arch.timer_pending = 1; | ||
308 | INIT_PAL_STATUS_SUCCESS(result); | ||
309 | if (kvm_highest_pending_irq(vcpu) == -1) | ||
310 | ret = kvm_emulate_halt(vcpu); | ||
311 | |||
312 | } | ||
313 | break; | ||
314 | |||
315 | case PAL_FREQ_RATIOS: | ||
316 | result = pal_freq_ratios(vcpu); | ||
317 | break; | ||
318 | |||
319 | case PAL_FREQ_BASE: | ||
320 | result = pal_freq_base(vcpu); | ||
321 | break; | ||
322 | |||
323 | case PAL_LOGICAL_TO_PHYSICAL : | ||
324 | result = pal_logical_to_physica(vcpu); | ||
325 | break; | ||
326 | |||
327 | case PAL_VM_SUMMARY : | ||
328 | result = pal_vm_summary(vcpu); | ||
329 | break; | ||
330 | |||
331 | case PAL_VM_INFO : | ||
332 | result = pal_vm_info(vcpu); | ||
333 | break; | ||
334 | case PAL_PLATFORM_ADDR : | ||
335 | result = pal_platform_addr(vcpu); | ||
336 | break; | ||
337 | case PAL_CACHE_INFO: | ||
338 | result = pal_cache_info(vcpu); | ||
339 | break; | ||
340 | case PAL_PTCE_INFO: | ||
341 | INIT_PAL_STATUS_SUCCESS(result); | ||
342 | result.v1 = (1L << 32) | 1L; | ||
343 | break; | ||
344 | case PAL_VM_PAGE_SIZE: | ||
345 | result.status = ia64_pal_vm_page_size(&result.v0, | ||
346 | &result.v1); | ||
347 | break; | ||
348 | case PAL_RSE_INFO: | ||
349 | result.status = ia64_pal_rse_info(&result.v0, | ||
350 | (pal_hints_u_t *)&result.v1); | ||
351 | break; | ||
352 | case PAL_PROC_GET_FEATURES: | ||
353 | result = pal_proc_get_features(vcpu); | ||
354 | break; | ||
355 | case PAL_DEBUG_INFO: | ||
356 | result.status = ia64_pal_debug_info(&result.v0, | ||
357 | &result.v1); | ||
358 | break; | ||
359 | case PAL_VERSION: | ||
360 | result.status = ia64_pal_version( | ||
361 | (pal_version_u_t *)&result.v0, | ||
362 | (pal_version_u_t *)&result.v1); | ||
363 | |||
364 | break; | ||
365 | case PAL_FIXED_ADDR: | ||
366 | result.status = PAL_STATUS_SUCCESS; | ||
367 | result.v0 = vcpu->vcpu_id; | ||
368 | break; | ||
369 | default: | ||
370 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | ||
371 | printk(KERN_WARNING"kvm: Unsupported pal call," | ||
372 | " index:0x%lx\n", gr28); | ||
373 | } | ||
374 | set_pal_result(vcpu, result); | ||
375 | return ret; | ||
376 | } | ||
377 | |||
378 | static struct sal_ret_values sal_emulator(struct kvm *kvm, | ||
379 | long index, unsigned long in1, | ||
380 | unsigned long in2, unsigned long in3, | ||
381 | unsigned long in4, unsigned long in5, | ||
382 | unsigned long in6, unsigned long in7) | ||
383 | { | ||
384 | unsigned long r9 = 0; | ||
385 | unsigned long r10 = 0; | ||
386 | long r11 = 0; | ||
387 | long status; | ||
388 | |||
389 | status = 0; | ||
390 | switch (index) { | ||
391 | case SAL_FREQ_BASE: | ||
392 | status = ia64_sal_freq_base(in1, &r9, &r10); | ||
393 | break; | ||
394 | case SAL_PCI_CONFIG_READ: | ||
395 | printk(KERN_WARNING"kvm: Not allowed to call here!" | ||
396 | " SAL_PCI_CONFIG_READ\n"); | ||
397 | break; | ||
398 | case SAL_PCI_CONFIG_WRITE: | ||
399 | printk(KERN_WARNING"kvm: Not allowed to call here!" | ||
400 | " SAL_PCI_CONFIG_WRITE\n"); | ||
401 | break; | ||
402 | case SAL_SET_VECTORS: | ||
403 | if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) { | ||
404 | if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) { | ||
405 | status = -2; | ||
406 | } else { | ||
407 | kvm->arch.rdv_sal_data.boot_ip = in2; | ||
408 | kvm->arch.rdv_sal_data.boot_gp = in3; | ||
409 | } | ||
410 | printk("Rendvous called! iip:%lx\n\n", in2); | ||
411 | } else | ||
412 | printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu." | ||
413 | "ignored...\n", in1); | ||
414 | break; | ||
415 | case SAL_GET_STATE_INFO: | ||
416 | /* No more info. */ | ||
417 | status = -5; | ||
418 | r9 = 0; | ||
419 | break; | ||
420 | case SAL_GET_STATE_INFO_SIZE: | ||
421 | /* Return a dummy size. */ | ||
422 | status = 0; | ||
423 | r9 = 128; | ||
424 | break; | ||
425 | case SAL_CLEAR_STATE_INFO: | ||
426 | /* Noop. */ | ||
427 | break; | ||
428 | case SAL_MC_RENDEZ: | ||
429 | printk(KERN_WARNING | ||
430 | "kvm: called SAL_MC_RENDEZ. ignored...\n"); | ||
431 | break; | ||
432 | case SAL_MC_SET_PARAMS: | ||
433 | printk(KERN_WARNING | ||
434 | "kvm: called SAL_MC_SET_PARAMS.ignored!\n"); | ||
435 | break; | ||
436 | case SAL_CACHE_FLUSH: | ||
437 | if (1) { | ||
438 | /*Flush using SAL. | ||
439 | This method is faster but has a side | ||
440 | effect on other vcpu running on | ||
441 | this cpu. */ | ||
442 | status = ia64_sal_cache_flush(in1); | ||
443 | } else { | ||
444 | /*Maybe need to implement the method | ||
445 | without side effect!*/ | ||
446 | status = 0; | ||
447 | } | ||
448 | break; | ||
449 | case SAL_CACHE_INIT: | ||
450 | printk(KERN_WARNING | ||
451 | "kvm: called SAL_CACHE_INIT. ignored...\n"); | ||
452 | break; | ||
453 | case SAL_UPDATE_PAL: | ||
454 | printk(KERN_WARNING | ||
455 | "kvm: CALLED SAL_UPDATE_PAL. ignored...\n"); | ||
456 | break; | ||
457 | default: | ||
458 | printk(KERN_WARNING"kvm: called SAL_CALL with unknown index." | ||
459 | " index:%ld\n", index); | ||
460 | status = -1; | ||
461 | break; | ||
462 | } | ||
463 | return ((struct sal_ret_values) {status, r9, r10, r11}); | ||
464 | } | ||
465 | |||
466 | static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1, | ||
467 | u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){ | ||
468 | |||
469 | struct exit_ctl_data *p; | ||
470 | |||
471 | p = kvm_get_exit_data(vcpu); | ||
472 | |||
473 | if (p) { | ||
474 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
475 | *in0 = p->u.sal_data.in0; | ||
476 | *in1 = p->u.sal_data.in1; | ||
477 | *in2 = p->u.sal_data.in2; | ||
478 | *in3 = p->u.sal_data.in3; | ||
479 | *in4 = p->u.sal_data.in4; | ||
480 | *in5 = p->u.sal_data.in5; | ||
481 | *in6 = p->u.sal_data.in6; | ||
482 | *in7 = p->u.sal_data.in7; | ||
483 | return ; | ||
484 | } | ||
485 | } | ||
486 | *in0 = 0; | ||
487 | } | ||
488 | |||
489 | void kvm_sal_emul(struct kvm_vcpu *vcpu) | ||
490 | { | ||
491 | |||
492 | struct sal_ret_values result; | ||
493 | u64 index, in1, in2, in3, in4, in5, in6, in7; | ||
494 | |||
495 | kvm_get_sal_call_data(vcpu, &index, &in1, &in2, | ||
496 | &in3, &in4, &in5, &in6, &in7); | ||
497 | result = sal_emulator(vcpu->kvm, index, in1, in2, in3, | ||
498 | in4, in5, in6, in7); | ||
499 | set_sal_result(vcpu, result); | ||
500 | } | ||
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h new file mode 100644 index 000000000000..13980d9b8bcf --- /dev/null +++ b/arch/ia64/kvm/kvm_minstate.h | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * kvm_minstate.h: min save macros | ||
3 | * Copyright (c) 2007, Intel Corporation. | ||
4 | * | ||
5 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | |||
24 | #include <asm/asmmacro.h> | ||
25 | #include <asm/types.h> | ||
26 | #include <asm/kregs.h> | ||
27 | #include "asm-offsets.h" | ||
28 | |||
/*
 * Switch to the VMM's register backing store and memory stack.
 * On entry r1 is the per-vcpu data base; on exit:
 *   r18 = ar.bsp after the switch (used later to compute RSE.ndirty),
 *   r22 = base of the VMM RBS, r23 = saved guest ar.bspstore,
 *   r28 = saved guest ar.rnat, r1 = base of the VMM pt_regs area.
 * ar.rsc must be forced to enforced-lazy mode before touching
 * ar.rnat/ar.bspstore, then restored to eager mode (0x3) afterwards.
 */
#define KVM_MINSTATE_START_SAVE_MIN \
	mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
	;; \
	mov.m r28 = ar.rnat; \
	addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
	;; \
	lfetch.fault.excl.nt1 [r22]; \
	addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
	mov r23 = ar.bspstore; /* save ar.bspstore */ \
	;; \
	mov ar.bspstore = r22; /* switch to kernel RBS */\
	;; \
	mov r18 = ar.bsp; \
	mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */



/* Close out the min-state save: return to register bank 1. */
#define KVM_MINSTATE_END_SAVE_MIN \
	bsw.1; /* switch back to bank 1 (must be last in insn group) */\
	;;


/*
 * Call the PAL VPS sync_read service for the current vcpu's VPD.
 * Clobbers r20, r24, r25 and b0; r24 is loaded with the return address
 * (ip + 0x20, i.e. the bundle following the br.cond) by convention of
 * the VPS entry points.
 */
#define PAL_VSA_SYNC_READ \
	/* begin to call pal vps sync_read */ \
	add r25 = VMM_VPD_BASE_OFFSET, r21; \
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21; /* entry point */ \
	;; \
	ld8 r25 = [r25]; /* read vpd base */ \
	ld8 r20 = [r20]; \
	;; \
	add r20 = PAL_VPS_SYNC_READ,r20; \
	;; \
{ .mii; \
	nop 0x0; \
	mov r24 = ip; \
	mov b0 = r20; \
	;; \
}; \
{ .mmb; \
	add r24 = 0x20, r24; \
	nop 0x0; \
	br.cond.sptk b0; /* call the service */ \
	;; \
}; \



/* r21 always holds the current vcpu pointer in the VMM. */
#define KVM_MINSTATE_GET_CURRENT(reg)	mov reg=r21
77 | |||
78 | /* | ||
79 | * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | ||
80 | * the minimum state necessary that allows us to turn psr.ic back | ||
81 | * on. | ||
82 | * | ||
83 | * Assumed state upon entry: | ||
84 | * psr.ic: off | ||
85 | * r31: contains saved predicates (pr) | ||
86 | * | ||
87 | * Upon exit, the state is as follows: | ||
88 | * psr.ic: off | ||
89 | * r2 = points to &pt_regs.r16 | ||
90 | * r8 = contents of ar.ccv | ||
91 | * r9 = contents of ar.csd | ||
92 | * r10 = contents of ar.ssd | ||
93 | * r11 = FPSR_DEFAULT | ||
94 | * r12 = kernel sp (kernel virtual address) | ||
95 | * r13 = points to current task_struct (kernel virtual address) | ||
96 | * p15 = TRUE if psr.i is set in cr.ipsr | ||
97 | * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: | ||
98 | * preserved | ||
99 | * | ||
100 | * Note that psr.ic is NOT turned on by this macro. This is so that | ||
101 | * we can pass interruption state as arguments to a handler. | ||
102 | */ | ||
103 | |||
104 | |||
/* PT(f): byte offset of field f inside the VMM pt_regs (from asm-offsets.h). */
#define PT(f) (VMM_PT_REGS_##f##_OFFSET)

/*
 * See the block comment above: saves the minimum interruption state
 * (cr.ipsr/iip/ifs, ar.unat/pfs/rsc/rnat/bspstore/fpsr, b0, predicates,
 * scratch GRs r1-r3, r8-r15) into the VMM pt_regs, switches stacks via
 * KVM_MINSTATE_START_SAVE_MIN, stores cr.iipa/cr.isr into the vcpu, and
 * finally syncs the VPD with PAL_VSA_SYNC_READ.
 */
#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
	KVM_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */ \
	mov r27 = ar.rsc;         /* M */ \
	mov r20 = r1;         /* A */ \
	mov r25 = ar.unat;        /* M */ \
	mov r29 = cr.ipsr;        /* M */ \
	mov r26 = ar.pfs;         /* I */ \
	mov r18 = cr.isr;         \
	COVER;              /* B;; (or nothing) */ \
	;; \
	tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
	mov r1 = r16; \
/*	mov r21=r16;	*/ \
	/* switch from user to kernel RBS: */ \
	;; \
	invala; /* M */ \
	SAVE_IFS; \
	;; \
	KVM_MINSTATE_START_SAVE_MIN \
	adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
	adds r16 = PT(CR_IPSR),r1; \
	;; \
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
	st8 [r16] = r29; /* save cr.ipsr */ \
	;; \
	lfetch.fault.excl.nt1 [r17]; \
	tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
	mov r29 = b0 \
	;; \
	adds r16 = PT(R8),r1; /* initialize first base pointer */\
	adds r17 = PT(R9),r1; /* initialize second base pointer */\
	;; \
	.mem.offset 0,0; st8.spill [r16] = r8,16; \
	.mem.offset 8,0; st8.spill [r17] = r9,16; \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r10,24; \
	.mem.offset 8,0; st8.spill [r17] = r11,24; \
	;; \
	mov r9 = cr.iip; /* M */ \
	mov r10 = ar.fpsr; /* M */ \
	;; \
	st8 [r16] = r9,16; /* save cr.iip */ \
	st8 [r17] = r30,16; /* save cr.ifs */ \
	sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
	;; \
	st8 [r16] = r25,16; /* save ar.unat */ \
	st8 [r17] = r26,16; /* save ar.pfs */ \
	shl r18 = r18,16; /* calc ar.rsc used for "loadrs" */\
	;; \
	st8 [r16] = r27,16; /* save ar.rsc */ \
	st8 [r17] = r28,16; /* save ar.rnat */ \
	;; /* avoid RAW on r16 & r17 */ \
	st8 [r16] = r23,16; /* save ar.bspstore */ \
	st8 [r17] = r31,16; /* save predicates */ \
	;; \
	st8 [r16] = r29,16; /* save b0 */ \
	st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
	;; \
	.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
	.mem.offset 8,0; st8.spill [r17] = r12,16; \
	adds r12 = -16,r1; /* switch to kernel memory stack */ \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r13,16; \
	.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
	mov r13 = r21; /* establish `current' */ \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r15,16; \
	.mem.offset 8,0; st8.spill [r17] = r14,16; \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r2,16; \
	.mem.offset 8,0; st8.spill [r17] = r3,16; \
	adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
	;; \
	adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
	adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
	mov r26 = cr.iipa; \
	mov r27 = cr.isr; \
	;; \
	st8 [r16] = r26; \
	st8 [r17] = r27; \
	;; \
	EXTRA; \
	mov r8 = ar.ccv; \
	mov r9 = ar.csd; \
	mov r10 = ar.ssd; \
	movl r11 = FPSR_DEFAULT; /* L-unit */ \
	adds r17 = VMM_VCPU_GP_OFFSET,r13; \
	;; \
	ld8 r1 = [r17];/* establish kernel global pointer */ \
	;; \
	PAL_VSA_SYNC_READ \
	KVM_MINSTATE_END_SAVE_MIN
199 | |||
200 | /* | ||
201 | * SAVE_REST saves the remainder of pt_regs (with psr.ic on). | ||
202 | * | ||
203 | * Assumed state upon entry: | ||
204 | * psr.ic: on | ||
205 | * r2: points to &pt_regs.f6 | ||
206 | * r3: points to &pt_regs.f7 | ||
207 | * r8: contents of ar.ccv | ||
208 | * r9: contents of ar.csd | ||
209 | * r10: contents of ar.ssd | ||
210 | * r11: FPSR_DEFAULT | ||
211 | * | ||
212 | * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST. | ||
213 | */ | ||
/*
 * Save the remaining scratch state (r16-r31, b6/b7, ar.csd/ar.ssd,
 * ar.unat-after-spill) into pt_regs.  Entry conditions are documented
 * in the comment block above this macro.
 */
#define KVM_SAVE_REST \
	.mem.offset 0,0; st8.spill [r2] = r16,16; \
	.mem.offset 8,0; st8.spill [r3] = r17,16; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r18,16; \
	.mem.offset 8,0; st8.spill [r3] = r19,16; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r20,16; \
	.mem.offset 8,0; st8.spill [r3] = r21,16; \
	mov r18=b6; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r22,16; \
	.mem.offset 8,0; st8.spill [r3] = r23,16; \
	mov r19 = b7; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r24,16; \
	.mem.offset 8,0; st8.spill [r3] = r25,16; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r26,16; \
	.mem.offset 8,0; st8.spill [r3] = r27,16; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r28,16; \
	.mem.offset 8,0; st8.spill [r3] = r29,16; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r30,16; \
	.mem.offset 8,0; st8.spill [r3] = r31,32; \
	;; \
	mov ar.fpsr = r11; \
	st8 [r2] = r8,8; \
	adds r24 = PT(B6)-PT(F7),r3; \
	adds r25 = PT(B7)-PT(F7),r3; \
	;; \
	st8 [r24] = r18,16; /* b6 */ \
	st8 [r25] = r19,16; /* b7 */ \
	adds r2 = PT(R4)-PT(F6),r2; \
	adds r3 = PT(R5)-PT(F7),r3; \
	;; \
	st8 [r24] = r9; /* ar.csd */ \
	st8 [r25] = r10; /* ar.ssd */ \
	;; \
	mov r18 = ar.unat; \
	adds r19 = PT(EML_UNAT)-PT(R4),r2; \
	;; \
	st8 [r19] = r18; /* eml_unat */ \


/*
 * Save the preserved registers r4-r7 plus the resulting ar.unat
 * (NaT collection for the spills above) into pt_regs.
 */
#define KVM_SAVE_EXTRA \
	.mem.offset 0,0; st8.spill [r2] = r4,16; \
	.mem.offset 8,0; st8.spill [r3] = r5,16; \
	;; \
	.mem.offset 0,0; st8.spill [r2] = r6,16; \
	.mem.offset 8,0; st8.spill [r3] = r7; \
	;; \
	mov r26 = ar.unat; \
	;; \
	st8 [r2] = r26;/* eml_unat */ \

/* Variants: with "cover" (interruption entry), optionally preserving r19. */
#define KVM_SAVE_MIN_WITH_COVER		KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
#define KVM_SAVE_MIN_WITH_COVER_R19	KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
/* No cover: cr.ifs slot gets 0 (r0). */
#define KVM_SAVE_MIN			KVM_DO_SAVE_MIN(     , mov r30 = r0, )
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h new file mode 100644 index 000000000000..6d6cbcb14893 --- /dev/null +++ b/arch/ia64/kvm/lapic.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef __KVM_IA64_LAPIC_H | ||
2 | #define __KVM_IA64_LAPIC_H | ||
3 | |||
4 | #include <linux/kvm_host.h> | ||
5 | |||
6 | /* | ||
7 | * vlsapic | ||
8 | */ | ||
/* Software-emulated local SAPIC (vlsapic) state for one vcpu. */
struct kvm_lapic{
	struct kvm_vcpu *vcpu;		/* back-pointer to the owning vcpu */
	uint64_t insvc[4];		/* in-service vectors; presumably one bit
					 * per vector (4*64 = 256) -- confirm */
	uint64_t vhpi;			/* virtual highest-pending-interrupt value
					 * -- TODO confirm semantics */
	uint8_t xtp;			/* external task priority (XTP) byte */
	uint8_t pal_init_pending;	/* non-zero if a PAL INIT is pending */
	uint8_t pad[2];			/* pad to an 8-byte multiple */
};

/* Allocate/free the vlsapic attached to @vcpu. */
int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);

/* Destination matching and interrupt injection (vector + trigger mode). */
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
24 | |||
25 | #endif | ||
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h new file mode 100644 index 000000000000..e585c4607344 --- /dev/null +++ b/arch/ia64/kvm/misc.h | |||
@@ -0,0 +1,93 @@ | |||
1 | #ifndef __KVM_IA64_MISC_H | ||
2 | #define __KVM_IA64_MISC_H | ||
3 | |||
4 | #include <linux/kvm_host.h> | ||
5 | /* | ||
6 | * misc.h | ||
7 | * Copyright (C) 2007, Intel Corporation. | ||
8 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms and conditions of the GNU General Public License, | ||
12 | * version 2, as published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
21 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | *Return p2m base address at host side! | ||
27 | */ | ||
28 | static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) | ||
29 | { | ||
30 | return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS); | ||
31 | } | ||
32 | |||
/*
 * Install the p2m entry for guest frame @gfn: host physical address
 * @paddr combined with the attribute bits in @mem_flags.
 *
 * NOTE(review): PAGE_ALIGN() rounds *up* to the next page boundary, so
 * this stores the intended page only when @paddr is already page-aligned
 * (rounding down would be "paddr & PAGE_MASK") -- confirm that all
 * callers pass page-aligned addresses.
 */
static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
		u64 paddr, u64 mem_flags)
{
	uint64_t *pmt_base = kvm_host_get_pmt(kvm);
	unsigned long pte;

	pte = PAGE_ALIGN(paddr) | mem_flags;
	pmt_base[gfn] = pte;
}
42 | |||
43 | /*Function for translating host address to guest address*/ | ||
44 | |||
45 | static inline void *to_guest(struct kvm *kvm, void *addr) | ||
46 | { | ||
47 | return (void *)((unsigned long)(addr) - kvm->arch.vm_base + | ||
48 | KVM_VM_DATA_BASE); | ||
49 | } | ||
50 | |||
51 | /*Function for translating guest address to host address*/ | ||
52 | |||
53 | static inline void *to_host(struct kvm *kvm, void *addr) | ||
54 | { | ||
55 | return (void *)((unsigned long)addr - KVM_VM_DATA_BASE | ||
56 | + kvm->arch.vm_base); | ||
57 | } | ||
58 | |||
59 | /* Get host context of the vcpu */ | ||
60 | static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu) | ||
61 | { | ||
62 | union context *ctx = &vcpu->arch.host; | ||
63 | return to_guest(vcpu->kvm, ctx); | ||
64 | } | ||
65 | |||
66 | /* Get guest context of the vcpu */ | ||
67 | static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu) | ||
68 | { | ||
69 | union context *ctx = &vcpu->arch.guest; | ||
70 | return to_guest(vcpu->kvm, ctx); | ||
71 | } | ||
72 | |||
73 | /* kvm get exit data from gvmm! */ | ||
74 | static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu) | ||
75 | { | ||
76 | return &vcpu->arch.exit_data; | ||
77 | } | ||
78 | |||
79 | /*kvm get vcpu ioreq for kvm module!*/ | ||
80 | static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu) | ||
81 | { | ||
82 | struct exit_ctl_data *p_ctl_data; | ||
83 | |||
84 | if (vcpu) { | ||
85 | p_ctl_data = kvm_get_exit_data(vcpu); | ||
86 | if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION) | ||
87 | return &p_ctl_data->u.ioreq; | ||
88 | } | ||
89 | |||
90 | return NULL; | ||
91 | } | ||
92 | |||
93 | #endif | ||
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c new file mode 100644 index 000000000000..351bf70da463 --- /dev/null +++ b/arch/ia64/kvm/mmio.c | |||
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * mmio.c: MMIO emulation components. | ||
3 | * Copyright (c) 2004, Intel Corporation. | ||
4 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) | ||
5 | * Kun Tian (Kevin Tian) (Kevin.tian@intel.com) | ||
6 | * | ||
7 | * Copyright (c) 2007 Intel Corporation KVM support. | ||
8 | * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) | ||
9 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms and conditions of the GNU General Public License, | ||
13 | * version 2, as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
22 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/kvm_host.h> | ||
27 | |||
28 | #include "vcpu.h" | ||
29 | |||
/* Store the guest's XTP (external task priority) byte into vcpu state. */
static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
	VLSAPIC_XTP(v) = val;
}
34 | |||
35 | /* | ||
36 | * LSAPIC OFFSET | ||
37 | */ | ||
38 | #define PIB_LOW_HALF(ofst) !(ofst & (1 << 20)) | ||
39 | #define PIB_OFST_INTA 0x1E0000 | ||
40 | #define PIB_OFST_XTP 0x1E0008 | ||
41 | |||
42 | /* | ||
43 | * execute write IPI op. | ||
44 | */ | ||
/*
 * Execute a write-IPI operation: record the IPI address/data in the
 * exit control block and transition to the host side to deliver it.
 * Interrupts are masked across the transition.
 *
 * NOTE(review): the @vcpu parameter is unused -- both the exit data and
 * the transition use current_vcpu.  Presumably callers always pass the
 * current vcpu; confirm before relying on @vcpu here.
 */
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
		uint64_t addr, uint64_t data)
{
	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
	unsigned long psr;

	local_irq_save(psr);

	p->exit_reason = EXIT_REASON_IPI;
	p->u.ipi_data.addr.val = addr;
	p->u.ipi_data.data.val = data;
	vmm_transition(current_vcpu);

	local_irq_restore(psr);

}
61 | |||
62 | void lsapic_write(struct kvm_vcpu *v, unsigned long addr, | ||
63 | unsigned long length, unsigned long val) | ||
64 | { | ||
65 | addr &= (PIB_SIZE - 1); | ||
66 | |||
67 | switch (addr) { | ||
68 | case PIB_OFST_INTA: | ||
69 | /*panic_domain(NULL, "Undefined write on PIB INTA\n");*/ | ||
70 | panic_vm(v); | ||
71 | break; | ||
72 | case PIB_OFST_XTP: | ||
73 | if (length == 1) { | ||
74 | vlsapic_write_xtp(v, val); | ||
75 | } else { | ||
76 | /*panic_domain(NULL, | ||
77 | "Undefined write on PIB XTP\n");*/ | ||
78 | panic_vm(v); | ||
79 | } | ||
80 | break; | ||
81 | default: | ||
82 | if (PIB_LOW_HALF(addr)) { | ||
83 | /*lower half */ | ||
84 | if (length != 8) | ||
85 | /*panic_domain(NULL, | ||
86 | "Can't LHF write with size %ld!\n", | ||
87 | length);*/ | ||
88 | panic_vm(v); | ||
89 | else | ||
90 | vlsapic_write_ipi(v, addr, val); | ||
91 | } else { /* upper half | ||
92 | printk("IPI-UHF write %lx\n",addr);*/ | ||
93 | panic_vm(v); | ||
94 | } | ||
95 | break; | ||
96 | } | ||
97 | } | ||
98 | |||
99 | unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr, | ||
100 | unsigned long length) | ||
101 | { | ||
102 | uint64_t result = 0; | ||
103 | |||
104 | addr &= (PIB_SIZE - 1); | ||
105 | |||
106 | switch (addr) { | ||
107 | case PIB_OFST_INTA: | ||
108 | if (length == 1) /* 1 byte load */ | ||
109 | ; /* There is no i8259, there is no INTA access*/ | ||
110 | else | ||
111 | /*panic_domain(NULL,"Undefined read on PIB INTA\n"); */ | ||
112 | panic_vm(v); | ||
113 | |||
114 | break; | ||
115 | case PIB_OFST_XTP: | ||
116 | if (length == 1) { | ||
117 | result = VLSAPIC_XTP(v); | ||
118 | /* printk("read xtp %lx\n", result); */ | ||
119 | } else { | ||
120 | /*panic_domain(NULL, | ||
121 | "Undefined read on PIB XTP\n");*/ | ||
122 | panic_vm(v); | ||
123 | } | ||
124 | break; | ||
125 | default: | ||
126 | panic_vm(v); | ||
127 | break; | ||
128 | } | ||
129 | return result; | ||
130 | } | ||
131 | |||
/*
 * Perform one guest MMIO access of @s bytes at guest physical address
 * @src_pa.  PIB accesses are handled locally via lsapic_read/write;
 * everything else is packaged as an ioreq and handed to the host side
 * through vmm_transition().  Interrupts stay masked for the whole
 * round-trip.  @dest is the data in (writes) / out (reads); @dir is
 * IOREQ_READ or IOREQ_WRITE; @ma is the mapping attribute (unused here).
 */
static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
					u16 s, int ma, int dir)
{
	unsigned long iot;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);

	local_irq_save(psr);

	/* Intercept the access for the PIB range. */
	if (iot == GPFN_PIB) {
		if (!dir)
			lsapic_write(vcpu, src_pa, s, *dest);
		else
			*dest = lsapic_read(vcpu, src_pa, s);
		goto out;
	}
	/* Build the ioreq and let the host complete it. */
	p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
	p->u.ioreq.addr = src_pa;
	p->u.ioreq.size = s;
	p->u.ioreq.dir = dir;
	if (dir == IOREQ_WRITE)
		p->u.ioreq.data = *dest;
	p->u.ioreq.state = STATE_IOREQ_READY;
	vmm_transition(vcpu);

	/* The host must have marked the request completed. */
	if (p->u.ioreq.state == STATE_IORESP_READY) {
		if (dir == IOREQ_READ)
			*dest = p->u.ioreq.data;
	} else
		panic_vm(vcpu);
out:
	local_irq_restore(psr);
	return ;
}
169 | |||
/*
   dir 1: read 0:write
   inst_type 0:integer 1:floating point
 */
#define SL_INTEGER	0	/* store/load integer */
#define SL_FLOATING	1	/* store/load floating */

/*
 * Decode and emulate the memory instruction at cr.iip that faulted on
 * an MMIO address @padr (with mapping attribute @ma).  Recognizes the
 * ia64 M-unit formats: M1/M2/M3 integer load/store (with register or
 * immediate update), M9/M10 floating-point spill/stf8, M12 ldfp8 pair
 * and M15 lfetch.  Unknown encodings kill the VM.
 *
 * NOTE(review): on some decode paths neither IOREQ_READ nor IOREQ_WRITE
 * is assigned (e.g. M1/M3 with x6 >> 2 == 0xb), leaving dir == 0 and
 * `data` possibly uninitialized when it reaches mmio_access() -- confirm
 * those encodings cannot fault here.  Similarly, `inst` is only assigned
 * for slot 0/1/2; `ri` is presumably never 3 -- confirm.
 */
void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
	struct kvm_pt_regs *regs;
	IA64_BUNDLE bundle;
	int slot, dir = 0;
	int inst_type = -1;
	u16 size = 0;
	u64 data, slot1a, slot1b, temp, update_reg;
	s32 imm;
	INST64 inst;

	regs = vcpu_regs(vcpu);

	if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
		/* if fetch code fail, return and try again */
		return;
	}
	/* Pick the faulting instruction slot out of the bundle. */
	slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
	if (!slot)
		inst.inst = bundle.slot0;
	else if (slot == 1) {
		slot1a = bundle.slot1a;
		slot1b = bundle.slot1b;
		inst.inst = slot1a + (slot1b << 18);
	} else if (slot == 2)
		inst.inst = bundle.slot2;

	/* Integer Load/Store */
	if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
		inst_type = SL_INTEGER;
		size = (inst.M1.x6 & 0x3);	/* low 2 bits of x6 encode log2(size) */
		if ((inst.M1.x6 >> 2) > 0xb) {
			/*write*/
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M4.r2);
		} else if ((inst.M1.x6 >> 2) < 0xb) {
			/*read*/
			dir = IOREQ_READ;
		}
	} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
		/* Integer Load + Reg update */
		inst_type = SL_INTEGER;
		dir = IOREQ_READ;
		size = (inst.M2.x6 & 0x3);
		temp = vcpu_get_gr(vcpu, inst.M2.r3);
		update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
		temp += update_reg;
		vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
	} else if (inst.M3.major == 5) {
		/*Integer Load/Store + Imm update*/
		inst_type = SL_INTEGER;
		size = (inst.M3.x6&0x3);
		if ((inst.M5.x6 >> 2) > 0xb) {
			/*write*/
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M5.r2);
			temp = vcpu_get_gr(vcpu, inst.M5.r3);
			/* sign-extend the 9-bit immediate via a shifted s32 */
			imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
				(inst.M5.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);

		} else if ((inst.M3.x6 >> 2) < 0xb) {
			/*read*/
			dir = IOREQ_READ;
			temp = vcpu_get_gr(vcpu, inst.M3.r3);
			imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
				(inst.M3.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);

		}
	} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
			&& inst.M9.m == 0 && inst.M9.x == 0) {
		/* Floating-point spill*/
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
		/* Write high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
		/* Floating-point spill + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

		/* Write high word.FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
		/* Floating-point stf8 + Imm update */
		struct ia64_fpreg v;
		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		size = 3;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		data = v.u.bits[0]; /* Significand. */
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
	} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
			&& inst.M15.x6 <= 0x2f) {
		/* lfetch: only the base-register update is emulated;
		 * no data is transferred. */
		temp = vcpu_get_gr(vcpu, inst.M15.r3);
		imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
			(inst.M15.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

		vcpu_increment_iip(vcpu);
		return;
	} else if (inst.M12.major == 6 && inst.M12.m == 1
			&& inst.M12.x == 1 && inst.M12.x6 == 1) {
		/* Floating-point Load Pair + Imm ldfp8 M12*/
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_READ;
		size = 8;	/*ldfd*/
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;	/* biased exponent for an 8-byte integer load */
		vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
		padr += 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
		padr += 8;
		vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
		vcpu_increment_iip(vcpu);
		return;
	} else {
		inst_type = -1;
		panic_vm(vcpu);
	}

	size = 1 << size;	/* log2(bytes) -> bytes */
	if (dir == IOREQ_WRITE) {
		mmio_access(vcpu, padr, &data, size, ma, dir);
	} else {
		mmio_access(vcpu, padr, &data, size, ma, dir);
		if (inst_type == SL_INTEGER)
			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
		else
			panic_vm(vcpu);

	}
	vcpu_increment_iip(vcpu);
}
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S new file mode 100644 index 000000000000..e4f15d641b22 --- /dev/null +++ b/arch/ia64/kvm/optvfault.S | |||
@@ -0,0 +1,918 @@ | |||
1 | /* | ||
2 | * arch/ia64/vmx/optvfault.S | ||
3 | * optimize virtualization fault handler | ||
4 | * | ||
5 | * Copyright (C) 2006 Intel Co | ||
6 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
7 | */ | ||
8 | |||
9 | #include <asm/asmmacro.h> | ||
10 | #include <asm/processor.h> | ||
11 | |||
12 | #include "vti.h" | ||
13 | #include "asm-offsets.h" | ||
14 | |||
15 | #define ACCE_MOV_FROM_AR | ||
16 | #define ACCE_MOV_FROM_RR | ||
17 | #define ACCE_MOV_TO_RR | ||
18 | #define ACCE_RSM | ||
19 | #define ACCE_SSM | ||
20 | #define ACCE_MOV_TO_PSR | ||
21 | #define ACCE_THASH | ||
22 | |||
23 | //mov r1=ar3 | ||
// Fast path for the guest's "mov rX=ar.itc" (mov-from-AR) virtualization
// fault: compute the virtualized ITC as host ar.itc plus the per-vcpu
// offset, record it as last_itc, and write it to the target GR via the
// asm_mov_to_reg jump table.  On entry: r21 = vcpu, r25 = faulting
// instruction encoding, b0 = return point into the fault handler.
24 | GLOBAL_ENTRY(kvm_asm_mov_from_ar) | ||
25 | #ifndef ACCE_MOV_FROM_AR | ||
26 | br.many kvm_virtualization_fault_back | ||
27 | #endif | ||
28 | add r18=VMM_VCPU_ITC_OFS_OFFSET, r21 | ||
29 | add r16=VMM_VCPU_LAST_ITC_OFFSET,r21 | ||
// r17 = target GR number (7-bit r1 field at instruction bits 6..12)
30 | extr.u r17=r25,6,7 | ||
31 | ;; | ||
32 | ld8 r18=[r18] | ||
33 | mov r19=ar.itc | ||
// save the original return branch register; kvm_resume_to_guest uses r24
34 | mov r24=b0 | ||
35 | ;; | ||
// r19 = guest-visible ITC = host ITC + vcpu's itc offset
36 | add r19=r19,r18 | ||
37 | addl r20=@gprel(asm_mov_to_reg),gp | ||
38 | ;; | ||
39 | st8 [r16] = r19 | ||
// r30 = continuation (kvm_resume_to_guest); each table entry is one
// 16-byte bundle, hence the *16 (shladd by 4) index below
40 | adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20 | ||
41 | shladd r17=r17,4,r20 | ||
42 | ;; | ||
43 | mov b0=r17 | ||
44 | br.sptk.few b0 | ||
45 | ;; | ||
46 | END(kvm_asm_mov_from_ar) | ||
47 | |||
48 | |||
49 | // mov r1=rr[r3] | ||
// Fast path for guest "mov r1=rr[r3]": read GR[r3] via the
// asm_mov_from_reg table, use its top 3 bits (region number) to index
// the vcpu's virtual region registers, and deliver vrr[region] to the
// target GR through asm_mov_to_reg.  r21 = vcpu, r25 = instruction.
50 | GLOBAL_ENTRY(kvm_asm_mov_from_rr) | ||
51 | #ifndef ACCE_MOV_FROM_RR | ||
52 | br.many kvm_virtualization_fault_back | ||
53 | #endif | ||
// r16 = r3 field (bits 20..26), r17 = r1 field (bits 6..12)
54 | extr.u r16=r25,20,7 | ||
55 | extr.u r17=r25,6,7 | ||
56 | addl r20=@gprel(asm_mov_from_reg),gp | ||
57 | ;; | ||
// r30 = where the mov-from-reg table entry branches back to
58 | adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20 | ||
59 | shladd r16=r16,4,r20 | ||
60 | mov r24=b0 | ||
61 | ;; | ||
62 | add r27=VMM_VCPU_VRR0_OFFSET,r21 | ||
63 | mov b0=r16 | ||
// table entry leaves GR[r3]'s value in r19
64 | br.many b0 | ||
65 | ;; | ||
66 | kvm_asm_mov_from_rr_back_1: | ||
67 | adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 | ||
68 | adds r22=asm_mov_to_reg-asm_mov_from_reg,r20 | ||
// r26 = virtual region number = vaddr >> 61
69 | shr.u r26=r19,61 | ||
70 | ;; | ||
71 | shladd r17=r17,4,r22 | ||
72 | shladd r27=r26,3,r27 | ||
73 | ;; | ||
// r19 = vcpu->arch.vrr[region]; jump table writes it to the target GR
74 | ld8 r19=[r27] | ||
75 | mov b0=r17 | ||
76 | br.many b0 | ||
77 | END(kvm_asm_mov_from_rr) | ||
78 | |||
79 | |||
80 | // mov rr[r3]=r2 | ||
// Fast path for guest "mov rr[r3]=r2": record the guest's rr value in
// vcpu->arch.vrr[], then build and (when not in metaphysical mode)
// install the corresponding machine region register value.  Writes to
// region 6 fall back to the slow C handler.  r21 = vcpu, r25 = insn.
81 | GLOBAL_ENTRY(kvm_asm_mov_to_rr) | ||
82 | #ifndef ACCE_MOV_TO_RR | ||
83 | br.many kvm_virtualization_fault_back | ||
84 | #endif | ||
// r16 = r3 field (bits 20..26), r17 = r2 field (bits 13..19)
85 | extr.u r16=r25,20,7 | ||
86 | extr.u r17=r25,13,7 | ||
87 | addl r20=@gprel(asm_mov_from_reg),gp | ||
88 | ;; | ||
89 | adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20 | ||
90 | shladd r16=r16,4,r20 | ||
91 | mov r22=b0 | ||
92 | ;; | ||
93 | add r27=VMM_VCPU_VRR0_OFFSET,r21 | ||
94 | mov b0=r16 | ||
// fetch GR[r3] (the target virtual address) into r19
95 | br.many b0 | ||
96 | ;; | ||
97 | kvm_asm_mov_to_rr_back_1: | ||
98 | adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20 | ||
// r23 = region number of the address in r19
99 | shr.u r23=r19,61 | ||
100 | shladd r17=r17,4,r20 | ||
101 | ;; | ||
102 | //if rr6, go back | ||
103 | cmp.eq p6,p0=6,r23 | ||
104 | mov b0=r22 | ||
105 | (p6) br.cond.dpnt.many kvm_virtualization_fault_back | ||
106 | ;; | ||
// r28 keeps the address for the final "mov rr[r28]=" below
107 | mov r28=r19 | ||
108 | mov b0=r17 | ||
// fetch GR[r2] (the new rr value) into r19
109 | br.many b0 | ||
110 | kvm_asm_mov_to_rr_back_2: | ||
111 | adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 | ||
112 | shladd r27=r23,3,r27 | ||
113 | ;; // vrr.rid<<4 |0xe | ||
// remember the guest's raw rr value in vrr[region]
114 | st8 [r27]=r19 | ||
115 | mov b0=r30 | ||
116 | ;; | ||
// build the machine rr value: shifted/extended rid, original ps and ve
117 | extr.u r16=r19,8,26 | ||
118 | extr.u r18 =r19,2,6 | ||
119 | mov r17 =0xe | ||
120 | ;; | ||
121 | shladd r16 = r16, 4, r17 | ||
122 | extr.u r19 =r19,0,8 | ||
123 | ;; | ||
124 | shl r16 = r16,8 | ||
125 | ;; | ||
126 | add r19 = r19, r16 | ||
127 | ;; //set ve 1 | ||
128 | dep r19=-1,r19,0,1 | ||
// clamp the page size field to 14 (16KB) for the machine rr
129 | cmp.lt p6,p0=14,r18 | ||
130 | ;; | ||
131 | (p6) mov r18=14 | ||
132 | ;; | ||
133 | (p6) dep r19=r18,r19,2,6 | ||
134 | ;; | ||
// regions 0 and 4 are also mirrored into the metaphysical saved-rr slots
135 | cmp.eq p6,p0=0,r23 | ||
136 | ;; | ||
137 | cmp.eq.or p6,p0=4,r23 | ||
138 | ;; | ||
139 | adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
140 | (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 | ||
141 | ;; | ||
142 | ld4 r16=[r16] | ||
143 | cmp.eq p7,p0=r0,r0 | ||
144 | (p6) shladd r17=r23,1,r17 | ||
145 | ;; | ||
146 | (p6) st8 [r17]=r19 | ||
// mode_flags bit 0 set => metaphysical mode: skip the hardware rr write
147 | (p6) tbit.nz p6,p7=r16,0 | ||
148 | ;; | ||
149 | (p7) mov rr[r28]=r19 | ||
150 | mov r24=r22 | ||
151 | br.many b0 | ||
152 | END(kvm_asm_mov_to_rr) | ||
153 | |||
154 | |||
155 | //rsm | ||
// Fast path for the guest "rsm imm24" instruction: clear the imm24 bits
// in the virtual PSR (in the VPD) and the permitted subset
// (IC|I|DT|SI) in the real cr.ipsr.  If the guest clears psr.dt while
// not already in metaphysical mode, switch rr0/rr4 to the metaphysical
// region registers and set mode_flags bit 0.  r21 = vcpu, r25 = insn.
156 | GLOBAL_ENTRY(kvm_asm_rsm) | ||
157 | #ifndef ACCE_RSM | ||
158 | br.many kvm_virtualization_fault_back | ||
159 | #endif | ||
160 | add r16=VMM_VPD_BASE_OFFSET,r21 | ||
// reassemble the split imm24 field from the rsm encoding
161 | extr.u r26=r25,6,21 | ||
162 | extr.u r27=r25,31,2 | ||
163 | ;; | ||
164 | ld8 r16=[r16] | ||
165 | extr.u r28=r25,36,1 | ||
166 | dep r26=r27,r26,21,2 | ||
167 | ;; | ||
168 | add r17=VPD_VPSR_START_OFFSET,r16 | ||
169 | add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
170 | //r26 is imm24 | ||
171 | dep r26=r28,r26,23,1 | ||
172 | ;; | ||
173 | ld8 r18=[r17] | ||
174 | movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI | ||
175 | ld4 r23=[r22] | ||
// r27 = ~imm24  (-1 - x == ~x)
176 | sub r27=-1,r26 | ||
177 | mov r24=b0 | ||
178 | ;; | ||
179 | mov r20=cr.ipsr | ||
180 | or r28=r27,r28 | ||
// vpsr &= ~imm24, written back to the VPD
181 | and r19=r18,r27 | ||
182 | ;; | ||
183 | st8 [r17]=r19 | ||
// ipsr: only clear bits that are both in imm24 and in the allowed mask
184 | and r20=r20,r28 | ||
185 | /* Commented out due to lack of fp lazy algorithm support | ||
186 | adds r27=IA64_VCPU_FP_PSR_OFFSET,r21 | ||
187 | ;; | ||
188 | ld8 r27=[r27] | ||
189 | ;; | ||
190 | tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT | ||
191 | ;; | ||
192 | (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 | ||
193 | */ | ||
194 | ;; | ||
195 | mov cr.ipsr=r20 | ||
// p6: already metaphysical, or psr.dt not being cleared => just resume
196 | tbit.nz p6,p0=r23,0 | ||
197 | ;; | ||
198 | tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT | ||
199 | (p6) br.dptk kvm_resume_to_guest | ||
200 | ;; | ||
// enter metaphysical mode: install meta rr0/rr4, set mode_flags bit 0
201 | add r26=VMM_VCPU_META_RR0_OFFSET,r21 | ||
202 | add r27=VMM_VCPU_META_RR0_OFFSET+8,r21 | ||
203 | dep r23=-1,r23,0,1 | ||
204 | ;; | ||
205 | ld8 r26=[r26] | ||
206 | ld8 r27=[r27] | ||
207 | st4 [r22]=r23 | ||
// r28 = 4 << 61, i.e. the base address of region 4
208 | dep.z r28=4,61,3 | ||
209 | ;; | ||
210 | mov rr[r0]=r26 | ||
211 | ;; | ||
212 | mov rr[r28]=r27 | ||
213 | ;; | ||
214 | srlz.d | ||
215 | br.many kvm_resume_to_guest | ||
216 | END(kvm_asm_rsm) | ||
217 | |||
218 | |||
219 | //ssm | ||
// Fast path for the guest "ssm imm24" instruction: set the imm24 bits
// in the virtual PSR and in cr.ipsr.  If DT, RT and IT all become set
// while in metaphysical mode, restore the saved virtual rr0/rr4 and
// leave metaphysical mode.  Finally, if interrupts became enabled and
// a pending interrupt (vhpi) outranks vtpr, dispatch it.
220 | GLOBAL_ENTRY(kvm_asm_ssm) | ||
221 | #ifndef ACCE_SSM | ||
222 | br.many kvm_virtualization_fault_back | ||
223 | #endif | ||
224 | add r16=VMM_VPD_BASE_OFFSET,r21 | ||
// reassemble the split imm24 field from the ssm encoding
225 | extr.u r26=r25,6,21 | ||
226 | extr.u r27=r25,31,2 | ||
227 | ;; | ||
228 | ld8 r16=[r16] | ||
229 | extr.u r28=r25,36,1 | ||
230 | dep r26=r27,r26,21,2 | ||
231 | ;; //r26 is imm24 | ||
232 | add r27=VPD_VPSR_START_OFFSET,r16 | ||
233 | dep r26=r28,r26,23,1 | ||
234 | ;; //r19 vpsr | ||
// r29 = old vpsr (used below to detect an I-bit 0->1 transition)
235 | ld8 r29=[r27] | ||
236 | mov r24=b0 | ||
237 | ;; | ||
238 | add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
239 | mov r20=cr.ipsr | ||
// new vpsr = old vpsr | imm24, written back to the VPD
240 | or r19=r29,r26 | ||
241 | ;; | ||
242 | ld4 r23=[r22] | ||
243 | st8 [r27]=r19 | ||
244 | or r20=r20,r26 | ||
245 | ;; | ||
246 | mov cr.ipsr=r20 | ||
247 | movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT | ||
248 | ;; | ||
249 | and r19=r28,r19 | ||
// skip the rr switch unless in metaphysical mode with DT|RT|IT now set
250 | tbit.z p6,p0=r23,0 | ||
251 | ;; | ||
252 | cmp.ne.or p6,p0=r28,r19 | ||
253 | (p6) br.dptk kvm_asm_ssm_1 | ||
254 | ;; | ||
// leave metaphysical mode: restore saved rr0/rr4, clear mode_flags bit 0
255 | add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 | ||
256 | add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21 | ||
257 | dep r23=0,r23,0,1 | ||
258 | ;; | ||
259 | ld8 r26=[r26] | ||
260 | ld8 r27=[r27] | ||
261 | st4 [r22]=r23 | ||
// r28 = 4 << 61, i.e. the base address of region 4
262 | dep.z r28=4,61,3 | ||
263 | ;; | ||
264 | mov rr[r0]=r26 | ||
265 | ;; | ||
266 | mov rr[r28]=r27 | ||
267 | ;; | ||
268 | srlz.d | ||
269 | ;; | ||
270 | kvm_asm_ssm_1: | ||
// resume directly unless psr.i went from 0 to 1
271 | tbit.nz p6,p0=r29,IA64_PSR_I_BIT | ||
272 | ;; | ||
273 | tbit.z.or p6,p0=r19,IA64_PSR_I_BIT | ||
274 | (p6) br.dptk kvm_resume_to_guest | ||
275 | ;; | ||
// compare pending-interrupt priority (vhpi) against vtpr.mic/mmi
276 | add r29=VPD_VTPR_START_OFFSET,r16 | ||
277 | add r30=VPD_VHPI_START_OFFSET,r16 | ||
278 | ;; | ||
279 | ld8 r29=[r29] | ||
280 | ld8 r30=[r30] | ||
281 | ;; | ||
282 | extr.u r17=r29,4,4 | ||
283 | extr.u r18=r29,16,1 | ||
284 | ;; | ||
285 | dep r17=r18,r17,4,1 | ||
286 | ;; | ||
287 | cmp.gt p6,p0=r30,r17 | ||
288 | (p6) br.dpnt.few kvm_asm_dispatch_vexirq | ||
289 | br.many kvm_resume_to_guest | ||
290 | END(kvm_asm_ssm) | ||
291 | |||
292 | |||
293 | //mov psr.l=r2 | ||
// Fast path for guest "mov psr.l=r2": replace the low 32 bits of the
// virtual PSR with GR[r2], mirror the change into cr.ipsr, switch the
// metaphysical/virtual rr0+rr4 pair if the DT/RT/IT translation bits
// changed state, and finally dispatch a pending interrupt if psr.i was
// just enabled and vhpi outranks vtpr.  r21 = vcpu, r25 = insn.
294 | GLOBAL_ENTRY(kvm_asm_mov_to_psr) | ||
295 | #ifndef ACCE_MOV_TO_PSR | ||
296 | br.many kvm_virtualization_fault_back | ||
297 | #endif | ||
298 | add r16=VMM_VPD_BASE_OFFSET,r21 | ||
299 | extr.u r26=r25,13,7 //r2 | ||
300 | ;; | ||
301 | ld8 r16=[r16] | ||
302 | addl r20=@gprel(asm_mov_from_reg),gp | ||
303 | ;; | ||
304 | adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20 | ||
305 | shladd r26=r26,4,r20 | ||
306 | mov r24=b0 | ||
307 | ;; | ||
308 | add r27=VPD_VPSR_START_OFFSET,r16 | ||
309 | mov b0=r26 | ||
// fetch GR[r2] (the new psr.l value) into r19
310 | br.many b0 | ||
311 | ;; | ||
312 | kvm_asm_mov_to_psr_back: | ||
// r17 = old vpsr; r30 (below) = new vpsr = old high 32 | new low 32
313 | ld8 r17=[r27] | ||
314 | add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 | ||
315 | dep r19=0,r19,32,32 | ||
316 | ;; | ||
317 | ld4 r23=[r22] | ||
318 | dep r18=0,r17,0,32 | ||
319 | ;; | ||
320 | add r30=r18,r19 | ||
321 | movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT | ||
322 | ;; | ||
323 | st8 [r27]=r30 | ||
// compare the translation bits before (r29) and after (r27) the write
324 | and r27=r28,r30 | ||
325 | and r29=r28,r17 | ||
326 | ;; | ||
// p5: translation bits unchanged; p6: all now set; p7: not all set
327 | cmp.eq p5,p0=r29,r27 | ||
328 | cmp.eq p6,p7=r28,r27 | ||
329 | (p5) br.many kvm_asm_mov_to_psr_1 | ||
330 | ;; | ||
331 | //virtual to physical | ||
332 | (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21 | ||
333 | (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21 | ||
334 | (p7) dep r23=-1,r23,0,1 | ||
335 | ;; | ||
336 | //physical to virtual | ||
337 | (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 | ||
338 | (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21 | ||
339 | (p6) dep r23=0,r23,0,1 | ||
340 | ;; | ||
341 | ld8 r26=[r26] | ||
342 | ld8 r27=[r27] | ||
343 | st4 [r22]=r23 | ||
// r28 = 4 << 61, i.e. the base address of region 4
344 | dep.z r28=4,61,3 | ||
345 | ;; | ||
346 | mov rr[r0]=r26 | ||
347 | ;; | ||
348 | mov rr[r28]=r27 | ||
349 | ;; | ||
350 | srlz.d | ||
351 | ;; | ||
352 | kvm_asm_mov_to_psr_1: | ||
353 | mov r20=cr.ipsr | ||
354 | movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT | ||
355 | ;; | ||
// new ipsr low 32 = guest value forced to keep IC|I|DT|SI|RT set
356 | or r19=r19,r28 | ||
357 | dep r20=0,r20,0,32 | ||
358 | ;; | ||
359 | add r20=r19,r20 | ||
360 | mov b0=r24 | ||
361 | ;; | ||
362 | /* Commented out due to lack of fp lazy algorithm support | ||
363 | adds r27=IA64_VCPU_FP_PSR_OFFSET,r21 | ||
364 | ;; | ||
365 | ld8 r27=[r27] | ||
366 | ;; | ||
367 | tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT | ||
368 | ;; | ||
369 | (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 | ||
370 | ;; | ||
371 | */ | ||
372 | mov cr.ipsr=r20 | ||
// resume unless psr.i transitioned from 0 to 1
373 | cmp.ne p6,p0=r0,r0 | ||
374 | ;; | ||
375 | tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT | ||
376 | tbit.z.or p6,p0=r30,IA64_PSR_I_BIT | ||
377 | (p6) br.dpnt.few kvm_resume_to_guest | ||
378 | ;; | ||
// compare pending-interrupt priority (vhpi) against vtpr.mic/mmi
379 | add r29=VPD_VTPR_START_OFFSET,r16 | ||
380 | add r30=VPD_VHPI_START_OFFSET,r16 | ||
381 | ;; | ||
382 | ld8 r29=[r29] | ||
383 | ld8 r30=[r30] | ||
384 | ;; | ||
385 | extr.u r17=r29,4,4 | ||
386 | extr.u r18=r29,16,1 | ||
387 | ;; | ||
388 | dep r17=r18,r17,4,1 | ||
389 | ;; | ||
390 | cmp.gt p6,p0=r30,r17 | ||
391 | (p6) br.dpnt.few kvm_asm_dispatch_vexirq | ||
392 | br.many kvm_resume_to_guest | ||
393 | END(kvm_asm_mov_to_psr) | ||
394 | |||
395 | |||
// Advance the guest IP past the just-emulated instruction (bump
// cr.ipsr.ri, and on slot-2 wrap move cr.iip to the next bundle), then
// branch to the C-level virtual-external-interrupt dispatcher with
// r30 = 1.
396 | ENTRY(kvm_asm_dispatch_vexirq) | ||
397 | //increment iip | ||
398 | mov r16=cr.ipsr | ||
399 | ;; | ||
// r17 = current slot number (psr.ri); p6 set when in slot 2
400 | extr.u r17=r16,IA64_PSR_RI_BIT,2 | ||
401 | tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 | ||
402 | ;; | ||
// slot 2: wrap to slot 0 of the next 16-byte bundle
403 | (p6) mov r18=cr.iip | ||
404 | (p6) mov r17=r0 | ||
405 | (p7) add r17=1,r17 | ||
406 | ;; | ||
407 | (p6) add r18=0x10,r18 | ||
408 | dep r16=r17,r16,IA64_PSR_RI_BIT,2 | ||
409 | ;; | ||
410 | (p6) mov cr.iip=r18 | ||
411 | mov cr.ipsr=r16 | ||
412 | mov r30 =1 | ||
413 | br.many kvm_dispatch_vexirq | ||
414 | END(kvm_asm_dispatch_vexirq) | ||
415 | |||
416 | // thash | ||
417 | // TODO: add support when pta.vf = 1 | ||
// Fast path for the guest "thash r1=r3" instruction: compute the
// long-format VHPT hash address for the virtual address in GR[r3],
// using the guest's PTA and the rr.ps of the address's region, and
// write the result to GR[r1].  pta.vf == 1 is not handled here (TODO).
418 | GLOBAL_ENTRY(kvm_asm_thash) | ||
419 | #ifndef ACCE_THASH | ||
420 | br.many kvm_virtualization_fault_back | ||
421 | #endif | ||
422 | extr.u r17=r25,20,7 // get r3 from opcode in r25 | ||
423 | extr.u r18=r25,6,7 // get r1 from opcode in r25 | ||
424 | addl r20=@gprel(asm_mov_from_reg),gp | ||
425 | ;; | ||
426 | adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20 | ||
427 | shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17) | ||
428 | adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs | ||
429 | ;; | ||
430 | mov r24=b0 | ||
431 | ;; | ||
432 | ld8 r16=[r16] // get VPD addr | ||
433 | mov b0=r17 | ||
434 | br.many b0 // r19 return value | ||
435 | ;; | ||
436 | kvm_asm_thash_back1: | ||
437 | shr.u r23=r19,61 // get RR number | ||
438 | adds r25=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr | ||
439 | adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta | ||
440 | ;; | ||
441 | shladd r27=r23,3,r25 // get vcpu->arch.vrr[r23]'s addr | ||
442 | ld8 r17=[r16] // get PTA | ||
443 | mov r26=1 | ||
444 | ;; | ||
445 | extr.u r29=r17,2,6 // get pta.size | ||
446 | ld8 r25=[r27] // get vcpu->arch.vrr[r23]'s value | ||
447 | ;; | ||
448 | extr.u r25=r25,2,6 // get rr.ps | ||
449 | shl r22=r26,r29 // 1UL << pta.size | ||
450 | ;; | ||
451 | shr.u r23=r19,r25 // vaddr >> rr.ps | ||
452 | adds r26=3,r29 // pta.size + 3 | ||
453 | shl r27=r17,3 // pta << 3 | ||
454 | ;; | ||
455 | shl r23=r23,3 // (vaddr >> rr.ps) << 3 | ||
456 | shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3) | ||
457 | movl r16=7<<61 | ||
458 | ;; | ||
459 | adds r22=-1,r22 // (1UL << pta.size) - 1 | ||
460 | shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size | ||
461 | and r19=r19,r16 // vaddr & VRN_MASK | ||
462 | ;; | ||
463 | and r22=r22,r23 // vhpt_offset | ||
464 | or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size) | ||
465 | adds r26=asm_mov_to_reg-asm_mov_from_reg,r20 | ||
466 | ;; | ||
467 | or r19=r19,r22 // calc pval | ||
468 | shladd r17=r18,4,r26 | ||
469 | adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 | ||
470 | ;; | ||
// deliver the hash address in r19 to GR[r1] via the mov-to-reg table
471 | mov b0=r17 | ||
472 | br.many b0 | ||
473 | END(kvm_asm_thash) | ||
474 | |||
// asm_mov_to_reg table entry for r0: r0 is hardwired to zero, so the
// write is dropped (a bundle of nops).
475 | #define MOV_TO_REG0 \ | ||
476 | {; \ | ||
477 | nop.b 0x0; \ | ||
478 | nop.b 0x0; \ | ||
479 | nop.b 0x0; \ | ||
480 | ;; \ | ||
481 | }; | ||
482 | |||
483 | |||
// One 16-byte table entry: write r19 into GR n, then branch to the
// continuation address the caller placed in r30.
484 | #define MOV_TO_REG(n) \ | ||
485 | {; \ | ||
486 | mov r##n##=r19; \ | ||
487 | mov b0=r30; \ | ||
488 | br.sptk.many b0; \ | ||
489 | ;; \ | ||
490 | }; | ||
491 | |||
492 | |||
// One 16-byte table entry: read GR n into r19, then branch to the
// continuation address the caller placed in r30.
493 | #define MOV_FROM_REG(n) \ | ||
494 | {; \ | ||
495 | mov r19=r##n##; \ | ||
496 | mov b0=r30; \ | ||
497 | br.sptk.many b0; \ | ||
498 | ;; \ | ||
499 | }; | ||
500 | |||
501 | |||
// Write r19 into banked register r16-r31 (GR n): registers 16-31 have
// two banks, so the value is staged in r2, bank 1 is selected with
// bsw.1, the write performed, and bank 0 restored before returning to
// the continuation in r30.  r26 preserves the caller's r2.
502 | #define MOV_TO_BANK0_REG(n) \ | ||
503 | ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \ | ||
504 | {; \ | ||
505 | mov r26=r2; \ | ||
506 | mov r2=r19; \ | ||
507 | bsw.1; \ | ||
508 | ;; \ | ||
509 | }; \ | ||
510 | {; \ | ||
511 | mov r##n##=r2; \ | ||
512 | nop.b 0x0; \ | ||
513 | bsw.0; \ | ||
514 | ;; \ | ||
515 | }; \ | ||
516 | {; \ | ||
517 | mov r2=r26; \ | ||
518 | mov b0=r30; \ | ||
519 | br.sptk.many b0; \ | ||
520 | ;; \ | ||
521 | }; \ | ||
522 | END(asm_mov_to_bank0_reg##n##) | ||
523 | |||
524 | |||
// Read banked register r16-r31 (GR n) into r19: select bank 1 with
// bsw.1, copy the register via r2, switch back to bank 0, restore the
// caller's r2 (saved in r26), and return to the continuation in r30.
525 | #define MOV_FROM_BANK0_REG(n) \ | ||
526 | ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \ | ||
527 | {; \ | ||
528 | mov r26=r2; \ | ||
529 | nop.b 0x0; \ | ||
530 | bsw.1; \ | ||
531 | ;; \ | ||
532 | }; \ | ||
533 | {; \ | ||
534 | mov r2=r##n##; \ | ||
535 | nop.b 0x0; \ | ||
536 | bsw.0; \ | ||
537 | ;; \ | ||
538 | }; \ | ||
539 | {; \ | ||
540 | mov r19=r2; \ | ||
541 | mov r2=r26; \ | ||
542 | mov b0=r30; \ | ||
543 | }; \ | ||
544 | {; \ | ||
545 | nop.b 0x0; \ | ||
546 | nop.b 0x0; \ | ||
547 | br.sptk.many b0; \ | ||
548 | ;; \ | ||
549 | }; \ | ||
550 | END(asm_mov_from_bank0_reg##n##) | ||
551 | |||
552 | |||
// Table entry for banked GRs 16-31 in asm_mov_to_reg: a one-bundle
// trampoline to the out-of-line asm_mov_to_bank0_reg##n helper (the
// bank-switch sequence does not fit in a single 16-byte entry).
553 | #define JMP_TO_MOV_TO_BANK0_REG(n) \ | ||
554 | {; \ | ||
555 | nop.b 0x0; \ | ||
556 | nop.b 0x0; \ | ||
557 | br.sptk.many asm_mov_to_bank0_reg##n##; \ | ||
558 | ;; \ | ||
559 | } | ||
560 | |||
561 | |||
// Table entry for banked GRs 16-31 in asm_mov_from_reg: a one-bundle
// trampoline to the out-of-line asm_mov_from_bank0_reg##n helper.
562 | #define JMP_TO_MOV_FROM_BANK0_REG(n) \ | ||
563 | {; \ | ||
564 | nop.b 0x0; \ | ||
565 | nop.b 0x0; \ | ||
566 | br.sptk.many asm_mov_from_bank0_reg##n##; \ | ||
567 | ;; \ | ||
568 | } | ||
569 | |||
570 | |||
// Out-of-line helpers that read banked registers r16-r31; targets of
// the JMP_TO_MOV_FROM_BANK0_REG trampolines in asm_mov_from_reg.
571 | MOV_FROM_BANK0_REG(16) | ||
572 | MOV_FROM_BANK0_REG(17) | ||
573 | MOV_FROM_BANK0_REG(18) | ||
574 | MOV_FROM_BANK0_REG(19) | ||
575 | MOV_FROM_BANK0_REG(20) | ||
576 | MOV_FROM_BANK0_REG(21) | ||
577 | MOV_FROM_BANK0_REG(22) | ||
578 | MOV_FROM_BANK0_REG(23) | ||
579 | MOV_FROM_BANK0_REG(24) | ||
580 | MOV_FROM_BANK0_REG(25) | ||
581 | MOV_FROM_BANK0_REG(26) | ||
582 | MOV_FROM_BANK0_REG(27) | ||
583 | MOV_FROM_BANK0_REG(28) | ||
584 | MOV_FROM_BANK0_REG(29) | ||
585 | MOV_FROM_BANK0_REG(30) | ||
586 | MOV_FROM_BANK0_REG(31) | ||
587 | |||
588 | |||
589 | // mov from reg table | ||
// Jump table: entry k reads GR k into r19 and branches to the
// continuation in r30.  Each entry is exactly one 16-byte bundle, so
// callers index it with "shladd idx,4,base".  Entries 16-31 trampoline
// to out-of-line bank-switch helpers.
590 | ENTRY(asm_mov_from_reg) | ||
591 | MOV_FROM_REG(0) | ||
592 | MOV_FROM_REG(1) | ||
593 | MOV_FROM_REG(2) | ||
594 | MOV_FROM_REG(3) | ||
595 | MOV_FROM_REG(4) | ||
596 | MOV_FROM_REG(5) | ||
597 | MOV_FROM_REG(6) | ||
598 | MOV_FROM_REG(7) | ||
599 | MOV_FROM_REG(8) | ||
600 | MOV_FROM_REG(9) | ||
601 | MOV_FROM_REG(10) | ||
602 | MOV_FROM_REG(11) | ||
603 | MOV_FROM_REG(12) | ||
604 | MOV_FROM_REG(13) | ||
605 | MOV_FROM_REG(14) | ||
606 | MOV_FROM_REG(15) | ||
607 | JMP_TO_MOV_FROM_BANK0_REG(16) | ||
608 | JMP_TO_MOV_FROM_BANK0_REG(17) | ||
609 | JMP_TO_MOV_FROM_BANK0_REG(18) | ||
610 | JMP_TO_MOV_FROM_BANK0_REG(19) | ||
611 | JMP_TO_MOV_FROM_BANK0_REG(20) | ||
612 | JMP_TO_MOV_FROM_BANK0_REG(21) | ||
613 | JMP_TO_MOV_FROM_BANK0_REG(22) | ||
614 | JMP_TO_MOV_FROM_BANK0_REG(23) | ||
615 | JMP_TO_MOV_FROM_BANK0_REG(24) | ||
616 | JMP_TO_MOV_FROM_BANK0_REG(25) | ||
617 | JMP_TO_MOV_FROM_BANK0_REG(26) | ||
618 | JMP_TO_MOV_FROM_BANK0_REG(27) | ||
619 | JMP_TO_MOV_FROM_BANK0_REG(28) | ||
620 | JMP_TO_MOV_FROM_BANK0_REG(29) | ||
621 | JMP_TO_MOV_FROM_BANK0_REG(30) | ||
622 | JMP_TO_MOV_FROM_BANK0_REG(31) | ||
623 | MOV_FROM_REG(32) | ||
624 | MOV_FROM_REG(33) | ||
625 | MOV_FROM_REG(34) | ||
626 | MOV_FROM_REG(35) | ||
627 | MOV_FROM_REG(36) | ||
628 | MOV_FROM_REG(37) | ||
629 | MOV_FROM_REG(38) | ||
630 | MOV_FROM_REG(39) | ||
631 | MOV_FROM_REG(40) | ||
632 | MOV_FROM_REG(41) | ||
633 | MOV_FROM_REG(42) | ||
634 | MOV_FROM_REG(43) | ||
635 | MOV_FROM_REG(44) | ||
636 | MOV_FROM_REG(45) | ||
637 | MOV_FROM_REG(46) | ||
638 | MOV_FROM_REG(47) | ||
639 | MOV_FROM_REG(48) | ||
640 | MOV_FROM_REG(49) | ||
641 | MOV_FROM_REG(50) | ||
642 | MOV_FROM_REG(51) | ||
643 | MOV_FROM_REG(52) | ||
644 | MOV_FROM_REG(53) | ||
645 | MOV_FROM_REG(54) | ||
646 | MOV_FROM_REG(55) | ||
647 | MOV_FROM_REG(56) | ||
648 | MOV_FROM_REG(57) | ||
649 | MOV_FROM_REG(58) | ||
650 | MOV_FROM_REG(59) | ||
651 | MOV_FROM_REG(60) | ||
652 | MOV_FROM_REG(61) | ||
653 | MOV_FROM_REG(62) | ||
654 | MOV_FROM_REG(63) | ||
655 | MOV_FROM_REG(64) | ||
656 | MOV_FROM_REG(65) | ||
657 | MOV_FROM_REG(66) | ||
658 | MOV_FROM_REG(67) | ||
659 | MOV_FROM_REG(68) | ||
660 | MOV_FROM_REG(69) | ||
661 | MOV_FROM_REG(70) | ||
662 | MOV_FROM_REG(71) | ||
663 | MOV_FROM_REG(72) | ||
664 | MOV_FROM_REG(73) | ||
665 | MOV_FROM_REG(74) | ||
666 | MOV_FROM_REG(75) | ||
667 | MOV_FROM_REG(76) | ||
668 | MOV_FROM_REG(77) | ||
669 | MOV_FROM_REG(78) | ||
670 | MOV_FROM_REG(79) | ||
671 | MOV_FROM_REG(80) | ||
672 | MOV_FROM_REG(81) | ||
673 | MOV_FROM_REG(82) | ||
674 | MOV_FROM_REG(83) | ||
675 | MOV_FROM_REG(84) | ||
676 | MOV_FROM_REG(85) | ||
677 | MOV_FROM_REG(86) | ||
678 | MOV_FROM_REG(87) | ||
679 | MOV_FROM_REG(88) | ||
680 | MOV_FROM_REG(89) | ||
681 | MOV_FROM_REG(90) | ||
682 | MOV_FROM_REG(91) | ||
683 | MOV_FROM_REG(92) | ||
684 | MOV_FROM_REG(93) | ||
685 | MOV_FROM_REG(94) | ||
686 | MOV_FROM_REG(95) | ||
687 | MOV_FROM_REG(96) | ||
688 | MOV_FROM_REG(97) | ||
689 | MOV_FROM_REG(98) | ||
690 | MOV_FROM_REG(99) | ||
691 | MOV_FROM_REG(100) | ||
692 | MOV_FROM_REG(101) | ||
693 | MOV_FROM_REG(102) | ||
694 | MOV_FROM_REG(103) | ||
695 | MOV_FROM_REG(104) | ||
696 | MOV_FROM_REG(105) | ||
697 | MOV_FROM_REG(106) | ||
698 | MOV_FROM_REG(107) | ||
699 | MOV_FROM_REG(108) | ||
700 | MOV_FROM_REG(109) | ||
701 | MOV_FROM_REG(110) | ||
702 | MOV_FROM_REG(111) | ||
703 | MOV_FROM_REG(112) | ||
704 | MOV_FROM_REG(113) | ||
705 | MOV_FROM_REG(114) | ||
706 | MOV_FROM_REG(115) | ||
707 | MOV_FROM_REG(116) | ||
708 | MOV_FROM_REG(117) | ||
709 | MOV_FROM_REG(118) | ||
710 | MOV_FROM_REG(119) | ||
711 | MOV_FROM_REG(120) | ||
712 | MOV_FROM_REG(121) | ||
713 | MOV_FROM_REG(122) | ||
714 | MOV_FROM_REG(123) | ||
715 | MOV_FROM_REG(124) | ||
716 | MOV_FROM_REG(125) | ||
717 | MOV_FROM_REG(126) | ||
718 | MOV_FROM_REG(127) | ||
719 | END(asm_mov_from_reg) | ||
720 | |||
721 | |||
722 | /* must be in bank 0 | ||
723 | * parameter: | ||
724 | * r31: pr | ||
725 | * r24: b0 | ||
726 | */ | ||
// Common exit path of the accelerated handlers: restore gp, advance
// cr.iip/cr.ipsr.ri past the emulated instruction, then resume the
// guest through the PAL VPS service — RESUME_NORMAL when vpsr.ic is
// set, RESUME_HANDLER (with the first VPD word in r26) when it is not.
727 | ENTRY(kvm_resume_to_guest) | ||
728 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | ||
729 | ;; | ||
730 | ld8 r1 =[r16] | ||
731 | adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21 | ||
732 | ;; | ||
733 | mov r16=cr.ipsr | ||
734 | ;; | ||
735 | ld8 r20 = [r20] | ||
736 | adds r19=VMM_VPD_BASE_OFFSET,r21 | ||
737 | ;; | ||
738 | ld8 r25=[r19] | ||
// bump the slot number (psr.ri); on slot-2 wrap, step iip to the next
// bundle and reset the slot to 0
739 | extr.u r17=r16,IA64_PSR_RI_BIT,2 | ||
740 | tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 | ||
741 | ;; | ||
742 | (p6) mov r18=cr.iip | ||
743 | (p6) mov r17=r0 | ||
744 | ;; | ||
745 | (p6) add r18=0x10,r18 | ||
746 | (p7) add r17=1,r17 | ||
747 | ;; | ||
748 | (p6) mov cr.iip=r18 | ||
749 | dep r16=r17,r16,IA64_PSR_RI_BIT,2 | ||
750 | ;; | ||
751 | mov cr.ipsr=r16 | ||
752 | adds r19= VPD_VPSR_START_OFFSET,r25 | ||
753 | add r28=PAL_VPS_RESUME_NORMAL,r20 | ||
754 | add r29=PAL_VPS_RESUME_HANDLER,r20 | ||
755 | ;; | ||
756 | ld8 r19=[r19] | ||
757 | mov b0=r29 | ||
758 | cmp.ne p6,p7 = r0,r0 | ||
759 | ;; | ||
760 | tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic | ||
761 | ;; | ||
762 | (p6) ld8 r26=[r25] | ||
763 | (p7) mov b0=r28 | ||
// restore the caller's predicates and enter the PAL VPS service
764 | mov pr=r31,-2 | ||
765 | br.sptk.many b0 // call pal service | ||
766 | ;; | ||
767 | END(kvm_resume_to_guest) | ||
768 | |||
769 | |||
// Out-of-line helpers that write banked registers r16-r31; targets of
// the JMP_TO_MOV_TO_BANK0_REG trampolines in asm_mov_to_reg.
770 | MOV_TO_BANK0_REG(16) | ||
771 | MOV_TO_BANK0_REG(17) | ||
772 | MOV_TO_BANK0_REG(18) | ||
773 | MOV_TO_BANK0_REG(19) | ||
774 | MOV_TO_BANK0_REG(20) | ||
775 | MOV_TO_BANK0_REG(21) | ||
776 | MOV_TO_BANK0_REG(22) | ||
777 | MOV_TO_BANK0_REG(23) | ||
778 | MOV_TO_BANK0_REG(24) | ||
779 | MOV_TO_BANK0_REG(25) | ||
780 | MOV_TO_BANK0_REG(26) | ||
781 | MOV_TO_BANK0_REG(27) | ||
782 | MOV_TO_BANK0_REG(28) | ||
783 | MOV_TO_BANK0_REG(29) | ||
784 | MOV_TO_BANK0_REG(30) | ||
785 | MOV_TO_BANK0_REG(31) | ||
786 | |||
787 | |||
788 | // mov to reg table | ||
// Jump table: entry k writes r19 into GR k and branches to the
// continuation in r30.  One 16-byte bundle per entry (indexed with
// "shladd idx,4,base").  Entry 0 is a nop (r0 is read-only); entries
// 16-31 trampoline to out-of-line bank-switch helpers.
789 | ENTRY(asm_mov_to_reg) | ||
790 | MOV_TO_REG0 | ||
791 | MOV_TO_REG(1) | ||
792 | MOV_TO_REG(2) | ||
793 | MOV_TO_REG(3) | ||
794 | MOV_TO_REG(4) | ||
795 | MOV_TO_REG(5) | ||
796 | MOV_TO_REG(6) | ||
797 | MOV_TO_REG(7) | ||
798 | MOV_TO_REG(8) | ||
799 | MOV_TO_REG(9) | ||
800 | MOV_TO_REG(10) | ||
801 | MOV_TO_REG(11) | ||
802 | MOV_TO_REG(12) | ||
803 | MOV_TO_REG(13) | ||
804 | MOV_TO_REG(14) | ||
805 | MOV_TO_REG(15) | ||
806 | JMP_TO_MOV_TO_BANK0_REG(16) | ||
807 | JMP_TO_MOV_TO_BANK0_REG(17) | ||
808 | JMP_TO_MOV_TO_BANK0_REG(18) | ||
809 | JMP_TO_MOV_TO_BANK0_REG(19) | ||
810 | JMP_TO_MOV_TO_BANK0_REG(20) | ||
811 | JMP_TO_MOV_TO_BANK0_REG(21) | ||
812 | JMP_TO_MOV_TO_BANK0_REG(22) | ||
813 | JMP_TO_MOV_TO_BANK0_REG(23) | ||
814 | JMP_TO_MOV_TO_BANK0_REG(24) | ||
815 | JMP_TO_MOV_TO_BANK0_REG(25) | ||
816 | JMP_TO_MOV_TO_BANK0_REG(26) | ||
817 | JMP_TO_MOV_TO_BANK0_REG(27) | ||
818 | JMP_TO_MOV_TO_BANK0_REG(28) | ||
819 | JMP_TO_MOV_TO_BANK0_REG(29) | ||
820 | JMP_TO_MOV_TO_BANK0_REG(30) | ||
821 | JMP_TO_MOV_TO_BANK0_REG(31) | ||
822 | MOV_TO_REG(32) | ||
823 | MOV_TO_REG(33) | ||
824 | MOV_TO_REG(34) | ||
825 | MOV_TO_REG(35) | ||
826 | MOV_TO_REG(36) | ||
827 | MOV_TO_REG(37) | ||
828 | MOV_TO_REG(38) | ||
829 | MOV_TO_REG(39) | ||
830 | MOV_TO_REG(40) | ||
831 | MOV_TO_REG(41) | ||
832 | MOV_TO_REG(42) | ||
833 | MOV_TO_REG(43) | ||
834 | MOV_TO_REG(44) | ||
835 | MOV_TO_REG(45) | ||
836 | MOV_TO_REG(46) | ||
837 | MOV_TO_REG(47) | ||
838 | MOV_TO_REG(48) | ||
839 | MOV_TO_REG(49) | ||
840 | MOV_TO_REG(50) | ||
841 | MOV_TO_REG(51) | ||
842 | MOV_TO_REG(52) | ||
843 | MOV_TO_REG(53) | ||
844 | MOV_TO_REG(54) | ||
845 | MOV_TO_REG(55) | ||
846 | MOV_TO_REG(56) | ||
847 | MOV_TO_REG(57) | ||
848 | MOV_TO_REG(58) | ||
849 | MOV_TO_REG(59) | ||
850 | MOV_TO_REG(60) | ||
851 | MOV_TO_REG(61) | ||
852 | MOV_TO_REG(62) | ||
853 | MOV_TO_REG(63) | ||
854 | MOV_TO_REG(64) | ||
855 | MOV_TO_REG(65) | ||
856 | MOV_TO_REG(66) | ||
857 | MOV_TO_REG(67) | ||
858 | MOV_TO_REG(68) | ||
859 | MOV_TO_REG(69) | ||
860 | MOV_TO_REG(70) | ||
861 | MOV_TO_REG(71) | ||
862 | MOV_TO_REG(72) | ||
863 | MOV_TO_REG(73) | ||
864 | MOV_TO_REG(74) | ||
865 | MOV_TO_REG(75) | ||
866 | MOV_TO_REG(76) | ||
867 | MOV_TO_REG(77) | ||
868 | MOV_TO_REG(78) | ||
869 | MOV_TO_REG(79) | ||
870 | MOV_TO_REG(80) | ||
871 | MOV_TO_REG(81) | ||
872 | MOV_TO_REG(82) | ||
873 | MOV_TO_REG(83) | ||
874 | MOV_TO_REG(84) | ||
875 | MOV_TO_REG(85) | ||
876 | MOV_TO_REG(86) | ||
877 | MOV_TO_REG(87) | ||
878 | MOV_TO_REG(88) | ||
879 | MOV_TO_REG(89) | ||
880 | MOV_TO_REG(90) | ||
881 | MOV_TO_REG(91) | ||
882 | MOV_TO_REG(92) | ||
883 | MOV_TO_REG(93) | ||
884 | MOV_TO_REG(94) | ||
885 | MOV_TO_REG(95) | ||
886 | MOV_TO_REG(96) | ||
887 | MOV_TO_REG(97) | ||
888 | MOV_TO_REG(98) | ||
889 | MOV_TO_REG(99) | ||
890 | MOV_TO_REG(100) | ||
891 | MOV_TO_REG(101) | ||
892 | MOV_TO_REG(102) | ||
893 | MOV_TO_REG(103) | ||
894 | MOV_TO_REG(104) | ||
895 | MOV_TO_REG(105) | ||
896 | MOV_TO_REG(106) | ||
897 | MOV_TO_REG(107) | ||
898 | MOV_TO_REG(108) | ||
899 | MOV_TO_REG(109) | ||
900 | MOV_TO_REG(110) | ||
901 | MOV_TO_REG(111) | ||
902 | MOV_TO_REG(112) | ||
903 | MOV_TO_REG(113) | ||
904 | MOV_TO_REG(114) | ||
905 | MOV_TO_REG(115) | ||
906 | MOV_TO_REG(116) | ||
907 | MOV_TO_REG(117) | ||
908 | MOV_TO_REG(118) | ||
909 | MOV_TO_REG(119) | ||
910 | MOV_TO_REG(120) | ||
911 | MOV_TO_REG(121) | ||
912 | MOV_TO_REG(122) | ||
913 | MOV_TO_REG(123) | ||
914 | MOV_TO_REG(124) | ||
915 | MOV_TO_REG(125) | ||
916 | MOV_TO_REG(126) | ||
917 | MOV_TO_REG(127) | ||
918 | END(asm_mov_to_reg) | ||
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c new file mode 100644 index 000000000000..5a33f7ed29a0 --- /dev/null +++ b/arch/ia64/kvm/process.c | |||
@@ -0,0 +1,970 @@ | |||
1 | /* | ||
2 | * process.c: handle interruption inject for guests. | ||
3 | * Copyright (c) 2005, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
17 | * | ||
18 | * Shaofan Li (Susue Li) <susie.li@intel.com> | ||
19 | * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> | ||
20 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
21 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
22 | */ | ||
23 | #include "vcpu.h" | ||
24 | |||
25 | #include <asm/pal.h> | ||
26 | #include <asm/sal.h> | ||
27 | #include <asm/fpswa.h> | ||
28 | #include <asm/kregs.h> | ||
29 | #include <asm/tlb.h> | ||
30 | |||
31 | fpswa_interface_t *vmm_fpswa_interface; | ||
32 | |||
33 | #define IA64_VHPT_TRANS_VECTOR 0x0000 | ||
34 | #define IA64_INST_TLB_VECTOR 0x0400 | ||
35 | #define IA64_DATA_TLB_VECTOR 0x0800 | ||
36 | #define IA64_ALT_INST_TLB_VECTOR 0x0c00 | ||
37 | #define IA64_ALT_DATA_TLB_VECTOR 0x1000 | ||
38 | #define IA64_DATA_NESTED_TLB_VECTOR 0x1400 | ||
39 | #define IA64_INST_KEY_MISS_VECTOR 0x1800 | ||
40 | #define IA64_DATA_KEY_MISS_VECTOR 0x1c00 | ||
41 | #define IA64_DIRTY_BIT_VECTOR 0x2000 | ||
42 | #define IA64_INST_ACCESS_BIT_VECTOR 0x2400 | ||
43 | #define IA64_DATA_ACCESS_BIT_VECTOR 0x2800 | ||
44 | #define IA64_BREAK_VECTOR 0x2c00 | ||
45 | #define IA64_EXTINT_VECTOR 0x3000 | ||
46 | #define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000 | ||
47 | #define IA64_KEY_PERMISSION_VECTOR 0x5100 | ||
48 | #define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200 | ||
49 | #define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300 | ||
50 | #define IA64_GENEX_VECTOR 0x5400 | ||
51 | #define IA64_DISABLED_FPREG_VECTOR 0x5500 | ||
52 | #define IA64_NAT_CONSUMPTION_VECTOR 0x5600 | ||
53 | #define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */ | ||
54 | #define IA64_DEBUG_VECTOR 0x5900 | ||
55 | #define IA64_UNALIGNED_REF_VECTOR 0x5a00 | ||
56 | #define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00 | ||
57 | #define IA64_FP_FAULT_VECTOR 0x5c00 | ||
58 | #define IA64_FP_TRAP_VECTOR 0x5d00 | ||
59 | #define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00 | ||
60 | #define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00 | ||
61 | #define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000 | ||
62 | |||
63 | /* SDM vol2 5.5 - IVA based interruption handling */ | ||
64 | #define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\ | ||
65 | IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \ | ||
66 | IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT) | ||
67 | |||
68 | #define DOMN_PAL_REQUEST 0x110000 | ||
69 | #define DOMN_SAL_REQUEST 0x110001 | ||
70 | |||
71 | static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800, | ||
72 | 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00, | ||
73 | 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, | ||
74 | 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00, | ||
75 | 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600, | ||
76 | 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00, | ||
77 | 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800, | ||
78 | 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00 | ||
79 | }; | ||
80 | |||
81 | static void collect_interruption(struct kvm_vcpu *vcpu) | ||
82 | { | ||
83 | u64 ipsr; | ||
84 | u64 vdcr; | ||
85 | u64 vifs; | ||
86 | unsigned long vpsr; | ||
87 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
88 | |||
89 | vpsr = vcpu_get_psr(vcpu); | ||
90 | vcpu_bsw0(vcpu); | ||
91 | if (vpsr & IA64_PSR_IC) { | ||
92 | |||
93 | /* Sync mpsr id/da/dd/ss/ed bits to vipsr | ||
94 | * since after guest do rfi, we still want these bits on in | ||
95 | * mpsr | ||
96 | */ | ||
97 | |||
98 | ipsr = regs->cr_ipsr; | ||
99 | vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA | ||
100 | | IA64_PSR_DD | IA64_PSR_SS | ||
101 | | IA64_PSR_ED)); | ||
102 | vcpu_set_ipsr(vcpu, vpsr); | ||
103 | |||
104 | /* Currently, for trap, we do not advance IIP to next | ||
105 | * instruction. That's because we assume caller already | ||
106 | * set up IIP correctly | ||
107 | */ | ||
108 | |||
109 | vcpu_set_iip(vcpu , regs->cr_iip); | ||
110 | |||
111 | /* set vifs.v to zero */ | ||
112 | vifs = VCPU(vcpu, ifs); | ||
113 | vifs &= ~IA64_IFS_V; | ||
114 | vcpu_set_ifs(vcpu, vifs); | ||
115 | |||
116 | vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa)); | ||
117 | } | ||
118 | |||
119 | vdcr = VCPU(vcpu, dcr); | ||
120 | |||
121 | /* Set guest psr | ||
122 | * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged | ||
123 | * be: set to the value of dcr.be | ||
124 | * pp: set to the value of dcr.pp | ||
125 | */ | ||
126 | vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION; | ||
127 | vpsr |= (vdcr & IA64_DCR_BE); | ||
128 | |||
129 | /* VDCR pp bit position is different from VPSR pp bit */ | ||
130 | if (vdcr & IA64_DCR_PP) { | ||
131 | vpsr |= IA64_PSR_PP; | ||
132 | } else { | ||
133 | vpsr &= ~IA64_PSR_PP;; | ||
134 | } | ||
135 | |||
136 | vcpu_set_psr(vcpu, vpsr); | ||
137 | |||
138 | } | ||
139 | |||
140 | void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec) | ||
141 | { | ||
142 | u64 viva; | ||
143 | struct kvm_pt_regs *regs; | ||
144 | union ia64_isr pt_isr; | ||
145 | |||
146 | regs = vcpu_regs(vcpu); | ||
147 | |||
148 | /* clear cr.isr.ir (incomplete register frame)*/ | ||
149 | pt_isr.val = VMX(vcpu, cr_isr); | ||
150 | pt_isr.ir = 0; | ||
151 | VMX(vcpu, cr_isr) = pt_isr.val; | ||
152 | |||
153 | collect_interruption(vcpu); | ||
154 | |||
155 | viva = vcpu_get_iva(vcpu); | ||
156 | regs->cr_iip = viva + vec; | ||
157 | } | ||
158 | |||
159 | static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa) | ||
160 | { | ||
161 | union ia64_rr rr, rr1; | ||
162 | |||
163 | rr.val = vcpu_get_rr(vcpu, ifa); | ||
164 | rr1.val = 0; | ||
165 | rr1.ps = rr.ps; | ||
166 | rr1.rid = rr.rid; | ||
167 | return (rr1.val); | ||
168 | } | ||
169 | |||
170 | |||
171 | /* | ||
172 | * Set vIFA & vITIR & vIHA, when vPSR.ic =1 | ||
173 | * Parameter: | ||
174 | * set_ifa: if true, set vIFA | ||
175 | * set_itir: if true, set vITIR | ||
176 | * set_iha: if true, set vIHA | ||
177 | */ | ||
178 | void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr, | ||
179 | int set_ifa, int set_itir, int set_iha) | ||
180 | { | ||
181 | long vpsr; | ||
182 | u64 value; | ||
183 | |||
184 | vpsr = VCPU(vcpu, vpsr); | ||
185 | /* Vol2, Table 8-1 */ | ||
186 | if (vpsr & IA64_PSR_IC) { | ||
187 | if (set_ifa) | ||
188 | vcpu_set_ifa(vcpu, vadr); | ||
189 | if (set_itir) { | ||
190 | value = vcpu_get_itir_on_fault(vcpu, vadr); | ||
191 | vcpu_set_itir(vcpu, value); | ||
192 | } | ||
193 | |||
194 | if (set_iha) { | ||
195 | value = vcpu_thash(vcpu, vadr); | ||
196 | vcpu_set_iha(vcpu, value); | ||
197 | } | ||
198 | } | ||
199 | } | ||
200 | |||
/*
 * Data TLB Fault
 * @ Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, set IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
}

/*
 * Instruction TLB Fault
 * @ Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, set IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
}

/*
 * Data Nested TLB Fault
 * @ Data Nested TLB Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 * No fault CRs are set here: the nested vector is taken with
 * interruption collection off.
 */
void nested_dtlb(struct kvm_vcpu *vcpu)
{
	inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
}

/*
 * Alternate Data TLB Fault
 * @ Alternate Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, set IFA, ITIR (no IHA for the alternate vectors) */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
}

/*
 * Alternate Instruction TLB Fault
 * @ Alternate Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 * (Header previously mislabelled this as "Data TLB Fault".)
 */
void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, set IFA, ITIR (no IHA for the alternate vectors) */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
}
259 | |||
/* Deal with:
 * VHPT Translation Vector
 * Common helper for the instruction/data VHPT wrappers below.
 */
static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, set IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
}

/*
 * VHPT Instruction Fault
 * @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}

/*
 * VHPT Data Fault
 * @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}
292 | |||
293 | |||
294 | |||
/*
 * Deal with:
 * General Exception vector
 * Shared helper: all of the specific general-exception wrappers
 * below funnel here.
 */
void _general_exception(struct kvm_vcpu *vcpu)
{
	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
}

/*
 * Illegal Operation Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_op(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Illegal Dependency Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_dep(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Reserved Register/Field Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rsv_reg_field(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Privileged Operation Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_op(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Unimplemented Data Address Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void unimpl_daddr(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Privileged Register Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_reg(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}
364 | |||
/* Deal with
 * Nat consumption vector
 * Parameter:
 *	vadr: faulting address; only meaningful for DATA/INSTRUCTION
 *	      misses (the REGISTER case passes 0 and sets no IFA)
 *	t:    kind of NaT consumption
 */
static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
		enum tlb_miss_type t)
{
	/* If vPSR.ic && t == DATA/INST, set IFA only */
	if (t == DATA || t == INSTRUCTION) {
		/* IFA */
		set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
	}

	inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
}

/*
 * Instruction Nat Page Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, INSTRUCTION);
}

/*
 * Register Nat Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rnat_consumption(struct kvm_vcpu *vcpu)
{
	_nat_consumption_fault(vcpu, 0, REGISTER);
}

/*
 * Data Nat Page Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, DATA);
}
411 | |||
/* Deal with
 * Page not present vector
 * Common helper for the data/instruction wrappers below.
 */
static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, set IFA, ITIR (no IHA) */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}

/* Data page-not-present fault on "vadr". */
void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}

/* Instruction page-not-present fault on "vadr". */
void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}
433 | |||
434 | |||
/* Deal with
 * Data access rights vector
 * Raised when the guest lacks permission for a data access at "vadr".
 */
void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, set IFA, ITIR (no IHA) */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
}
444 | |||
/*
 * Emulate a floating-point operation through the host FPSWA
 * (floating-point software assist) EFI driver on behalf of the guest.
 * fp_fault: nonzero for an FP fault, zero for an FP trap.
 * Returns the driver's fpswa_ret_t, or {-1, 0, 0, 0} when no FPSWA
 * interface is available.
 */
fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
		unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
		unsigned long *ifs, struct kvm_pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;
	struct kvm_vcpu *vcpu = current_vcpu;

	/* remember current region 7 mapping so it can be restored below */
	uint64_t old_rr7 = ia64_get_rr(7UL<<61);

	if (!vmm_fpswa_interface)
		return (fpswa_ret_t) {-1, 0, 0, 0};

	/*
	 * Just let fpswa driver to use hardware fp registers.
	 * No fp register is valid in memory.
	 */
	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * unsigned long (*EFI_FPSWA) (
	 *	unsigned long	trap_type,
	 *	void		*Bundle,
	 *	unsigned long	*pipsr,
	 *	unsigned long	*pfsr,
	 *	unsigned long	*pisr,
	 *	unsigned long	*ppreds,
	 *	unsigned long	*pifs,
	 *	void		*fp_state);
	 */
	/* Call host fpswa interface directly to virtualize the guest's
	 * fpswa request.  Region 7 is temporarily switched to the
	 * host's mapping so the FPSWA driver is addressable; the old
	 * rr7 is restored (with data serialization) afterwards. */
	ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
	ia64_srlz_d();

	ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
			ipsr, fpsr, isr, pr, ifs, &fp_state);
	ia64_set_rr(7UL << 61, old_rr7);
	ia64_srlz_d();
	return ret;
}
487 | |||
488 | /* | ||
489 | * Handle floating-point assist faults and traps for domain. | ||
490 | */ | ||
491 | unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs, | ||
492 | unsigned long isr) | ||
493 | { | ||
494 | struct kvm_vcpu *v = current_vcpu; | ||
495 | IA64_BUNDLE bundle; | ||
496 | unsigned long fault_ip; | ||
497 | fpswa_ret_t ret; | ||
498 | |||
499 | fault_ip = regs->cr_iip; | ||
500 | /* | ||
501 | * When the FP trap occurs, the trapping instruction is completed. | ||
502 | * If ipsr.ri == 0, there is the trapping instruction in previous | ||
503 | * bundle. | ||
504 | */ | ||
505 | if (!fp_fault && (ia64_psr(regs)->ri == 0)) | ||
506 | fault_ip -= 16; | ||
507 | |||
508 | if (fetch_code(v, fault_ip, &bundle)) | ||
509 | return -EAGAIN; | ||
510 | |||
511 | if (!bundle.i64[0] && !bundle.i64[1]) | ||
512 | return -EACCES; | ||
513 | |||
514 | ret = vmm_fp_emulate(fp_fault, &bundle, ®s->cr_ipsr, ®s->ar_fpsr, | ||
515 | &isr, ®s->pr, ®s->cr_ifs, regs); | ||
516 | return ret.status; | ||
517 | } | ||
518 | |||
/*
 * Reflect an interruption into the guest.
 * @ifa/@isr/@iim: fault information exposed through the virtual CRs.
 * @vec: index into vec2off[] (NOT the raw vector offset).
 * FP faults/traps (vec 32/33) are first offered to the FPSWA
 * emulator; only unresolved ones are reflected to the guest.
 */
void reflect_interruption(u64 ifa, u64 isr, u64 iim,
		u64 vec, struct kvm_pt_regs *regs)
{
	u64 vector;
	int status ;
	struct kvm_vcpu *vcpu = current_vcpu;
	u64 vpsr = VCPU(vcpu, vpsr);

	vector = vec2off[vec];

	/* With interruption collection off, only the nested data TLB
	 * vector may legitimately be taken; anything else is fatal. */
	if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
		panic_vm(vcpu);
		return;
	}

	switch (vec) {
	case 32:	/*IA64_FP_FAULT_VECTOR*/
		/* emulation succeeded: skip the faulting instruction */
		status = vmm_handle_fpu_swa(1, regs, isr);
		if (!status) {
			vcpu_increment_iip(vcpu);
			return;
		} else if (-EAGAIN == status)
			return;
		break;
	case 33:	/*IA64_FP_TRAP_VECTOR*/
		status = vmm_handle_fpu_swa(0, regs, isr);
		if (!status)
			return ;
		else if (-EAGAIN == status) {
			/* bundle fetch failed: back up to retry the
			 * trapping instruction */
			vcpu_decrement_iip(vcpu);
			return ;
		}
		break;
	}

	VCPU(vcpu, isr) = isr;
	VCPU(vcpu, iipa) = regs->cr_iip;
	/* break/speculation vectors report IIM; all others report
	 * IFA/ITIR/IHA */
	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
		VCPU(vcpu, iim) = iim;
	else
		set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);

	inject_guest_interruption(vcpu, vector);
}
563 | |||
564 | static void set_pal_call_data(struct kvm_vcpu *vcpu) | ||
565 | { | ||
566 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
567 | |||
568 | /*FIXME:For static and stacked convention, firmware | ||
569 | * has put the parameters in gr28-gr31 before | ||
570 | * break to vmm !!*/ | ||
571 | |||
572 | p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28); | ||
573 | p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29); | ||
574 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | ||
575 | p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); | ||
576 | p->exit_reason = EXIT_REASON_PAL_CALL; | ||
577 | } | ||
578 | |||
579 | static void set_pal_call_result(struct kvm_vcpu *vcpu) | ||
580 | { | ||
581 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
582 | |||
583 | if (p->exit_reason == EXIT_REASON_PAL_CALL) { | ||
584 | vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0); | ||
585 | vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0); | ||
586 | vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); | ||
587 | vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); | ||
588 | } else | ||
589 | panic_vm(vcpu); | ||
590 | } | ||
591 | |||
592 | static void set_sal_call_data(struct kvm_vcpu *vcpu) | ||
593 | { | ||
594 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
595 | |||
596 | p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32); | ||
597 | p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33); | ||
598 | p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34); | ||
599 | p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35); | ||
600 | p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36); | ||
601 | p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37); | ||
602 | p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38); | ||
603 | p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39); | ||
604 | p->exit_reason = EXIT_REASON_SAL_CALL; | ||
605 | } | ||
606 | |||
607 | static void set_sal_call_result(struct kvm_vcpu *vcpu) | ||
608 | { | ||
609 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
610 | |||
611 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | ||
612 | vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0); | ||
613 | vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0); | ||
614 | vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); | ||
615 | vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); | ||
616 | } else | ||
617 | panic_vm(vcpu); | ||
618 | } | ||
619 | |||
620 | void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, | ||
621 | unsigned long isr, unsigned long iim) | ||
622 | { | ||
623 | struct kvm_vcpu *v = current_vcpu; | ||
624 | |||
625 | if (ia64_psr(regs)->cpl == 0) { | ||
626 | /* Allow hypercalls only when cpl = 0. */ | ||
627 | if (iim == DOMN_PAL_REQUEST) { | ||
628 | set_pal_call_data(v); | ||
629 | vmm_transition(v); | ||
630 | set_pal_call_result(v); | ||
631 | vcpu_increment_iip(v); | ||
632 | return; | ||
633 | } else if (iim == DOMN_SAL_REQUEST) { | ||
634 | set_sal_call_data(v); | ||
635 | vmm_transition(v); | ||
636 | set_sal_call_result(v); | ||
637 | vcpu_increment_iip(v); | ||
638 | return; | ||
639 | } | ||
640 | } | ||
641 | reflect_interruption(ifa, isr, iim, 11, regs); | ||
642 | } | ||
643 | |||
/*
 * Evaluate the highest pending external interrupt against the current
 * masking state and either reflect it into the guest (vector index
 * 12) or record it in VHPI for later delivery.
 */
void check_pending_irq(struct kvm_vcpu *vcpu)
{
	int mask, h_pending, h_inservice;
	u64 isr;
	unsigned long vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	h_pending = highest_pending_irq(vcpu);
	if (h_pending == NULL_VECTOR) {
		/* nothing pending: clear any stale VHPI */
		update_vhpi(vcpu, NULL_VECTOR);
		return;
	}
	h_inservice = highest_inservice_irq(vcpu);

	vpsr = VCPU(vcpu, vpsr);
	mask = irq_masked(vcpu, h_pending, h_inservice);
	if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
		/* deliverable now */
		isr = vpsr & IA64_PSR_RI;
		update_vhpi(vcpu, h_pending);
		reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
	} else if (mask == IRQ_MASKED_BY_INSVC) {
		/* masked by an in-service interrupt: drop stale VHPI */
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
	} else {
		/* masked by vpsr.i or vtpr: remember it in VHPI */
		update_vhpi(vcpu, h_pending);
	}
}
672 | |||
673 | static void generate_exirq(struct kvm_vcpu *vcpu) | ||
674 | { | ||
675 | unsigned vpsr; | ||
676 | uint64_t isr; | ||
677 | |||
678 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
679 | |||
680 | vpsr = VCPU(vcpu, vpsr); | ||
681 | isr = vpsr & IA64_PSR_RI; | ||
682 | if (!(vpsr & IA64_PSR_IC)) | ||
683 | panic_vm(vcpu); | ||
684 | reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ | ||
685 | } | ||
686 | |||
687 | void vhpi_detection(struct kvm_vcpu *vcpu) | ||
688 | { | ||
689 | uint64_t threshold, vhpi; | ||
690 | union ia64_tpr vtpr; | ||
691 | struct ia64_psr vpsr; | ||
692 | |||
693 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
694 | vtpr.val = VCPU(vcpu, tpr); | ||
695 | |||
696 | threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic; | ||
697 | vhpi = VCPU(vcpu, vhpi); | ||
698 | if (vhpi > threshold) { | ||
699 | /* interrupt actived*/ | ||
700 | generate_exirq(vcpu); | ||
701 | } | ||
702 | } | ||
703 | |||
704 | |||
/*
 * Final checks before returning to the guest: fire the virtual
 * interval timer if it has expired, then deliver or re-evaluate any
 * pending external interrupts.
 */
void leave_hypervisor_tail(void)
{
	struct kvm_vcpu *v = current_vcpu;

	if (VMX(v, timer_check)) {
		VMX(v, timer_check) = 0;
		if (VMX(v, itc_check)) {
			/* has the virtual ITC passed the guest's ITM? */
			if (vcpu_get_itc(v) > VCPU(v, itm)) {
				/* bit 16 of ITV is the mask bit */
				if (!(VCPU(v, itv) & (1 << 16))) {
					vcpu_pend_interrupt(v, VCPU(v, itv)
							& 0xff);
					VMX(v, itc_check) = 0;
				} else {
					/* masked: remember for later */
					v->arch.timer_pending = 1;
				}
				VMX(v, last_itc) = VCPU(v, itm) + 1;
			}
		}
	}

	/* NOTE(review): presumably pairs with the writer that sets
	 * irq_new_pending before irq_check — confirm against the
	 * interrupt-injection path. */
	rmb();
	if (v->arch.irq_new_pending) {
		v->arch.irq_new_pending = 0;
		VMX(v, irq_check) = 0;
		check_pending_irq(v);
		return;
	}
	if (VMX(v, irq_check)) {
		VMX(v, irq_check) = 0;
		vhpi_detection(v);
	}
}
737 | |||
738 | |||
739 | static inline void handle_lds(struct kvm_pt_regs *regs) | ||
740 | { | ||
741 | regs->cr_ipsr |= IA64_PSR_ED; | ||
742 | } | ||
743 | |||
744 | void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type) | ||
745 | { | ||
746 | unsigned long pte; | ||
747 | union ia64_rr rr; | ||
748 | |||
749 | rr.val = ia64_get_rr(vadr); | ||
750 | pte = vadr & _PAGE_PPN_MASK; | ||
751 | pte = pte | PHY_PAGE_WB; | ||
752 | thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type); | ||
753 | return; | ||
754 | } | ||
755 | |||
/*
 * Central guest page-fault dispatcher.
 * @vadr: faulting virtual address
 * @vec:  fault type, doubles as the thash lookup type (I_TLB/D_TLB)
 * Resolution order: physical-mode identity mapping (with MMIO
 * emulation for I/O gpfns), then the VTLB, then the guest's own VHPT;
 * failures are reflected to the guest as the appropriate fault.
 */
void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs)
{
	unsigned long vpsr;
	int type;

	u64 vhpt_adr, gppa, pteval, rr, itir;
	union ia64_isr misr;
	union ia64_pta vpta;
	struct thash_data *data;
	struct kvm_vcpu *v = current_vcpu;

	vpsr = VCPU(v, vpsr);
	misr.val = VMX(v, cr_isr);

	type = vec;

	/* guest in physical mode and address within the low region:
	 * emulate I/O accesses, identity-map everything else */
	if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
		if (vec == 2) {
			if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
				emulate_io_inst(v, ((vadr << 1) >> 1), 4);
				return;
			}
		}
		physical_tlb_miss(v, vadr, type);
		return;
	}
	data = vtlb_lookup(v, vadr, type);
	if (data != 0) {
		/* VTLB hit */
		if (type == D_TLB) {
			/* compute guest physical address for I/O check */
			gppa = (vadr & ((1UL << data->ps) - 1))
				+ (data->ppn >> (data->ps - 12) << data->ps);
			if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
				/* MMIO: emulate if privilege allows */
				if (data->pl >= ((regs->cr_ipsr >>
						IA64_PSR_CPL0_BIT) & 3))
					emulate_io_inst(v, gppa, data->ma);
				else {
					vcpu_set_isr(v, misr.val);
					data_access_rights(v, vadr);
				}
				return ;
			}
		}
		thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);

	} else if (type == D_TLB) {
		/* data VTLB miss */
		if (misr.sp) {
			/* speculative load: defer instead of faulting */
			handle_lds(regs);
			return;
		}

		rr = vcpu_get_rr(v, vadr);
		itir = rr & (RR_RID_MASK | RR_PS_MASK);

		if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
			/* no VHPT walk: reflect alternate dtlb fault
			 * (or nested dtlb when collection is off) */
			if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				alt_dtlb(v, vadr);
			} else {
				nested_dtlb(v);
			}
			return ;
		}

		vpta.val = vcpu_get_pta(v);
		/* avoid recursively walking (short format) VHPT */

		vhpt_adr = vcpu_thash(v, vadr);
		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
			/* VHPT successfully read. */
			if (!(pteval & _PAGE_P)) {
				if (vpsr & IA64_PSR_IC) {
					vcpu_set_isr(v, misr.val);
					dtlb_fault(v, vadr);
				} else {
					nested_dtlb(v);
				}
			} else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
				thash_purge_and_insert(v, pteval, itir,
						vadr, D_TLB);
			} else if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				dtlb_fault(v, vadr);
			} else {
				nested_dtlb(v);
			}
		} else {
			/* Can't read VHPT. */
			if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				dvhpt_fault(v, vadr);
			} else {
				nested_dtlb(v);
			}
		}
	} else if (type == I_TLB) {
		/* instruction VTLB miss */
		if (!(vpsr & IA64_PSR_IC))
			misr.ni = 1;
		if (!vhpt_enabled(v, vadr, INST_REF)) {
			vcpu_set_isr(v, misr.val);
			alt_itlb(v, vadr);
			return;
		}

		vpta.val = vcpu_get_pta(v);

		vhpt_adr = vcpu_thash(v, vadr);
		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
			/* VHPT successfully read. */
			if (pteval & _PAGE_P) {
				if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
					vcpu_set_isr(v, misr.val);
					itlb_fault(v, vadr);
					return ;
				}
				rr = vcpu_get_rr(v, vadr);
				itir = rr & (RR_RID_MASK | RR_PS_MASK);
				thash_purge_and_insert(v, pteval, itir,
						vadr, I_TLB);
			} else {
				vcpu_set_isr(v, misr.val);
				inst_page_not_present(v, vadr);
			}
		} else {
			vcpu_set_isr(v, misr.val);
			ivhpt_fault(v, vadr);
		}
	}
}
884 | |||
885 | void kvm_vexirq(struct kvm_vcpu *vcpu) | ||
886 | { | ||
887 | u64 vpsr, isr; | ||
888 | struct kvm_pt_regs *regs; | ||
889 | |||
890 | regs = vcpu_regs(vcpu); | ||
891 | vpsr = VCPU(vcpu, vpsr); | ||
892 | isr = vpsr & IA64_PSR_RI; | ||
893 | reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/ | ||
894 | } | ||
895 | |||
/*
 * A host external interrupt arrived while the guest was running:
 * exit to the host side (with local irqs disabled around the world
 * switch), then request a timer re-check before re-entering the
 * guest.
 */
void kvm_ia64_handle_irq(struct kvm_vcpu *v)
{
	struct exit_ctl_data *p = &v->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
	vmm_transition(v);
	local_irq_restore(psr);

	VMX(v, timer_check) = 1;

}
909 | |||
/*
 * Service one remote ptc.ga purge request queued in ptc_g_data[pos].
 * The originator's rr value is temporarily installed as region 0
 * (both the virtual rr and the machine rr) so the purge hits the
 * right RID; the previous region-0 state is restored afterwards.
 */
static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
{
	u64 oldrid, moldrid, oldpsbits, vaddr;
	struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
	vaddr = p->vaddr;

	/* save region-0 state, install the requester's rr/psbits */
	oldrid = VMX(v, vrr[0]);
	VMX(v, vrr[0]) = p->rr;
	oldpsbits = VMX(v, psbits[0]);
	VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
	moldrid = ia64_get_rr(0x0);
	ia64_set_rr(0x0, vrrtomrr(p->rr));
	ia64_srlz_d();

	vaddr = PAGEALIGN(vaddr, p->ps);
	thash_purge_entries_remote(v, vaddr, p->ps);

	/* restore region-0 state */
	VMX(v, vrr[0]) = oldrid;
	VMX(v, psbits[0]) = oldpsbits;
	ia64_set_rr(0x0, moldrid);
	ia64_dv_serialize_data();
}
932 | |||
/* Re-initialize the VHPT and VTLB from scratch on resume, then
 * reinstall the PTA for the fresh VHPT. */
static void vcpu_do_resume(struct kvm_vcpu *vcpu)
{
	/*Re-init VHPT and VTLB once from resume*/
	vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
	thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
	vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
	thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);

	ia64_set_pta(vcpu->arch.vhpt.pta.val);
}
943 | |||
/*
 * Act on requests posted while we were on the host side, in priority
 * order: full resume, TLB flush, then any queued remote ptc.ga
 * purges (drained LIFO from ptc_g_count down).
 */
static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
		vcpu_do_resume(vcpu);
		return;
	}

	if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
		thash_purge_all(vcpu);
		return;
	}

	if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
		while (vcpu->arch.ptc_g_count > 0)
			ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
	}
}
961 | |||
/*
 * World switch from VMM to host context and back: save the VPD via
 * PAL_VPS_SAVE, run the trampoline that performs the switch, restore
 * via PAL_VPS_RESTORE, then act on any requests posted while on the
 * host side.
 */
void vmm_transition(struct kvm_vcpu *vcpu)
{
	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
			0, 0, 0, 0, 0, 0);
	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
			0, 0, 0, 0, 0, 0);
	kvm_do_resume_op(vcpu);
}
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S new file mode 100644 index 000000000000..30897d44d61e --- /dev/null +++ b/arch/ia64/kvm/trampoline.S | |||
@@ -0,0 +1,1038 @@ | |||
1 | /* Save all processor states | ||
2 | * | ||
3 | * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com> | ||
4 | * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com> | ||
5 | */ | ||
6 | |||
7 | #include <asm/asmmacro.h> | ||
8 | #include "asm-offsets.h" | ||
9 | |||
10 | |||
11 | #define CTX(name) VMM_CTX_##name##_OFFSET | ||
12 | |||
/*
 * Save branch registers b0-b5 into the context.
 * r32: context_t base address
 * Clobbers r2, r3, r16, r17.  Stores pairwise through two pointers
 * (B0/B2/B4 via r2, B1/B3/B5 via r3) with post-increment of 16.
 */
#define	SAVE_BRANCH_REGS	\
	add	r2 = CTX(B0),r32;	\
	add	r3 = CTX(B1),r32;	\
	mov	r16 = b0;	\
	mov	r17 = b1;	\
	;;	\
	st8	[r2]=r16,16;	\
	st8	[r3]=r17,16;	\
	;;	\
	mov	r16 = b2;	\
	mov	r17 = b3;	\
	;;	\
	st8	[r2]=r16,16;	\
	st8	[r3]=r17,16;	\
	;;	\
	mov	r16 = b4;	\
	mov	r17 = b5;	\
	;;	\
	st8	[r2]=r16;	\
	st8	[r3]=r17;	\
	;;
37 | |||
/*
 * Restore branch registers b0-b5 from the context.
 * r33: context_t base address
 * Clobbers r2, r3, r16, r17.  Mirrors SAVE_BRANCH_REGS exactly.
 */
#define	RESTORE_BRANCH_REGS	\
	add	r2 = CTX(B0),r33;	\
	add	r3 = CTX(B1),r33;	\
	;;	\
	ld8	r16=[r2],16;	\
	ld8	r17=[r3],16;	\
	;;	\
	mov	b0 = r16;	\
	mov	b1 = r17;	\
	;;	\
	ld8	r16=[r2],16;	\
	ld8	r17=[r3],16;	\
	;;	\
	mov	b2 = r16;	\
	mov	b3 = r17;	\
	;;	\
	ld8	r16=[r2];	\
	ld8	r17=[r3];	\
	;;	\
	mov	b4=r16;	\
	mov	b5=r17;	\
	;;
63 | |||
64 | |||
/*
 * Save the preserved general registers the trampoline must carry
 * across the context switch: bank-1 r4-r7 plus r12 (sp) and r13.
 * r32: context_t base address
 * bsw == 1
 * st8.spill + .mem.offset annotations keep ar.unat NaT tracking
 * correct; the ",48" post-increment skips from the R6/R7 slots
 * straight to the R12/R13 slots in context_t.
 */
#define	SAVE_GENERAL_REGS	\
	add	r2=CTX(R4),r32;	\
	add	r3=CTX(R5),r32;	\
	;;	\
.mem.offset 0,0;	\
	st8.spill	[r2]=r4,16;	\
.mem.offset 8,0;	\
	st8.spill	[r3]=r5,16;	\
	;;	\
.mem.offset 0,0;	\
	st8.spill	[r2]=r6,48;	\
.mem.offset 8,0;	\
	st8.spill	[r3]=r7,48;	\
	;;	\
.mem.offset 0,0;	\
	st8.spill	[r2]=r12;	\
.mem.offset 8,0;	\
	st8.spill	[r3]=r13;	\
	;;
89 | |||
/*
 * Restore r4-r7, r12 (sp) and r13 from the context (counterpart of
 * SAVE_GENERAL_REGS, same slot layout and ",48" skip).
 * r33: context_t base address
 * bsw == 1
 */
#define	RESTORE_GENERAL_REGS	\
	add	r2=CTX(R4),r33;	\
	add	r3=CTX(R5),r33;	\
	;;	\
	ld8.fill	r4=[r2],16;	\
	ld8.fill	r5=[r3],16;	\
	;;	\
	ld8.fill	r6=[r2],48;	\
	ld8.fill	r7=[r3],48;	\
	;;	\
	ld8.fill	r12=[r2];	\
	ld8.fill	r13 =[r3];	\
	;;
107 | |||
108 | |||
109 | |||
110 | |||
/*
 * Save kernel registers ar.k0-ar.k7 into the context.
 * r32: context_t base address
 * Clobbers r2, r3, r16, r17.
 */
#define	SAVE_KERNEL_REGS	\
	add	r2 = CTX(KR0),r32;	\
	add	r3 = CTX(KR1),r32;	\
	mov	r16 = ar.k0;	\
	mov	r17 = ar.k1;	\
	;;	\
	st8	[r2] = r16,16;	\
	st8	[r3] = r17,16;	\
	;;	\
	mov	r16 = ar.k2;	\
	mov	r17 = ar.k3;	\
	;;	\
	st8	[r2] = r16,16;	\
	st8	[r3] = r17,16;	\
	;;	\
	mov	r16 = ar.k4;	\
	mov	r17 = ar.k5;	\
	;;	\
	st8	[r2] = r16,16;	\
	st8	[r3] = r17,16;	\
	;;	\
	mov	r16 = ar.k6;	\
	mov	r17 = ar.k7;	\
	;;	\
	st8	[r2] = r16;	\
	st8	[r3] = r17;	\
	;;
141 | |||
142 | |||
143 | |||
/*
 * Restore kernel registers ar.k0-ar.k7 from the context.
 * r33: context_t base address
 * Clobbers r2, r3, r16, r17.
 */
#define	RESTORE_KERNEL_REGS	\
	add	r2 = CTX(KR0),r33;	\
	add	r3 = CTX(KR1),r33;	\
	;;	\
	ld8	r16=[r2],16;	\
	ld8	r17=[r3],16;	\
	;;	\
	mov	ar.k0=r16;	\
	mov	ar.k1=r17;	\
	;;	\
	ld8	r16=[r2],16;	\
	ld8	r17=[r3],16;	\
	;;	\
	mov	ar.k2=r16;	\
	mov	ar.k3=r17;	\
	;;	\
	ld8	r16=[r2],16;	\
	ld8	r17=[r3],16;	\
	;;	\
	mov	ar.k4=r16;	\
	mov	ar.k5=r17;	\
	;;	\
	ld8	r16=[r2],16;	\
	ld8	r17=[r3],16;	\
	;;	\
	mov	ar.k6=r16;	\
	mov	ar.k7=r17;	\
	;;
175 | |||
176 | |||
177 | |||
/*
 * Save the application registers: ar.bspstore, rnat, fcr, eflag,
 * cflg, fsr, fir, fdr, unat, fpsr, pfs and lc.  A single pointer (r2)
 * walks the context using CTX(next)-CTX(cur) offset differences, so
 * the stores stay correct even if the fields are not adjacent.
 * r32: context_t base address
 * Clobbers r2, r16.
 */
#define	SAVE_APP_REGS	\
	add	r2 = CTX(BSPSTORE),r32;	\
	mov	r16 = ar.bspstore;	\
	;;	\
	st8	[r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
	mov	r16 = ar.rnat;	\
	;;	\
	st8	[r2] = r16,CTX(FCR)-CTX(RNAT);	\
	mov	r16 = ar.fcr;	\
	;;	\
	st8	[r2] = r16,CTX(EFLAG)-CTX(FCR);	\
	mov	r16 = ar.eflag;	\
	;;	\
	st8	[r2] = r16,CTX(CFLG)-CTX(EFLAG);	\
	mov	r16 = ar.cflg;	\
	;;	\
	st8	[r2] = r16,CTX(FSR)-CTX(CFLG);	\
	mov	r16 = ar.fsr;	\
	;;	\
	st8	[r2] = r16,CTX(FIR)-CTX(FSR);	\
	mov	r16 = ar.fir;	\
	;;	\
	st8	[r2] = r16,CTX(FDR)-CTX(FIR);	\
	mov	r16 = ar.fdr;	\
	;;	\
	st8	[r2] = r16,CTX(UNAT)-CTX(FDR);	\
	mov	r16 = ar.unat;	\
	;;	\
	st8	[r2] = r16,CTX(FPSR)-CTX(UNAT);	\
	mov	r16 = ar.fpsr;	\
	;;	\
	st8	[r2] = r16,CTX(PFS)-CTX(FPSR);	\
	mov	r16 = ar.pfs;	\
	;;	\
	st8	[r2] = r16,CTX(LC)-CTX(PFS);	\
	mov	r16 = ar.lc;	\
	;;	\
	st8	[r2] = r16;	\
	;;
220 | |||
/*
 * Restore the application registers saved by SAVE_APP_REGS.
 * r33: context_t base address
 * Clobbers r2, r16.  Note the software pipelining: each ld8 fetches
 * the NEXT field while the following mov consumes the PREVIOUS one,
 * so the mov target always lags the load offset by one field.
 */
#define	RESTORE_APP_REGS	\
	add	r2=CTX(BSPSTORE),r33;	\
	;;	\
	ld8	r16=[r2],CTX(RNAT)-CTX(BSPSTORE);	\
	;;	\
	mov	ar.bspstore=r16;	\
	ld8	r16=[r2],CTX(FCR)-CTX(RNAT);	\
	;;	\
	mov	ar.rnat=r16;	\
	ld8	r16=[r2],CTX(EFLAG)-CTX(FCR);	\
	;;	\
	mov	ar.fcr=r16;	\
	ld8	r16=[r2],CTX(CFLG)-CTX(EFLAG);	\
	;;	\
	mov	ar.eflag=r16;	\
	ld8	r16=[r2],CTX(FSR)-CTX(CFLG);	\
	;;	\
	mov	ar.cflg=r16;	\
	ld8	r16=[r2],CTX(FIR)-CTX(FSR);	\
	;;	\
	mov	ar.fsr=r16;	\
	ld8	r16=[r2],CTX(FDR)-CTX(FIR);	\
	;;	\
	mov	ar.fir=r16;	\
	ld8	r16=[r2],CTX(UNAT)-CTX(FDR);	\
	;;	\
	mov	ar.fdr=r16;	\
	ld8	r16=[r2],CTX(FPSR)-CTX(UNAT);	\
	;;	\
	mov	ar.unat=r16;	\
	ld8	r16=[r2],CTX(PFS)-CTX(FPSR);	\
	;;	\
	mov	ar.fpsr=r16;	\
	ld8	r16=[r2],CTX(LC)-CTX(PFS);	\
	;;	\
	mov	ar.pfs=r16;	\
	ld8	r16=[r2];	\
	;;	\
	mov	ar.lc=r16;	\
	;;
264 | |||
/*
 * Save control registers cr.dcr, cr.iva and cr.pta into the context.
 * r32: context_t base address
 * Clobbers r2, r16.
 */
#define	SAVE_CTL_REGS	\
	add	r2 = CTX(DCR),r32;	\
	mov	r16 = cr.dcr;	\
	;;	\
	st8	[r2] = r16,CTX(IVA)-CTX(DCR);	\
	;;	\
	mov	r16 = cr.iva;	\
	;;	\
	st8	[r2] = r16,CTX(PTA)-CTX(IVA);	\
	;;	\
	mov	r16 = cr.pta;	\
	;;	\
	st8	[r2] = r16 ;	\
	;;
282 | |||
/*
 * Restore control registers cr.dcr, cr.iva and cr.pta from the
 * context.  Each write is followed by dv_serialize_data so the new
 * value is observed before dependent accesses.
 * r33: context_t base address
 * Clobbers r2, r16.
 */
#define	RESTORE_CTL_REGS	\
	add	r2 = CTX(DCR),r33;	\
	;;	\
	ld8	r16 = [r2],CTX(IVA)-CTX(DCR);	\
	;;	\
	mov	cr.dcr = r16;	\
	dv_serialize_data;	\
	;;	\
	ld8	r16 = [r2],CTX(PTA)-CTX(IVA);	\
	;;	\
	mov	cr.iva = r16;	\
	dv_serialize_data;	\
	;;	\
	ld8	r16 = [r2];	\
	;;	\
	mov	cr.pta = r16;	\
	dv_serialize_data;	\
	;;
304 | |||
305 | |||
/*
 * Save region registers rr0-rr5 and rr7 into the context.
 * rr6 is deliberately NOT saved (its slot is skipped by the ",16"
 * post-increment after rr5) -- presumably rr6 is owned by the VMM
 * itself; NOTE(review): confirm against the context_t layout.
 * r32: context_t base address
 * Clobbers r2, r16, r17, r18 (r18 holds the region-number index
 * built with dep.z into bits 63:61).
 */
#define	SAVE_REGION_REGS	\
	add	r2=CTX(RR0),r32;	\
	mov	r16=rr[r0];	\
	dep.z	r18=1,61,3;	\
	;;	\
	st8	[r2]=r16,8;	\
	mov	r17=rr[r18];	\
	dep.z	r18=2,61,3;	\
	;;	\
	st8	[r2]=r17,8;	\
	mov	r16=rr[r18];	\
	dep.z	r18=3,61,3;	\
	;;	\
	st8	[r2]=r16,8;	\
	mov	r17=rr[r18];	\
	dep.z	r18=4,61,3;	\
	;;	\
	st8	[r2]=r17,8;	\
	mov	r16=rr[r18];	\
	dep.z	r18=5,61,3;	\
	;;	\
	st8	[r2]=r16,8;	\
	mov	r17=rr[r18];	\
	dep.z	r18=7,61,3;	\
	;;	\
	st8	[r2]=r17,16;	\
	mov	r16=rr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	;;
339 | |||
/*
 * Restore region registers rr0-rr5 and rr7 from the context (rr6 is
 * skipped, matching SAVE_REGION_REGS).  All seven values are loaded
 * first, then written, with a final srlz.i to serialize the new
 * translations for instruction fetch.
 * r33: context_t base address
 * Clobbers r2, r18, r20-r25, r27.
 */
#define	RESTORE_REGION_REGS	\
	add	r2=CTX(RR0),r33;\
	mov	r18=r0;	\
	;;	\
	ld8	r20=[r2],8;	\
	;;	/* rr0 */	\
	ld8	r21=[r2],8;	\
	;;	/* rr1 */	\
	ld8	r22=[r2],8;	\
	;;	/* rr2 */	\
	ld8	r23=[r2],8;	\
	;;	/* rr3 */	\
	ld8	r24=[r2],8;	\
	;;	/* rr4 */	\
	ld8	r25=[r2],16;	\
	;;	/* rr5 */	\
	ld8	r27=[r2];	\
	;;	/* rr7 */	\
	mov	rr[r18]=r20;	\
	dep.z	r18=1,61,3;	\
	;;	/* rr1 */	\
	mov	rr[r18]=r21;	\
	dep.z	r18=2,61,3;	\
	;;	/* rr2 */	\
	mov	rr[r18]=r22;	\
	dep.z	r18=3,61,3;	\
	;;	/* rr3 */	\
	mov	rr[r18]=r23;	\
	dep.z	r18=4,61,3;	\
	;;	/* rr4 */	\
	mov	rr[r18]=r24;	\
	dep.z	r18=5,61,3;	\
	;;	/* rr5 */	\
	mov	rr[r18]=r25;	\
	dep.z	r18=7,61,3;	\
	;;	/* rr7 */	\
	mov	rr[r18]=r27;	\
	;;	\
	srlz.i;	\
	;;
383 | |||
384 | |||
385 | |||
/*
 * Save debug registers ibr0-ibr7 / dbr0-dbr7 into the context.
 * r32: context_t base address
 * Clobbers r2, r3, r16, r17, r18.
 *
 * Fixed: the previous version duplicated the "add r18=2,r0" stanza,
 * which saved ibr[2]/dbr[2] twice and stored NINE register pairs into
 * the EIGHT IBR/DBR context slots -- an eight-byte overrun past DBR7,
 * with shifted contents that RESTORE_DEBUG_REGS (eight sequential
 * loads) would have reloaded into the wrong registers.  Exactly
 * indices 0..7 are saved now, matching RESTORE_DEBUG_REGS.
 */
#define	SAVE_DEBUG_REGS	\
	add	r2=CTX(IBR0),r32;	\
	add	r3=CTX(DBR0),r32;	\
	mov	r16=ibr[r0];	\
	mov	r17=dbr[r0];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	add	r18=1,r0;	\
	;;	\
	mov	r16=ibr[r18];	\
	mov	r17=dbr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	add	r18=2,r0;	\
	;;	\
	mov	r16=ibr[r18];	\
	mov	r17=dbr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	add	r18=3,r0;	\
	;;	\
	mov	r16=ibr[r18];	\
	mov	r17=dbr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	add	r18=4,r0;	\
	;;	\
	mov	r16=ibr[r18];	\
	mov	r17=dbr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	add	r18=5,r0;	\
	;;	\
	mov	r16=ibr[r18];	\
	mov	r17=dbr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	add	r18=6,r0;	\
	;;	\
	mov	r16=ibr[r18];	\
	mov	r17=dbr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	add	r18=7,r0;	\
	;;	\
	mov	r16=ibr[r18];	\
	mov	r17=dbr[r18];	\
	;;	\
	st8	[r2]=r16,8;	\
	st8	[r3]=r17,8;	\
	;;
455 | |||
456 | |||
/*
 * Restore ibr0-ibr7 / dbr0-dbr7 from the context using a counted
 * loop: ar.lc = 7 gives eight br.cloop iterations, each restoring one
 * ibr/dbr pair followed by srlz.i.
 * r33: point to context_t structure
 * ar.lc are corrupted (also clobbers r2, r3, r16-r19).
 */
#define	RESTORE_DEBUG_REGS	\
	add	r2=CTX(IBR0),r33;	\
	add	r3=CTX(DBR0),r33;	\
	mov	r16=7;	\
	mov	r17=r0;	\
	;;	\
	mov	ar.lc = r16;	\
	;;	\
1:	\
	ld8	r18=[r2],8;	\
	ld8	r19=[r3],8;	\
	;;	\
	mov	ibr[r17]=r18;	\
	mov	dbr[r17]=r19;	\
	;;	\
	srlz.i;	\
	;;	\
	add	r17=1,r17;	\
	br.cloop.sptk 1b;	\
	;;
481 | |||
482 | |||
/*
 * Save the low floating-point partition f2-f31 (f0/f1 are constant
 * registers and never saved).  stf.spill preserves NaTVal; the .nta
 * hint keeps these one-shot stores out of the caches.
 * r32: context_t base address
 * Clobbers r2, r3.
 */
#define	SAVE_FPU_LOW	\
	add	r2=CTX(F2),r32;	\
	add	r3=CTX(F3),r32;	\
	;;	\
	stf.spill.nta	[r2]=f2,32;	\
	stf.spill.nta	[r3]=f3,32;	\
	;;	\
	stf.spill.nta	[r2]=f4,32;	\
	stf.spill.nta	[r3]=f5,32;	\
	;;	\
	stf.spill.nta	[r2]=f6,32;	\
	stf.spill.nta	[r3]=f7,32;	\
	;;	\
	stf.spill.nta	[r2]=f8,32;	\
	stf.spill.nta	[r3]=f9,32;	\
	;;	\
	stf.spill.nta	[r2]=f10,32;	\
	stf.spill.nta	[r3]=f11,32;	\
	;;	\
	stf.spill.nta	[r2]=f12,32;	\
	stf.spill.nta	[r3]=f13,32;	\
	;;	\
	stf.spill.nta	[r2]=f14,32;	\
	stf.spill.nta	[r3]=f15,32;	\
	;;	\
	stf.spill.nta	[r2]=f16,32;	\
	stf.spill.nta	[r3]=f17,32;	\
	;;	\
	stf.spill.nta	[r2]=f18,32;	\
	stf.spill.nta	[r3]=f19,32;	\
	;;	\
	stf.spill.nta	[r2]=f20,32;	\
	stf.spill.nta	[r3]=f21,32;	\
	;;	\
	stf.spill.nta	[r2]=f22,32;	\
	stf.spill.nta	[r3]=f23,32;	\
	;;	\
	stf.spill.nta	[r2]=f24,32;	\
	stf.spill.nta	[r3]=f25,32;	\
	;;	\
	stf.spill.nta	[r2]=f26,32;	\
	stf.spill.nta	[r3]=f27,32;	\
	;;	\
	stf.spill.nta	[r2]=f28,32;	\
	stf.spill.nta	[r3]=f29,32;	\
	;;	\
	stf.spill.nta	[r2]=f30;	\
	stf.spill.nta	[r3]=f31;	\
	;;
535 | |||
/*
 * Save the high floating-point partition f32-f127 (same pattern as
 * SAVE_FPU_LOW: two interleaved spill pointers, 16-byte slots on a
 * 32-byte stride per pointer).  Caller must clear psr.dfh first.
 * r32: context_t base address
 * Clobbers r2, r3.
 */
#define	SAVE_FPU_HIGH	\
	add	r2=CTX(F32),r32;	\
	add	r3=CTX(F33),r32;	\
	;;	\
	stf.spill.nta	[r2]=f32,32;	\
	stf.spill.nta	[r3]=f33,32;	\
	;;	\
	stf.spill.nta	[r2]=f34,32;	\
	stf.spill.nta	[r3]=f35,32;	\
	;;	\
	stf.spill.nta	[r2]=f36,32;	\
	stf.spill.nta	[r3]=f37,32;	\
	;;	\
	stf.spill.nta	[r2]=f38,32;	\
	stf.spill.nta	[r3]=f39,32;	\
	;;	\
	stf.spill.nta	[r2]=f40,32;	\
	stf.spill.nta	[r3]=f41,32;	\
	;;	\
	stf.spill.nta	[r2]=f42,32;	\
	stf.spill.nta	[r3]=f43,32;	\
	;;	\
	stf.spill.nta	[r2]=f44,32;	\
	stf.spill.nta	[r3]=f45,32;	\
	;;	\
	stf.spill.nta	[r2]=f46,32;	\
	stf.spill.nta	[r3]=f47,32;	\
	;;	\
	stf.spill.nta	[r2]=f48,32;	\
	stf.spill.nta	[r3]=f49,32;	\
	;;	\
	stf.spill.nta	[r2]=f50,32;	\
	stf.spill.nta	[r3]=f51,32;	\
	;;	\
	stf.spill.nta	[r2]=f52,32;	\
	stf.spill.nta	[r3]=f53,32;	\
	;;	\
	stf.spill.nta	[r2]=f54,32;	\
	stf.spill.nta	[r3]=f55,32;	\
	;;	\
	stf.spill.nta	[r2]=f56,32;	\
	stf.spill.nta	[r3]=f57,32;	\
	;;	\
	stf.spill.nta	[r2]=f58,32;	\
	stf.spill.nta	[r3]=f59,32;	\
	;;	\
	stf.spill.nta	[r2]=f60,32;	\
	stf.spill.nta	[r3]=f61,32;	\
	;;	\
	stf.spill.nta	[r2]=f62,32;	\
	stf.spill.nta	[r3]=f63,32;	\
	;;	\
	stf.spill.nta	[r2]=f64,32;	\
	stf.spill.nta	[r3]=f65,32;	\
	;;	\
	stf.spill.nta	[r2]=f66,32;	\
	stf.spill.nta	[r3]=f67,32;	\
	;;	\
	stf.spill.nta	[r2]=f68,32;	\
	stf.spill.nta	[r3]=f69,32;	\
	;;	\
	stf.spill.nta	[r2]=f70,32;	\
	stf.spill.nta	[r3]=f71,32;	\
	;;	\
	stf.spill.nta	[r2]=f72,32;	\
	stf.spill.nta	[r3]=f73,32;	\
	;;	\
	stf.spill.nta	[r2]=f74,32;	\
	stf.spill.nta	[r3]=f75,32;	\
	;;	\
	stf.spill.nta	[r2]=f76,32;	\
	stf.spill.nta	[r3]=f77,32;	\
	;;	\
	stf.spill.nta	[r2]=f78,32;	\
	stf.spill.nta	[r3]=f79,32;	\
	;;	\
	stf.spill.nta	[r2]=f80,32;	\
	stf.spill.nta	[r3]=f81,32;	\
	;;	\
	stf.spill.nta	[r2]=f82,32;	\
	stf.spill.nta	[r3]=f83,32;	\
	;;	\
	stf.spill.nta	[r2]=f84,32;	\
	stf.spill.nta	[r3]=f85,32;	\
	;;	\
	stf.spill.nta	[r2]=f86,32;	\
	stf.spill.nta	[r3]=f87,32;	\
	;;	\
	stf.spill.nta	[r2]=f88,32;	\
	stf.spill.nta	[r3]=f89,32;	\
	;;	\
	stf.spill.nta	[r2]=f90,32;	\
	stf.spill.nta	[r3]=f91,32;	\
	;;	\
	stf.spill.nta	[r2]=f92,32;	\
	stf.spill.nta	[r3]=f93,32;	\
	;;	\
	stf.spill.nta	[r2]=f94,32;	\
	stf.spill.nta	[r3]=f95,32;	\
	;;	\
	stf.spill.nta	[r2]=f96,32;	\
	stf.spill.nta	[r3]=f97,32;	\
	;;	\
	stf.spill.nta	[r2]=f98,32;	\
	stf.spill.nta	[r3]=f99,32;	\
	;;	\
	stf.spill.nta	[r2]=f100,32;	\
	stf.spill.nta	[r3]=f101,32;	\
	;;	\
	stf.spill.nta	[r2]=f102,32;	\
	stf.spill.nta	[r3]=f103,32;	\
	;;	\
	stf.spill.nta	[r2]=f104,32;	\
	stf.spill.nta	[r3]=f105,32;	\
	;;	\
	stf.spill.nta	[r2]=f106,32;	\
	stf.spill.nta	[r3]=f107,32;	\
	;;	\
	stf.spill.nta	[r2]=f108,32;	\
	stf.spill.nta	[r3]=f109,32;	\
	;;	\
	stf.spill.nta	[r2]=f110,32;	\
	stf.spill.nta	[r3]=f111,32;	\
	;;	\
	stf.spill.nta	[r2]=f112,32;	\
	stf.spill.nta	[r3]=f113,32;	\
	;;	\
	stf.spill.nta	[r2]=f114,32;	\
	stf.spill.nta	[r3]=f115,32;	\
	;;	\
	stf.spill.nta	[r2]=f116,32;	\
	stf.spill.nta	[r3]=f117,32;	\
	;;	\
	stf.spill.nta	[r2]=f118,32;	\
	stf.spill.nta	[r3]=f119,32;	\
	;;	\
	stf.spill.nta	[r2]=f120,32;	\
	stf.spill.nta	[r3]=f121,32;	\
	;;	\
	stf.spill.nta	[r2]=f122,32;	\
	stf.spill.nta	[r3]=f123,32;	\
	;;	\
	stf.spill.nta	[r2]=f124,32;	\
	stf.spill.nta	[r3]=f125,32;	\
	;;	\
	stf.spill.nta	[r2]=f126;	\
	stf.spill.nta	[r3]=f127;	\
	;;
687 | |||
/*
 * Restore the low floating-point partition f2-f31 saved by
 * SAVE_FPU_LOW (ldf.fill reverses stf.spill, preserving NaTVal).
 * r33: point to context_t structure
 * Clobbers r2, r3.
 */
#define	RESTORE_FPU_LOW	\
	add	r2 = CTX(F2), r33;	\
	add	r3 = CTX(F3), r33;	\
	;;	\
	ldf.fill.nta	f2 = [r2], 32;	\
	ldf.fill.nta	f3 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f4 = [r2], 32;	\
	ldf.fill.nta	f5 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f6 = [r2], 32;	\
	ldf.fill.nta	f7 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f8 = [r2], 32;	\
	ldf.fill.nta	f9 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f10 = [r2], 32;	\
	ldf.fill.nta	f11 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f12 = [r2], 32;	\
	ldf.fill.nta	f13 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f14 = [r2], 32;	\
	ldf.fill.nta	f15 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f16 = [r2], 32;	\
	ldf.fill.nta	f17 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f18 = [r2], 32;	\
	ldf.fill.nta	f19 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f20 = [r2], 32;	\
	ldf.fill.nta	f21 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f22 = [r2], 32;	\
	ldf.fill.nta	f23 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f24 = [r2], 32;	\
	ldf.fill.nta	f25 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f26 = [r2], 32;	\
	ldf.fill.nta	f27 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f28 = [r2], 32;	\
	ldf.fill.nta	f29 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f30 = [r2], 32;	\
	ldf.fill.nta	f31 = [r3], 32;	\
	;;
740 | |||
741 | |||
742 | |||
/*
 * Restore the high floating-point partition f32-f127 saved by
 * SAVE_FPU_HIGH.  Caller must clear psr.dfh first.
 * r33: point to context_t structure
 * Clobbers r2, r3.
 */
#define	RESTORE_FPU_HIGH	\
	add	r2 = CTX(F32), r33;	\
	add	r3 = CTX(F33), r33;	\
	;;	\
	ldf.fill.nta	f32 = [r2], 32;	\
	ldf.fill.nta	f33 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f34 = [r2], 32;	\
	ldf.fill.nta	f35 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f36 = [r2], 32;	\
	ldf.fill.nta	f37 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f38 = [r2], 32;	\
	ldf.fill.nta	f39 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f40 = [r2], 32;	\
	ldf.fill.nta	f41 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f42 = [r2], 32;	\
	ldf.fill.nta	f43 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f44 = [r2], 32;	\
	ldf.fill.nta	f45 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f46 = [r2], 32;	\
	ldf.fill.nta	f47 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f48 = [r2], 32;	\
	ldf.fill.nta	f49 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f50 = [r2], 32;	\
	ldf.fill.nta	f51 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f52 = [r2], 32;	\
	ldf.fill.nta	f53 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f54 = [r2], 32;	\
	ldf.fill.nta	f55 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f56 = [r2], 32;	\
	ldf.fill.nta	f57 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f58 = [r2], 32;	\
	ldf.fill.nta	f59 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f60 = [r2], 32;	\
	ldf.fill.nta	f61 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f62 = [r2], 32;	\
	ldf.fill.nta	f63 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f64 = [r2], 32;	\
	ldf.fill.nta	f65 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f66 = [r2], 32;	\
	ldf.fill.nta	f67 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f68 = [r2], 32;	\
	ldf.fill.nta	f69 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f70 = [r2], 32;	\
	ldf.fill.nta	f71 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f72 = [r2], 32;	\
	ldf.fill.nta	f73 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f74 = [r2], 32;	\
	ldf.fill.nta	f75 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f76 = [r2], 32;	\
	ldf.fill.nta	f77 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f78 = [r2], 32;	\
	ldf.fill.nta	f79 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f80 = [r2], 32;	\
	ldf.fill.nta	f81 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f82 = [r2], 32;	\
	ldf.fill.nta	f83 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f84 = [r2], 32;	\
	ldf.fill.nta	f85 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f86 = [r2], 32;	\
	ldf.fill.nta	f87 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f88 = [r2], 32;	\
	ldf.fill.nta	f89 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f90 = [r2], 32;	\
	ldf.fill.nta	f91 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f92 = [r2], 32;	\
	ldf.fill.nta	f93 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f94 = [r2], 32;	\
	ldf.fill.nta	f95 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f96 = [r2], 32;	\
	ldf.fill.nta	f97 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f98 = [r2], 32;	\
	ldf.fill.nta	f99 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f100 = [r2], 32;	\
	ldf.fill.nta	f101 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f102 = [r2], 32;	\
	ldf.fill.nta	f103 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f104 = [r2], 32;	\
	ldf.fill.nta	f105 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f106 = [r2], 32;	\
	ldf.fill.nta	f107 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f108 = [r2], 32;	\
	ldf.fill.nta	f109 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f110 = [r2], 32;	\
	ldf.fill.nta	f111 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f112 = [r2], 32;	\
	ldf.fill.nta	f113 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f114 = [r2], 32;	\
	ldf.fill.nta	f115 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f116 = [r2], 32;	\
	ldf.fill.nta	f117 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f118 = [r2], 32;	\
	ldf.fill.nta	f119 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f120 = [r2], 32;	\
	ldf.fill.nta	f121 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f122 = [r2], 32;	\
	ldf.fill.nta	f123 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f124 = [r2], 32;	\
	ldf.fill.nta	f125 = [r3], 32;	\
	;;	\
	ldf.fill.nta	f126 = [r2], 32;	\
	ldf.fill.nta	f127 = [r3], 32;	\
	;;
894 | |||
/*
 * Save protection-key registers pkr0-pkr7 into the context with a
 * counted loop (ar.lc = 7 -> eight br.cloop iterations).
 * r32: context_t base address
 * ar.lc is corrupted (also clobbers r2, r16-r18).
 */
#define	SAVE_PTK_REGS	\
	add	r2=CTX(PKR0), r32;	\
	mov	r16=7;	\
	;;	\
	mov	ar.lc=r16;	\
	mov	r17=r0;	\
	;;	\
1:	\
	mov	r18=pkr[r17];	\
	;;	\
	srlz.i;	\
	;;	\
	st8	[r2]=r18, 8;	\
	;;	\
	add	r17 =1,r17;	\
	;;	\
	br.cloop.sptk 1b;	\
	;;
916 | |||
/*
 * Restore protection-key registers pkr0-pkr7 from the context
 * (counterpart of SAVE_PTK_REGS; srlz.i after each pkr write).
 * r33: point to context_t structure
 * ar.lc are corrupted (also clobbers r2, r16-r18).
 */
#define	RESTORE_PTK_REGS	\
	add	r2=CTX(PKR0), r33;	\
	mov	r16=7;	\
	;;	\
	mov	ar.lc=r16;	\
	mov	r17=r0;	\
	;;	\
1:	\
	ld8	r18=[r2], 8;	\
	;;	\
	mov	pkr[r17]=r18;	\
	;;	\
	srlz.i;	\
	;;	\
	add	r17 =1,r17;	\
	;;	\
	br.cloop.sptk 1b;	\
	;;
939 | |||
940 | |||
/*
 * void vmm_trampoline( context_t * from,
 *	context_t * to)
 *
 *	from:	r32
 *	to:	r33
 *  note: interrupt disabled before call this function.
 *
 * Full processor context switch between host and VMM contexts:
 * 1. Save psr, pr, ar.unat and ar.rsc into "from", then set
 *    ar.rsc = 0 (enforced lazy mode) and flushrs so the dirty RSE
 *    partition is written back before spilling state.
 * 2. Save general/kernel/app/branch/control/region registers, then
 *    (with psr.dfl and psr.dfh cleared to unfreeze both FP
 *    partitions) the full FP file and the protection keys.
 * 3. Restore the same state in reverse order from "to"; debug
 *    registers are intentionally not switched (both SAVE_DEBUG_REGS
 *    and RESTORE_DEBUG_REGS are commented out).
 * 4. Restore psr.l/pr/unat/rsc from "to" and return through b0 --
 *    which RESTORE_BRANCH_REGS reloaded from "to", so control
 *    resumes at the return point recorded in the target context.
 */
GLOBAL_ENTRY(vmm_trampoline)
	mov	r16 = psr
	adds	r2 = CTX(PSR), r32
	;;
	st8	[r2] = r16, 8	// psr
	mov	r17 = pr
	;;
	st8	[r2] = r17, 8	// pr
	mov	r18 = ar.unat
	;;
	st8	[r2] = r18
	mov	r17 = ar.rsc
	;;
	adds	r2 = CTX(RSC),r32
	;;
	st8	[r2]= r17
	mov	ar.rsc =0	// RSE to enforced lazy mode before spilling
	flushrs
	;;
	SAVE_GENERAL_REGS
	;;
	SAVE_KERNEL_REGS
	;;
	SAVE_APP_REGS
	;;
	SAVE_BRANCH_REGS
	;;
	SAVE_CTL_REGS
	;;
	SAVE_REGION_REGS
	;;
	//SAVE_DEBUG_REGS
	;;
	rsm	psr.dfl		// unfreeze low FP partition (f2-f31)
	;;
	srlz.d
	;;
	SAVE_FPU_LOW
	;;
	rsm	psr.dfh		// unfreeze high FP partition (f32-f127)
	;;
	srlz.d
	;;
	SAVE_FPU_HIGH
	;;
	SAVE_PTK_REGS
	;;
	RESTORE_PTK_REGS
	;;
	RESTORE_FPU_HIGH
	;;
	RESTORE_FPU_LOW
	;;
	//RESTORE_DEBUG_REGS
	;;
	RESTORE_REGION_REGS
	;;
	RESTORE_CTL_REGS
	;;
	RESTORE_BRANCH_REGS
	;;
	RESTORE_APP_REGS
	;;
	RESTORE_KERNEL_REGS
	;;
	RESTORE_GENERAL_REGS
	;;
	adds	r2=CTX(PSR), r33
	;;
	ld8	r16=[r2], 8	// psr
	;;
	mov	psr.l=r16
	;;
	srlz.d
	;;
	ld8	r16=[r2], 8	// pr
	;;
	mov	pr =r16,-1	// -1 mask: restore all predicate registers
	ld8	r16=[r2]	// unat
	;;
	mov	ar.unat=r16
	;;
	adds	r2=CTX(RSC),r33
	;;
	ld8	r16 =[r2]
	;;
	mov	ar.rsc = r16
	;;
	br.ret.sptk.few b0	// b0 came from the "to" context
END(vmm_trampoline)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c new file mode 100644 index 000000000000..e44027ce5667 --- /dev/null +++ b/arch/ia64/kvm/vcpu.c | |||
@@ -0,0 +1,2163 @@ | |||
1 | /* | ||
2 | * kvm_vcpu.c: handling all virtual cpu related thing. | ||
3 | * Copyright (c) 2005, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
17 | * | ||
18 | * Shaofan Li (Susue Li) <susie.li@intel.com> | ||
19 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) | ||
20 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
21 | * Xiantao Zhang <xiantao.zhang@intel.com> | ||
22 | */ | ||
23 | |||
#include <linux/kvm_host.h>
#include <linux/stddef.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
33 | |||
34 | #include "asm-offsets.h" | ||
35 | #include "vcpu.h" | ||
36 | |||
37 | /* | ||
38 | * Special notes: | ||
39 | * - Index by it/dt/rt sequence | ||
40 | * - Only existing mode transitions are allowed in this table | ||
41 | * - RSE is placed at lazy mode when emulating guest partial mode | ||
42 | * - If gva happens to be rr0 and rr4, only allowed case is identity | ||
43 | * mapping (gva=gpa), or panic! (How?) | ||
44 | */ | ||
/*
 * mm_switch_table[old][new]: action to perform when the guest's
 * (it,dt,rt) PSR bits change; both indices come from MODE_IND().
 * Entries left 0 are transitions that are not expected to occur.
 */
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	/*
	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
	 *  This kind of transition usually occurs in the very early
	 *  stage of Linux boot up procedure. Another case is in efi
	 *  and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
	 *  This kind of transition is found when OSYa exits efi boot
	 *  service. Due to gva = gpa in this case (Same region),
	 *  data access can be satisfied though itlb entry for physical
	 *  emulation is hit.
	 */
	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
	 *  This kind of transition is found in OSYa.
	 *
	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
	 *  This kind of transition is found in OSYa
	 */
	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
	/*
	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
	 *  This kind of transition usually occurs when Linux returns
	 *  from the low level TLB miss handlers.
	 *  (see "arch/ia64/kernel/ivt.S")
	 */
	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
	 *  This kind of transition usually occurs in Linux low level
	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
	 *  This kind of transition usually occurs in pal and efi calls,
	 *  which requires running in physical mode.
	 *  (see "arch/ia64/kernel/head.S")
	 *  (1,1,1)->(1,0,0)
	 */

	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
};
94 | |||
95 | void physical_mode_init(struct kvm_vcpu *vcpu) | ||
96 | { | ||
97 | vcpu->arch.mode_flags = GUEST_IN_PHY; | ||
98 | } | ||
99 | |||
/*
 * Install the metaphysical RIDs into rr[0] and rr[4] so that guest
 * "physical" accesses through regions 0 and 4 hit the physical-mode
 * emulation translations. Interruption collection is disabled around
 * the region-register writes; each write is followed by srlz.d before
 * the new translation may be used.
 */
void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}
114 | |||
115 | |||
/*
 * Restore the guest's saved virtual-mode RIDs into rr[0] and rr[4],
 * undoing switch_to_physical_rid(). Same IC-off / srlz.d discipline
 * as the physical-mode switch.
 */
void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}
128 | |||
129 | static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr) | ||
130 | { | ||
131 | return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)]; | ||
132 | } | ||
133 | |||
134 | void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, | ||
135 | struct ia64_psr new_psr) | ||
136 | { | ||
137 | int act; | ||
138 | act = mm_switch_action(old_psr, new_psr); | ||
139 | switch (act) { | ||
140 | case SW_V2P: | ||
141 | /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n", | ||
142 | old_psr.val, new_psr.val);*/ | ||
143 | switch_to_physical_rid(vcpu); | ||
144 | /* | ||
145 | * Set rse to enforced lazy, to prevent active rse | ||
146 | *save/restor when guest physical mode. | ||
147 | */ | ||
148 | vcpu->arch.mode_flags |= GUEST_IN_PHY; | ||
149 | break; | ||
150 | case SW_P2V: | ||
151 | switch_to_virtual_rid(vcpu); | ||
152 | /* | ||
153 | * recover old mode which is saved when entering | ||
154 | * guest physical mode | ||
155 | */ | ||
156 | vcpu->arch.mode_flags &= ~GUEST_IN_PHY; | ||
157 | break; | ||
158 | case SW_SELF: | ||
159 | break; | ||
160 | case SW_NOP: | ||
161 | break; | ||
162 | default: | ||
163 | /* Sanity check */ | ||
164 | break; | ||
165 | } | ||
166 | return; | ||
167 | } | ||
168 | |||
169 | |||
170 | |||
171 | /* | ||
172 | * In physical mode, insert tc/tr for region 0 and 4 uses | ||
173 | * RID[0] and RID[4] which is for physical mode emulation. | ||
174 | * However what those inserted tc/tr wants is rid for | ||
175 | * virtual mode. So original virtual rid needs to be restored | ||
176 | * before insert. | ||
177 | * | ||
178 | * Operations which required such switch include: | ||
179 | * - insertions (itc.*, itr.*) | ||
180 | * - purges (ptc.* and ptr.*) | ||
181 | * - tpa | ||
182 | * - tak | ||
183 | * - thash?, ttag? | ||
184 | * All above needs actual virtual rid for destination entry. | ||
185 | */ | ||
186 | |||
187 | void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, | ||
188 | struct ia64_psr new_psr) | ||
189 | { | ||
190 | |||
191 | if ((old_psr.dt != new_psr.dt) | ||
192 | || (old_psr.it != new_psr.it) | ||
193 | || (old_psr.rt != new_psr.rt)) | ||
194 | switch_mm_mode(vcpu, old_psr, new_psr); | ||
195 | |||
196 | return; | ||
197 | } | ||
198 | |||
199 | |||
200 | /* | ||
201 | * In physical mode, insert tc/tr for region 0 and 4 uses | ||
202 | * RID[0] and RID[4] which is for physical mode emulation. | ||
203 | * However what those inserted tc/tr wants is rid for | ||
204 | * virtual mode. So original virtual rid needs to be restored | ||
205 | * before insert. | ||
206 | * | ||
207 | * Operations which required such switch include: | ||
208 | * - insertions (itc.*, itr.*) | ||
209 | * - purges (ptc.* and ptr.*) | ||
210 | * - tpa | ||
211 | * - tak | ||
212 | * - thash?, ttag? | ||
213 | * All above needs actual virtual rid for destination entry. | ||
214 | */ | ||
215 | |||
216 | void prepare_if_physical_mode(struct kvm_vcpu *vcpu) | ||
217 | { | ||
218 | if (is_physical_mode(vcpu)) { | ||
219 | vcpu->arch.mode_flags |= GUEST_PHY_EMUL; | ||
220 | switch_to_virtual_rid(vcpu); | ||
221 | } | ||
222 | return; | ||
223 | } | ||
224 | |||
225 | /* Recover always follows prepare */ | ||
226 | void recover_if_physical_mode(struct kvm_vcpu *vcpu) | ||
227 | { | ||
228 | if (is_physical_mode(vcpu)) | ||
229 | switch_to_physical_rid(vcpu); | ||
230 | vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL; | ||
231 | return; | ||
232 | } | ||
233 | |||
234 | #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x) | ||
235 | |||
236 | static u16 gr_info[32] = { | ||
237 | 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */ | ||
238 | RPT(r1), RPT(r2), RPT(r3), | ||
239 | RPT(r4), RPT(r5), RPT(r6), RPT(r7), | ||
240 | RPT(r8), RPT(r9), RPT(r10), RPT(r11), | ||
241 | RPT(r12), RPT(r13), RPT(r14), RPT(r15), | ||
242 | RPT(r16), RPT(r17), RPT(r18), RPT(r19), | ||
243 | RPT(r20), RPT(r21), RPT(r22), RPT(r23), | ||
244 | RPT(r24), RPT(r25), RPT(r26), RPT(r27), | ||
245 | RPT(r28), RPT(r29), RPT(r30), RPT(r31) | ||
246 | }; | ||
247 | |||
248 | #define IA64_FIRST_STACKED_GR 32 | ||
249 | #define IA64_FIRST_ROTATING_FR 32 | ||
250 | |||
/*
 * Apply register-rotation base RRB to index REG within a rotating
 * region of SOR registers (single conditional wrap, as REG < SOR
 * and RRB < SOR for well-formed frames).
 */
static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	unsigned long rotated = reg + rrb;

	return (rotated >= sor) ? rotated - sor : rotated;
}
259 | |||
260 | /* | ||
261 | * Return the (rotated) index for floating point register | ||
262 | * be in the REGNUM (REGNUM must range from 32-127, | ||
263 | * result is in the range from 0-95. | ||
264 | */ | ||
265 | static inline unsigned long fph_index(struct kvm_pt_regs *regs, | ||
266 | long regnum) | ||
267 | { | ||
268 | unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f; | ||
269 | return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); | ||
270 | } | ||
271 | |||
272 | |||
/*
 * Step NUM_REGS register slots from ADDR on the register backing
 * store, additionally skipping the RNAT collection slot that occupies
 * every 64th doubleword (slots where addr{8:3} == 0x3f). NUM_REGS may
 * be negative to walk backwards.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
							long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	/* Walking backwards: bias so the divide-by-63 below rounds down. */
	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}

	/* i extra slots are consumed by (or reclaimed from) RNAT words. */
	return addr + num_regs + i;
}
299 | |||
300 | static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, | ||
301 | unsigned long *val, int *nat) | ||
302 | { | ||
303 | unsigned long *bsp, *addr, *rnat_addr, *bspstore; | ||
304 | unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; | ||
305 | unsigned long nat_mask; | ||
306 | unsigned long old_rsc, new_rsc; | ||
307 | long sof = (regs->cr_ifs) & 0x7f; | ||
308 | long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); | ||
309 | long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; | ||
310 | long ridx = r1 - 32; | ||
311 | |||
312 | if (ridx < sor) | ||
313 | ridx = rotate_reg(sor, rrb_gr, ridx); | ||
314 | |||
315 | old_rsc = ia64_getreg(_IA64_REG_AR_RSC); | ||
316 | new_rsc = old_rsc&(~(0x3)); | ||
317 | ia64_setreg(_IA64_REG_AR_RSC, new_rsc); | ||
318 | |||
319 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | ||
320 | bsp = kbs + (regs->loadrs >> 19); | ||
321 | |||
322 | addr = kvm_rse_skip_regs(bsp, -sof + ridx); | ||
323 | nat_mask = 1UL << ia64_rse_slot_num(addr); | ||
324 | rnat_addr = ia64_rse_rnat_addr(addr); | ||
325 | |||
326 | if (addr >= bspstore) { | ||
327 | ia64_flushrs(); | ||
328 | ia64_mf(); | ||
329 | bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); | ||
330 | } | ||
331 | *val = *addr; | ||
332 | if (nat) { | ||
333 | if (bspstore < rnat_addr) | ||
334 | *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT) | ||
335 | & nat_mask); | ||
336 | else | ||
337 | *nat = (int)!!((*rnat_addr) & nat_mask); | ||
338 | ia64_setreg(_IA64_REG_AR_RSC, old_rsc); | ||
339 | } | ||
340 | } | ||
341 | |||
/*
 * Write VAL (with NaT bit NAT) into stacked guest register R1 (>= 32)
 * on the VMM register backing store, updating the matching RNAT bit.
 * The RBS manipulation is order-critical; interrupts are disabled
 * while bspstore/rnat are juggled.
 */
void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
				unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	/* Apply register rotation inside the rotating region. */
	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	/* Slot still in the register file: flush, patch, then reload. */
	if (addr >= bspstore) {

		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		/* Clear the old NaT bit wherever it currently lives. */
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr)&(~nat_mask);

		ia64_mf();
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat&(~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
399 | |||
400 | void getreg(unsigned long regnum, unsigned long *val, | ||
401 | int *nat, struct kvm_pt_regs *regs) | ||
402 | { | ||
403 | unsigned long addr, *unat; | ||
404 | if (regnum >= IA64_FIRST_STACKED_GR) { | ||
405 | get_rse_reg(regs, regnum, val, nat); | ||
406 | return; | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * Now look at registers in [0-31] range and init correct UNAT | ||
411 | */ | ||
412 | addr = (unsigned long)regs; | ||
413 | unat = ®s->eml_unat;; | ||
414 | |||
415 | addr += gr_info[regnum]; | ||
416 | |||
417 | *val = *(unsigned long *)addr; | ||
418 | /* | ||
419 | * do it only when requested | ||
420 | */ | ||
421 | if (nat) | ||
422 | *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL; | ||
423 | } | ||
424 | |||
425 | void setreg(unsigned long regnum, unsigned long val, | ||
426 | int nat, struct kvm_pt_regs *regs) | ||
427 | { | ||
428 | unsigned long addr; | ||
429 | unsigned long bitmask; | ||
430 | unsigned long *unat; | ||
431 | |||
432 | /* | ||
433 | * First takes care of stacked registers | ||
434 | */ | ||
435 | if (regnum >= IA64_FIRST_STACKED_GR) { | ||
436 | set_rse_reg(regs, regnum, val, nat); | ||
437 | return; | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * Now look at registers in [0-31] range and init correct UNAT | ||
442 | */ | ||
443 | addr = (unsigned long)regs; | ||
444 | unat = ®s->eml_unat; | ||
445 | /* | ||
446 | * add offset from base of struct | ||
447 | * and do it ! | ||
448 | */ | ||
449 | addr += gr_info[regnum]; | ||
450 | |||
451 | *(unsigned long *)addr = val; | ||
452 | |||
453 | /* | ||
454 | * We need to clear the corresponding UNAT bit to fully emulate the load | ||
455 | * UNAT bit_pos = GR[r3]{8:3} form EAS-2.4 | ||
456 | */ | ||
457 | bitmask = 1UL << ((addr >> 3) & 0x3f); | ||
458 | if (nat) | ||
459 | *unat |= bitmask; | ||
460 | else | ||
461 | *unat &= ~bitmask; | ||
462 | |||
463 | } | ||
464 | |||
465 | u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) | ||
466 | { | ||
467 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
468 | u64 val; | ||
469 | |||
470 | if (!reg) | ||
471 | return 0; | ||
472 | getreg(reg, &val, 0, regs); | ||
473 | return val; | ||
474 | } | ||
475 | |||
476 | void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat) | ||
477 | { | ||
478 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
479 | long sof = (regs->cr_ifs) & 0x7f; | ||
480 | |||
481 | if (!reg) | ||
482 | return; | ||
483 | if (reg >= sof + 32) | ||
484 | return; | ||
485 | setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/ | ||
486 | } | ||
487 | |||
/*
 * Spill the live FP register REGNUM into *fpval. ia64_stf_spill
 * needs a compile-time-constant register number, hence the exhaustive
 * switch; fph rotation is applied first for f32-f127.
 */
void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
				struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
#define CASE_FIXED_FP(reg)			\
	case  (reg) :				\
		ia64_stf_spill(fpval, reg);	\
	break

	switch (regnum) {
		CASE_FIXED_FP(0);
		CASE_FIXED_FP(1);
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);

		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);

		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}
633 | |||
/*
 * Fill the live FP register REGNUM from *fpval. Mirrors getfpreg();
 * cases start at f2 because f0 and f1 are architecturally read-only
 * (0.0 and 1.0).
 */
void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
					struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_ldf_fill(reg, fpval);	\
	break

	switch (regnum) {
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);

		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);

		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
}
777 | |||
/* Spill guest FP register REG into *val. FIXME: handle NATs later. */
void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
					struct ia64_fpreg *val)
{
	getfpreg(reg, val, vcpu_regs(vcpu));
}
785 | |||
/*
 * Fill guest FP register REG from *val; f0 and f1 are architecturally
 * read-only constants and are silently skipped.
 * FIXME: handle NATs later.
 */
void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
					struct ia64_fpreg *val)
{
	if (reg > 1)
		setfpreg(reg, val, vcpu_regs(vcpu));
}
794 | |||
795 | /************************************************************************ | ||
796 | * lsapic timer | ||
797 | ***********************************************************************/ | ||
798 | u64 vcpu_get_itc(struct kvm_vcpu *vcpu) | ||
799 | { | ||
800 | unsigned long guest_itc; | ||
801 | guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC); | ||
802 | |||
803 | if (guest_itc >= VMX(vcpu, last_itc)) { | ||
804 | VMX(vcpu, last_itc) = guest_itc; | ||
805 | return guest_itc; | ||
806 | } else | ||
807 | return VMX(vcpu, last_itc); | ||
808 | } | ||
809 | |||
/* Forward declaration; vcpu_set_itm is defined below. */
static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

/*
 * Set the guest ITC to VAL by recomputing the per-vcpu itc offset.
 * On vcpu 0 the offset is propagated to every vcpu slot.
 */
static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	int i;
	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
	unsigned long vitv = VCPU(vcpu, itv);

	if (vcpu->vcpu_id == 0) {
		/*
		 * NOTE(review): assumes all vcpu structs are laid out
		 * contiguously, VCPU_SIZE apart, starting at vcpu 0 —
		 * confirm against the allocator in kvm-ia64.c.
		 */
		for (i = 0; i < MAX_VCPU_NUM; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	}
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		/* Match time already reached: cancel the timer check. */
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		/* Re-arm the match against the (unchanged) itm value. */
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}

}
835 | |||
836 | static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu) | ||
837 | { | ||
838 | return ((u64)VCPU(vcpu, itm)); | ||
839 | } | ||
840 | |||
841 | static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val) | ||
842 | { | ||
843 | unsigned long vitv = VCPU(vcpu, itv); | ||
844 | VCPU(vcpu, itm) = val; | ||
845 | |||
846 | if (val > vcpu_get_itc(vcpu)) { | ||
847 | VMX(vcpu, itc_check) = 1; | ||
848 | vcpu_unpend_interrupt(vcpu, vitv); | ||
849 | VMX(vcpu, timer_pending) = 0; | ||
850 | } else | ||
851 | VMX(vcpu, itc_check) = 0; | ||
852 | } | ||
853 | |||
/*
 * cr.itv field accessors. Arguments are parenthesized so compound
 * expressions (e.g. ITV_VECTOR(a | b)) are not broken by operator
 * precedence, which the previous definitions were.
 */
#define ITV_VECTOR(itv)		((itv) & 0xff)
#define ITV_IRQ_MASK(itv)	((itv) & (1 << 16))
856 | |||
857 | static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val) | ||
858 | { | ||
859 | VCPU(vcpu, itv) = val; | ||
860 | if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) { | ||
861 | vcpu_pend_interrupt(vcpu, ITV_VECTOR(val)); | ||
862 | vcpu->arch.timer_pending = 0; | ||
863 | } | ||
864 | } | ||
865 | |||
866 | static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val) | ||
867 | { | ||
868 | int vec; | ||
869 | |||
870 | vec = highest_inservice_irq(vcpu); | ||
871 | if (vec == NULL_VECTOR) | ||
872 | return; | ||
873 | VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63)); | ||
874 | VCPU(vcpu, eoi) = 0; | ||
875 | vcpu->arch.irq_new_pending = 1; | ||
876 | |||
877 | } | ||
878 | |||
879 | /* See Table 5-8 in SDM vol2 for the definition */ | ||
880 | int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice) | ||
881 | { | ||
882 | union ia64_tpr vtpr; | ||
883 | |||
884 | vtpr.val = VCPU(vcpu, tpr); | ||
885 | |||
886 | if (h_inservice == NMI_VECTOR) | ||
887 | return IRQ_MASKED_BY_INSVC; | ||
888 | |||
889 | if (h_pending == NMI_VECTOR) { | ||
890 | /* Non Maskable Interrupt */ | ||
891 | return IRQ_NO_MASKED; | ||
892 | } | ||
893 | |||
894 | if (h_inservice == ExtINT_VECTOR) | ||
895 | return IRQ_MASKED_BY_INSVC; | ||
896 | |||
897 | if (h_pending == ExtINT_VECTOR) { | ||
898 | if (vtpr.mmi) { | ||
899 | /* mask all external IRQ */ | ||
900 | return IRQ_MASKED_BY_VTPR; | ||
901 | } else | ||
902 | return IRQ_NO_MASKED; | ||
903 | } | ||
904 | |||
905 | if (is_higher_irq(h_pending, h_inservice)) { | ||
906 | if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4))) | ||
907 | return IRQ_NO_MASKED; | ||
908 | else | ||
909 | return IRQ_MASKED_BY_VTPR; | ||
910 | } else { | ||
911 | return IRQ_MASKED_BY_INSVC; | ||
912 | } | ||
913 | } | ||
914 | |||
915 | void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec) | ||
916 | { | ||
917 | long spsr; | ||
918 | int ret; | ||
919 | |||
920 | local_irq_save(spsr); | ||
921 | ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0])); | ||
922 | local_irq_restore(spsr); | ||
923 | |||
924 | vcpu->arch.irq_new_pending = 1; | ||
925 | } | ||
926 | |||
927 | void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec) | ||
928 | { | ||
929 | long spsr; | ||
930 | int ret; | ||
931 | |||
932 | local_irq_save(spsr); | ||
933 | ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0])); | ||
934 | local_irq_restore(spsr); | ||
935 | if (ret) { | ||
936 | vcpu->arch.irq_new_pending = 1; | ||
937 | wmb(); | ||
938 | } | ||
939 | } | ||
940 | |||
941 | void update_vhpi(struct kvm_vcpu *vcpu, int vec) | ||
942 | { | ||
943 | u64 vhpi; | ||
944 | |||
945 | if (vec == NULL_VECTOR) | ||
946 | vhpi = 0; | ||
947 | else if (vec == NMI_VECTOR) | ||
948 | vhpi = 32; | ||
949 | else if (vec == ExtINT_VECTOR) | ||
950 | vhpi = 16; | ||
951 | else | ||
952 | vhpi = vec >> 4; | ||
953 | |||
954 | VCPU(vcpu, vhpi) = vhpi; | ||
955 | if (VCPU(vcpu, vac).a_int) | ||
956 | ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, | ||
957 | (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0); | ||
958 | } | ||
959 | |||
/*
 * Emulate a read of cr.ivr: return the highest pending unmasked
 * vector, moving it from IRR to in-service, or the spurious vector
 * when nothing is deliverable.
 */
u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		/* Nothing deliverable; clear any stale VHPI value. */
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		/* TPR-masked: leave it pending but record it in VHPI. */
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	/* Deliver: mark in-service and clear the IRR bit. */
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return (u64)vec;
}
980 | |||
981 | /************************************************************************** | ||
982 | Privileged operation emulation routines | ||
983 | **************************************************************************/ | ||
/*
 * Emulate the thash instruction: compute the guest VHPT entry
 * address for @vadr.  Long format (pta.vf set) delegates to the VSA
 * hashing service; short format builds the address from the region
 * bits, the page-index offset and the PTA base.
 */
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	/* 8-byte entry index of the page within the hash window. */
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
	} else {
		/* region bits | offset | PTA base aligned to pta.size */
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
	}
	return pval;
}
1003 | |||
1004 | u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr) | ||
1005 | { | ||
1006 | union ia64_rr vrr; | ||
1007 | union ia64_pta vpta; | ||
1008 | u64 pval; | ||
1009 | |||
1010 | vpta.val = vcpu_get_pta(vcpu); | ||
1011 | vrr.val = vcpu_get_rr(vcpu, vadr); | ||
1012 | if (vpta.vf) { | ||
1013 | pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val, | ||
1014 | 0, 0, 0, 0, 0); | ||
1015 | } else | ||
1016 | pval = 1; | ||
1017 | |||
1018 | return pval; | ||
1019 | } | ||
1020 | |||
1021 | u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr) | ||
1022 | { | ||
1023 | struct thash_data *data; | ||
1024 | union ia64_pta vpta; | ||
1025 | u64 key; | ||
1026 | |||
1027 | vpta.val = vcpu_get_pta(vcpu); | ||
1028 | if (vpta.vf == 0) { | ||
1029 | key = 1; | ||
1030 | return key; | ||
1031 | } | ||
1032 | data = vtlb_lookup(vcpu, vadr, D_TLB); | ||
1033 | if (!data || !data->p) | ||
1034 | key = 1; | ||
1035 | else | ||
1036 | key = data->key; | ||
1037 | |||
1038 | return key; | ||
1039 | } | ||
1040 | |||
1041 | |||
1042 | |||
1043 | void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) | ||
1044 | { | ||
1045 | unsigned long thash, vadr; | ||
1046 | |||
1047 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1048 | thash = vcpu_thash(vcpu, vadr); | ||
1049 | vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); | ||
1050 | } | ||
1051 | |||
1052 | |||
1053 | void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) | ||
1054 | { | ||
1055 | unsigned long tag, vadr; | ||
1056 | |||
1057 | vadr = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1058 | tag = vcpu_ttag(vcpu, vadr); | ||
1059 | vcpu_set_gr(vcpu, inst.M46.r1, tag, 0); | ||
1060 | } | ||
1061 | |||
/*
 * Emulate the tpa instruction: translate guest virtual @vadr to a
 * guest physical address in *padr.  Lookup order: machine VHPT
 * first, then the guest software TLB.  On a miss or a special page
 * the corresponding fault is injected into the guest and IA64_FAULT
 * is returned; IA64_NO_FAULT means *padr is valid.
 */
int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	/* NOTE(review): regs is assigned but never used below. */
	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	/* Build the ISR image used if a fault is delivered below. */
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	visr.na = 1;

	/* 1) Machine VHPT hit. */
	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			/* gpaddr holds the guest physical frame. */
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	/* 2) Guest software TLB hit. */
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else{
			/* ppn is in 4K units; rescale to the page size. */
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}
	/*
	 * 3) Miss everywhere: inject alt-dtlb or vhpt-translation
	 * fault; nested_dtlb when interruption collection is off.
	 */
	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		} else{
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	}

	/* Unreachable: every path above returns. */
	return IA64_NO_FAULT;
}
1132 | |||
1133 | |||
1134 | int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) | ||
1135 | { | ||
1136 | unsigned long r1, r3; | ||
1137 | |||
1138 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1139 | |||
1140 | if (vcpu_tpa(vcpu, r3, &r1)) | ||
1141 | return IA64_FAULT; | ||
1142 | |||
1143 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); | ||
1144 | return(IA64_NO_FAULT); | ||
1145 | } | ||
1146 | |||
1147 | void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst) | ||
1148 | { | ||
1149 | unsigned long r1, r3; | ||
1150 | |||
1151 | r3 = vcpu_get_gr(vcpu, inst.M46.r3); | ||
1152 | r1 = vcpu_tak(vcpu, r3); | ||
1153 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); | ||
1154 | } | ||
1155 | |||
1156 | |||
1157 | /************************************ | ||
1158 | * Insert/Purge translation register/cache | ||
1159 | ************************************/ | ||
/* Emulate itc.i: insert an instruction-side translation through the
 * software TLB's combined purge-and-insert path. */
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}
1164 | |||
/* Emulate itc.d: insert a data-side translation through the
 * software TLB's combined purge-and-insert path. */
void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}
1169 | |||
1170 | void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) | ||
1171 | { | ||
1172 | u64 ps, va, rid; | ||
1173 | struct thash_data *p_itr; | ||
1174 | |||
1175 | ps = itir_ps(itir); | ||
1176 | va = PAGEALIGN(ifa, ps); | ||
1177 | pte &= ~PAGE_FLAGS_RV_MASK; | ||
1178 | rid = vcpu_get_rr(vcpu, ifa); | ||
1179 | rid = rid & RR_RID_MASK; | ||
1180 | p_itr = (struct thash_data *)&vcpu->arch.itrs[slot]; | ||
1181 | vcpu_set_tr(p_itr, pte, itir, va, rid); | ||
1182 | vcpu_quick_region_set(VMX(vcpu, itr_regions), va); | ||
1183 | } | ||
1184 | |||
1185 | |||
1186 | void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) | ||
1187 | { | ||
1188 | u64 gpfn; | ||
1189 | u64 ps, va, rid; | ||
1190 | struct thash_data *p_dtr; | ||
1191 | |||
1192 | ps = itir_ps(itir); | ||
1193 | va = PAGEALIGN(ifa, ps); | ||
1194 | pte &= ~PAGE_FLAGS_RV_MASK; | ||
1195 | |||
1196 | if (ps != _PAGE_SIZE_16M) | ||
1197 | thash_purge_entries(vcpu, va, ps); | ||
1198 | gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; | ||
1199 | if (__gpfn_is_io(gpfn)) | ||
1200 | pte |= VTLB_PTE_IO; | ||
1201 | rid = vcpu_get_rr(vcpu, va); | ||
1202 | rid = rid & RR_RID_MASK; | ||
1203 | p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot]; | ||
1204 | vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot], | ||
1205 | pte, itir, va, rid); | ||
1206 | vcpu_quick_region_set(VMX(vcpu, dtr_regions), va); | ||
1207 | } | ||
1208 | |||
1209 | void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) | ||
1210 | { | ||
1211 | int index; | ||
1212 | u64 va; | ||
1213 | |||
1214 | va = PAGEALIGN(ifa, ps); | ||
1215 | while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0) | ||
1216 | vcpu->arch.dtrs[index].page_flags = 0; | ||
1217 | |||
1218 | thash_purge_entries(vcpu, va, ps); | ||
1219 | } | ||
1220 | |||
1221 | void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) | ||
1222 | { | ||
1223 | int index; | ||
1224 | u64 va; | ||
1225 | |||
1226 | va = PAGEALIGN(ifa, ps); | ||
1227 | while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0) | ||
1228 | vcpu->arch.itrs[index].page_flags = 0; | ||
1229 | |||
1230 | thash_purge_entries(vcpu, va, ps); | ||
1231 | } | ||
1232 | |||
1233 | void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps) | ||
1234 | { | ||
1235 | va = PAGEALIGN(va, ps); | ||
1236 | thash_purge_entries(vcpu, va, ps); | ||
1237 | } | ||
1238 | |||
/* Emulate ptc.e: flush the entire software TLB; the address operand
 * @va does not select anything here. */
void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}
1243 | |||
/*
 * Emulate ptc.ga (global purge).  The request is exported through
 * exit_data (EXIT_REASON_PTC_G) via vmm_transition() — presumably so
 * the host side propagates the purge to the other vcpus; confirm in
 * kvm-ia64.c.  The local purge is then done here.  Local interrupts
 * stay disabled for the whole sequence so exit_data cannot be
 * overwritten mid-flight.
 */
void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do the local purge here. */
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}
1259 | |||
1260 | |||
/* ptc.g is handled exactly like ptc.ga. */
void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}
1265 | |||
1266 | void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst) | ||
1267 | { | ||
1268 | unsigned long ifa; | ||
1269 | |||
1270 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1271 | vcpu_ptc_e(vcpu, ifa); | ||
1272 | } | ||
1273 | |||
1274 | void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst) | ||
1275 | { | ||
1276 | unsigned long ifa, itir; | ||
1277 | |||
1278 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1279 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1280 | vcpu_ptc_g(vcpu, ifa, itir_ps(itir)); | ||
1281 | } | ||
1282 | |||
1283 | void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst) | ||
1284 | { | ||
1285 | unsigned long ifa, itir; | ||
1286 | |||
1287 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1288 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1289 | vcpu_ptc_ga(vcpu, ifa, itir_ps(itir)); | ||
1290 | } | ||
1291 | |||
1292 | void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst) | ||
1293 | { | ||
1294 | unsigned long ifa, itir; | ||
1295 | |||
1296 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1297 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1298 | vcpu_ptc_l(vcpu, ifa, itir_ps(itir)); | ||
1299 | } | ||
1300 | |||
1301 | void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst) | ||
1302 | { | ||
1303 | unsigned long ifa, itir; | ||
1304 | |||
1305 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1306 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1307 | vcpu_ptr_d(vcpu, ifa, itir_ps(itir)); | ||
1308 | } | ||
1309 | |||
1310 | void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst) | ||
1311 | { | ||
1312 | unsigned long ifa, itir; | ||
1313 | |||
1314 | ifa = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1315 | itir = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1316 | vcpu_ptr_i(vcpu, ifa, itir_ps(itir)); | ||
1317 | } | ||
1318 | |||
1319 | void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst) | ||
1320 | { | ||
1321 | unsigned long itir, ifa, pte, slot; | ||
1322 | |||
1323 | slot = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1324 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1325 | itir = vcpu_get_itir(vcpu); | ||
1326 | ifa = vcpu_get_ifa(vcpu); | ||
1327 | vcpu_itr_d(vcpu, slot, pte, itir, ifa); | ||
1328 | } | ||
1329 | |||
1330 | |||
1331 | |||
1332 | void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst) | ||
1333 | { | ||
1334 | unsigned long itir, ifa, pte, slot; | ||
1335 | |||
1336 | slot = vcpu_get_gr(vcpu, inst.M45.r3); | ||
1337 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1338 | itir = vcpu_get_itir(vcpu); | ||
1339 | ifa = vcpu_get_ifa(vcpu); | ||
1340 | vcpu_itr_i(vcpu, slot, pte, itir, ifa); | ||
1341 | } | ||
1342 | |||
1343 | void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst) | ||
1344 | { | ||
1345 | unsigned long itir, ifa, pte; | ||
1346 | |||
1347 | itir = vcpu_get_itir(vcpu); | ||
1348 | ifa = vcpu_get_ifa(vcpu); | ||
1349 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1350 | vcpu_itc_d(vcpu, pte, itir, ifa); | ||
1351 | } | ||
1352 | |||
1353 | void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst) | ||
1354 | { | ||
1355 | unsigned long itir, ifa, pte; | ||
1356 | |||
1357 | itir = vcpu_get_itir(vcpu); | ||
1358 | ifa = vcpu_get_ifa(vcpu); | ||
1359 | pte = vcpu_get_gr(vcpu, inst.M45.r2); | ||
1360 | vcpu_itc_i(vcpu, pte, itir, ifa); | ||
1361 | } | ||
1362 | |||
1363 | /************************************* | ||
1364 | * Moves to semi-privileged registers | ||
1365 | *************************************/ | ||
1366 | |||
1367 | void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst) | ||
1368 | { | ||
1369 | unsigned long imm; | ||
1370 | |||
1371 | if (inst.M30.s) | ||
1372 | imm = -inst.M30.imm; | ||
1373 | else | ||
1374 | imm = inst.M30.imm; | ||
1375 | |||
1376 | vcpu_set_itc(vcpu, imm); | ||
1377 | } | ||
1378 | |||
1379 | void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | ||
1380 | { | ||
1381 | unsigned long r2; | ||
1382 | |||
1383 | r2 = vcpu_get_gr(vcpu, inst.M29.r2); | ||
1384 | vcpu_set_itc(vcpu, r2); | ||
1385 | } | ||
1386 | |||
1387 | |||
1388 | void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | ||
1389 | { | ||
1390 | unsigned long r1; | ||
1391 | |||
1392 | r1 = vcpu_get_itc(vcpu); | ||
1393 | vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); | ||
1394 | } | ||
/**************************************************************************
 VCPU protection key register access routines
 **************************************************************************/
1398 | |||
/* Read protection key register @reg straight from the real CPU. */
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return (unsigned long)ia64_get_pkr(reg);
}
1403 | |||
/* Write protection key register @reg on the real CPU; guest PKRs
 * are passed through directly here. */
void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}
1408 | |||
1409 | |||
1410 | unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa) | ||
1411 | { | ||
1412 | union ia64_rr rr, rr1; | ||
1413 | |||
1414 | rr.val = vcpu_get_rr(vcpu, ifa); | ||
1415 | rr1.val = 0; | ||
1416 | rr1.ps = rr.ps; | ||
1417 | rr1.rid = rr.rid; | ||
1418 | return (rr1.val); | ||
1419 | } | ||
1420 | |||
1421 | |||
1422 | |||
1423 | /******************************** | ||
1424 | * Moves to privileged registers | ||
1425 | ********************************/ | ||
1426 | unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg, | ||
1427 | unsigned long val) | ||
1428 | { | ||
1429 | union ia64_rr oldrr, newrr; | ||
1430 | unsigned long rrval; | ||
1431 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
1432 | unsigned long psr; | ||
1433 | |||
1434 | oldrr.val = vcpu_get_rr(vcpu, reg); | ||
1435 | newrr.val = val; | ||
1436 | vcpu->arch.vrr[reg >> VRN_SHIFT] = val; | ||
1437 | |||
1438 | switch ((unsigned long)(reg >> VRN_SHIFT)) { | ||
1439 | case VRN6: | ||
1440 | vcpu->arch.vmm_rr = vrrtomrr(val); | ||
1441 | local_irq_save(psr); | ||
1442 | p->exit_reason = EXIT_REASON_SWITCH_RR6; | ||
1443 | vmm_transition(vcpu); | ||
1444 | local_irq_restore(psr); | ||
1445 | break; | ||
1446 | case VRN4: | ||
1447 | rrval = vrrtomrr(val); | ||
1448 | vcpu->arch.metaphysical_saved_rr4 = rrval; | ||
1449 | if (!is_physical_mode(vcpu)) | ||
1450 | ia64_set_rr(reg, rrval); | ||
1451 | break; | ||
1452 | case VRN0: | ||
1453 | rrval = vrrtomrr(val); | ||
1454 | vcpu->arch.metaphysical_saved_rr0 = rrval; | ||
1455 | if (!is_physical_mode(vcpu)) | ||
1456 | ia64_set_rr(reg, rrval); | ||
1457 | break; | ||
1458 | default: | ||
1459 | ia64_set_rr(reg, vrrtomrr(val)); | ||
1460 | break; | ||
1461 | } | ||
1462 | |||
1463 | return (IA64_NO_FAULT); | ||
1464 | } | ||
1465 | |||
1466 | |||
1467 | |||
1468 | void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1469 | { | ||
1470 | unsigned long r3, r2; | ||
1471 | |||
1472 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1473 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1474 | vcpu_set_rr(vcpu, r3, r2); | ||
1475 | } | ||
1476 | |||
/* mov dbr[r3] = r2: intentionally a no-op — no guest data
 * breakpoint register state is maintained here. */
void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}
1480 | |||
/* mov ibr[r3] = r2: intentionally a no-op — no guest instruction
 * breakpoint register state is maintained here. */
void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}
1484 | |||
1485 | void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst) | ||
1486 | { | ||
1487 | unsigned long r3, r2; | ||
1488 | |||
1489 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1490 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1491 | vcpu_set_pmc(vcpu, r3, r2); | ||
1492 | } | ||
1493 | |||
1494 | void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst) | ||
1495 | { | ||
1496 | unsigned long r3, r2; | ||
1497 | |||
1498 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1499 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1500 | vcpu_set_pmd(vcpu, r3, r2); | ||
1501 | } | ||
1502 | |||
1503 | void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1504 | { | ||
1505 | u64 r3, r2; | ||
1506 | |||
1507 | r3 = vcpu_get_gr(vcpu, inst.M42.r3); | ||
1508 | r2 = vcpu_get_gr(vcpu, inst.M42.r2); | ||
1509 | vcpu_set_pkr(vcpu, r3, r2); | ||
1510 | } | ||
1511 | |||
1512 | |||
1513 | |||
1514 | void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1515 | { | ||
1516 | unsigned long r3, r1; | ||
1517 | |||
1518 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1519 | r1 = vcpu_get_rr(vcpu, r3); | ||
1520 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1521 | } | ||
1522 | |||
1523 | void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1524 | { | ||
1525 | unsigned long r3, r1; | ||
1526 | |||
1527 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1528 | r1 = vcpu_get_pkr(vcpu, r3); | ||
1529 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1530 | } | ||
1531 | |||
1532 | void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1533 | { | ||
1534 | unsigned long r3, r1; | ||
1535 | |||
1536 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1537 | r1 = vcpu_get_dbr(vcpu, r3); | ||
1538 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1539 | } | ||
1540 | |||
1541 | void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1542 | { | ||
1543 | unsigned long r3, r1; | ||
1544 | |||
1545 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1546 | r1 = vcpu_get_ibr(vcpu, r3); | ||
1547 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1548 | } | ||
1549 | |||
1550 | void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst) | ||
1551 | { | ||
1552 | unsigned long r3, r1; | ||
1553 | |||
1554 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1555 | r1 = vcpu_get_pmc(vcpu, r3); | ||
1556 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1557 | } | ||
1558 | |||
1559 | |||
/*
 * Read CPUID register @reg, passing through the host value.
 * cpuid[3] bits 0-7 give the index of the last implemented CPUID
 * register; out-of-range reads return 0.
 */
unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	unsigned long last = ia64_get_cpuid(3) & 0xff;

	return (reg > last) ? 0 : ia64_get_cpuid(reg);
}
1568 | |||
1569 | void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst) | ||
1570 | { | ||
1571 | unsigned long r3, r1; | ||
1572 | |||
1573 | r3 = vcpu_get_gr(vcpu, inst.M43.r3); | ||
1574 | r1 = vcpu_get_cpuid(vcpu, r3); | ||
1575 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | ||
1576 | } | ||
1577 | |||
/* Update the virtual TPR and request interrupt re-evaluation, since
 * a TPR change can unmask a pending interrupt. */
void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}
1583 | |||
1584 | unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1585 | { | ||
1586 | unsigned long r2; | ||
1587 | |||
1588 | r2 = vcpu_get_gr(vcpu, inst.M32.r2); | ||
1589 | VCPU(vcpu, vcr[inst.M32.cr3]) = r2; | ||
1590 | |||
1591 | switch (inst.M32.cr3) { | ||
1592 | case 0: | ||
1593 | vcpu_set_dcr(vcpu, r2); | ||
1594 | break; | ||
1595 | case 1: | ||
1596 | vcpu_set_itm(vcpu, r2); | ||
1597 | break; | ||
1598 | case 66: | ||
1599 | vcpu_set_tpr(vcpu, r2); | ||
1600 | break; | ||
1601 | case 67: | ||
1602 | vcpu_set_eoi(vcpu, r2); | ||
1603 | break; | ||
1604 | default: | ||
1605 | break; | ||
1606 | } | ||
1607 | |||
1608 | return 0; | ||
1609 | } | ||
1610 | |||
1611 | |||
1612 | unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1613 | { | ||
1614 | unsigned long tgt = inst.M33.r1; | ||
1615 | unsigned long val; | ||
1616 | |||
1617 | switch (inst.M33.cr3) { | ||
1618 | case 65: | ||
1619 | val = vcpu_get_ivr(vcpu); | ||
1620 | vcpu_set_gr(vcpu, tgt, val, 0); | ||
1621 | break; | ||
1622 | |||
1623 | case 67: | ||
1624 | vcpu_set_gr(vcpu, tgt, 0L, 0); | ||
1625 | break; | ||
1626 | default: | ||
1627 | val = VCPU(vcpu, vcr[inst.M33.cr3]); | ||
1628 | vcpu_set_gr(vcpu, tgt, val, 0); | ||
1629 | break; | ||
1630 | } | ||
1631 | |||
1632 | return 0; | ||
1633 | } | ||
1634 | |||
1635 | |||
1636 | |||
/*
 * Install a new guest PSR value: update the virtual PSR, mirror the
 * non-virtualized bits into the machine cr.ipsr, and handle a
 * possible metaphysical/virtual addressing mode switch.
 */
void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{

	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/* Only guests with vpsr.pk = 0, vpsr.is = 0 (and vpsr.vm = 0)
	 * are supported; anything else is fatal. */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu);

	/*
	 * The id/da/dd/ss/ed/ia bits are never kept in the virtual
	 * PSR: they self-clear after successful execution of each
	 * instruction, so they are masked out here.
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i 0->1: interrupts were just enabled, so ask
		 * for a re-check of pending interrupts. */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All virtual PSR bits go through to the machine PSR except
	 * ic/i/dt/si/rt/mc/it/bn/vm, which the VMM controls.
	 */
	mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	/* dt/rt/it changes may require a metaphysical mode switch. */
	check_mm_mode_switch(vcpu, old_psr, new_psr);

	return ;
}
1685 | |||
1686 | unsigned long vcpu_cover(struct kvm_vcpu *vcpu) | ||
1687 | { | ||
1688 | struct ia64_psr vpsr; | ||
1689 | |||
1690 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1691 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
1692 | |||
1693 | if (!vpsr.ic) | ||
1694 | VCPU(vcpu, ifs) = regs->cr_ifs; | ||
1695 | regs->cr_ifs = IA64_IFS_V; | ||
1696 | return (IA64_NO_FAULT); | ||
1697 | } | ||
1698 | |||
1699 | |||
1700 | |||
1701 | /************************************************************************** | ||
1702 | VCPU banked general register access routines | ||
1703 | **************************************************************************/ | ||
/*
 * Swap the UNAT (NaT tracking) bits for r16-r31 when switching from
 * bank 1 to bank 0: a 16-bit field is moved from the live pt_regs
 * UNAT into the bank-1 shadow, and the bank-0 shadow's field is
 * deposited back into the pt_regs UNAT.
 * NOTE(review): the extr.u/dep pairs each copy one 16-bit field at
 * the VMM_PT_REGS_R16_SLOT position — confirm against the pt_regs
 * UNAT layout before modifying.
 */
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3,%6,16;;\n"		\
				"dep %1 = %0, %1, 0, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 16, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
				"r"(*runat), "r"(b1unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)
1717 | |||
/*
 * Emulate bsw.0: if the vcpu is currently in bank 1 (vpsr.bn set),
 * save the live r16-r31 into the bank-1 shadow (vgr), load the
 * bank-0 shadow (vbgr) into the live registers, swap the matching
 * UNAT bits, and clear vpsr.bn.  A no-op when already in bank 0.
 */
void vcpu_bsw0(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;			/* live r16-r31 */
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);	/* bank-0 shadow */
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);	/* bank-1 shadow */
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);


	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
		for (i = 0; i < 16; i++) {
			*b1++ = *r;	/* save live regs to bank 1 */
			*r++ = *b0++;	/* load bank 0 into live regs */
		}
		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
	}
}
1741 | |||
/*
 * Swap the UNAT (NaT tracking) bits for r16-r31 when switching from
 * bank 0 to bank 1 — the mirror image of vcpu_bsw0_unat().
 * NOTE(review): the extr.u/dep pairs each copy one 16-bit field at
 * the VMM_PT_REGS_R16_SLOT position — confirm against the pt_regs
 * UNAT layout before modifying.
 */
#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"	\
				"dep %1 = %0, %1, 16, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 0, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
				"r"(*runat), "r"(b0unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)
1754 | |||
/*
 * Emulate bsw.1: if the vcpu is currently in bank 0 (vpsr.bn clear),
 * save the live r16-r31 into the bank-0 shadow (vbgr), load the
 * bank-1 shadow (vgr) into the live registers, swap the matching
 * UNAT bits, and set vpsr.bn.  A no-op when already in bank 1.
 */
void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;			/* live r16-r31 */
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);	/* bank-0 shadow */
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);	/* bank-1 shadow */
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
			*b0++ = *r;	/* save live regs to bank 0 */
			*r++ = *b1++;	/* load bank 1 into live regs */
		}
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	}
}
1776 | |||
1777 | |||
1778 | |||
1779 | |||
/*
 * Emulate rfi: switch the register bank to match the saved ipsr.bn,
 * install the saved PSR, restore cr.ifs (only when its valid bit,
 * bit 63, is set) and resume at the saved iip.
 */
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}
1796 | |||
1797 | |||
/*
 * The virtual PSR cannot track certain guest PSR bits on its own;
 * this function reconstructs the complete guest view of the PSR.
 */
1802 | |||
1803 | unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu) | ||
1804 | { | ||
1805 | unsigned long mask; | ||
1806 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1807 | |||
1808 | mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | | ||
1809 | IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI; | ||
1810 | return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask); | ||
1811 | } | ||
1812 | |||
1813 | void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst) | ||
1814 | { | ||
1815 | unsigned long vpsr; | ||
1816 | unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21) | ||
1817 | | inst.M44.imm; | ||
1818 | |||
1819 | vpsr = vcpu_get_psr(vcpu); | ||
1820 | vpsr &= (~imm24); | ||
1821 | vcpu_set_psr(vcpu, vpsr); | ||
1822 | } | ||
1823 | |||
1824 | void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst) | ||
1825 | { | ||
1826 | unsigned long vpsr; | ||
1827 | unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | ||
1828 | | inst.M44.imm; | ||
1829 | |||
1830 | vpsr = vcpu_get_psr(vcpu); | ||
1831 | vpsr |= imm24; | ||
1832 | vcpu_set_psr(vcpu, vpsr); | ||
1833 | } | ||
1834 | |||
/* Generate Mask
 * Parameter:
 *   bit -- starting bit
 *   len -- how many bits
 * Expands to a 64-bit value with bits [bit, bit+len) set, built by
 * depositing -1 over a zero base with the dep instruction.
 */
#define MASK(bit,len)					\
({							\
	__u64 ret;					\
							\
	__asm __volatile("dep %0=-1, r0, %1, %2"	\
		: "=r" (ret):				\
		  "M" (bit),				\
		  "M" (len));				\
	ret;						\
})
1850 | |||
/* Move-to-PSR.l: only the low 32 bits come from @val; the upper
 * half of the current guest PSR is preserved. */
void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
}
1856 | |||
1857 | void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst) | ||
1858 | { | ||
1859 | unsigned long val; | ||
1860 | |||
1861 | val = vcpu_get_gr(vcpu, inst.M35.r2); | ||
1862 | vcpu_set_psr_l(vcpu, val); | ||
1863 | } | ||
1864 | |||
/*
 * Handler for mov r1 = psr: expose only PSR bits 0-31 plus bits
 * 35-36 to the guest; everything else reads as zero.
 * NOTE(review): confirm which PSR fields bits 35-36 are meant to
 * expose here.
 */
void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
1873 | |||
1874 | void vcpu_increment_iip(struct kvm_vcpu *vcpu) | ||
1875 | { | ||
1876 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1877 | struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; | ||
1878 | if (ipsr->ri == 2) { | ||
1879 | ipsr->ri = 0; | ||
1880 | regs->cr_iip += 16; | ||
1881 | } else | ||
1882 | ipsr->ri++; | ||
1883 | } | ||
1884 | |||
1885 | void vcpu_decrement_iip(struct kvm_vcpu *vcpu) | ||
1886 | { | ||
1887 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | ||
1888 | struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; | ||
1889 | |||
1890 | if (ipsr->ri == 0) { | ||
1891 | ipsr->ri = 2; | ||
1892 | regs->cr_iip -= 16; | ||
1893 | } else | ||
1894 | ipsr->ri--; | ||
1895 | } | ||
1896 | |||
/** Emulate a privileged operation.
 *
 * @param vcpu   virtual cpu
 * @param cause  the reason for the virtualization fault
 * @param opcode the instruction encoding that caused the fault
 */
1904 | |||
1905 | void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs) | ||
1906 | { | ||
1907 | unsigned long status, cause, opcode ; | ||
1908 | INST64 inst; | ||
1909 | |||
1910 | status = IA64_NO_FAULT; | ||
1911 | cause = VMX(vcpu, cause); | ||
1912 | opcode = VMX(vcpu, opcode); | ||
1913 | inst.inst = opcode; | ||
1914 | /* | ||
1915 | * Switch to actual virtual rid in rr0 and rr4, | ||
1916 | * which is required by some tlb related instructions. | ||
1917 | */ | ||
1918 | prepare_if_physical_mode(vcpu); | ||
1919 | |||
1920 | switch (cause) { | ||
1921 | case EVENT_RSM: | ||
1922 | kvm_rsm(vcpu, inst); | ||
1923 | break; | ||
1924 | case EVENT_SSM: | ||
1925 | kvm_ssm(vcpu, inst); | ||
1926 | break; | ||
1927 | case EVENT_MOV_TO_PSR: | ||
1928 | kvm_mov_to_psr(vcpu, inst); | ||
1929 | break; | ||
1930 | case EVENT_MOV_FROM_PSR: | ||
1931 | kvm_mov_from_psr(vcpu, inst); | ||
1932 | break; | ||
1933 | case EVENT_MOV_FROM_CR: | ||
1934 | kvm_mov_from_cr(vcpu, inst); | ||
1935 | break; | ||
1936 | case EVENT_MOV_TO_CR: | ||
1937 | kvm_mov_to_cr(vcpu, inst); | ||
1938 | break; | ||
1939 | case EVENT_BSW_0: | ||
1940 | vcpu_bsw0(vcpu); | ||
1941 | break; | ||
1942 | case EVENT_BSW_1: | ||
1943 | vcpu_bsw1(vcpu); | ||
1944 | break; | ||
1945 | case EVENT_COVER: | ||
1946 | vcpu_cover(vcpu); | ||
1947 | break; | ||
1948 | case EVENT_RFI: | ||
1949 | vcpu_rfi(vcpu); | ||
1950 | break; | ||
1951 | case EVENT_ITR_D: | ||
1952 | kvm_itr_d(vcpu, inst); | ||
1953 | break; | ||
1954 | case EVENT_ITR_I: | ||
1955 | kvm_itr_i(vcpu, inst); | ||
1956 | break; | ||
1957 | case EVENT_PTR_D: | ||
1958 | kvm_ptr_d(vcpu, inst); | ||
1959 | break; | ||
1960 | case EVENT_PTR_I: | ||
1961 | kvm_ptr_i(vcpu, inst); | ||
1962 | break; | ||
1963 | case EVENT_ITC_D: | ||
1964 | kvm_itc_d(vcpu, inst); | ||
1965 | break; | ||
1966 | case EVENT_ITC_I: | ||
1967 | kvm_itc_i(vcpu, inst); | ||
1968 | break; | ||
1969 | case EVENT_PTC_L: | ||
1970 | kvm_ptc_l(vcpu, inst); | ||
1971 | break; | ||
1972 | case EVENT_PTC_G: | ||
1973 | kvm_ptc_g(vcpu, inst); | ||
1974 | break; | ||
1975 | case EVENT_PTC_GA: | ||
1976 | kvm_ptc_ga(vcpu, inst); | ||
1977 | break; | ||
1978 | case EVENT_PTC_E: | ||
1979 | kvm_ptc_e(vcpu, inst); | ||
1980 | break; | ||
1981 | case EVENT_MOV_TO_RR: | ||
1982 | kvm_mov_to_rr(vcpu, inst); | ||
1983 | break; | ||
1984 | case EVENT_MOV_FROM_RR: | ||
1985 | kvm_mov_from_rr(vcpu, inst); | ||
1986 | break; | ||
1987 | case EVENT_THASH: | ||
1988 | kvm_thash(vcpu, inst); | ||
1989 | break; | ||
1990 | case EVENT_TTAG: | ||
1991 | kvm_ttag(vcpu, inst); | ||
1992 | break; | ||
1993 | case EVENT_TPA: | ||
1994 | status = kvm_tpa(vcpu, inst); | ||
1995 | break; | ||
1996 | case EVENT_TAK: | ||
1997 | kvm_tak(vcpu, inst); | ||
1998 | break; | ||
1999 | case EVENT_MOV_TO_AR_IMM: | ||
2000 | kvm_mov_to_ar_imm(vcpu, inst); | ||
2001 | break; | ||
2002 | case EVENT_MOV_TO_AR: | ||
2003 | kvm_mov_to_ar_reg(vcpu, inst); | ||
2004 | break; | ||
2005 | case EVENT_MOV_FROM_AR: | ||
2006 | kvm_mov_from_ar_reg(vcpu, inst); | ||
2007 | break; | ||
2008 | case EVENT_MOV_TO_DBR: | ||
2009 | kvm_mov_to_dbr(vcpu, inst); | ||
2010 | break; | ||
2011 | case EVENT_MOV_TO_IBR: | ||
2012 | kvm_mov_to_ibr(vcpu, inst); | ||
2013 | break; | ||
2014 | case EVENT_MOV_TO_PMC: | ||
2015 | kvm_mov_to_pmc(vcpu, inst); | ||
2016 | break; | ||
2017 | case EVENT_MOV_TO_PMD: | ||
2018 | kvm_mov_to_pmd(vcpu, inst); | ||
2019 | break; | ||
2020 | case EVENT_MOV_TO_PKR: | ||
2021 | kvm_mov_to_pkr(vcpu, inst); | ||
2022 | break; | ||
2023 | case EVENT_MOV_FROM_DBR: | ||
2024 | kvm_mov_from_dbr(vcpu, inst); | ||
2025 | break; | ||
2026 | case EVENT_MOV_FROM_IBR: | ||
2027 | kvm_mov_from_ibr(vcpu, inst); | ||
2028 | break; | ||
2029 | case EVENT_MOV_FROM_PMC: | ||
2030 | kvm_mov_from_pmc(vcpu, inst); | ||
2031 | break; | ||
2032 | case EVENT_MOV_FROM_PKR: | ||
2033 | kvm_mov_from_pkr(vcpu, inst); | ||
2034 | break; | ||
2035 | case EVENT_MOV_FROM_CPUID: | ||
2036 | kvm_mov_from_cpuid(vcpu, inst); | ||
2037 | break; | ||
2038 | case EVENT_VMSW: | ||
2039 | status = IA64_FAULT; | ||
2040 | break; | ||
2041 | default: | ||
2042 | break; | ||
2043 | }; | ||
2044 | /*Assume all status is NO_FAULT ?*/ | ||
2045 | if (status == IA64_NO_FAULT && cause != EVENT_RFI) | ||
2046 | vcpu_increment_iip(vcpu); | ||
2047 | |||
2048 | recover_if_physical_mode(vcpu); | ||
2049 | } | ||
2050 | |||
2051 | void init_vcpu(struct kvm_vcpu *vcpu) | ||
2052 | { | ||
2053 | int i; | ||
2054 | |||
2055 | vcpu->arch.mode_flags = GUEST_IN_PHY; | ||
2056 | VMX(vcpu, vrr[0]) = 0x38; | ||
2057 | VMX(vcpu, vrr[1]) = 0x38; | ||
2058 | VMX(vcpu, vrr[2]) = 0x38; | ||
2059 | VMX(vcpu, vrr[3]) = 0x38; | ||
2060 | VMX(vcpu, vrr[4]) = 0x38; | ||
2061 | VMX(vcpu, vrr[5]) = 0x38; | ||
2062 | VMX(vcpu, vrr[6]) = 0x38; | ||
2063 | VMX(vcpu, vrr[7]) = 0x38; | ||
2064 | VCPU(vcpu, vpsr) = IA64_PSR_BN; | ||
2065 | VCPU(vcpu, dcr) = 0; | ||
2066 | /* pta.size must not be 0. The minimum is 15 (32k) */ | ||
2067 | VCPU(vcpu, pta) = 15 << 2; | ||
2068 | VCPU(vcpu, itv) = 0x10000; | ||
2069 | VCPU(vcpu, itm) = 0; | ||
2070 | VMX(vcpu, last_itc) = 0; | ||
2071 | |||
2072 | VCPU(vcpu, lid) = VCPU_LID(vcpu); | ||
2073 | VCPU(vcpu, ivr) = 0; | ||
2074 | VCPU(vcpu, tpr) = 0x10000; | ||
2075 | VCPU(vcpu, eoi) = 0; | ||
2076 | VCPU(vcpu, irr[0]) = 0; | ||
2077 | VCPU(vcpu, irr[1]) = 0; | ||
2078 | VCPU(vcpu, irr[2]) = 0; | ||
2079 | VCPU(vcpu, irr[3]) = 0; | ||
2080 | VCPU(vcpu, pmv) = 0x10000; | ||
2081 | VCPU(vcpu, cmcv) = 0x10000; | ||
2082 | VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */ | ||
2083 | VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */ | ||
2084 | update_vhpi(vcpu, NULL_VECTOR); | ||
2085 | VLSAPIC_XTP(vcpu) = 0x80; /* disabled */ | ||
2086 | |||
2087 | for (i = 0; i < 4; i++) | ||
2088 | VLSAPIC_INSVC(vcpu, i) = 0; | ||
2089 | } | ||
2090 | |||
2091 | void kvm_init_all_rr(struct kvm_vcpu *vcpu) | ||
2092 | { | ||
2093 | unsigned long psr; | ||
2094 | |||
2095 | local_irq_save(psr); | ||
2096 | |||
2097 | /* WARNING: not allow co-exist of both virtual mode and physical | ||
2098 | * mode in same region | ||
2099 | */ | ||
2100 | |||
2101 | vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0])); | ||
2102 | vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4])); | ||
2103 | |||
2104 | if (is_physical_mode(vcpu)) { | ||
2105 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) | ||
2106 | panic_vm(vcpu); | ||
2107 | |||
2108 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); | ||
2109 | ia64_dv_serialize_data(); | ||
2110 | ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4); | ||
2111 | ia64_dv_serialize_data(); | ||
2112 | } else { | ||
2113 | ia64_set_rr((VRN0 << VRN_SHIFT), | ||
2114 | vcpu->arch.metaphysical_saved_rr0); | ||
2115 | ia64_dv_serialize_data(); | ||
2116 | ia64_set_rr((VRN4 << VRN_SHIFT), | ||
2117 | vcpu->arch.metaphysical_saved_rr4); | ||
2118 | ia64_dv_serialize_data(); | ||
2119 | } | ||
2120 | ia64_set_rr((VRN1 << VRN_SHIFT), | ||
2121 | vrrtomrr(VMX(vcpu, vrr[VRN1]))); | ||
2122 | ia64_dv_serialize_data(); | ||
2123 | ia64_set_rr((VRN2 << VRN_SHIFT), | ||
2124 | vrrtomrr(VMX(vcpu, vrr[VRN2]))); | ||
2125 | ia64_dv_serialize_data(); | ||
2126 | ia64_set_rr((VRN3 << VRN_SHIFT), | ||
2127 | vrrtomrr(VMX(vcpu, vrr[VRN3]))); | ||
2128 | ia64_dv_serialize_data(); | ||
2129 | ia64_set_rr((VRN5 << VRN_SHIFT), | ||
2130 | vrrtomrr(VMX(vcpu, vrr[VRN5]))); | ||
2131 | ia64_dv_serialize_data(); | ||
2132 | ia64_set_rr((VRN7 << VRN_SHIFT), | ||
2133 | vrrtomrr(VMX(vcpu, vrr[VRN7]))); | ||
2134 | ia64_dv_serialize_data(); | ||
2135 | ia64_srlz_d(); | ||
2136 | ia64_set_psr(psr); | ||
2137 | } | ||
2138 | |||
/*
 * Entry point into vmm context for the current vcpu: restore the vpd
 * state through PAL, rebuild the software vtlb/vhpt, reset the vcpu
 * registers and load the machine region registers, then jump to the
 * vmm reset entry.
 *
 * NOTE(review): vmm_reset_entry() presumably does not return — the
 * trailing "return 0" looks like it only satisfies the compiler;
 * confirm against vmm_ivt.S.
 */
int vmm_entry(void)
{
	struct kvm_vcpu *v;
	v = current_vcpu;

	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
						0, 0, 0, 0, 0, 0);
	kvm_init_vtlb(v);
	kvm_init_vhpt(v);
	init_vcpu(v);
	kvm_init_all_rr(v);
	vmm_reset_entry();

	return 0;
}
2154 | |||
2155 | void panic_vm(struct kvm_vcpu *v) | ||
2156 | { | ||
2157 | struct exit_ctl_data *p = &v->arch.exit_data; | ||
2158 | |||
2159 | p->exit_reason = EXIT_REASON_VM_PANIC; | ||
2160 | vmm_transition(v); | ||
2161 | /*Never to return*/ | ||
2162 | while (1); | ||
2163 | } | ||
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h new file mode 100644 index 000000000000..b0fcfb62c49e --- /dev/null +++ b/arch/ia64/kvm/vcpu.h | |||
@@ -0,0 +1,740 @@ | |||
1 | /* | ||
2 | * vcpu.h: vcpu routines | ||
3 | * Copyright (c) 2005, Intel Corporation. | ||
4 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
5 | * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) | ||
6 | * | ||
7 | * Copyright (c) 2007, Intel Corporation. | ||
8 | * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) | ||
9 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms and conditions of the GNU General Public License, | ||
13 | * version 2, as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
22 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | |||
27 | #ifndef __KVM_VCPU_H__ | ||
28 | #define __KVM_VCPU_H__ | ||
29 | |||
30 | #include <asm/types.h> | ||
31 | #include <asm/fpu.h> | ||
32 | #include <asm/processor.h> | ||
33 | |||
34 | #ifndef __ASSEMBLY__ | ||
35 | #include "vti.h" | ||
36 | |||
37 | #include <linux/kvm_host.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | |||
/* A raw 41-bit IA-64 instruction slot carried in a 64-bit word. */
typedef unsigned long IA64_INST;

/*
 * A 128-bit instruction bundle: 5-bit template plus three 41-bit
 * slots.  Slot 1 straddles the 64-bit boundary, so it must be split
 * into slot1a/slot1b halves.
 */
typedef union U_IA64_BUNDLE {
	unsigned long i64[2];
	struct { unsigned long template:5, slot0:41, slot1a:18,
		slot1b:23, slot2:41; };
	/* NOTE: following doesn't work because bitfields can't cross natural
	   size boundaries
	   struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
} IA64_BUNDLE;
50 | |||
51 | typedef union U_INST64_A5 { | ||
52 | IA64_INST inst; | ||
53 | struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5, | ||
54 | imm9d:9, s:1, major:4; }; | ||
55 | } INST64_A5; | ||
56 | |||
57 | typedef union U_INST64_B4 { | ||
58 | IA64_INST inst; | ||
59 | struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, | ||
60 | wh:2, d:1, un1:1, major:4; }; | ||
61 | } INST64_B4; | ||
62 | |||
63 | typedef union U_INST64_B8 { | ||
64 | IA64_INST inst; | ||
65 | struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; }; | ||
66 | } INST64_B8; | ||
67 | |||
68 | typedef union U_INST64_B9 { | ||
69 | IA64_INST inst; | ||
70 | struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; }; | ||
71 | } INST64_B9; | ||
72 | |||
73 | typedef union U_INST64_I19 { | ||
74 | IA64_INST inst; | ||
75 | struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; }; | ||
76 | } INST64_I19; | ||
77 | |||
78 | typedef union U_INST64_I26 { | ||
79 | IA64_INST inst; | ||
80 | struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
81 | } INST64_I26; | ||
82 | |||
83 | typedef union U_INST64_I27 { | ||
84 | IA64_INST inst; | ||
85 | struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; }; | ||
86 | } INST64_I27; | ||
87 | |||
88 | typedef union U_INST64_I28 { /* not privileged (mov from AR) */ | ||
89 | IA64_INST inst; | ||
90 | struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
91 | } INST64_I28; | ||
92 | |||
93 | typedef union U_INST64_M28 { | ||
94 | IA64_INST inst; | ||
95 | struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; }; | ||
96 | } INST64_M28; | ||
97 | |||
98 | typedef union U_INST64_M29 { | ||
99 | IA64_INST inst; | ||
100 | struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
101 | } INST64_M29; | ||
102 | |||
103 | typedef union U_INST64_M30 { | ||
104 | IA64_INST inst; | ||
105 | struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2, | ||
106 | x3:3, s:1, major:4; }; | ||
107 | } INST64_M30; | ||
108 | |||
109 | typedef union U_INST64_M31 { | ||
110 | IA64_INST inst; | ||
111 | struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; }; | ||
112 | } INST64_M31; | ||
113 | |||
114 | typedef union U_INST64_M32 { | ||
115 | IA64_INST inst; | ||
116 | struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; }; | ||
117 | } INST64_M32; | ||
118 | |||
119 | typedef union U_INST64_M33 { | ||
120 | IA64_INST inst; | ||
121 | struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; }; | ||
122 | } INST64_M33; | ||
123 | |||
124 | typedef union U_INST64_M35 { | ||
125 | IA64_INST inst; | ||
126 | struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; }; | ||
127 | |||
128 | } INST64_M35; | ||
129 | |||
130 | typedef union U_INST64_M36 { | ||
131 | IA64_INST inst; | ||
132 | struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; }; | ||
133 | } INST64_M36; | ||
134 | |||
135 | typedef union U_INST64_M37 { | ||
136 | IA64_INST inst; | ||
137 | struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3, | ||
138 | i:1, major:4; }; | ||
139 | } INST64_M37; | ||
140 | |||
141 | typedef union U_INST64_M41 { | ||
142 | IA64_INST inst; | ||
143 | struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; }; | ||
144 | } INST64_M41; | ||
145 | |||
146 | typedef union U_INST64_M42 { | ||
147 | IA64_INST inst; | ||
148 | struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; }; | ||
149 | } INST64_M42; | ||
150 | |||
151 | typedef union U_INST64_M43 { | ||
152 | IA64_INST inst; | ||
153 | struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; }; | ||
154 | } INST64_M43; | ||
155 | |||
156 | typedef union U_INST64_M44 { | ||
157 | IA64_INST inst; | ||
158 | struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; }; | ||
159 | } INST64_M44; | ||
160 | |||
161 | typedef union U_INST64_M45 { | ||
162 | IA64_INST inst; | ||
163 | struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; }; | ||
164 | } INST64_M45; | ||
165 | |||
166 | typedef union U_INST64_M46 { | ||
167 | IA64_INST inst; | ||
168 | struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6, | ||
169 | x3:3, un1:1, major:4; }; | ||
170 | } INST64_M46; | ||
171 | |||
172 | typedef union U_INST64_M47 { | ||
173 | IA64_INST inst; | ||
174 | struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; }; | ||
175 | } INST64_M47; | ||
176 | |||
177 | typedef union U_INST64_M1{ | ||
178 | IA64_INST inst; | ||
179 | struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, | ||
180 | x6:6, m:1, major:4; }; | ||
181 | } INST64_M1; | ||
182 | |||
183 | typedef union U_INST64_M2{ | ||
184 | IA64_INST inst; | ||
185 | struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, | ||
186 | x6:6, m:1, major:4; }; | ||
187 | } INST64_M2; | ||
188 | |||
189 | typedef union U_INST64_M3{ | ||
190 | IA64_INST inst; | ||
191 | struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, | ||
192 | x6:6, s:1, major:4; }; | ||
193 | } INST64_M3; | ||
194 | |||
195 | typedef union U_INST64_M4 { | ||
196 | IA64_INST inst; | ||
197 | struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, | ||
198 | x6:6, m:1, major:4; }; | ||
199 | } INST64_M4; | ||
200 | |||
201 | typedef union U_INST64_M5 { | ||
202 | IA64_INST inst; | ||
203 | struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, | ||
204 | x6:6, s:1, major:4; }; | ||
205 | } INST64_M5; | ||
206 | |||
207 | typedef union U_INST64_M6 { | ||
208 | IA64_INST inst; | ||
209 | struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, | ||
210 | x6:6, m:1, major:4; }; | ||
211 | } INST64_M6; | ||
212 | |||
213 | typedef union U_INST64_M9 { | ||
214 | IA64_INST inst; | ||
215 | struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2, | ||
216 | x6:6, m:1, major:4; }; | ||
217 | } INST64_M9; | ||
218 | |||
219 | typedef union U_INST64_M10 { | ||
220 | IA64_INST inst; | ||
221 | struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2, | ||
222 | x6:6, s:1, major:4; }; | ||
223 | } INST64_M10; | ||
224 | |||
225 | typedef union U_INST64_M12 { | ||
226 | IA64_INST inst; | ||
227 | struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2, | ||
228 | x6:6, m:1, major:4; }; | ||
229 | } INST64_M12; | ||
230 | |||
231 | typedef union U_INST64_M15 { | ||
232 | IA64_INST inst; | ||
233 | struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2, | ||
234 | x6:6, s:1, major:4; }; | ||
235 | } INST64_M15; | ||
236 | |||
237 | typedef union U_INST64 { | ||
238 | IA64_INST inst; | ||
239 | struct { unsigned long :37, major:4; } generic; | ||
240 | INST64_A5 A5; /* used in build_hypercall_bundle only */ | ||
241 | INST64_B4 B4; /* used in build_hypercall_bundle only */ | ||
242 | INST64_B8 B8; /* rfi, bsw.[01] */ | ||
243 | INST64_B9 B9; /* break.b */ | ||
244 | INST64_I19 I19; /* used in build_hypercall_bundle only */ | ||
245 | INST64_I26 I26; /* mov register to ar (I unit) */ | ||
246 | INST64_I27 I27; /* mov immediate to ar (I unit) */ | ||
247 | INST64_I28 I28; /* mov from ar (I unit) */ | ||
248 | INST64_M1 M1; /* ld integer */ | ||
249 | INST64_M2 M2; | ||
250 | INST64_M3 M3; | ||
251 | INST64_M4 M4; /* st integer */ | ||
252 | INST64_M5 M5; | ||
253 | INST64_M6 M6; /* ldfd floating pointer */ | ||
254 | INST64_M9 M9; /* stfd floating pointer */ | ||
255 | INST64_M10 M10; /* stfd floating pointer */ | ||
256 | INST64_M12 M12; /* ldfd pair floating pointer */ | ||
257 | INST64_M15 M15; /* lfetch + imm update */ | ||
258 | INST64_M28 M28; /* purge translation cache entry */ | ||
259 | INST64_M29 M29; /* mov register to ar (M unit) */ | ||
260 | INST64_M30 M30; /* mov immediate to ar (M unit) */ | ||
261 | INST64_M31 M31; /* mov from ar (M unit) */ | ||
262 | INST64_M32 M32; /* mov reg to cr */ | ||
263 | INST64_M33 M33; /* mov from cr */ | ||
264 | INST64_M35 M35; /* mov to psr */ | ||
265 | INST64_M36 M36; /* mov from psr */ | ||
266 | INST64_M37 M37; /* break.m */ | ||
267 | INST64_M41 M41; /* translation cache insert */ | ||
268 | INST64_M42 M42; /* mov to indirect reg/translation reg insert*/ | ||
269 | INST64_M43 M43; /* mov from indirect reg */ | ||
270 | INST64_M44 M44; /* set/reset system mask */ | ||
271 | INST64_M45 M45; /* translation purge */ | ||
272 | INST64_M46 M46; /* translation access (tpa,tak) */ | ||
273 | INST64_M47 M47; /* purge translation entry */ | ||
274 | } INST64; | ||
275 | |||
276 | #define MASK_41 ((unsigned long)0x1ffffffffff) | ||
277 | |||
278 | /* Virtual address memory attributes encoding */ | ||
279 | #define VA_MATTR_WB 0x0 | ||
280 | #define VA_MATTR_UC 0x4 | ||
281 | #define VA_MATTR_UCE 0x5 | ||
282 | #define VA_MATTR_WC 0x6 | ||
283 | #define VA_MATTR_NATPAGE 0x7 | ||
284 | |||
285 | #define PMASK(size) (~((size) - 1)) | ||
286 | #define PSIZE(size) (1UL<<(size)) | ||
287 | #define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits)) | ||
288 | #define PAGEALIGN(va, ps) CLEARLSB(va, ps) | ||
289 | #define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53)) | ||
290 | #define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */ | ||
291 | |||
292 | #define ARCH_PAGE_SHIFT 12 | ||
293 | |||
294 | #define INVALID_TI_TAG (1UL << 63) | ||
295 | |||
296 | #define VTLB_PTE_P_BIT 0 | ||
297 | #define VTLB_PTE_IO_BIT 60 | ||
298 | #define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT) | ||
299 | #define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT) | ||
300 | |||
301 | #define vcpu_quick_region_check(_tr_regions,_ifa) \ | ||
302 | (_tr_regions & (1 << ((unsigned long)_ifa >> 61))) | ||
303 | |||
304 | #define vcpu_quick_region_set(_tr_regions,_ifa) \ | ||
305 | do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0) | ||
306 | |||
/*
 * Fill in a translation entry: raw pte value, itir (page size/key),
 * the tagged virtual address and the region id it belongs to.
 */
static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
		u64 va, u64 rid)
{
	trp->page_flags = pte;
	trp->itir = itir;
	trp->vadr = va;
	trp->rid = rid;
}
315 | |||
316 | extern u64 kvm_lookup_mpa(u64 gpfn); | ||
317 | extern u64 kvm_gpa_to_mpa(u64 gpa); | ||
318 | |||
/*
 * Return the I/O type bits for @gpfn if its mpa entry is valid and
 * marked as I/O, otherwise 0.
 */
#define __gpfn_is_io(gpfn)			\
	({					\
		u64 pte, ret = 0;		\
		pte = kvm_lookup_mpa(gpfn);	\
		if (!(pte & GPFN_INV_MASK))	\
			ret = pte & GPFN_IO_MASK;	\
		ret;				\
	})
328 | |||
329 | #endif | ||
330 | |||
331 | #define IA64_NO_FAULT 0 | ||
332 | #define IA64_FAULT 1 | ||
333 | |||
334 | #define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15) | ||
335 | |||
/* Guest addressing-mode switch classification. */
#define SW_BAD	0	/* Bad mode transition */
#define SW_V2P	1	/* Physical emulation is activated */
#define SW_P2V	2	/* Exit physical mode emulation */
#define SW_SELF	3	/* No mode transition */
#define SW_NOP	4	/* Mode transition, but without action required */
341 | |||
342 | #define GUEST_IN_PHY 0x1 | ||
343 | #define GUEST_PHY_EMUL 0x2 | ||
344 | |||
345 | #define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP)) | ||
346 | |||
347 | #define VRN_SHIFT 61 | ||
348 | #define VRN_MASK 0xe000000000000000 | ||
349 | #define VRN0 0x0UL | ||
350 | #define VRN1 0x1UL | ||
351 | #define VRN2 0x2UL | ||
352 | #define VRN3 0x3UL | ||
353 | #define VRN4 0x4UL | ||
354 | #define VRN5 0x5UL | ||
355 | #define VRN6 0x6UL | ||
356 | #define VRN7 0x7UL | ||
357 | |||
358 | #define IRQ_NO_MASKED 0 | ||
359 | #define IRQ_MASKED_BY_VTPR 1 | ||
360 | #define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */ | ||
361 | |||
362 | #define PTA_BASE_SHIFT 15 | ||
363 | |||
364 | #define IA64_PSR_VM_BIT 46 | ||
365 | #define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT) | ||
366 | |||
367 | /* Interruption Function State */ | ||
368 | #define IA64_IFS_V_BIT 63 | ||
369 | #define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT) | ||
370 | |||
371 | #define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX) | ||
372 | #define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX) | ||
373 | |||
374 | #ifndef __ASSEMBLY__ | ||
375 | |||
376 | #include <asm/gcc_intrin.h> | ||
377 | |||
378 | #define is_physical_mode(v) \ | ||
379 | ((v->arch.mode_flags) & GUEST_IN_PHY) | ||
380 | |||
381 | #define is_virtual_mode(v) \ | ||
382 | (!is_physical_mode(v)) | ||
383 | |||
384 | #define MODE_IND(psr) \ | ||
385 | (((psr).it << 2) + ((psr).dt << 1) + (psr).rt) | ||
386 | |||
/*
 * Open-coded test-and-test-and-set spin lock for vmm context, where
 * the host kernel's lock primitives are not available.  Acquire the
 * raw 32-bit lock word with cmpxchg4.acq (0 -> 1); while contended,
 * spin on plain reads before retrying the cmpxchg.
 */
#define _vmm_raw_spin_lock(x)						\
	do {								\
		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
		__u64 ia64_spinlock_val;				\
		ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
		if (unlikely(ia64_spinlock_val)) {			\
			do {						\
				while (*ia64_spinlock_ptr)		\
					ia64_barrier();			\
				ia64_spinlock_val =			\
				ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
			} while (ia64_spinlock_val);			\
		} \
	} while (0)

/*
 * Release: plain store of 0 after a compiler barrier.
 * NOTE(review): appears to rely on ia64 store-release ordering for
 * correctness — confirm.
 */
#define _vmm_raw_spin_unlock(x)				\
	do { barrier();				\
		((spinlock_t *)x)->raw_lock.lock = 0; } \
	while (0)
406 | |||
407 | void vmm_spin_lock(spinlock_t *lock); | ||
408 | void vmm_spin_unlock(spinlock_t *lock); | ||
409 | enum { | ||
410 | I_TLB = 1, | ||
411 | D_TLB = 2 | ||
412 | }; | ||
413 | |||
414 | union kvm_va { | ||
415 | struct { | ||
416 | unsigned long off : 60; /* intra-region offset */ | ||
417 | unsigned long reg : 4; /* region number */ | ||
418 | } f; | ||
419 | unsigned long l; | ||
420 | void *p; | ||
421 | }; | ||
422 | |||
423 | #define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \ | ||
424 | _v.f.reg = 0; _v.l; }) | ||
425 | #define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \ | ||
426 | _v.f.reg = -1; _v.p; }) | ||
427 | |||
428 | #define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \ | ||
429 | _v.rid; }) | ||
430 | #define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \ | ||
431 | _v.ps; }) | ||
432 | #define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \ | ||
433 | _v.ve; }) | ||
434 | |||
435 | enum vhpt_ref{ DATA_REF, NA_REF, INST_REF, RSE_REF }; | ||
436 | enum tlb_miss_type { INSTRUCTION, DATA, REGISTER }; | ||
437 | |||
438 | #define VCPU(_v, _x) ((_v)->arch.vpd->_x) | ||
439 | #define VMX(_v, _x) ((_v)->arch._x) | ||
440 | |||
441 | #define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i]) | ||
442 | #define VLSAPIC_XTP(_v) VMX(_v, xtp) | ||
443 | |||
/* Extract the page-size (ps) field, bits 7:2, from an itir value. */
static inline unsigned long itir_ps(unsigned long itir)
{
	return (itir >> 2) & 0x3f;
}
448 | |||
449 | |||
450 | /************************************************************************** | ||
451 | VCPU control register access routines | ||
452 | **************************************************************************/ | ||
453 | |||
454 | static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu) | ||
455 | { | ||
456 | return ((u64)VCPU(vcpu, itir)); | ||
457 | } | ||
458 | |||
459 | static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val) | ||
460 | { | ||
461 | VCPU(vcpu, itir) = val; | ||
462 | } | ||
463 | |||
464 | static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu) | ||
465 | { | ||
466 | return ((u64)VCPU(vcpu, ifa)); | ||
467 | } | ||
468 | |||
469 | static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val) | ||
470 | { | ||
471 | VCPU(vcpu, ifa) = val; | ||
472 | } | ||
473 | |||
474 | static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu) | ||
475 | { | ||
476 | return ((u64)VCPU(vcpu, iva)); | ||
477 | } | ||
478 | |||
479 | static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu) | ||
480 | { | ||
481 | return ((u64)VCPU(vcpu, pta)); | ||
482 | } | ||
483 | |||
484 | static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu) | ||
485 | { | ||
486 | return ((u64)VCPU(vcpu, lid)); | ||
487 | } | ||
488 | |||
489 | static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu) | ||
490 | { | ||
491 | return ((u64)VCPU(vcpu, tpr)); | ||
492 | } | ||
493 | |||
494 | static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu) | ||
495 | { | ||
496 | return (0UL); /*reads of eoi always return 0 */ | ||
497 | } | ||
498 | |||
499 | static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu) | ||
500 | { | ||
501 | return ((u64)VCPU(vcpu, irr[0])); | ||
502 | } | ||
503 | |||
504 | static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu) | ||
505 | { | ||
506 | return ((u64)VCPU(vcpu, irr[1])); | ||
507 | } | ||
508 | |||
509 | static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu) | ||
510 | { | ||
511 | return ((u64)VCPU(vcpu, irr[2])); | ||
512 | } | ||
513 | |||
514 | static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu) | ||
515 | { | ||
516 | return ((u64)VCPU(vcpu, irr[3])); | ||
517 | } | ||
518 | |||
/*
 * NOTE(review): unlike the sibling setters, this writes the machine
 * cr.dcr directly rather than a vpd shadow field — presumably the
 * guest dcr is backed by the real register while in guest context;
 * confirm against the context-switch path.
 */
static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
{
	ia64_setreg(_IA64_REG_CR_DCR, val);
}
523 | |||
524 | static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val) | ||
525 | { | ||
526 | VCPU(vcpu, isr) = val; | ||
527 | } | ||
528 | |||
529 | static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val) | ||
530 | { | ||
531 | VCPU(vcpu, lid) = val; | ||
532 | } | ||
533 | |||
534 | static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val) | ||
535 | { | ||
536 | VCPU(vcpu, ipsr) = val; | ||
537 | } | ||
538 | |||
539 | static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val) | ||
540 | { | ||
541 | VCPU(vcpu, iip) = val; | ||
542 | } | ||
543 | |||
544 | static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val) | ||
545 | { | ||
546 | VCPU(vcpu, ifs) = val; | ||
547 | } | ||
548 | |||
549 | static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val) | ||
550 | { | ||
551 | VCPU(vcpu, iipa) = val; | ||
552 | } | ||
553 | |||
554 | static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val) | ||
555 | { | ||
556 | VCPU(vcpu, iha) = val; | ||
557 | } | ||
558 | |||
559 | |||
/*
 * Guest view of the region register covering address @reg (region
 * number = va bits 63:61).
 */
static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
{
	return vcpu->arch.vrr[reg>>61];
}
564 | |||
565 | /************************************************************************** | ||
566 | VCPU debug breakpoint register access routines | ||
567 | **************************************************************************/ | ||
568 | |||
569 | static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
570 | { | ||
571 | __ia64_set_dbr(reg, val); | ||
572 | } | ||
573 | |||
574 | static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
575 | { | ||
576 | ia64_set_ibr(reg, val); | ||
577 | } | ||
578 | |||
579 | static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg) | ||
580 | { | ||
581 | return ((u64)__ia64_get_dbr(reg)); | ||
582 | } | ||
583 | |||
584 | static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg) | ||
585 | { | ||
586 | return ((u64)ia64_get_ibr(reg)); | ||
587 | } | ||
588 | |||
589 | /************************************************************************** | ||
590 | VCPU performance monitor register access routines | ||
591 | **************************************************************************/ | ||
592 | static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
593 | { | ||
594 | /* NOTE: Writes to unimplemented PMC registers are discarded */ | ||
595 | ia64_set_pmc(reg, val); | ||
596 | } | ||
597 | |||
598 | static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val) | ||
599 | { | ||
600 | /* NOTE: Writes to unimplemented PMD registers are discarded */ | ||
601 | ia64_set_pmd(reg, val); | ||
602 | } | ||
603 | |||
604 | static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg) | ||
605 | { | ||
606 | /* NOTE: Reads from unimplemented PMC registers return zero */ | ||
607 | return ((u64)ia64_get_pmc(reg)); | ||
608 | } | ||
609 | |||
610 | static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg) | ||
611 | { | ||
612 | /* NOTE: Reads from unimplemented PMD registers return zero */ | ||
613 | return ((u64)ia64_get_pmd(reg)); | ||
614 | } | ||
615 | |||
/*
 * Convert a guest (virtual) region register value into the value to
 * load into the machine rr: shift the guest rid left by 4 and tag it
 * with 0xe (presumably to keep guest rids disjoint from host rids —
 * confirm), clamp the page size to the host PAGE_SHIFT, and enable
 * the hardware VHPT walker.
 */
static inline unsigned long vrrtomrr(unsigned long val)
{
	union ia64_rr rr;
	rr.val = val;
	rr.rid = (rr.rid << 4) | 0xe;
	if (rr.ps > PAGE_SHIFT)
		rr.ps = PAGE_SHIFT;
	rr.ve = 1;
	return rr.val;
}
626 | |||
627 | |||
628 | static inline int highest_bits(int *dat) | ||
629 | { | ||
630 | u32 bits, bitnum; | ||
631 | int i; | ||
632 | |||
633 | /* loop for all 256 bits */ | ||
634 | for (i = 7; i >= 0 ; i--) { | ||
635 | bits = dat[i]; | ||
636 | if (bits) { | ||
637 | bitnum = fls(bits); | ||
638 | return i * 32 + bitnum - 1; | ||
639 | } | ||
640 | } | ||
641 | return NULL_VECTOR; | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * The pending irq is higher than the inservice one. | ||
646 | * | ||
647 | */ | ||
648 | static inline int is_higher_irq(int pending, int inservice) | ||
649 | { | ||
650 | return ((pending > inservice) | ||
651 | || ((pending != NULL_VECTOR) | ||
652 | && (inservice == NULL_VECTOR))); | ||
653 | } | ||
654 | |||
/*
 * Compare a vector's interrupt priority class (its bits 7:4) against
 * a priority-class threshold @mic.
 */
static inline int is_higher_class(int pending, int mic)
{
	int pending_class = pending >> 4;

	return pending_class > mic;
}
659 | |||
660 | /* | ||
661 | * Return 0-255 for pending irq. | ||
662 | * NULL_VECTOR: when no pending. | ||
663 | */ | ||
664 | static inline int highest_pending_irq(struct kvm_vcpu *vcpu) | ||
665 | { | ||
666 | if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR)) | ||
667 | return NMI_VECTOR; | ||
668 | if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR)) | ||
669 | return ExtINT_VECTOR; | ||
670 | |||
671 | return highest_bits((int *)&VCPU(vcpu, irr[0])); | ||
672 | } | ||
673 | |||
674 | static inline int highest_inservice_irq(struct kvm_vcpu *vcpu) | ||
675 | { | ||
676 | if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR)) | ||
677 | return NMI_VECTOR; | ||
678 | if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR)) | ||
679 | return ExtINT_VECTOR; | ||
680 | |||
681 | return highest_bits((int *)&(VMX(vcpu, insvc[0]))); | ||
682 | } | ||
683 | |||
684 | extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg, | ||
685 | struct ia64_fpreg *val); | ||
686 | extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg, | ||
687 | struct ia64_fpreg *val); | ||
688 | extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg); | ||
689 | extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat); | ||
690 | extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu); | ||
691 | extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val); | ||
692 | extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr); | ||
693 | extern void vcpu_bsw0(struct kvm_vcpu *vcpu); | ||
694 | extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, | ||
695 | u64 itir, u64 va, int type); | ||
696 | extern struct thash_data *vhpt_lookup(u64 va); | ||
697 | extern u64 guest_vhpt_lookup(u64 iha, u64 *pte); | ||
698 | extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps); | ||
699 | extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps); | ||
700 | extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va); | ||
701 | extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, | ||
702 | u64 itir, u64 ifa, int type); | ||
703 | extern void thash_purge_all(struct kvm_vcpu *v); | ||
704 | extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v, | ||
705 | u64 va, int is_data); | ||
706 | extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, | ||
707 | u64 ps, int is_data); | ||
708 | |||
709 | extern void vcpu_increment_iip(struct kvm_vcpu *v); | ||
710 | extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu); | ||
711 | extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec); | ||
712 | extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec); | ||
713 | extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr); | ||
714 | extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr); | ||
715 | extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr); | ||
716 | extern void nested_dtlb(struct kvm_vcpu *vcpu); | ||
717 | extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr); | ||
718 | extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref); | ||
719 | |||
720 | extern void update_vhpi(struct kvm_vcpu *vcpu, int vec); | ||
721 | extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice); | ||
722 | |||
723 | extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle); | ||
724 | extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma); | ||
725 | extern void vmm_transition(struct kvm_vcpu *vcpu); | ||
726 | extern void vmm_trampoline(union context *from, union context *to); | ||
727 | extern int vmm_entry(void); | ||
728 | extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu); | ||
729 | |||
730 | extern void vmm_reset_entry(void); | ||
731 | void kvm_init_vtlb(struct kvm_vcpu *v); | ||
732 | void kvm_init_vhpt(struct kvm_vcpu *v); | ||
733 | void thash_init(struct thash_cb *hcb, u64 sz); | ||
734 | |||
735 | void panic_vm(struct kvm_vcpu *v); | ||
736 | |||
737 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, | ||
738 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); | ||
739 | #endif | ||
740 | #endif /* __VCPU_H__ */ | ||
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c new file mode 100644 index 000000000000..2275bf4e681a --- /dev/null +++ b/arch/ia64/kvm/vmm.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * vmm.c: vmm module interface with kvm module | ||
3 | * | ||
4 | * Copyright (c) 2007, Intel Corporation. | ||
5 | * | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | */ | ||
21 | |||
22 | |||
23 | #include<linux/module.h> | ||
24 | #include<asm/fpswa.h> | ||
25 | |||
26 | #include "vcpu.h" | ||
27 | |||
28 | MODULE_AUTHOR("Intel"); | ||
29 | MODULE_LICENSE("GPL"); | ||
30 | |||
31 | extern char kvm_ia64_ivt; | ||
32 | extern fpswa_interface_t *vmm_fpswa_interface; | ||
33 | |||
/*
 * Descriptor registered with the kvm core: identifies this module and
 * supplies the VMM's entry points plus the base address of its
 * interruption vector table (kvm_ia64_ivt, defined in vmm_ivt.S).
 */
struct kvm_vmm_info vmm_info = {
	.module	     = THIS_MODULE,
	.vmm_entry   = vmm_entry,	/* normal VMM entry point */
	.tramp_entry = vmm_trampoline,	/* context-switch trampoline */
	.vmm_ivt     = (unsigned long)&kvm_ia64_ivt,
};
40 | |||
/*
 * Module init: publish the host FPSWA (floating-point software assist)
 * interface to the VMM, then register this VMM's entry points with the
 * kvm core.
 * NOTE(review): 1024 is the size argument forwarded to kvm_init();
 * presumably the per-vcpu structure size/alignment hint -- confirm
 * against kvm_init()'s prototype.
 */
static int __init kvm_vmm_init(void)
{

	vmm_fpswa_interface = fpswa_interface;

	/* Register vmm data to kvm side */
	return kvm_init(&vmm_info, 1024, THIS_MODULE);
}
49 | |||
50 | static void __exit kvm_vmm_exit(void) | ||
51 | { | ||
52 | kvm_exit(); | ||
53 | return ; | ||
54 | } | ||
55 | |||
/*
 * Spinlock wrappers exported for code running inside the VMM image,
 * which cannot call the regular kernel spinlock entry points; they
 * forward directly to the raw VMM implementations.
 */
void vmm_spin_lock(spinlock_t *lock)
{
	_vmm_raw_spin_lock(lock);
}

void vmm_spin_unlock(spinlock_t *lock)
{
	_vmm_raw_spin_unlock(lock);
}
module_init(kvm_vmm_init)
module_exit(kvm_vmm_exit)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S new file mode 100644 index 000000000000..3ee5f481c06d --- /dev/null +++ b/arch/ia64/kvm/vmm_ivt.S | |||
@@ -0,0 +1,1424 @@ | |||
1 | /* | ||
2 | * /ia64/kvm_ivt.S | ||
3 | * | ||
4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co | ||
5 | * Stephane Eranian <eranian@hpl.hp.com> | ||
6 | * David Mosberger <davidm@hpl.hp.com> | ||
7 | * Copyright (C) 2000, 2002-2003 Intel Co | ||
8 | * Asit Mallick <asit.k.mallick@intel.com> | ||
9 | * Suresh Siddha <suresh.b.siddha@intel.com> | ||
10 | * Kenneth Chen <kenneth.w.chen@intel.com> | ||
11 | * Fenghua Yu <fenghua.yu@intel.com> | ||
12 | * | ||
13 | * | ||
14 | * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling | ||
15 | * for SMP | ||
16 | * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB | ||
17 | * handler now uses virtual PT. | ||
18 | * | ||
19 | * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) | ||
20 | * Supporting Intel virtualization architecture | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file defines the interruption vector table used by the CPU. | ||
26 | * It does not include one entry per possible cause of interruption. | ||
27 | * | ||
28 | * The first 20 entries of the table contain 64 bundles each while the | ||
29 | * remaining 48 entries contain only 16 bundles each. | ||
30 | * | ||
31 | * The 64 bundles are used to allow inlining the whole handler for | ||
32 | * critical | ||
33 | * interruptions like TLB misses. | ||
34 | * | ||
35 | * For each entry, the comment is as follows: | ||
36 | * | ||
37 | * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss | ||
38 | * (12,51) | ||
39 | * entry offset ----/ / / / | ||
40 | * / | ||
41 | * entry number ---------/ / / | ||
42 | * / | ||
43 | * size of the entry -------------/ / | ||
44 | * / | ||
45 | * vector name -------------------------------------/ | ||
46 | * / | ||
47 | * interruptions triggering this vector | ||
48 | * ----------------------/ | ||
49 | * | ||
50 | * The table is 32KB in size and must be aligned on 32KB | ||
51 | * boundary. | ||
52 | * (The CPU ignores the 15 lower bits of the address) | ||
53 | * | ||
54 | * Table is based upon EAS2.6 (Oct 1999) | ||
55 | */ | ||
56 | |||
57 | |||
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/pgtable.h>

#include "asm-offsets.h"
#include "vcpu.h"
#include "kvm_minstate.h"
#include "vti.h"

/*
 * PSR bits the VMM forces on by default.  Only psr.ac (alignment
 * check) today; the "#if 1" keeps the no-default-bits variant around
 * for experimentation.
 */
#if 1
# define PSR_DEFAULT_BITS   psr.ac
#else
# define PSR_DEFAULT_BITS   0
#endif


/*
 * Dead-end handler for vector n: load the vector number into r19 and
 * branch to the label itself, i.e. spin forever.
 * NOTE(review): deliberate -- these vectors are not expected to be
 * taken while the VMM is running.
 */
#define KVM_FAULT(n)    \
	kvm_fault_##n:;          \
	mov r19=n;;          \
	br.sptk.many kvm_fault_##n;         \
	;;                  \


/*
 * Reflect vector n back into the guest.  tbit sets p7 when
 * cr.ipsr.vm is 1 (fault raised from guest context), in which case we
 * branch to the reflection dispatcher; otherwise the fault occurred
 * inside the VMM itself and all we can do is panic.
 */
#define KVM_REFLECT(n)    \
	mov r31=pr;           \
	mov r19=n;       /* prepare to save predicates */ \
	mov r29=cr.ipsr;      \
	;;      \
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
	(p7)br.sptk.many kvm_dispatch_reflection;        \
	br.sptk.many kvm_panic;      \


/* Last resort: nothing sane can be done, spin forever. */
GLOBAL_ENTRY(kvm_panic)
	br.sptk.many kvm_panic
	;;
END(kvm_panic)
95 | |||
96 | |||
97 | |||
98 | |||
99 | |||
	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary (CPU ignores low 15 address bits)
	.global kvm_ia64_ivt
kvm_ia64_ivt:
///////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// VHPT walker is not used by the VMM -> unexpected, dead-end.
ENTRY(kvm_vhpt_miss)
	KVM_FAULT(0)
END(kvm_vhpt_miss)
110 | |||
111 | |||
	.org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x400 Entry 1 (size 64 bundles) ITLB (21)
// Instruction TLB miss: if the miss came from VMM context
// (ipsr.vm clear, p6) fall through to the alternate handler which
// installs an identity mapping; otherwise dispatch the guest miss.
ENTRY(kvm_itlb_miss)
	mov r31 = pr		// save predicates
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
	(p6) br.sptk kvm_alt_itlb_miss
	mov r19 = 1		// vector number
	br.sptk kvm_itlb_miss_dispatch
	KVM_FAULT(1);		// not reached: both paths above branch away
END(kvm_itlb_miss)
125 | |||
	.org kvm_ia64_ivt+0x0800
//////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
// Data TLB miss: same split as the ITLB handler -- VMM-context
// misses (p6) go to the alternate handler, guest misses are
// dispatched to the C page-fault path.
ENTRY(kvm_dtlb_miss)
	mov r31 = pr		// save predicates
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
	(p6)br.sptk kvm_alt_dtlb_miss
	br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)
137 | |||
	.org kvm_ia64_ivt+0x0c00
////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
// VMM-context instruction miss: install an identity mapping with
// kernel page attributes and granule-sized page, then resume.
ENTRY(kvm_alt_itlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	mov r24=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r17,r19		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2	// page size for the insert
	;;
	mov cr.itir=r20
	;;
	itc.i r19		// insert the TLB entry
	mov pr=r31,-1		// restore predicates saved by the caller
	rfi
END(kvm_alt_itlb_miss)
160 | |||
	.org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
// VMM-context data miss: mirror of kvm_alt_itlb_miss using itc.d.
ENTRY(kvm_alt_dtlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r24=cr.ipsr
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r19,r17		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2	// page size for the insert
	;;
	mov cr.itir=r20
	;;
	itc.d r19		// insert the TLB entry
	mov pr=r31,-1		// restore predicates
	rfi
END(kvm_alt_dtlb_miss)
183 | |||
	.org kvm_ia64_ivt+0x1400
//////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
// Nested misses should not occur in the VMM -> dead-end.
ENTRY(kvm_nested_dtlb_miss)
	KVM_FAULT(5)
END(kvm_nested_dtlb_miss)

	.org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
// Key faults belong to the guest: reflect them.
ENTRY(kvm_ikey_miss)
	KVM_REFLECT(6)
END(kvm_ikey_miss)

	.org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
	KVM_REFLECT(7)
END(kvm_dkey_miss)

	.org kvm_ia64_ivt+0x2000
////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(kvm_dirty_bit)
	KVM_REFLECT(8)
END(kvm_dirty_bit)

	.org kvm_ia64_ivt+0x2400
////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
	KVM_REFLECT(9)
END(kvm_iaccess_bit)

	.org kvm_ia64_ivt+0x2800
///////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
	KVM_REFLECT(10)
END(kvm_daccess_bit)
225 | |||
	.org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
// Guest executed "break" (hypercall / breakpoint): save minimal
// state, re-enable interruption collection, and call the C handler
// kvm_ia64_handle_break(ifa, regs, isr, iim); returns through
// ia64_leave_hypervisor.
ENTRY(kvm_break_fault)
	mov r31=pr
	mov r19=11		// vector number
	mov r29=cr.ipsr
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,4,0	//(must be first in insn group!)
	mov out0=cr.ifa
	mov out2=cr.isr		// FIXME: pity to make this slow access twice
	mov out3=cr.iim		// FIXME: pity to make this slow access twice
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15)ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14		// return through ia64_leave_hypervisor
	;;
	adds out1=16,sp		// out1 = pt_regs
	br.call.sptk.many b6=kvm_ia64_handle_break
	;;
END(kvm_break_fault)
256 | |||
257 | .org kvm_ia64_ivt+0x3000 | ||
258 | ///////////////////////////////////////////////////////////////// | ||
259 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) | ||
260 | ENTRY(kvm_interrupt) | ||
261 | mov r31=pr // prepare to save predicates | ||
262 | mov r19=12 | ||
263 | mov r29=cr.ipsr | ||
264 | ;; | ||
265 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT | ||
266 | tbit.z p0,p15=r29,IA64_PSR_I_BIT | ||
267 | ;; | ||
268 | (p7) br.sptk kvm_dispatch_interrupt | ||
269 | ;; | ||
270 | mov r27=ar.rsc /* M */ | ||
271 | mov r20=r1 /* A */ | ||
272 | mov r25=ar.unat /* M */ | ||
273 | mov r26=ar.pfs /* I */ | ||
274 | mov r28=cr.iip /* M */ | ||
275 | cover /* B (or nothing) */ | ||
276 | ;; | ||
277 | mov r1=sp | ||
278 | ;; | ||
279 | invala /* M */ | ||
280 | mov r30=cr.ifs | ||
281 | ;; | ||
282 | addl r1=-VMM_PT_REGS_SIZE,r1 | ||
283 | ;; | ||
284 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ | ||
285 | adds r16=PT(CR_IPSR),r1 | ||
286 | ;; | ||
287 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES | ||
288 | st8 [r16]=r29 /* save cr.ipsr */ | ||
289 | ;; | ||
290 | lfetch.fault.excl.nt1 [r17] | ||
291 | mov r29=b0 | ||
292 | ;; | ||
293 | adds r16=PT(R8),r1 /* initialize first base pointer */ | ||
294 | adds r17=PT(R9),r1 /* initialize second base pointer */ | ||
295 | mov r18=r0 /* make sure r18 isn't NaT */ | ||
296 | ;; | ||
297 | .mem.offset 0,0; st8.spill [r16]=r8,16 | ||
298 | .mem.offset 8,0; st8.spill [r17]=r9,16 | ||
299 | ;; | ||
300 | .mem.offset 0,0; st8.spill [r16]=r10,24 | ||
301 | .mem.offset 8,0; st8.spill [r17]=r11,24 | ||
302 | ;; | ||
303 | st8 [r16]=r28,16 /* save cr.iip */ | ||
304 | st8 [r17]=r30,16 /* save cr.ifs */ | ||
305 | mov r8=ar.fpsr /* M */ | ||
306 | mov r9=ar.csd | ||
307 | mov r10=ar.ssd | ||
308 | movl r11=FPSR_DEFAULT /* L-unit */ | ||
309 | ;; | ||
310 | st8 [r16]=r25,16 /* save ar.unat */ | ||
311 | st8 [r17]=r26,16 /* save ar.pfs */ | ||
312 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ | ||
313 | ;; | ||
314 | st8 [r16]=r27,16 /* save ar.rsc */ | ||
315 | adds r17=16,r17 /* skip over ar_rnat field */ | ||
316 | ;; | ||
317 | st8 [r17]=r31,16 /* save predicates */ | ||
318 | adds r16=16,r16 /* skip over ar_bspstore field */ | ||
319 | ;; | ||
320 | st8 [r16]=r29,16 /* save b0 */ | ||
321 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ | ||
322 | ;; | ||
323 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ | ||
324 | .mem.offset 8,0; st8.spill [r17]=r12,16 | ||
325 | adds r12=-16,r1 | ||
326 | /* switch to kernel memory stack (with 16 bytes of scratch) */ | ||
327 | ;; | ||
328 | .mem.offset 0,0; st8.spill [r16]=r13,16 | ||
329 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ | ||
330 | ;; | ||
331 | .mem.offset 0,0; st8.spill [r16]=r15,16 | ||
332 | .mem.offset 8,0; st8.spill [r17]=r14,16 | ||
333 | dep r14=-1,r0,60,4 | ||
334 | ;; | ||
335 | .mem.offset 0,0; st8.spill [r16]=r2,16 | ||
336 | .mem.offset 8,0; st8.spill [r17]=r3,16 | ||
337 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 | ||
338 | adds r14 = VMM_VCPU_GP_OFFSET,r13 | ||
339 | ;; | ||
340 | mov r8=ar.ccv | ||
341 | ld8 r14 = [r14] | ||
342 | ;; | ||
343 | mov r1=r14 /* establish kernel global pointer */ | ||
344 | ;; \ | ||
345 | bsw.1 | ||
346 | ;; | ||
347 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | ||
348 | mov out0=r13 | ||
349 | ;; | ||
350 | ssm psr.ic | ||
351 | ;; | ||
352 | srlz.i | ||
353 | ;; | ||
354 | //(p15) ssm psr.i | ||
355 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
356 | srlz.i // ensure everybody knows psr.ic is back on | ||
357 | ;; | ||
358 | .mem.offset 0,0; st8.spill [r2]=r16,16 | ||
359 | .mem.offset 8,0; st8.spill [r3]=r17,16 | ||
360 | ;; | ||
361 | .mem.offset 0,0; st8.spill [r2]=r18,16 | ||
362 | .mem.offset 8,0; st8.spill [r3]=r19,16 | ||
363 | ;; | ||
364 | .mem.offset 0,0; st8.spill [r2]=r20,16 | ||
365 | .mem.offset 8,0; st8.spill [r3]=r21,16 | ||
366 | mov r18=b6 | ||
367 | ;; | ||
368 | .mem.offset 0,0; st8.spill [r2]=r22,16 | ||
369 | .mem.offset 8,0; st8.spill [r3]=r23,16 | ||
370 | mov r19=b7 | ||
371 | ;; | ||
372 | .mem.offset 0,0; st8.spill [r2]=r24,16 | ||
373 | .mem.offset 8,0; st8.spill [r3]=r25,16 | ||
374 | ;; | ||
375 | .mem.offset 0,0; st8.spill [r2]=r26,16 | ||
376 | .mem.offset 8,0; st8.spill [r3]=r27,16 | ||
377 | ;; | ||
378 | .mem.offset 0,0; st8.spill [r2]=r28,16 | ||
379 | .mem.offset 8,0; st8.spill [r3]=r29,16 | ||
380 | ;; | ||
381 | .mem.offset 0,0; st8.spill [r2]=r30,16 | ||
382 | .mem.offset 8,0; st8.spill [r3]=r31,32 | ||
383 | ;; | ||
384 | mov ar.fpsr=r11 /* M-unit */ | ||
385 | st8 [r2]=r8,8 /* ar.ccv */ | ||
386 | adds r24=PT(B6)-PT(F7),r3 | ||
387 | ;; | ||
388 | stf.spill [r2]=f6,32 | ||
389 | stf.spill [r3]=f7,32 | ||
390 | ;; | ||
391 | stf.spill [r2]=f8,32 | ||
392 | stf.spill [r3]=f9,32 | ||
393 | ;; | ||
394 | stf.spill [r2]=f10 | ||
395 | stf.spill [r3]=f11 | ||
396 | adds r25=PT(B7)-PT(F11),r3 | ||
397 | ;; | ||
398 | st8 [r24]=r18,16 /* b6 */ | ||
399 | st8 [r25]=r19,16 /* b7 */ | ||
400 | ;; | ||
401 | st8 [r24]=r9 /* ar.csd */ | ||
402 | st8 [r25]=r10 /* ar.ssd */ | ||
403 | ;; | ||
404 | srlz.d // make sure we see the effect of cr.ivr | ||
405 | addl r14=@gprel(ia64_leave_nested),gp | ||
406 | ;; | ||
407 | mov rp=r14 | ||
408 | br.call.sptk.many b6=kvm_ia64_handle_irq | ||
409 | ;; | ||
410 | END(kvm_interrupt) | ||
411 | |||
	.global kvm_dispatch_vexirq
	.org kvm_ia64_ivt+0x3400
//////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
// Used here for virtual-external-interrupt injection.  r30 flags
// whether we entered via the vector (0) or jumped to the global
// kvm_dispatch_vexirq label with r30==1, in which case gp is first
// restored from the vcpu's saved-gp slot.
ENTRY(kvm_virtual_exirq)
	mov r31=pr
	mov r19=13		// vector number
	mov r30 =r0		// mark "entered via vector"
	;;
kvm_dispatch_vexirq:
	cmp.eq p6,p0 = 1,r30	// p6 <- entered via the label, not the vector
	;;
	(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	(p6)ld8 r1 = [r29]	// restore gp saved earlier
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r13		// out0 = vcpu

	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	adds r3=8,r2		// set up second base pointer
	;;
	KVM_SAVE_REST
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)
445 | |||
	.org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	KVM_FAULT(14)
	// this code segment is from 2.6.16.13


	.org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	KVM_FAULT(15)


	.org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	KVM_FAULT(16)

	.org kvm_ia64_ivt+0x4400
//////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	KVM_FAULT(17)

	.org kvm_ia64_ivt+0x4800
//////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	KVM_FAULT(18)

	.org kvm_ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	KVM_FAULT(19)
478 | |||
	.org kvm_ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
// Entries from here down are 16 bundles each (0x100 spacing); the
// reflectable guest faults are simply bounced back to the guest.
ENTRY(kvm_page_not_present)
	KVM_REFLECT(20)
END(kvm_page_not_present)

	.org kvm_ia64_ivt+0x5100
///////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
	KVM_REFLECT(21)
END(kvm_key_permission)

	.org kvm_ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
	KVM_REFLECT(22)
END(kvm_iaccess_rights)

	.org kvm_ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
	KVM_REFLECT(23)
END(kvm_daccess_rights)

	.org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
// NOTE(review): KVM_REFLECT ends in an unconditional branch, so the
// KVM_FAULT(24) below is unreachable padding.
ENTRY(kvm_general_exception)
	KVM_REFLECT(24)
	KVM_FAULT(24)
END(kvm_general_exception)

	.org kvm_ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
	KVM_REFLECT(25)
END(kvm_disabled_fp_reg)

	.org kvm_ia64_ivt+0x5600
////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
	KVM_REFLECT(26)
END(kvm_nat_consumption)

	.org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
	KVM_REFLECT(27)
END(kvm_speculation_vector)

	.org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	KVM_FAULT(28)

	.org kvm_ia64_ivt+0x5900
///////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
// NOTE(review): debug faults are a dead-end here (not reflected).
ENTRY(kvm_debug_vector)
	KVM_FAULT(29)
END(kvm_debug_vector)

	.org kvm_ia64_ivt+0x5a00
///////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
	KVM_REFLECT(30)
END(kvm_unaligned_access)

	.org kvm_ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
	KVM_REFLECT(31)
END(kvm_unsupported_data_reference)

	.org kvm_ia64_ivt+0x5c00
////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
	KVM_REFLECT(32)
END(kvm_floating_point_fault)

	.org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
	KVM_REFLECT(33)
END(kvm_floating_point_trap)

	.org kvm_ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
	KVM_REFLECT(34)
END(kvm_lower_privilege_trap)

	.org kvm_ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
	KVM_REFLECT(35)
END(kvm_taken_branch_trap)

	.org kvm_ia64_ivt+0x6000
////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
	KVM_REFLECT(36)
END(kvm_single_step_trap)
	.global kvm_virtualization_fault_back
	.org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
// Privileged-operation intercept from the guest.  r24 holds the
// decoded event cause, r25 the opcode (set by earlier decode code --
// not visible in this file; confirm against optvfault.S).  Frequent
// events are handled by hand-written fast paths; everything else
// falls through to the C dispatcher.
ENTRY(kvm_virtualization_fault)
	mov r31=pr
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	st8 [r16] = r1		// stash current gp so fast paths can restore it
	adds r17 = VMM_VCPU_GP_OFFSET, r21
	;;
	ld8 r1 = [r17]		// load the VMM gp
	// fast-path dispatch on the event cause in r24:
	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
	cmp.eq p9,p0=EVENT_RSM,r24
	cmp.eq p10,p0=EVENT_SSM,r24
	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
	cmp.eq p12,p0=EVENT_THASH,r24
	(p6) br.dptk.many kvm_asm_mov_from_ar
	(p7) br.dptk.many kvm_asm_mov_from_rr
	(p8) br.dptk.many kvm_asm_mov_to_rr
	(p9) br.dptk.many kvm_asm_rsm
	(p10) br.dptk.many kvm_asm_ssm
	(p11) br.dptk.many kvm_asm_mov_to_psr
	(p12) br.dptk.many kvm_asm_thash
	;;
kvm_virtualization_fault_back:
	// slow path (also re-entered from the fast paths on failure):
	// restore gp, record cause/opcode in the vcpu, dispatch to C.
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1 = [r16]		// restore original gp
	;;
	mov r19=37		// vector number
	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16] = r24		// vcpu->cause
	st8 [r17] = r25		// vcpu->opcode
	;;
	cmp.ne p6,p0=EVENT_RFI, r24
	(p6) br.sptk kvm_dispatch_virtualization_fault
	;;
	// RFI: check vifs.v in the VPD to decide whether to discard
	// the current register frame before dispatching.
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]
	;;
	adds r18=VMM_VPD_VIFS_OFFSET,r18
	;;
	ld8 r18=[r18]
	;;
	tbit.z p6,p0=r18,63	// p6 <- vifs.v == 0
	(p6) br.sptk kvm_dispatch_virtualization_fault
	;;
	//if vifs.v=1 desert current register frame
	alloc r18=ar.pfs,0,0,0,0
	br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)
653 | |||
	.org kvm_ia64_ivt+0x6200
//////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	KVM_FAULT(38)

	.org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	KVM_FAULT(39)

	.org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	KVM_FAULT(40)

	.org kvm_ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	KVM_FAULT(41)

	.org kvm_ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	KVM_FAULT(42)

	.org kvm_ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	KVM_FAULT(43)

	.org kvm_ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	KVM_FAULT(44)

	.org kvm_ia64_ivt+0x6900
///////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception
//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(kvm_ia32_exception)
	KVM_FAULT(45)
END(kvm_ia32_exception)

	.org kvm_ia64_ivt+0x6a00
////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
// NOTE(review): fault number 47 under entry 46, and no handler at
// the 0x6b00 (entry 47) slot -- looks like the label doubles for
// both; confirm intent before renumbering.
ENTRY(kvm_ia32_intercept)
	KVM_FAULT(47)
END(kvm_ia32_intercept)

	.org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	KVM_FAULT(48)

	.org kvm_ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	KVM_FAULT(49)

	.org kvm_ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	KVM_FAULT(50)

	.org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
// NOTE(review): fault number 52 under entry 51, and the 0x7000
// (entry 52) slot has no handler -- same pattern as entry 46 above.
	KVM_FAULT(52)

	.org kvm_ia64_ivt+0x7100
////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	KVM_FAULT(53)

	.org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	KVM_FAULT(54)

	.org kvm_ia64_ivt+0x7300
////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	KVM_FAULT(55)

	.org kvm_ia64_ivt+0x7400
////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	KVM_FAULT(56)

	.org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	KVM_FAULT(57)

	.org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	KVM_FAULT(58)

	.org kvm_ia64_ivt+0x7700
////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	KVM_FAULT(59)

	.org kvm_ia64_ivt+0x7800
////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	KVM_FAULT(60)

	.org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	KVM_FAULT(61)

	.org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	KVM_FAULT(62)

	.org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	KVM_FAULT(63)

	.org kvm_ia64_ivt+0x7c00
////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	KVM_FAULT(64)

	.org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	KVM_FAULT(65)

	.org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	KVM_FAULT(66)

	.org kvm_ia64_ivt+0x7f00
////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	KVM_FAULT(67)
798 | |||
799 | .org kvm_ia64_ivt+0x8000 | ||
800 | // There is no particular reason for this code to be here, other than that | ||
801 | // there happens to be space here that would go unused otherwise. If this | ||
802 | // fault ever gets "unreserved", simply moved the following code to a more | ||
803 | // suitable spot... | ||
804 | |||
805 | |||
806 | ENTRY(kvm_dtlb_miss_dispatch) | ||
807 | mov r19 = 2 | ||
808 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
809 | alloc r14=ar.pfs,0,0,3,0 | ||
810 | mov out0=cr.ifa | ||
811 | mov out1=r15 | ||
812 | adds r3=8,r2 // set up second base pointer | ||
813 | ;; | ||
814 | ssm psr.ic | ||
815 | ;; | ||
816 | srlz.i // guarantee that interruption collection is on | ||
817 | ;; | ||
818 | //(p15) ssm psr.i // restore psr.i | ||
819 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | ||
820 | ;; | ||
821 | KVM_SAVE_REST | ||
822 | KVM_SAVE_EXTRA | ||
823 | mov rp=r14 | ||
824 | ;; | ||
825 | adds out2=16,r12 | ||
826 | br.call.sptk.many b6=kvm_page_fault | ||
827 | END(kvm_dtlb_miss_dispatch) | ||
828 | |||
829 | ENTRY(kvm_itlb_miss_dispatch) | ||
830 | |||
831 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
832 | alloc r14=ar.pfs,0,0,3,0 | ||
833 | mov out0=cr.ifa | ||
834 | mov out1=r15 | ||
835 | adds r3=8,r2 // set up second base pointer | ||
836 | ;; | ||
837 | ssm psr.ic | ||
838 | ;; | ||
839 | srlz.i // guarantee that interruption collection is on | ||
840 | ;; | ||
841 | //(p15) ssm psr.i // restore psr.i | ||
842 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
843 | ;; | ||
844 | KVM_SAVE_REST | ||
845 | mov rp=r14 | ||
846 | ;; | ||
847 | adds out2=16,r12 | ||
848 | br.call.sptk.many b6=kvm_page_fault | ||
849 | END(kvm_itlb_miss_dispatch) | ||
850 | |||
851 | ENTRY(kvm_dispatch_reflection) | ||
852 | /* | ||
853 | * Input: | ||
854 | * psr.ic: off | ||
855 | * r19: intr type (offset into ivt, see ia64_int.h) | ||
856 | * r31: contains saved predicates (pr) | ||
857 | */ | ||
858 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
859 | alloc r14=ar.pfs,0,0,5,0 | ||
860 | mov out0=cr.ifa | ||
861 | mov out1=cr.isr | ||
862 | mov out2=cr.iim | ||
863 | mov out3=r15 | ||
864 | adds r3=8,r2 // set up second base pointer | ||
865 | ;; | ||
866 | ssm psr.ic | ||
867 | ;; | ||
868 | srlz.i // guarantee that interruption collection is on | ||
869 | ;; | ||
870 | //(p15) ssm psr.i // restore psr.i | ||
871 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
872 | ;; | ||
873 | KVM_SAVE_REST | ||
874 | mov rp=r14 | ||
875 | ;; | ||
876 | adds out4=16,r12 | ||
877 | br.call.sptk.many b6=reflect_interruption | ||
878 | END(kvm_dispatch_reflection) | ||
879 | |||
880 | ENTRY(kvm_dispatch_virtualization_fault) | ||
881 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | ||
882 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | ||
883 | ;; | ||
884 | st8 [r16] = r24 | ||
885 | st8 [r17] = r25 | ||
886 | ;; | ||
887 | KVM_SAVE_MIN_WITH_COVER_R19 | ||
888 | ;; | ||
889 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | ||
890 | mov out0=r13 //vcpu | ||
891 | adds r3=8,r2 // set up second base pointer | ||
892 | ;; | ||
893 | ssm psr.ic | ||
894 | ;; | ||
895 | srlz.i // guarantee that interruption collection is on | ||
896 | ;; | ||
897 | //(p15) ssm psr.i // restore psr.i | ||
898 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | ||
899 | ;; | ||
900 | KVM_SAVE_REST | ||
901 | KVM_SAVE_EXTRA | ||
902 | mov rp=r14 | ||
903 | ;; | ||
904 | adds out1=16,sp //regs | ||
905 | br.call.sptk.many b6=kvm_emulate | ||
906 | END(kvm_dispatch_virtualization_fault) | ||
907 | |||
908 | |||
909 | ENTRY(kvm_dispatch_interrupt) | ||
910 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 | ||
911 | ;; | ||
912 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | ||
913 | //mov out0=cr.ivr // pass cr.ivr as first arg | ||
914 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
915 | ;; | ||
916 | ssm psr.ic | ||
917 | ;; | ||
918 | srlz.i | ||
919 | ;; | ||
920 | //(p15) ssm psr.i | ||
921 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
922 | ;; | ||
923 | KVM_SAVE_REST | ||
924 | mov rp=r14 | ||
925 | ;; | ||
926 | mov out0=r13 // pass pointer to pt_regs as second arg | ||
927 | br.call.sptk.many b6=kvm_ia64_handle_irq | ||
928 | END(kvm_dispatch_interrupt) | ||
929 | |||
930 | |||
931 | |||
932 | |||
933 | GLOBAL_ENTRY(ia64_leave_nested) | ||
934 | rsm psr.i | ||
935 | ;; | ||
936 | adds r21=PT(PR)+16,r12 | ||
937 | ;; | ||
938 | lfetch [r21],PT(CR_IPSR)-PT(PR) | ||
939 | adds r2=PT(B6)+16,r12 | ||
940 | adds r3=PT(R16)+16,r12 | ||
941 | ;; | ||
942 | lfetch [r21] | ||
943 | ld8 r28=[r2],8 // load b6 | ||
944 | adds r29=PT(R24)+16,r12 | ||
945 | |||
946 | ld8.fill r16=[r3] | ||
947 | adds r3=PT(AR_CSD)-PT(R16),r3 | ||
948 | adds r30=PT(AR_CCV)+16,r12 | ||
949 | ;; | ||
950 | ld8.fill r24=[r29] | ||
951 | ld8 r15=[r30] // load ar.ccv | ||
952 | ;; | ||
953 | ld8 r29=[r2],16 // load b7 | ||
954 | ld8 r30=[r3],16 // load ar.csd | ||
955 | ;; | ||
956 | ld8 r31=[r2],16 // load ar.ssd | ||
957 | ld8.fill r8=[r3],16 | ||
958 | ;; | ||
959 | ld8.fill r9=[r2],16 | ||
960 | ld8.fill r10=[r3],PT(R17)-PT(R10) | ||
961 | ;; | ||
962 | ld8.fill r11=[r2],PT(R18)-PT(R11) | ||
963 | ld8.fill r17=[r3],16 | ||
964 | ;; | ||
965 | ld8.fill r18=[r2],16 | ||
966 | ld8.fill r19=[r3],16 | ||
967 | ;; | ||
968 | ld8.fill r20=[r2],16 | ||
969 | ld8.fill r21=[r3],16 | ||
970 | mov ar.csd=r30 | ||
971 | mov ar.ssd=r31 | ||
972 | ;; | ||
973 | rsm psr.i | psr.ic | ||
974 | // initiate turning off of interrupt and interruption collection | ||
975 | invala // invalidate ALAT | ||
976 | ;; | ||
977 | srlz.i | ||
978 | ;; | ||
979 | ld8.fill r22=[r2],24 | ||
980 | ld8.fill r23=[r3],24 | ||
981 | mov b6=r28 | ||
982 | ;; | ||
983 | ld8.fill r25=[r2],16 | ||
984 | ld8.fill r26=[r3],16 | ||
985 | mov b7=r29 | ||
986 | ;; | ||
987 | ld8.fill r27=[r2],16 | ||
988 | ld8.fill r28=[r3],16 | ||
989 | ;; | ||
990 | ld8.fill r29=[r2],16 | ||
991 | ld8.fill r30=[r3],24 | ||
992 | ;; | ||
993 | ld8.fill r31=[r2],PT(F9)-PT(R31) | ||
994 | adds r3=PT(F10)-PT(F6),r3 | ||
995 | ;; | ||
996 | ldf.fill f9=[r2],PT(F6)-PT(F9) | ||
997 | ldf.fill f10=[r3],PT(F8)-PT(F10) | ||
998 | ;; | ||
999 | ldf.fill f6=[r2],PT(F7)-PT(F6) | ||
1000 | ;; | ||
1001 | ldf.fill f7=[r2],PT(F11)-PT(F7) | ||
1002 | ldf.fill f8=[r3],32 | ||
1003 | ;; | ||
1004 | srlz.i // ensure interruption collection is off | ||
1005 | mov ar.ccv=r15 | ||
1006 | ;; | ||
1007 | bsw.0 // switch back to bank 0 (no stop bit required beforehand...) | ||
1008 | ;; | ||
1009 | ldf.fill f11=[r2] | ||
1010 | // mov r18=r13 | ||
1011 | // mov r21=r13 | ||
1012 | adds r16=PT(CR_IPSR)+16,r12 | ||
1013 | adds r17=PT(CR_IIP)+16,r12 | ||
1014 | ;; | ||
1015 | ld8 r29=[r16],16 // load cr.ipsr | ||
1016 | ld8 r28=[r17],16 // load cr.iip | ||
1017 | ;; | ||
1018 | ld8 r30=[r16],16 // load cr.ifs | ||
1019 | ld8 r25=[r17],16 // load ar.unat | ||
1020 | ;; | ||
1021 | ld8 r26=[r16],16 // load ar.pfs | ||
1022 | ld8 r27=[r17],16 // load ar.rsc | ||
1023 | cmp.eq p9,p0=r0,r0 | ||
1024 | // set p9 to indicate that we should restore cr.ifs | ||
1025 | ;; | ||
1026 | ld8 r24=[r16],16 // load ar.rnat (may be garbage) | ||
1027 | ld8 r23=[r17],16// load ar.bspstore (may be garbage) | ||
1028 | ;; | ||
1029 | ld8 r31=[r16],16 // load predicates | ||
1030 | ld8 r22=[r17],16 // load b0 | ||
1031 | ;; | ||
1032 | ld8 r19=[r16],16 // load ar.rsc value for "loadrs" | ||
1033 | ld8.fill r1=[r17],16 // load r1 | ||
1034 | ;; | ||
1035 | ld8.fill r12=[r16],16 | ||
1036 | ld8.fill r13=[r17],16 | ||
1037 | ;; | ||
1038 | ld8 r20=[r16],16 // ar.fpsr | ||
1039 | ld8.fill r15=[r17],16 | ||
1040 | ;; | ||
1041 | ld8.fill r14=[r16],16 | ||
1042 | ld8.fill r2=[r17] | ||
1043 | ;; | ||
1044 | ld8.fill r3=[r16] | ||
1045 | ;; | ||
1046 | mov r16=ar.bsp // get existing backing store pointer | ||
1047 | ;; | ||
1048 | mov b0=r22 | ||
1049 | mov ar.pfs=r26 | ||
1050 | mov cr.ifs=r30 | ||
1051 | mov cr.ipsr=r29 | ||
1052 | mov ar.fpsr=r20 | ||
1053 | mov cr.iip=r28 | ||
1054 | ;; | ||
1055 | mov ar.rsc=r27 | ||
1056 | mov ar.unat=r25 | ||
1057 | mov pr=r31,-1 | ||
1058 | rfi | ||
1059 | END(ia64_leave_nested) | ||
1060 | |||
1061 | |||
1062 | |||
1063 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) | ||
1064 | /* | ||
1065 | * work.need_resched etc. mustn't get changed | ||
1066 | *by this CPU before it returns to | ||
1067 | ;; | ||
1068 | * user- or fsys-mode, hence we disable interrupts early on: | ||
1069 | */ | ||
1070 | adds r2 = PT(R4)+16,r12 | ||
1071 | adds r3 = PT(R5)+16,r12 | ||
1072 | adds r8 = PT(EML_UNAT)+16,r12 | ||
1073 | ;; | ||
1074 | ld8 r8 = [r8] | ||
1075 | ;; | ||
1076 | mov ar.unat=r8 | ||
1077 | ;; | ||
1078 | ld8.fill r4=[r2],16 //load r4 | ||
1079 | ld8.fill r5=[r3],16 //load r5 | ||
1080 | ;; | ||
1081 | ld8.fill r6=[r2] //load r6 | ||
1082 | ld8.fill r7=[r3] //load r7 | ||
1083 | ;; | ||
1084 | END(ia64_leave_hypervisor_prepare) | ||
1085 | //fall through | ||
1086 | GLOBAL_ENTRY(ia64_leave_hypervisor) | ||
1087 | rsm psr.i | ||
1088 | ;; | ||
1089 | br.call.sptk.many b0=leave_hypervisor_tail | ||
1090 | ;; | ||
1091 | adds r20=PT(PR)+16,r12 | ||
1092 | adds r8=PT(EML_UNAT)+16,r12 | ||
1093 | ;; | ||
1094 | ld8 r8=[r8] | ||
1095 | ;; | ||
1096 | mov ar.unat=r8 | ||
1097 | ;; | ||
1098 | lfetch [r20],PT(CR_IPSR)-PT(PR) | ||
1099 | adds r2 = PT(B6)+16,r12 | ||
1100 | adds r3 = PT(B7)+16,r12 | ||
1101 | ;; | ||
1102 | lfetch [r20] | ||
1103 | ;; | ||
1104 | ld8 r24=[r2],16 /* B6 */ | ||
1105 | ld8 r25=[r3],16 /* B7 */ | ||
1106 | ;; | ||
1107 | ld8 r26=[r2],16 /* ar_csd */ | ||
1108 | ld8 r27=[r3],16 /* ar_ssd */ | ||
1109 | mov b6 = r24 | ||
1110 | ;; | ||
1111 | ld8.fill r8=[r2],16 | ||
1112 | ld8.fill r9=[r3],16 | ||
1113 | mov b7 = r25 | ||
1114 | ;; | ||
1115 | mov ar.csd = r26 | ||
1116 | mov ar.ssd = r27 | ||
1117 | ;; | ||
1118 | ld8.fill r10=[r2],PT(R15)-PT(R10) | ||
1119 | ld8.fill r11=[r3],PT(R14)-PT(R11) | ||
1120 | ;; | ||
1121 | ld8.fill r15=[r2],PT(R16)-PT(R15) | ||
1122 | ld8.fill r14=[r3],PT(R17)-PT(R14) | ||
1123 | ;; | ||
1124 | ld8.fill r16=[r2],16 | ||
1125 | ld8.fill r17=[r3],16 | ||
1126 | ;; | ||
1127 | ld8.fill r18=[r2],16 | ||
1128 | ld8.fill r19=[r3],16 | ||
1129 | ;; | ||
1130 | ld8.fill r20=[r2],16 | ||
1131 | ld8.fill r21=[r3],16 | ||
1132 | ;; | ||
1133 | ld8.fill r22=[r2],16 | ||
1134 | ld8.fill r23=[r3],16 | ||
1135 | ;; | ||
1136 | ld8.fill r24=[r2],16 | ||
1137 | ld8.fill r25=[r3],16 | ||
1138 | ;; | ||
1139 | ld8.fill r26=[r2],16 | ||
1140 | ld8.fill r27=[r3],16 | ||
1141 | ;; | ||
1142 | ld8.fill r28=[r2],16 | ||
1143 | ld8.fill r29=[r3],16 | ||
1144 | ;; | ||
1145 | ld8.fill r30=[r2],PT(F6)-PT(R30) | ||
1146 | ld8.fill r31=[r3],PT(F7)-PT(R31) | ||
1147 | ;; | ||
1148 | rsm psr.i | psr.ic | ||
1149 | // initiate turning off of interrupt and interruption collection | ||
1150 | invala // invalidate ALAT | ||
1151 | ;; | ||
1152 | srlz.i // ensure interruption collection is off | ||
1153 | ;; | ||
1154 | bsw.0 | ||
1155 | ;; | ||
1156 | adds r16 = PT(CR_IPSR)+16,r12 | ||
1157 | adds r17 = PT(CR_IIP)+16,r12 | ||
1158 | mov r21=r13 // get current | ||
1159 | ;; | ||
1160 | ld8 r31=[r16],16 // load cr.ipsr | ||
1161 | ld8 r30=[r17],16 // load cr.iip | ||
1162 | ;; | ||
1163 | ld8 r29=[r16],16 // load cr.ifs | ||
1164 | ld8 r28=[r17],16 // load ar.unat | ||
1165 | ;; | ||
1166 | ld8 r27=[r16],16 // load ar.pfs | ||
1167 | ld8 r26=[r17],16 // load ar.rsc | ||
1168 | ;; | ||
1169 | ld8 r25=[r16],16 // load ar.rnat | ||
1170 | ld8 r24=[r17],16 // load ar.bspstore | ||
1171 | ;; | ||
1172 | ld8 r23=[r16],16 // load predicates | ||
1173 | ld8 r22=[r17],16 // load b0 | ||
1174 | ;; | ||
1175 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" | ||
1176 | ld8.fill r1=[r17],16 //load r1 | ||
1177 | ;; | ||
1178 | ld8.fill r12=[r16],16 //load r12 | ||
1179 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 | ||
1180 | ;; | ||
1181 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr | ||
1182 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 | ||
1183 | ;; | ||
1184 | ld8.fill r3=[r16] //load r3 | ||
1185 | ld8 r18=[r17] //load ar_ccv | ||
1186 | ;; | ||
1187 | mov ar.fpsr=r19 | ||
1188 | mov ar.ccv=r18 | ||
1189 | shr.u r18=r20,16 | ||
1190 | ;; | ||
1191 | kvm_rbs_switch: | ||
1192 | mov r19=96 | ||
1193 | |||
1194 | kvm_dont_preserve_current_frame: | ||
1195 | /* | ||
1196 | * To prevent leaking bits between the hypervisor and guest domain, | ||
1197 | * we must clear the stacked registers in the "invalid" partition here. | ||
1198 | * 5 registers/cycle on McKinley). | ||
1199 | */ | ||
1200 | # define pRecurse p6 | ||
1201 | # define pReturn p7 | ||
1202 | # define Nregs 14 | ||
1203 | |||
1204 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | ||
1205 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) | ||
1206 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize | ||
1207 | ;; | ||
1208 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" | ||
1209 | shladd in0=loc1,3,r19 | ||
1210 | mov in1=0 | ||
1211 | ;; | ||
1212 | TEXT_ALIGN(32) | ||
1213 | kvm_rse_clear_invalid: | ||
1214 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | ||
1215 | cmp.lt pRecurse,p0=Nregs*8,in0 | ||
1216 | // if more than Nregs regs left to clear, (re)curse | ||
1217 | add out0=-Nregs*8,in0 | ||
1218 | add out1=1,in1 // increment recursion count | ||
1219 | mov loc1=0 | ||
1220 | mov loc2=0 | ||
1221 | ;; | ||
1222 | mov loc3=0 | ||
1223 | mov loc4=0 | ||
1224 | mov loc5=0 | ||
1225 | mov loc6=0 | ||
1226 | mov loc7=0 | ||
1227 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid | ||
1228 | ;; | ||
1229 | mov loc8=0 | ||
1230 | mov loc9=0 | ||
1231 | cmp.ne pReturn,p0=r0,in1 | ||
1232 | // if recursion count != 0, we need to do a br.ret | ||
1233 | mov loc10=0 | ||
1234 | mov loc11=0 | ||
1235 | (pReturn) br.ret.dptk.many b0 | ||
1236 | |||
1237 | # undef pRecurse | ||
1238 | # undef pReturn | ||
1239 | |||
1240 | // loadrs has already been shifted | ||
1241 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame | ||
1242 | ;; | ||
1243 | loadrs | ||
1244 | ;; | ||
1245 | mov ar.bspstore=r24 | ||
1246 | ;; | ||
1247 | mov ar.unat=r28 | ||
1248 | mov ar.rnat=r25 | ||
1249 | mov ar.rsc=r26 | ||
1250 | ;; | ||
1251 | mov cr.ipsr=r31 | ||
1252 | mov cr.iip=r30 | ||
1253 | mov cr.ifs=r29 | ||
1254 | mov ar.pfs=r27 | ||
1255 | adds r18=VMM_VPD_BASE_OFFSET,r21 | ||
1256 | ;; | ||
1257 | ld8 r18=[r18] //vpd | ||
1258 | adds r17=VMM_VCPU_ISR_OFFSET,r21 | ||
1259 | ;; | ||
1260 | ld8 r17=[r17] | ||
1261 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | ||
1262 | ;; | ||
1263 | ld8 r19=[r19] //vpsr | ||
1264 | adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21 | ||
1265 | ;; | ||
1266 | ld8 r20=[r20] | ||
1267 | ;; | ||
1268 | //vsa_sync_write_start | ||
1269 | mov r25=r18 | ||
1270 | adds r16= VMM_VCPU_GP_OFFSET,r21 | ||
1271 | ;; | ||
1272 | ld8 r16= [r16] // Put gp in r24 | ||
1273 | movl r24=@gprel(ia64_vmm_entry) // calculate return address | ||
1274 | ;; | ||
1275 | add r24=r24,r16 | ||
1276 | ;; | ||
1277 | add r16=PAL_VPS_SYNC_WRITE,r20 | ||
1278 | ;; | ||
1279 | mov b0=r16 | ||
1280 | br.cond.sptk b0 // call the service | ||
1281 | ;; | ||
1282 | END(ia64_leave_hypervisor) | ||
1283 | // fall through | ||
1284 | GLOBAL_ENTRY(ia64_vmm_entry) | ||
1285 | /* | ||
1286 | * must be at bank 0 | ||
1287 | * parameter: | ||
1288 | * r17:cr.isr | ||
1289 | * r18:vpd | ||
1290 | * r19:vpsr | ||
1291 | * r20:__vsa_base | ||
1292 | * r22:b0 | ||
1293 | * r23:predicate | ||
1294 | */ | ||
1295 | mov r24=r22 | ||
1296 | mov r25=r18 | ||
1297 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic | ||
1298 | ;; | ||
1299 | (p1) add r29=PAL_VPS_RESUME_NORMAL,r20 | ||
1300 | (p1) br.sptk.many ia64_vmm_entry_out | ||
1301 | ;; | ||
1302 | tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT //p1=cr.isr.ir | ||
1303 | ;; | ||
1304 | (p1) add r29=PAL_VPS_RESUME_NORMAL,r20 | ||
1305 | (p2) add r29=PAL_VPS_RESUME_HANDLER,r20 | ||
1306 | (p2) ld8 r26=[r25] | ||
1307 | ;; | ||
1308 | ia64_vmm_entry_out: | ||
1309 | mov pr=r23,-2 | ||
1310 | mov b0=r29 | ||
1311 | ;; | ||
1312 | br.cond.sptk b0 // call pal service | ||
1313 | END(ia64_vmm_entry) | ||
1314 | |||
1315 | |||
1316 | |||
1317 | /* | ||
1318 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, | ||
1319 | * u64 arg3, u64 arg4, u64 arg5, | ||
1320 | * u64 arg6, u64 arg7); | ||
1321 | * | ||
1322 | * XXX: The currently defined services use only 4 args at the max. The | ||
1323 | * rest are not consumed. | ||
1324 | */ | ||
1325 | GLOBAL_ENTRY(ia64_call_vsa) | ||
1326 | .regstk 4,4,0,0 | ||
1327 | |||
1328 | rpsave = loc0 | ||
1329 | pfssave = loc1 | ||
1330 | psrsave = loc2 | ||
1331 | entry = loc3 | ||
1332 | hostret = r24 | ||
1333 | |||
1334 | alloc pfssave=ar.pfs,4,4,0,0 | ||
1335 | mov rpsave=rp | ||
1336 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 | ||
1337 | ;; | ||
1338 | ld8 entry=[entry] | ||
1339 | 1: mov hostret=ip | ||
1340 | mov r25=in1 // copy arguments | ||
1341 | mov r26=in2 | ||
1342 | mov r27=in3 | ||
1343 | mov psrsave=psr | ||
1344 | ;; | ||
1345 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I | ||
1346 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC | ||
1347 | ;; | ||
1348 | add hostret=2f-1b,hostret // calculate return address | ||
1349 | add entry=entry,in0 | ||
1350 | ;; | ||
1351 | rsm psr.i | psr.ic | ||
1352 | ;; | ||
1353 | srlz.i | ||
1354 | mov b6=entry | ||
1355 | br.cond.sptk b6 // call the service | ||
1356 | 2: | ||
1357 | // Architectural sequence for enabling interrupts if necessary | ||
1358 | (p7) ssm psr.ic | ||
1359 | ;; | ||
1360 | (p7) srlz.i | ||
1361 | ;; | ||
1362 | //(p6) ssm psr.i | ||
1363 | ;; | ||
1364 | mov rp=rpsave | ||
1365 | mov ar.pfs=pfssave | ||
1366 | mov r8=r31 | ||
1367 | ;; | ||
1368 | srlz.d | ||
1369 | br.ret.sptk rp | ||
1370 | |||
1371 | END(ia64_call_vsa) | ||
1372 | |||
1373 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) | ||
1374 | |||
1375 | GLOBAL_ENTRY(vmm_reset_entry) | ||
1376 | //set up ipsr, iip, vpd.vpsr, dcr | ||
1377 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 | ||
1378 | // For DCR: all bits 0 | ||
1379 | adds r14=-VMM_PT_REGS_SIZE, r12 | ||
1380 | ;; | ||
1381 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 | ||
1382 | movl r10=0x8000000000000000 | ||
1383 | adds r16=PT(CR_IIP), r14 | ||
1384 | adds r20=PT(R1), r14 | ||
1385 | ;; | ||
1386 | rsm psr.ic | psr.i | ||
1387 | ;; | ||
1388 | srlz.i | ||
1389 | ;; | ||
1390 | bsw.0 | ||
1391 | ;; | ||
1392 | mov r21 =r13 | ||
1393 | ;; | ||
1394 | bsw.1 | ||
1395 | ;; | ||
1396 | mov ar.rsc = 0 | ||
1397 | ;; | ||
1398 | flushrs | ||
1399 | ;; | ||
1400 | mov ar.bspstore = 0 | ||
1401 | // clear BSPSTORE | ||
1402 | ;; | ||
1403 | mov cr.ipsr=r6 | ||
1404 | mov cr.ifs=r10 | ||
1405 | ld8 r4 = [r16] // Set init iip for first run. | ||
1406 | ld8 r1 = [r20] | ||
1407 | ;; | ||
1408 | mov cr.iip=r4 | ||
1409 | ;; | ||
1410 | adds r16=VMM_VPD_BASE_OFFSET,r13 | ||
1411 | adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13 | ||
1412 | ;; | ||
1413 | ld8 r18=[r16] | ||
1414 | ld8 r20=[r20] | ||
1415 | ;; | ||
1416 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | ||
1417 | ;; | ||
1418 | ld8 r19=[r19] | ||
1419 | mov r17=r0 | ||
1420 | mov r22=r0 | ||
1421 | mov r23=r0 | ||
1422 | br.cond.sptk ia64_vmm_entry | ||
1423 | br.ret.sptk b0 | ||
1424 | END(vmm_reset_entry) | ||
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h new file mode 100644 index 000000000000..f6c5617e16af --- /dev/null +++ b/arch/ia64/kvm/vti.h | |||
@@ -0,0 +1,290 @@ | |||
1 | /* | ||
 * vti.h: prototypes for the general VT-related interface
3 | * Copyright (c) 2004, Intel Corporation. | ||
4 | * | ||
5 | * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) | ||
6 | * Fred Yang (fred.yang@intel.com) | ||
7 | * Kun Tian (Kevin Tian) (kevin.tian@intel.com) | ||
8 | * | ||
9 | * Copyright (c) 2007, Intel Corporation. | ||
10 | * Zhang xiantao <xiantao.zhang@intel.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms and conditions of the GNU General Public License, | ||
14 | * version 2, as published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
19 | * more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License along with | ||
22 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
23 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
24 | */ | ||
25 | #ifndef _KVM_VT_I_H | ||
26 | #define _KVM_VT_I_H | ||
27 | |||
28 | #ifndef __ASSEMBLY__ | ||
29 | #include <asm/page.h> | ||
30 | |||
31 | #include <linux/kvm_host.h> | ||
32 | |||
33 | /* define itr.i and itr.d in ia64_itr function */ | ||
34 | #define ITR 0x01 | ||
35 | #define DTR 0x02 | ||
36 | #define IaDTR 0x03 | ||
37 | |||
38 | #define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/ | ||
39 | #define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/ | ||
40 | |||
41 | #define RR6 (6UL<<61) | ||
42 | #define RR7 (7UL<<61) | ||
43 | |||
44 | |||
45 | /* config_options in pal_vp_init_env */ | ||
46 | #define VP_INITIALIZE 1UL | ||
47 | #define VP_FR_PMC 1UL<<1 | ||
48 | #define VP_OPCODE 1UL<<8 | ||
49 | #define VP_CAUSE 1UL<<9 | ||
50 | #define VP_FW_ACC 1UL<<63 | ||
51 | |||
52 | /* init vp env with initializing vm_buffer */ | ||
53 | #define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\ | ||
54 | VP_OPCODE | VP_CAUSE | VP_FW_ACC) | ||
55 | /* init vp env without initializing vm_buffer */ | ||
56 | #define VP_INIT_ENV VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC | ||
57 | |||
58 | #define PAL_VP_CREATE 265 | ||
59 | /* Stacked Virt. Initializes a new VPD for the operation of | ||
60 | * a new virtual processor in the virtual environment. | ||
61 | */ | ||
62 | #define PAL_VP_ENV_INFO 266 | ||
63 | /*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/ | ||
64 | #define PAL_VP_EXIT_ENV 267 | ||
65 | /*Stacked Virt. Allows a logical processor to exit a virtual environment.*/ | ||
66 | #define PAL_VP_INIT_ENV 268 | ||
67 | /*Stacked Virt. Allows a logical processor to enter a virtual environment.*/ | ||
68 | #define PAL_VP_REGISTER 269 | ||
69 | /*Stacked Virt. Register a different host IVT for the virtual processor.*/ | ||
70 | #define PAL_VP_RESUME 270 | ||
71 | /* Renamed from PAL_VP_RESUME */ | ||
72 | #define PAL_VP_RESTORE 270 | ||
73 | /*Stacked Virt. Resumes virtual processor operation on the logical processor.*/ | ||
74 | #define PAL_VP_SUSPEND 271 | ||
75 | /* Renamed from PAL_VP_SUSPEND */ | ||
76 | #define PAL_VP_SAVE 271 | ||
77 | /* Stacked Virt. Suspends operation for the specified virtual processor on | ||
78 | * the logical processor. | ||
79 | */ | ||
80 | #define PAL_VP_TERMINATE 272 | ||
81 | /* Stacked Virt. Terminates operation for the specified virtual processor.*/ | ||
82 | |||
83 | union vac { | ||
84 | unsigned long value; | ||
85 | struct { | ||
86 | int a_int:1; | ||
87 | int a_from_int_cr:1; | ||
88 | int a_to_int_cr:1; | ||
89 | int a_from_psr:1; | ||
90 | int a_from_cpuid:1; | ||
91 | int a_cover:1; | ||
92 | int a_bsw:1; | ||
93 | long reserved:57; | ||
94 | }; | ||
95 | }; | ||
96 | |||
97 | union vdc { | ||
98 | unsigned long value; | ||
99 | struct { | ||
100 | int d_vmsw:1; | ||
101 | int d_extint:1; | ||
102 | int d_ibr_dbr:1; | ||
103 | int d_pmc:1; | ||
104 | int d_to_pmd:1; | ||
105 | int d_itm:1; | ||
106 | long reserved:58; | ||
107 | }; | ||
108 | }; | ||
109 | |||
110 | struct vpd { | ||
111 | union vac vac; | ||
112 | union vdc vdc; | ||
113 | unsigned long virt_env_vaddr; | ||
114 | unsigned long reserved1[29]; | ||
115 | unsigned long vhpi; | ||
116 | unsigned long reserved2[95]; | ||
117 | unsigned long vgr[16]; | ||
118 | unsigned long vbgr[16]; | ||
119 | unsigned long vnat; | ||
120 | unsigned long vbnat; | ||
121 | unsigned long vcpuid[5]; | ||
122 | unsigned long reserved3[11]; | ||
123 | unsigned long vpsr; | ||
124 | unsigned long vpr; | ||
125 | unsigned long reserved4[76]; | ||
126 | union { | ||
127 | unsigned long vcr[128]; | ||
128 | struct { | ||
129 | unsigned long dcr; | ||
130 | unsigned long itm; | ||
131 | unsigned long iva; | ||
132 | unsigned long rsv1[5]; | ||
133 | unsigned long pta; | ||
134 | unsigned long rsv2[7]; | ||
135 | unsigned long ipsr; | ||
136 | unsigned long isr; | ||
137 | unsigned long rsv3; | ||
138 | unsigned long iip; | ||
139 | unsigned long ifa; | ||
140 | unsigned long itir; | ||
141 | unsigned long iipa; | ||
142 | unsigned long ifs; | ||
143 | unsigned long iim; | ||
144 | unsigned long iha; | ||
145 | unsigned long rsv4[38]; | ||
146 | unsigned long lid; | ||
147 | unsigned long ivr; | ||
148 | unsigned long tpr; | ||
149 | unsigned long eoi; | ||
150 | unsigned long irr[4]; | ||
151 | unsigned long itv; | ||
152 | unsigned long pmv; | ||
153 | unsigned long cmcv; | ||
154 | unsigned long rsv5[5]; | ||
155 | unsigned long lrr0; | ||
156 | unsigned long lrr1; | ||
157 | unsigned long rsv6[46]; | ||
158 | }; | ||
159 | }; | ||
160 | unsigned long reserved5[128]; | ||
161 | unsigned long reserved6[3456]; | ||
162 | unsigned long vmm_avail[128]; | ||
163 | unsigned long reserved7[4096]; | ||
164 | }; | ||
165 | |||
166 | #define PAL_PROC_VM_BIT (1UL << 40) | ||
167 | #define PAL_PROC_VMSW_BIT (1UL << 54) | ||
168 | |||
169 | static inline s64 ia64_pal_vp_env_info(u64 *buffer_size, | ||
170 | u64 *vp_env_info) | ||
171 | { | ||
172 | struct ia64_pal_retval iprv; | ||
173 | PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0); | ||
174 | *buffer_size = iprv.v0; | ||
175 | *vp_env_info = iprv.v1; | ||
176 | return iprv.status; | ||
177 | } | ||
178 | |||
179 | static inline s64 ia64_pal_vp_exit_env(u64 iva) | ||
180 | { | ||
181 | struct ia64_pal_retval iprv; | ||
182 | |||
183 | PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0); | ||
184 | return iprv.status; | ||
185 | } | ||
186 | |||
187 | static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr, | ||
188 | u64 vbase_addr, u64 *vsa_base) | ||
189 | { | ||
190 | struct ia64_pal_retval iprv; | ||
191 | |||
192 | PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr, | ||
193 | vbase_addr); | ||
194 | *vsa_base = iprv.v0; | ||
195 | |||
196 | return iprv.status; | ||
197 | } | ||
198 | |||
199 | static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector) | ||
200 | { | ||
201 | struct ia64_pal_retval iprv; | ||
202 | |||
203 | PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0); | ||
204 | |||
205 | return iprv.status; | ||
206 | } | ||
207 | |||
208 | static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector) | ||
209 | { | ||
210 | struct ia64_pal_retval iprv; | ||
211 | |||
212 | PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0); | ||
213 | |||
214 | return iprv.status; | ||
215 | } | ||
216 | |||
217 | #endif | ||
218 | |||
219 | /*VPD field offset*/ | ||
220 | #define VPD_VAC_START_OFFSET 0 | ||
221 | #define VPD_VDC_START_OFFSET 8 | ||
222 | #define VPD_VHPI_START_OFFSET 256 | ||
223 | #define VPD_VGR_START_OFFSET 1024 | ||
224 | #define VPD_VBGR_START_OFFSET 1152 | ||
225 | #define VPD_VNAT_START_OFFSET 1280 | ||
226 | #define VPD_VBNAT_START_OFFSET 1288 | ||
227 | #define VPD_VCPUID_START_OFFSET 1296 | ||
228 | #define VPD_VPSR_START_OFFSET 1424 | ||
229 | #define VPD_VPR_START_OFFSET 1432 | ||
230 | #define VPD_VRSE_CFLE_START_OFFSET 1440 | ||
231 | #define VPD_VCR_START_OFFSET 2048 | ||
232 | #define VPD_VTPR_START_OFFSET 2576 | ||
233 | #define VPD_VRR_START_OFFSET 3072 | ||
234 | #define VPD_VMM_VAIL_START_OFFSET 31744 | ||
235 | |||
236 | /*Virtualization faults*/ | ||
237 | |||
238 | #define EVENT_MOV_TO_AR 1 | ||
239 | #define EVENT_MOV_TO_AR_IMM 2 | ||
240 | #define EVENT_MOV_FROM_AR 3 | ||
241 | #define EVENT_MOV_TO_CR 4 | ||
242 | #define EVENT_MOV_FROM_CR 5 | ||
243 | #define EVENT_MOV_TO_PSR 6 | ||
244 | #define EVENT_MOV_FROM_PSR 7 | ||
245 | #define EVENT_ITC_D 8 | ||
246 | #define EVENT_ITC_I 9 | ||
247 | #define EVENT_MOV_TO_RR 10 | ||
248 | #define EVENT_MOV_TO_DBR 11 | ||
249 | #define EVENT_MOV_TO_IBR 12 | ||
250 | #define EVENT_MOV_TO_PKR 13 | ||
251 | #define EVENT_MOV_TO_PMC 14 | ||
252 | #define EVENT_MOV_TO_PMD 15 | ||
253 | #define EVENT_ITR_D 16 | ||
254 | #define EVENT_ITR_I 17 | ||
255 | #define EVENT_MOV_FROM_RR 18 | ||
256 | #define EVENT_MOV_FROM_DBR 19 | ||
257 | #define EVENT_MOV_FROM_IBR 20 | ||
258 | #define EVENT_MOV_FROM_PKR 21 | ||
259 | #define EVENT_MOV_FROM_PMC 22 | ||
260 | #define EVENT_MOV_FROM_CPUID 23 | ||
261 | #define EVENT_SSM 24 | ||
262 | #define EVENT_RSM 25 | ||
263 | #define EVENT_PTC_L 26 | ||
264 | #define EVENT_PTC_G 27 | ||
265 | #define EVENT_PTC_GA 28 | ||
266 | #define EVENT_PTR_D 29 | ||
267 | #define EVENT_PTR_I 30 | ||
268 | #define EVENT_THASH 31 | ||
269 | #define EVENT_TTAG 32 | ||
270 | #define EVENT_TPA 33 | ||
271 | #define EVENT_TAK 34 | ||
272 | #define EVENT_PTC_E 35 | ||
273 | #define EVENT_COVER 36 | ||
274 | #define EVENT_RFI 37 | ||
275 | #define EVENT_BSW_0 38 | ||
276 | #define EVENT_BSW_1 39 | ||
277 | #define EVENT_VMSW 40 | ||
278 | |||
/* PAL virtual services offsets */
280 | #define PAL_VPS_RESUME_NORMAL 0x0000 | ||
281 | #define PAL_VPS_RESUME_HANDLER 0x0400 | ||
282 | #define PAL_VPS_SYNC_READ 0x0800 | ||
283 | #define PAL_VPS_SYNC_WRITE 0x0c00 | ||
284 | #define PAL_VPS_SET_PENDING_INTERRUPT 0x1000 | ||
285 | #define PAL_VPS_THASH 0x1400 | ||
286 | #define PAL_VPS_TTAG 0x1800 | ||
287 | #define PAL_VPS_RESTORE 0x1c00 | ||
288 | #define PAL_VPS_SAVE 0x2000 | ||
289 | |||
#endif	/* _KVM_VT_I_H */
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c new file mode 100644 index 000000000000..def4576d22b1 --- /dev/null +++ b/arch/ia64/kvm/vtlb.c | |||
@@ -0,0 +1,636 @@ | |||
1 | /* | ||
2 | * vtlb.c: guest virtual tlb handling module. | ||
3 | * Copyright (c) 2004, Intel Corporation. | ||
4 | * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com> | ||
5 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
6 | * | ||
7 | * Copyright (c) 2007, Intel Corporation. | ||
8 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
9 | * Xiantao Zhang <xiantao.zhang@intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms and conditions of the GNU General Public License, | ||
13 | * version 2, as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
22 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "vcpu.h" | ||
27 | |||
28 | #include <linux/rwsem.h> | ||
29 | |||
30 | #include <asm/tlb.h> | ||
31 | |||
32 | /* | ||
33 | * Check to see if the address rid:va is translated by the TLB | ||
34 | */ | ||
35 | |||
36 | static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va) | ||
37 | { | ||
38 | return ((trp->p) && (trp->rid == rid) | ||
39 | && ((va-trp->vadr) < PSIZE(trp->ps))); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * Only for GUEST TR format. | ||
44 | */ | ||
45 | static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva) | ||
46 | { | ||
47 | u64 sa1, ea1; | ||
48 | |||
49 | if (!trp->p || trp->rid != rid) | ||
50 | return 0; | ||
51 | |||
52 | sa1 = trp->vadr; | ||
53 | ea1 = sa1 + PSIZE(trp->ps) - 1; | ||
54 | eva -= 1; | ||
55 | if ((sva > ea1) || (sa1 > eva)) | ||
56 | return 0; | ||
57 | else | ||
58 | return 1; | ||
59 | |||
60 | } | ||
61 | |||
/*
 * Purge the machine TLB translation covering va with page size 2^ps,
 * using a CPU-local ptc.l (ps is shifted into the itir ps field).
 */
void machine_tlb_purge(u64 va, u64 ps)
{
	ia64_ptcl(va, ps << 2);
}
66 | |||
67 | void local_flush_tlb_all(void) | ||
68 | { | ||
69 | int i, j; | ||
70 | unsigned long flags, count0, count1; | ||
71 | unsigned long stride0, stride1, addr; | ||
72 | |||
73 | addr = current_vcpu->arch.ptce_base; | ||
74 | count0 = current_vcpu->arch.ptce_count[0]; | ||
75 | count1 = current_vcpu->arch.ptce_count[1]; | ||
76 | stride0 = current_vcpu->arch.ptce_stride[0]; | ||
77 | stride1 = current_vcpu->arch.ptce_stride[1]; | ||
78 | |||
79 | local_irq_save(flags); | ||
80 | for (i = 0; i < count0; ++i) { | ||
81 | for (j = 0; j < count1; ++j) { | ||
82 | ia64_ptce(addr); | ||
83 | addr += stride1; | ||
84 | } | ||
85 | addr += stride0; | ||
86 | } | ||
87 | local_irq_restore(flags); | ||
88 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | ||
89 | } | ||
90 | |||
91 | int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref) | ||
92 | { | ||
93 | union ia64_rr vrr; | ||
94 | union ia64_pta vpta; | ||
95 | struct ia64_psr vpsr; | ||
96 | |||
97 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
98 | vrr.val = vcpu_get_rr(vcpu, vadr); | ||
99 | vpta.val = vcpu_get_pta(vcpu); | ||
100 | |||
101 | if (vrr.ve & vpta.ve) { | ||
102 | switch (ref) { | ||
103 | case DATA_REF: | ||
104 | case NA_REF: | ||
105 | return vpsr.dt; | ||
106 | case INST_REF: | ||
107 | return vpsr.dt && vpsr.it && vpsr.ic; | ||
108 | case RSE_REF: | ||
109 | return vpsr.dt && vpsr.rt; | ||
110 | |||
111 | } | ||
112 | } | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag) | ||
117 | { | ||
118 | u64 index, pfn, rid, pfn_bits; | ||
119 | |||
120 | pfn_bits = vpta.size - 5 - 8; | ||
121 | pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr); | ||
122 | rid = _REGION_ID(vrr); | ||
123 | index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1)); | ||
124 | *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16); | ||
125 | |||
126 | return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) + | ||
127 | (index << 5)); | ||
128 | } | ||
129 | |||
130 | struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type) | ||
131 | { | ||
132 | |||
133 | struct thash_data *trp; | ||
134 | int i; | ||
135 | u64 rid; | ||
136 | |||
137 | rid = vcpu_get_rr(vcpu, va); | ||
138 | rid = rid & RR_RID_MASK;; | ||
139 | if (type == D_TLB) { | ||
140 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { | ||
141 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; | ||
142 | i < NDTRS; i++, trp++) { | ||
143 | if (__is_tr_translated(trp, rid, va)) | ||
144 | return trp; | ||
145 | } | ||
146 | } | ||
147 | } else { | ||
148 | if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { | ||
149 | for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; | ||
150 | i < NITRS; i++, trp++) { | ||
151 | if (__is_tr_translated(trp, rid, va)) | ||
152 | return trp; | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | |||
157 | return NULL; | ||
158 | } | ||
159 | |||
/*
 * Insert one entry into the machine VHPT slot hashed from 'ifa'.
 *
 *  pte:  machine pte to install (reserved-flag bits stripped)
 *  itir: supplies the guest page size
 *  ifa:  virtual address being mapped
 *  gpte: original guest pte; used to record the guest physical address
 *        in the entry for later reverse lookup
 */
static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
{
	union ia64_rr rr;
	struct thash_data *head;
	unsigned long ps, gpaddr;

	ps = itir_ps(itir);

	/* Guest physical address = guest page frame | offset within page. */
	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
		(ifa & ((1UL << ps) - 1));

	rr.val = ia64_get_rr(ifa);
	head = (struct thash_data *)ia64_thash(ifa);
	/*
	 * Invalidate the slot and fence before rewriting it, so a
	 * concurrent hardware VHPT walk never consumes a half-written
	 * entry; the final etag store publishes the new translation.
	 */
	head->etag = INVALID_TI_TAG;
	ia64_mf();
	head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
	head->itir = rr.ps << 2;	/* host region page size, itir format */
	head->etag = ia64_ttag(ifa);
	head->gpaddr = gpaddr;
}
180 | |||
/*
 * Mark all host pages backing the mapping (pte, 2^ps) dirty in the
 * VM's dirty-log bitmap, under the shared dirty-log spinlock.
 *
 * Runs inside the VMM address space: the bitmap is located relative to
 * the vcpu struct, which sits at KVM_VCPU_OFS + vcpu_id * VCPU_SIZE in
 * the per-VM data area, with the bitmap at KVM_MEM_DIRTY_LOG_OFS
 * (layout per the kvm-ia64 VM data area — see asm-offsets/definitions).
 */
void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
	u64 i, dirty_pages = 1;
	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
	/* Walk back from 'v' to the VM data area base, then to the bitmap. */
	void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE)
		+ KVM_MEM_DIRTY_LOG_OFS;
	/* 2^(ps - PAGE_SHIFT) host pages are covered, at least one. */
	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;

	vmm_spin_lock(lock);
	for (i = 0; i < dirty_pages; i++) {
		/* avoid RMW (and cacheline dirtying) when already set */
		if (!test_bit(base_gfn + i, dirty_bitmap))
			set_bit(base_gfn + i , dirty_bitmap);
	}
	vmm_spin_unlock(lock);
}
198 | |||
/*
 * Install a guest translation into the machine VHPT, or — when the
 * guest page is smaller than the host region page size — directly into
 * the machine TLB via itc.
 */
void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
{
	u64 phy_pte, psr;
	union ia64_rr mrr;

	mrr.val = ia64_get_rr(va);
	/* May set VTLB_PTE_IO in pte as a side effect (mmio frame). */
	phy_pte = translate_phy_pte(&pte, itir, va);

	if (itir_ps(itir) >= mrr.ps) {
		/* Guest page covers >= one host page: cache it in the VHPT. */
		vhpt_insert(phy_pte, itir, va, pte);
	} else {
		/* Smaller page: insert into the machine TLB with PSR.ic
		 * cleared around the itc, as the architecture requires. */
		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		ia64_itc(type, va, phy_pte, itir_ps(itir));
		ia64_set_psr(psr);
	}

	if (!(pte&VTLB_PTE_IO))
		mark_pages_dirty(v, pte, itir_ps(itir));
}
219 | |||
220 | /* | ||
221 | * vhpt lookup | ||
222 | */ | ||
223 | struct thash_data *vhpt_lookup(u64 va) | ||
224 | { | ||
225 | struct thash_data *head; | ||
226 | u64 tag; | ||
227 | |||
228 | head = (struct thash_data *)ia64_thash(va); | ||
229 | tag = ia64_ttag(va); | ||
230 | if (head->etag == tag) | ||
231 | return head; | ||
232 | return NULL; | ||
233 | } | ||
234 | |||
/*
 * Read the guest VHPT entry at iha into *pte.
 * Returns 0 on success, 1 when the speculative load produced a NaT
 * (no translation for iha).
 *
 * A matching guest TR entry, if any, is first installed into the
 * machine VHPT so the ld8.s below can translate iha.
 */
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
	u64 ret;
	struct thash_data *data;

	data = __vtr_lookup(current_vcpu, iha, D_TLB);
	if (data != NULL)
		thash_vhpt_insert(current_vcpu, data->page_flags,
			data->itir, iha, D_TLB);

	/* Speculative load with interrupts and interruption collection
	 * off: p6 = NaT consumed (miss), p7 = value loaded (hit). */
	asm volatile ("rsm psr.ic|psr.i;;"
		"srlz.d;;"
		"ld8.s r9=[%1];;"
		"tnat.nz p6,p7=r9;;"
		"(p6) mov %0=1;"
		"(p6) mov r9=r0;"
		"(p7) extr.u r9=r9,0,53;;"
		"(p7) mov %0=r0;"
		"(p7) st8 [%2]=r9;;"
		"ssm psr.ic;;"
		"srlz.d;;"
		/* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
		: "=r"(ret) : "r"(iha), "r"(pte):"memory");

	return ret;
}
261 | |||
/*
 * Purge guest software TLB entries overlapping [va, va + 2^ps).
 *
 * Entries of different page sizes hash to different slots, so every
 * page size ever inserted in this region (tracked in the per-region
 * psbits bitmap) has to be probed separately.
 */
static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
	struct thash_data *cur;
	u64 start, curadr, size, psbits, tag, rr_ps, num;
	union ia64_rr vrr;
	struct thash_cb *hcb = &v->arch.vtlb;

	vrr.val = vcpu_get_rr(v, va);
	psbits = VMX(v, psbits[(va >> 61)]);
	start = va & ~((1UL << ps) - 1);	/* align to the purge size */
	while (psbits) {
		curadr = start;
		rr_ps = __ffs(psbits);	/* next page size present here */
		psbits &= ~(1UL << rr_ps);
		/* Number of rr_ps-sized pages inside the purge range. */
		num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
		size = PSIZE(rr_ps);
		vrr.ps = rr_ps;
		while (num) {
			cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
			/* Only kill an exact match (same tag and size). */
			if (cur->etag == tag && cur->ps == rr_ps)
				cur->etag = INVALID_TI_TAG;
			curadr += size;
			num--;
		}
	}
}
292 | |||
293 | |||
294 | /* | ||
295 | * purge VHPT and machine TLB | ||
296 | */ | ||
297 | static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps) | ||
298 | { | ||
299 | struct thash_data *cur; | ||
300 | u64 start, size, tag, num; | ||
301 | union ia64_rr rr; | ||
302 | |||
303 | start = va & ~((1UL << ps) - 1); | ||
304 | rr.val = ia64_get_rr(va); | ||
305 | size = PSIZE(rr.ps); | ||
306 | num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps)); | ||
307 | while (num) { | ||
308 | cur = (struct thash_data *)ia64_thash(start); | ||
309 | tag = ia64_ttag(start); | ||
310 | if (cur->etag == tag) | ||
311 | cur->etag = INVALID_TI_TAG; | ||
312 | start += size; | ||
313 | num--; | ||
314 | } | ||
315 | machine_tlb_purge(va, ps); | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Insert an entry into hash TLB or VHPT. | ||
320 | * NOTES: | ||
321 | * 1: When inserting VHPT to thash, "va" is a must covered | ||
322 | * address by the inserted machine VHPT entry. | ||
323 | * 2: The format of entry is always in TLB. | ||
324 | * 3: The caller need to make sure the new entry will not overlap | ||
325 | * with any existed entry. | ||
326 | */ | ||
327 | void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va) | ||
328 | { | ||
329 | struct thash_data *head; | ||
330 | union ia64_rr vrr; | ||
331 | u64 tag; | ||
332 | struct thash_cb *hcb = &v->arch.vtlb; | ||
333 | |||
334 | vrr.val = vcpu_get_rr(v, va); | ||
335 | vrr.ps = itir_ps(itir); | ||
336 | VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps); | ||
337 | head = vsa_thash(hcb->pta, va, vrr.val, &tag); | ||
338 | head->page_flags = pte; | ||
339 | head->itir = itir; | ||
340 | head->etag = tag; | ||
341 | } | ||
342 | |||
343 | int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type) | ||
344 | { | ||
345 | struct thash_data *trp; | ||
346 | int i; | ||
347 | u64 end, rid; | ||
348 | |||
349 | rid = vcpu_get_rr(vcpu, va); | ||
350 | rid = rid & RR_RID_MASK; | ||
351 | end = va + PSIZE(ps); | ||
352 | if (type == D_TLB) { | ||
353 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { | ||
354 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; | ||
355 | i < NDTRS; i++, trp++) { | ||
356 | if (__is_tr_overlap(trp, rid, va, end)) | ||
357 | return i; | ||
358 | } | ||
359 | } | ||
360 | } else { | ||
361 | if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { | ||
362 | for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; | ||
363 | i < NITRS; i++, trp++) { | ||
364 | if (__is_tr_overlap(trp, rid, va, end)) | ||
365 | return i; | ||
366 | } | ||
367 | } | ||
368 | } | ||
369 | return -1; | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Purge entries in VTLB and VHPT | ||
374 | */ | ||
375 | void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps) | ||
376 | { | ||
377 | if (vcpu_quick_region_check(v->arch.tc_regions, va)) | ||
378 | vtlb_purge(v, va, ps); | ||
379 | vhpt_purge(v, va, ps); | ||
380 | } | ||
381 | |||
382 | void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps) | ||
383 | { | ||
384 | u64 old_va = va; | ||
385 | va = REGION_OFFSET(va); | ||
386 | if (vcpu_quick_region_check(v->arch.tc_regions, old_va)) | ||
387 | vtlb_purge(v, va, ps); | ||
388 | vhpt_purge(v, va, ps); | ||
389 | } | ||
390 | |||
/*
 * Translate a guest pte into a machine pte via the P2M table.
 *
 * On success returns the machine pte value.  When the guest frame is
 * an I/O (mmio) frame, sets VTLB_PTE_IO in *pte (side effect seen by
 * callers) and returns -1.
 */
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
	u64 ps, ps_mask, paddr, maddr;
	union pte_flags phy_pte;

	ps = itir_ps(itir);
	ps_mask = ~((1UL << ps) - 1);
	phy_pte.val = *pte;
	paddr = *pte;
	/* Guest physical address: pte page frame + offset taken from va. */
	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
	if (maddr & GPFN_IO_MASK) {
		*pte |= VTLB_PTE_IO;
		return -1;
	}
	/* Machine address: machine page frame + page offset. */
	maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
					(paddr & ~PAGE_MASK);
	phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
	return phy_pte.val;
}
411 | |||
/*
 * Purge overlap TCs and then insert the new entry to emulate itc ops.
 * Notes: Only TC entry can purge and insert.
 * Returns 1 when the mapping targets MMIO space, 0 otherwise.
 */
int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
						u64 ifa, int type)
{
	u64 ps;
	u64 phy_pte;
	union ia64_rr vrr, mrr;
	int ret = 0;

	ps = itir_ps(itir);
	vrr.val = vcpu_get_rr(v, ifa);
	mrr.val = ia64_get_rr(ifa);

	/* Sets VTLB_PTE_IO in pte when the frame is mmio. */
	phy_pte = translate_phy_pte(&pte, itir, ifa);

	/* Ensure WB attribute if pte is related to a normal mem page,
	 * which is required by vga acceleration since qemu maps shared
	 * vram buffer with WB.
	 */
	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
		pte &= ~_PAGE_MA_MASK;
		phy_pte &= ~_PAGE_MA_MASK;
	}

	if (pte & VTLB_PTE_IO)
		ret = 1;

	vtlb_purge(v, ifa, ps);
	vhpt_purge(v, ifa, ps);

	if (ps == mrr.ps) {
		/* Guest and host page sizes match: memory mappings go
		 * straight into the machine VHPT; mmio stays in the VTLB. */
		if (!(pte&VTLB_PTE_IO)) {
			vhpt_insert(phy_pte, itir, ifa, pte);
		} else {
			vtlb_insert(v, pte, itir, ifa);
			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
		}
	} else if (ps > mrr.ps) {
		/* Guest page larger than host page: track it in the VTLB
		 * and, for memory, also cache it in the VHPT. */
		vtlb_insert(v, pte, itir, ifa);
		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
		if (!(pte&VTLB_PTE_IO))
			vhpt_insert(phy_pte, itir, ifa, pte);
	} else {
		/* Guest page smaller than host page: insert into the
		 * machine TLB directly, with PSR.ic cleared around itc. */
		u64 psr;
		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		ia64_itc(type, ifa, phy_pte, ps);
		ia64_set_psr(psr);
	}
	if (!(pte&VTLB_PTE_IO))
		mark_pages_dirty(v, pte, ps);

	return ret;
}
470 | |||
471 | /* | ||
472 | * Purge all TCs or VHPT entries including those in Hash table. | ||
473 | * | ||
474 | */ | ||
475 | |||
476 | void thash_purge_all(struct kvm_vcpu *v) | ||
477 | { | ||
478 | int i; | ||
479 | struct thash_data *head; | ||
480 | struct thash_cb *vtlb, *vhpt; | ||
481 | vtlb = &v->arch.vtlb; | ||
482 | vhpt = &v->arch.vhpt; | ||
483 | |||
484 | for (i = 0; i < 8; i++) | ||
485 | VMX(v, psbits[i]) = 0; | ||
486 | |||
487 | head = vtlb->hash; | ||
488 | for (i = 0; i < vtlb->num; i++) { | ||
489 | head->page_flags = 0; | ||
490 | head->etag = INVALID_TI_TAG; | ||
491 | head->itir = 0; | ||
492 | head->next = 0; | ||
493 | head++; | ||
494 | }; | ||
495 | |||
496 | head = vhpt->hash; | ||
497 | for (i = 0; i < vhpt->num; i++) { | ||
498 | head->page_flags = 0; | ||
499 | head->etag = INVALID_TI_TAG; | ||
500 | head->itir = 0; | ||
501 | head->next = 0; | ||
502 | head++; | ||
503 | }; | ||
504 | |||
505 | local_flush_tlb_all(); | ||
506 | } | ||
507 | |||
508 | |||
509 | /* | ||
510 | * Lookup the hash table and its collision chain to find an entry | ||
511 | * covering this address rid:va or the entry. | ||
512 | * | ||
513 | * INPUT: | ||
514 | * in: TLB format for both VHPT & TLB. | ||
515 | */ | ||
516 | |||
517 | struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) | ||
518 | { | ||
519 | struct thash_data *cch; | ||
520 | u64 psbits, ps, tag; | ||
521 | union ia64_rr vrr; | ||
522 | |||
523 | struct thash_cb *hcb = &v->arch.vtlb; | ||
524 | |||
525 | cch = __vtr_lookup(v, va, is_data);; | ||
526 | if (cch) | ||
527 | return cch; | ||
528 | |||
529 | if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0) | ||
530 | return NULL; | ||
531 | |||
532 | psbits = VMX(v, psbits[(va >> 61)]); | ||
533 | vrr.val = vcpu_get_rr(v, va); | ||
534 | while (psbits) { | ||
535 | ps = __ffs(psbits); | ||
536 | psbits &= ~(1UL << ps); | ||
537 | vrr.ps = ps; | ||
538 | cch = vsa_thash(hcb->pta, va, vrr.val, &tag); | ||
539 | if (cch->etag == tag && cch->ps == ps) | ||
540 | return cch; | ||
541 | } | ||
542 | |||
543 | return NULL; | ||
544 | } | ||
545 | |||
546 | |||
547 | /* | ||
548 | * Initialize internal control data before service. | ||
549 | */ | ||
550 | void thash_init(struct thash_cb *hcb, u64 sz) | ||
551 | { | ||
552 | int i; | ||
553 | struct thash_data *head; | ||
554 | |||
555 | hcb->pta.val = (unsigned long)hcb->hash; | ||
556 | hcb->pta.vf = 1; | ||
557 | hcb->pta.ve = 1; | ||
558 | hcb->pta.size = sz; | ||
559 | head = hcb->hash; | ||
560 | for (i = 0; i < hcb->num; i++) { | ||
561 | head->page_flags = 0; | ||
562 | head->itir = 0; | ||
563 | head->etag = INVALID_TI_TAG; | ||
564 | head->next = 0; | ||
565 | head++; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | u64 kvm_lookup_mpa(u64 gpfn) | ||
570 | { | ||
571 | u64 *base = (u64 *) KVM_P2M_BASE; | ||
572 | return *(base + gpfn); | ||
573 | } | ||
574 | |||
575 | u64 kvm_gpa_to_mpa(u64 gpa) | ||
576 | { | ||
577 | u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT); | ||
578 | return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK); | ||
579 | } | ||
580 | |||
581 | |||
/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 * Returns IA64_NO_FAULT on success, IA64_FAULT when no translation
 * for gip can be found.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
	u64 gpip = 0;   /* guest physical IP*/
	u64 *vpa;
	struct thash_data *tlb;
	u64 maddr;

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
		/* I-side physical mode */
		gpip = gip;
	} else {
		/* Virtual mode: translate through the guest software TLB. */
		tlb = vtlb_lookup(vcpu, gip, I_TLB);
		if (tlb)
			gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
				(gip & (PSIZE(tlb->ps) - 1));
	}
	if (gpip) {
		maddr = kvm_gpa_to_mpa(gpip);
	} else {
		/* No guest TLB entry: fall back to the machine VHPT. */
		tlb = vhpt_lookup(gip);
		if (tlb == NULL) {
			/* Purge any stale machine TLB entry and fault. */
			ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
			return IA64_FAULT;
		}
		maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
			| (gip & (PSIZE(tlb->ps) - 1));
	}
	vpa = (u64 *)__kvm_va(maddr);

	/* A bundle is two consecutive 64-bit words. */
	pbundle->i64[0] = *vpa++;
	pbundle->i64[1] = *vpa;

	return IA64_NO_FAULT;
}
622 | |||
623 | |||
/*
 * Set up the vcpu's machine VHPT hash and point the PTA register at it.
 */
void kvm_init_vhpt(struct kvm_vcpu *v)
{
	v->arch.vhpt.num = VHPT_NUM_ENTRIES;
	thash_init(&v->arch.vhpt, VHPT_SHIFT);
	ia64_set_pta(v->arch.vhpt.pta.val);
	/*Enable VHPT here?*/
}
631 | |||
/*
 * Set up the vcpu's guest software TLB hash.
 */
void kvm_init_vtlb(struct kvm_vcpu *v)
{
	v->arch.vtlb.num = VTLB_NUM_ENTRIES;
	thash_init(&v->arch.vtlb, VTLB_SHIFT);
}