author	Xiantao Zhang <xiantao.zhang@intel.com>	2008-04-01 04:00:24 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 05:01:03 -0400
commit	a4f500381ac91969fa4f8b0a4e39e76dbf00a913 (patch)
tree	943377eb26c82400e6b8abcb7c0a9f1eb0ff2486 /arch/ia64/kvm
parent	b024b79322aad213cd2d4f30c23a6c626a0d5b31 (diff)
KVM: ia64: Add header files for kvm/ia64
kvm_minstate.h: macros for min-state save routines.
lapic.h: APIC structure definition.
vcpu.h: routines related to vcpu virtualization.
vti.h: macros and routines for VT support on Itanium.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--	arch/ia64/kvm/kvm_minstate.h	273
-rw-r--r--	arch/ia64/kvm/lapic.h	25
-rw-r--r--	arch/ia64/kvm/misc.h	93
-rw-r--r--	arch/ia64/kvm/vcpu.h	740
-rw-r--r--	arch/ia64/kvm/vti.h	290
5 files changed, 1421 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
new file mode 100644
index 00000000000..13980d9b8bc
--- /dev/null
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -0,0 +1,273 @@
1/*
2 * kvm_minstate.h: min-state save macros
3 * Copyright (c) 2007, Intel Corporation.
4 *
5 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
6 * Xiantao Zhang (xiantao.zhang@intel.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 */
22
23
24#include <asm/asmmacro.h>
25#include <asm/types.h>
26#include <asm/kregs.h>
27#include "asm-offsets.h"
28
29#define KVM_MINSTATE_START_SAVE_MIN \
30 mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
31 ;; \
32 mov.m r28 = ar.rnat; \
33 addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
34 ;; \
35 lfetch.fault.excl.nt1 [r22]; \
36 addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
37 mov r23 = ar.bspstore; /* save ar.bspstore */ \
38 ;; \
39 mov ar.bspstore = r22; /* switch to kernel RBS */\
40 ;; \
41 mov r18 = ar.bsp; \
42 mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
43
44
45
46#define KVM_MINSTATE_END_SAVE_MIN \
47 bsw.1; /* switch back to bank 1 (must be last in insn group) */\
48 ;;
49
50
51#define PAL_VSA_SYNC_READ \
52 /* begin to call pal vps sync_read */ \
53 add r25 = VMM_VPD_BASE_OFFSET, r21; \
54 adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21; /* entry point */ \
55 ;; \
56 ld8 r25 = [r25]; /* read vpd base */ \
57 ld8 r20 = [r20]; \
58 ;; \
59 add r20 = PAL_VPS_SYNC_READ,r20; \
60 ;; \
61{ .mii; \
62 nop 0x0; \
63 mov r24 = ip; \
64 mov b0 = r20; \
65 ;; \
66}; \
67{ .mmb; \
68 add r24 = 0x20, r24; \
69 nop 0x0; \
70 br.cond.sptk b0; /* call the service */ \
71 ;; \
72};
73
74
75
76#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
77
78/*
79 * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
80 * the minimum state necessary that allows us to turn psr.ic back
81 * on.
82 *
83 * Assumed state upon entry:
84 * psr.ic: off
85 * r31: contains saved predicates (pr)
86 *
87 * Upon exit, the state is as follows:
88 * psr.ic: off
89 * r2 = points to &pt_regs.r16
90 * r8 = contents of ar.ccv
91 * r9 = contents of ar.csd
92 * r10 = contents of ar.ssd
93 * r11 = FPSR_DEFAULT
94 * r12 = kernel sp (kernel virtual address)
95 * r13 = points to current task_struct (kernel virtual address)
96 * p15 = TRUE if psr.i is set in cr.ipsr
97 * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
98 * preserved
99 *
100 * Note that psr.ic is NOT turned on by this macro. This is so that
101 * we can pass interruption state as arguments to a handler.
102 */
103
104
105#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
106
107#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
108 KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
109 mov r27 = ar.rsc; /* M */ \
110 mov r20 = r1; /* A */ \
111 mov r25 = ar.unat; /* M */ \
112 mov r29 = cr.ipsr; /* M */ \
113 mov r26 = ar.pfs; /* I */ \
114 mov r18 = cr.isr; \
115 COVER; /* B;; (or nothing) */ \
116 ;; \
117 tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
118 mov r1 = r16; \
119/* mov r21=r16; */ \
120 /* switch from user to kernel RBS: */ \
121 ;; \
122 invala; /* M */ \
123 SAVE_IFS; \
124 ;; \
125 KVM_MINSTATE_START_SAVE_MIN \
126 adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
127 adds r16 = PT(CR_IPSR),r1; \
128 ;; \
129 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
130 st8 [r16] = r29; /* save cr.ipsr */ \
131 ;; \
132 lfetch.fault.excl.nt1 [r17]; \
133 tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
134 mov r29 = b0 \
135 ;; \
136 adds r16 = PT(R8),r1; /* initialize first base pointer */\
137 adds r17 = PT(R9),r1; /* initialize second base pointer */\
138 ;; \
139.mem.offset 0,0; st8.spill [r16] = r8,16; \
140.mem.offset 8,0; st8.spill [r17] = r9,16; \
141 ;; \
142.mem.offset 0,0; st8.spill [r16] = r10,24; \
143.mem.offset 8,0; st8.spill [r17] = r11,24; \
144 ;; \
145 mov r9 = cr.iip; /* M */ \
146 mov r10 = ar.fpsr; /* M */ \
147 ;; \
148 st8 [r16] = r9,16; /* save cr.iip */ \
149 st8 [r17] = r30,16; /* save cr.ifs */ \
150 sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
151 ;; \
152 st8 [r16] = r25,16; /* save ar.unat */ \
153 st8 [r17] = r26,16; /* save ar.pfs */ \
154 shl r18 = r18,16; /* compute ar.rsc used for "loadrs" */\
155 ;; \
156 st8 [r16] = r27,16; /* save ar.rsc */ \
157 st8 [r17] = r28,16; /* save ar.rnat */ \
158 ;; /* avoid RAW on r16 & r17 */ \
159 st8 [r16] = r23,16; /* save ar.bspstore */ \
160 st8 [r17] = r31,16; /* save predicates */ \
161 ;; \
162 st8 [r16] = r29,16; /* save b0 */ \
163 st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
164 ;; \
165.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
166.mem.offset 8,0; st8.spill [r17] = r12,16; \
167 adds r12 = -16,r1; /* switch to kernel memory stack */ \
168 ;; \
169.mem.offset 0,0; st8.spill [r16] = r13,16; \
170.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
171 mov r13 = r21; /* establish `current' */ \
172 ;; \
173.mem.offset 0,0; st8.spill [r16] = r15,16; \
174.mem.offset 8,0; st8.spill [r17] = r14,16; \
175 ;; \
176.mem.offset 0,0; st8.spill [r16] = r2,16; \
177.mem.offset 8,0; st8.spill [r17] = r3,16; \
178 adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
179 ;; \
180 adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
181 adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
182 mov r26 = cr.iipa; \
183 mov r27 = cr.isr; \
184 ;; \
185 st8 [r16] = r26; \
186 st8 [r17] = r27; \
187 ;; \
188 EXTRA; \
189 mov r8 = ar.ccv; \
190 mov r9 = ar.csd; \
191 mov r10 = ar.ssd; \
192 movl r11 = FPSR_DEFAULT; /* L-unit */ \
193 adds r17 = VMM_VCPU_GP_OFFSET,r13; \
194 ;; \
195 ld8 r1 = [r17];/* establish kernel global pointer */ \
196 ;; \
197 PAL_VSA_SYNC_READ \
198 KVM_MINSTATE_END_SAVE_MIN
199
200/*
201 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
202 *
203 * Assumed state upon entry:
204 * psr.ic: on
205 * r2: points to &pt_regs.f6
206 * r3: points to &pt_regs.f7
207 * r8: contents of ar.ccv
208 * r9: contents of ar.csd
209 * r10: contents of ar.ssd
210 * r11: FPSR_DEFAULT
211 *
212 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
213 */
214#define KVM_SAVE_REST \
215.mem.offset 0,0; st8.spill [r2] = r16,16; \
216.mem.offset 8,0; st8.spill [r3] = r17,16; \
217 ;; \
218.mem.offset 0,0; st8.spill [r2] = r18,16; \
219.mem.offset 8,0; st8.spill [r3] = r19,16; \
220 ;; \
221.mem.offset 0,0; st8.spill [r2] = r20,16; \
222.mem.offset 8,0; st8.spill [r3] = r21,16; \
223 mov r18=b6; \
224 ;; \
225.mem.offset 0,0; st8.spill [r2] = r22,16; \
226.mem.offset 8,0; st8.spill [r3] = r23,16; \
227 mov r19 = b7; \
228 ;; \
229.mem.offset 0,0; st8.spill [r2] = r24,16; \
230.mem.offset 8,0; st8.spill [r3] = r25,16; \
231 ;; \
232.mem.offset 0,0; st8.spill [r2] = r26,16; \
233.mem.offset 8,0; st8.spill [r3] = r27,16; \
234 ;; \
235.mem.offset 0,0; st8.spill [r2] = r28,16; \
236.mem.offset 8,0; st8.spill [r3] = r29,16; \
237 ;; \
238.mem.offset 0,0; st8.spill [r2] = r30,16; \
239.mem.offset 8,0; st8.spill [r3] = r31,32; \
240 ;; \
241 mov ar.fpsr = r11; \
242 st8 [r2] = r8,8; \
243 adds r24 = PT(B6)-PT(F7),r3; \
244 adds r25 = PT(B7)-PT(F7),r3; \
245 ;; \
246 st8 [r24] = r18,16; /* b6 */ \
247 st8 [r25] = r19,16; /* b7 */ \
248 adds r2 = PT(R4)-PT(F6),r2; \
249 adds r3 = PT(R5)-PT(F7),r3; \
250 ;; \
251 st8 [r24] = r9; /* ar.csd */ \
252 st8 [r25] = r10; /* ar.ssd */ \
253 ;; \
254 mov r18 = ar.unat; \
255 adds r19 = PT(EML_UNAT)-PT(R4),r2; \
256 ;; \
257 st8 [r19] = r18; /* eml_unat */ \
258
259
260#define KVM_SAVE_EXTRA \
261.mem.offset 0,0; st8.spill [r2] = r4,16; \
262.mem.offset 8,0; st8.spill [r3] = r5,16; \
263 ;; \
264.mem.offset 0,0; st8.spill [r2] = r6,16; \
265.mem.offset 8,0; st8.spill [r3] = r7; \
266 ;; \
267 mov r26 = ar.unat; \
268 ;; \
269 st8 [r2] = r26;/* eml_unat */ \
270
271#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
272#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
273#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
new file mode 100644
index 00000000000..6d6cbcb1489
--- /dev/null
+++ b/arch/ia64/kvm/lapic.h
@@ -0,0 +1,25 @@
1#ifndef __KVM_IA64_LAPIC_H
2#define __KVM_IA64_LAPIC_H
3
4#include <linux/kvm_host.h>
5
6/*
7 * vlsapic
8 */
9struct kvm_lapic{
10 struct kvm_vcpu *vcpu;
11 uint64_t insvc[4];
12 uint64_t vhpi;
13 uint8_t xtp;
14 uint8_t pal_init_pending;
15 uint8_t pad[2];
16};
17
18int kvm_create_lapic(struct kvm_vcpu *vcpu);
19void kvm_free_lapic(struct kvm_vcpu *vcpu);
20
21int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
22int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
23int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
24
25#endif
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
new file mode 100644
index 00000000000..e585c460734
--- /dev/null
+++ b/arch/ia64/kvm/misc.h
@@ -0,0 +1,93 @@
1#ifndef __KVM_IA64_MISC_H
2#define __KVM_IA64_MISC_H
3
4#include <linux/kvm_host.h>
5/*
6 * misc.h
7 * Copyright (C) 2007, Intel Corporation.
8 * Xiantao Zhang (xiantao.zhang@intel.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
21 * Place - Suite 330, Boston, MA 02111-1307 USA.
22 *
23 */
24
25/*
26 * Return the p2m base address on the host side.
27 */
28static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
29{
30 return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
31}
32
33static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
34 u64 paddr, u64 mem_flags)
35{
36 uint64_t *pmt_base = kvm_host_get_pmt(kvm);
37 unsigned long pte;
38
39 pte = PAGE_ALIGN(paddr) | mem_flags;
40 pmt_base[gfn] = pte;
41}
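/*
 * Editor's illustrative sketch (not part of the original commit): the
 * p2m table is a flat array of u64 ptes indexed by guest frame number,
 * so a lookup is a single load.  kvm_read_pmt_entry() is a hypothetical
 * helper shown only for illustration.
 */
static inline u64 kvm_read_pmt_entry(struct kvm *kvm, gfn_t gfn)
{
	return kvm_host_get_pmt(kvm)[gfn];
}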
42
43/* Translate a host address into the corresponding guest address */
44
45static inline void *to_guest(struct kvm *kvm, void *addr)
46{
47 return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
48 KVM_VM_DATA_BASE);
49}
50
51/* Translate a guest address into the corresponding host address */
52
53static inline void *to_host(struct kvm *kvm, void *addr)
54{
55 return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
56 + kvm->arch.vm_base);
57}
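/*
 * Editor's note (illustrative, not part of the original commit):
 * to_guest() and to_host() apply the same constant displacement in
 * opposite directions, so over the per-VM data area they are exact
 * inverses.  A hypothetical self-check:
 */
static inline int kvm_addr_xlate_example(struct kvm *kvm, void *host_addr)
{
	return to_host(kvm, to_guest(kvm, host_addr)) == host_addr; /* always 1 */
}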
58
59/* Get host context of the vcpu */
60static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
61{
62 union context *ctx = &vcpu->arch.host;
63 return to_guest(vcpu->kvm, ctx);
64}
65
66/* Get guest context of the vcpu */
67static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
68{
69 union context *ctx = &vcpu->arch.guest;
70 return to_guest(vcpu->kvm, ctx);
71}
72
73/* Get the exit data the vcpu brought back from the GVMM */
74static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
75{
76 return &vcpu->arch.exit_data;
77}
78
79/* Get the vcpu's pending I/O request, for use by the kvm module */
80static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
81{
82 struct exit_ctl_data *p_ctl_data;
83
84 if (vcpu) {
85 p_ctl_data = kvm_get_exit_data(vcpu);
86 if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
87 return &p_ctl_data->u.ioreq;
88 }
89
90 return NULL;
91}
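/*
 * Editor's illustrative sketch (not part of the original commit): a
 * hypothetical caller completing a pending MMIO read; the field names
 * of struct kvm_mmio_req (dir, data) and the IOREQ_READ constant are
 * assumed from context.
 */
static inline void kvm_mmio_complete_example(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_mmio_req *req = kvm_get_vcpu_ioreq(vcpu);

	if (req && req->dir == IOREQ_READ)	/* assumed field/constant names */
		req->data = data;	/* value read on the vcpu's behalf */
}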
92
93#endif
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
new file mode 100644
index 00000000000..b0fcfb62c49
--- /dev/null
+++ b/arch/ia64/kvm/vcpu.h
@@ -0,0 +1,740 @@
1/*
2 * vcpu.h: vcpu routines
3 * Copyright (c) 2005, Intel Corporation.
4 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
5 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
6 *
7 * Copyright (c) 2007, Intel Corporation.
8 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
9 * Xiantao Zhang (xiantao.zhang@intel.com)
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms and conditions of the GNU General Public License,
13 * version 2, as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
22 * Place - Suite 330, Boston, MA 02111-1307 USA.
23 *
24 */
25
26
27#ifndef __KVM_VCPU_H__
28#define __KVM_VCPU_H__
29
30#include <asm/types.h>
31#include <asm/fpu.h>
32#include <asm/processor.h>
33
34#ifndef __ASSEMBLY__
35#include "vti.h"
36
37#include <linux/kvm_host.h>
38#include <linux/spinlock.h>
39
40typedef unsigned long IA64_INST;
41
42typedef union U_IA64_BUNDLE {
43 unsigned long i64[2];
44 struct { unsigned long template:5, slot0:41, slot1a:18,
45 slot1b:23, slot2:41; };
46 /* NOTE: following doesn't work because bitfields can't cross natural
47 size boundaries
48 struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
49} IA64_BUNDLE;
50
51typedef union U_INST64_A5 {
52 IA64_INST inst;
53 struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
54 imm9d:9, s:1, major:4; };
55} INST64_A5;
56
57typedef union U_INST64_B4 {
58 IA64_INST inst;
59 struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
60 wh:2, d:1, un1:1, major:4; };
61} INST64_B4;
62
63typedef union U_INST64_B8 {
64 IA64_INST inst;
65 struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
66} INST64_B8;
67
68typedef union U_INST64_B9 {
69 IA64_INST inst;
70 struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
71} INST64_B9;
72
73typedef union U_INST64_I19 {
74 IA64_INST inst;
75 struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
76} INST64_I19;
77
78typedef union U_INST64_I26 {
79 IA64_INST inst;
80 struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
81} INST64_I26;
82
83typedef union U_INST64_I27 {
84 IA64_INST inst;
85 struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
86} INST64_I27;
87
88typedef union U_INST64_I28 { /* not privileged (mov from AR) */
89 IA64_INST inst;
90 struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
91} INST64_I28;
92
93typedef union U_INST64_M28 {
94 IA64_INST inst;
95 struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
96} INST64_M28;
97
98typedef union U_INST64_M29 {
99 IA64_INST inst;
100 struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
101} INST64_M29;
102
103typedef union U_INST64_M30 {
104 IA64_INST inst;
105 struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
106 x3:3, s:1, major:4; };
107} INST64_M30;
108
109typedef union U_INST64_M31 {
110 IA64_INST inst;
111 struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
112} INST64_M31;
113
114typedef union U_INST64_M32 {
115 IA64_INST inst;
116 struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
117} INST64_M32;
118
119typedef union U_INST64_M33 {
120 IA64_INST inst;
121 struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
122} INST64_M33;
123
124typedef union U_INST64_M35 {
125 IA64_INST inst;
126 struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
127
128} INST64_M35;
129
130typedef union U_INST64_M36 {
131 IA64_INST inst;
132 struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
133} INST64_M36;
134
135typedef union U_INST64_M37 {
136 IA64_INST inst;
137 struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
138 i:1, major:4; };
139} INST64_M37;
140
141typedef union U_INST64_M41 {
142 IA64_INST inst;
143 struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
144} INST64_M41;
145
146typedef union U_INST64_M42 {
147 IA64_INST inst;
148 struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
149} INST64_M42;
150
151typedef union U_INST64_M43 {
152 IA64_INST inst;
153 struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
154} INST64_M43;
155
156typedef union U_INST64_M44 {
157 IA64_INST inst;
158 struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
159} INST64_M44;
160
161typedef union U_INST64_M45 {
162 IA64_INST inst;
163 struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
164} INST64_M45;
165
166typedef union U_INST64_M46 {
167 IA64_INST inst;
168 struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
169 x3:3, un1:1, major:4; };
170} INST64_M46;
171
172typedef union U_INST64_M47 {
173 IA64_INST inst;
174 struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
175} INST64_M47;
176
177typedef union U_INST64_M1{
178 IA64_INST inst;
179 struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
180 x6:6, m:1, major:4; };
181} INST64_M1;
182
183typedef union U_INST64_M2{
184 IA64_INST inst;
185 struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
186 x6:6, m:1, major:4; };
187} INST64_M2;
188
189typedef union U_INST64_M3{
190 IA64_INST inst;
191 struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
192 x6:6, s:1, major:4; };
193} INST64_M3;
194
195typedef union U_INST64_M4 {
196 IA64_INST inst;
197 struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
198 x6:6, m:1, major:4; };
199} INST64_M4;
200
201typedef union U_INST64_M5 {
202 IA64_INST inst;
203 struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
204 x6:6, s:1, major:4; };
205} INST64_M5;
206
207typedef union U_INST64_M6 {
208 IA64_INST inst;
209 struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
210 x6:6, m:1, major:4; };
211} INST64_M6;
212
213typedef union U_INST64_M9 {
214 IA64_INST inst;
215 struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
216 x6:6, m:1, major:4; };
217} INST64_M9;
218
219typedef union U_INST64_M10 {
220 IA64_INST inst;
221 struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
222 x6:6, s:1, major:4; };
223} INST64_M10;
224
225typedef union U_INST64_M12 {
226 IA64_INST inst;
227 struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
228 x6:6, m:1, major:4; };
229} INST64_M12;
230
231typedef union U_INST64_M15 {
232 IA64_INST inst;
233 struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
234 x6:6, s:1, major:4; };
235} INST64_M15;
236
237typedef union U_INST64 {
238 IA64_INST inst;
239 struct { unsigned long :37, major:4; } generic;
240 INST64_A5 A5; /* used in build_hypercall_bundle only */
241 INST64_B4 B4; /* used in build_hypercall_bundle only */
242 INST64_B8 B8; /* rfi, bsw.[01] */
243 INST64_B9 B9; /* break.b */
244 INST64_I19 I19; /* used in build_hypercall_bundle only */
245 INST64_I26 I26; /* mov register to ar (I unit) */
246 INST64_I27 I27; /* mov immediate to ar (I unit) */
247 INST64_I28 I28; /* mov from ar (I unit) */
248 INST64_M1 M1; /* ld integer */
249 INST64_M2 M2;
250 INST64_M3 M3;
251 INST64_M4 M4; /* st integer */
252 INST64_M5 M5;
253 INST64_M6 M6; /* ldfd floating pointer */
254 INST64_M9 M9; /* stfd floating pointer */
255 INST64_M10 M10; /* stfd floating pointer */
256 INST64_M12 M12; /* ldfd pair floating pointer */
257 INST64_M15 M15; /* lfetch + imm update */
258 INST64_M28 M28; /* purge translation cache entry */
259 INST64_M29 M29; /* mov register to ar (M unit) */
260 INST64_M30 M30; /* mov immediate to ar (M unit) */
261 INST64_M31 M31; /* mov from ar (M unit) */
262 INST64_M32 M32; /* mov reg to cr */
263 INST64_M33 M33; /* mov from cr */
264 INST64_M35 M35; /* mov to psr */
265 INST64_M36 M36; /* mov from psr */
266 INST64_M37 M37; /* break.m */
267 INST64_M41 M41; /* translation cache insert */
268 INST64_M42 M42; /* mov to indirect reg/translation reg insert*/
269 INST64_M43 M43; /* mov from indirect reg */
270 INST64_M44 M44; /* set/reset system mask */
271 INST64_M45 M45; /* translation purge */
272 INST64_M46 M46; /* translation access (tpa,tak) */
273 INST64_M47 M47; /* purge translation entry */
274} INST64;
275
276#define MASK_41 ((unsigned long)0x1ffffffffff)
277
278/* Virtual address memory attributes encoding */
279#define VA_MATTR_WB 0x0
280#define VA_MATTR_UC 0x4
281#define VA_MATTR_UCE 0x5
282#define VA_MATTR_WC 0x6
283#define VA_MATTR_NATPAGE 0x7
284
285#define PMASK(size) (~((size) - 1))
286#define PSIZE(size) (1UL<<(size))
287#define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits))
288#define PAGEALIGN(va, ps) CLEARLSB(va, ps)
289#define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
290#define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */
291
292#define ARCH_PAGE_SHIFT 12
293
294#define INVALID_TI_TAG (1UL << 63)
295
296#define VTLB_PTE_P_BIT 0
297#define VTLB_PTE_IO_BIT 60
298#define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT)
299#define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT)
300
301#define vcpu_quick_region_check(_tr_regions,_ifa) \
302 (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
303
304#define vcpu_quick_region_set(_tr_regions,_ifa) \
305 do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
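/*
 * Editor's illustrative sketch (not part of the original commit): bits
 * 63:61 of an address select one of the eight virtual regions, so a
 * small per-vcpu bitmap can veto most TR lookups with a single AND.
 * The two helpers below are hypothetical:
 */
static inline void tr_region_insert_example(unsigned long *tr_regions, u64 va)
{
	vcpu_quick_region_set(*tr_regions, va);	/* remember this region has a TR */
}

static inline int tr_region_may_match_example(unsigned long tr_regions, u64 ifa)
{
	return vcpu_quick_region_check(tr_regions, ifa);	/* 0 => no TR can match */
}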
306
307static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
308 u64 va, u64 rid)
309{
310 trp->page_flags = pte;
311 trp->itir = itir;
312 trp->vadr = va;
313 trp->rid = rid;
314}
315
316extern u64 kvm_lookup_mpa(u64 gpfn);
317extern u64 kvm_gpa_to_mpa(u64 gpa);
318
319 /* Return the I/O type if the gpfn is an I/O page */
320#define __gpfn_is_io(gpfn) \
321 ({ \
322 u64 pte, ret = 0; \
323 pte = kvm_lookup_mpa(gpfn); \
324 if (!(pte & GPFN_INV_MASK)) \
325 ret = pte & GPFN_IO_MASK; \
326 ret; \
327 })
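/*
 * Editor's illustrative sketch (not part of the original commit):
 * __gpfn_is_io() evaluates to 0 for RAM-backed (or invalid) frames and
 * to the GPFN_IO_MASK bits for emulated-I/O frames, so callers can
 * branch on the result directly:
 */
static inline int gpfn_needs_emulation_example(u64 gpfn)
{
	return __gpfn_is_io(gpfn) != 0;
}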
328
329#endif
330
331#define IA64_NO_FAULT 0
332#define IA64_FAULT 1
333
334#define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15)
335
336#define SW_BAD 0 /* Bad mode transition */
337#define SW_V2P 1 /* Physical mode emulation is activated */
338#define SW_P2V 2 /* Exit physical mode emulation */
339#define SW_SELF 3 /* No mode transition */
340#define SW_NOP 4 /* Mode transition, but without action required */
341
342#define GUEST_IN_PHY 0x1
343#define GUEST_PHY_EMUL 0x2
344
345#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))
346
347#define VRN_SHIFT 61
348#define VRN_MASK 0xe000000000000000
349#define VRN0 0x0UL
350#define VRN1 0x1UL
351#define VRN2 0x2UL
352#define VRN3 0x3UL
353#define VRN4 0x4UL
354#define VRN5 0x5UL
355#define VRN6 0x6UL
356#define VRN7 0x7UL
357
358#define IRQ_NO_MASKED 0
359#define IRQ_MASKED_BY_VTPR 1
360#define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */
361
362#define PTA_BASE_SHIFT 15
363
364#define IA64_PSR_VM_BIT 46
365#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
366
367/* Interruption Function State */
368#define IA64_IFS_V_BIT 63
369#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
370
371#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
372#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
373
374#ifndef __ASSEMBLY__
375
376#include <asm/gcc_intrin.h>
377
378#define is_physical_mode(v) \
379 ((v->arch.mode_flags) & GUEST_IN_PHY)
380
381#define is_virtual_mode(v) \
382 (!is_physical_mode(v))
383
384#define MODE_IND(psr) \
385 (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
386
387#define _vmm_raw_spin_lock(x) \
388 do { \
389 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
390 __u64 ia64_spinlock_val; \
391 ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
392 if (unlikely(ia64_spinlock_val)) { \
393 do { \
394 while (*ia64_spinlock_ptr) \
395 ia64_barrier(); \
396 ia64_spinlock_val = \
397 ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
398 } while (ia64_spinlock_val); \
399 } \
400 } while (0)
401
402#define _vmm_raw_spin_unlock(x) \
403 do { barrier(); \
404 ((spinlock_t *)x)->raw_lock.lock = 0; } \
405while (0)
406
407void vmm_spin_lock(spinlock_t *lock);
408void vmm_spin_unlock(spinlock_t *lock);
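/*
 * Editor's note (illustrative, not part of the original commit):
 * _vmm_raw_spin_lock() is a test-and-test-and-set loop: one
 * cmpxchg4.acq to try to take the lock, then plain reads while it is
 * contended (so the cache line is not bounced), then another cmpxchg.
 * A hypothetical critical section in VMM context:
 */
static inline void vmm_lock_example(spinlock_t *lock, int *shared_counter)
{
	vmm_spin_lock(lock);
	(*shared_counter)++;		/* update protected state */
	vmm_spin_unlock(lock);
}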
409enum {
410 I_TLB = 1,
411 D_TLB = 2
412};
413
414union kvm_va {
415 struct {
416 unsigned long off : 60; /* intra-region offset */
417 unsigned long reg : 4; /* region number */
418 } f;
419 unsigned long l;
420 void *p;
421};
422
423#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \
424 _v.f.reg = 0; _v.l; })
425#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \
426 _v.f.reg = -1; _v.p; })
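/*
 * Editor's note (illustrative, not part of the original commit):
 * __kvm_pa() clears the 4-bit region field (bits 63:60) while
 * __kvm_va() sets it to all ones, mirroring __pa()/__va() for the
 * VMM's identity-mapped region.  Hypothetical round trip:
 */
static inline int kvm_va_roundtrip_example(void *p)
{
	/* holds for any address whose top nibble is already 0xf */
	return __kvm_va(__kvm_pa(p)) == p;
}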
427
428#define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \
429 _v.rid; })
430#define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \
431 _v.ps; })
432#define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \
433 _v.ve; })
434
435enum vhpt_ref{ DATA_REF, NA_REF, INST_REF, RSE_REF };
436enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };
437
438#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
439#define VMX(_v, _x) ((_v)->arch._x)
440
441#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
442#define VLSAPIC_XTP(_v) VMX(_v, xtp)
443
444static inline unsigned long itir_ps(unsigned long itir)
445{
446 return ((itir >> 2) & 0x3f);
447}
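/*
 * Editor's worked example (illustrative, not part of the original
 * commit): itir.ps lives in bits 7:2, so itir = 0x38 encodes ps = 14,
 * i.e. a 16KB page once expanded with PSIZE().
 */
static inline u64 itir_page_size_example(u64 itir)
{
	return PSIZE(itir_ps(itir));	/* 0x38 -> 1UL << 14 == 16384 */
}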
448
449
450/**************************************************************************
451 VCPU control register access routines
452 **************************************************************************/
453
454static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
455{
456 return ((u64)VCPU(vcpu, itir));
457}
458
459static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
460{
461 VCPU(vcpu, itir) = val;
462}
463
464static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
465{
466 return ((u64)VCPU(vcpu, ifa));
467}
468
469static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
470{
471 VCPU(vcpu, ifa) = val;
472}
473
474static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
475{
476 return ((u64)VCPU(vcpu, iva));
477}
478
479static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
480{
481 return ((u64)VCPU(vcpu, pta));
482}
483
484static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
485{
486 return ((u64)VCPU(vcpu, lid));
487}
488
489static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
490{
491 return ((u64)VCPU(vcpu, tpr));
492}
493
494static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
495{
496 return (0UL); /*reads of eoi always return 0 */
497}
498
499static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
500{
501 return ((u64)VCPU(vcpu, irr[0]));
502}
503
504static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
505{
506 return ((u64)VCPU(vcpu, irr[1]));
507}
508
509static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
510{
511 return ((u64)VCPU(vcpu, irr[2]));
512}
513
514static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
515{
516 return ((u64)VCPU(vcpu, irr[3]));
517}
518
519static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
520{
521 ia64_setreg(_IA64_REG_CR_DCR, val);
522}
523
524static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
525{
526 VCPU(vcpu, isr) = val;
527}
528
529static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
530{
531 VCPU(vcpu, lid) = val;
532}
533
534static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
535{
536 VCPU(vcpu, ipsr) = val;
537}
538
539static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
540{
541 VCPU(vcpu, iip) = val;
542}
543
544static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
545{
546 VCPU(vcpu, ifs) = val;
547}
548
549static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
550{
551 VCPU(vcpu, iipa) = val;
552}
553
554static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
555{
556 VCPU(vcpu, iha) = val;
557}
558
559
560static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
561{
562 return vcpu->arch.vrr[reg>>61];
563}
564
565/**************************************************************************
566 VCPU debug breakpoint register access routines
567 **************************************************************************/
568
569static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
570{
571 __ia64_set_dbr(reg, val);
572}
573
574static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
575{
576 ia64_set_ibr(reg, val);
577}
578
579static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
580{
581 return ((u64)__ia64_get_dbr(reg));
582}
583
584static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
585{
586 return ((u64)ia64_get_ibr(reg));
587}
588
589/**************************************************************************
590 VCPU performance monitor register access routines
591 **************************************************************************/
592static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
593{
594 /* NOTE: Writes to unimplemented PMC registers are discarded */
595 ia64_set_pmc(reg, val);
596}
597
598static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
599{
600 /* NOTE: Writes to unimplemented PMD registers are discarded */
601 ia64_set_pmd(reg, val);
602}
603
604static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
605{
606 /* NOTE: Reads from unimplemented PMC registers return zero */
607 return ((u64)ia64_get_pmc(reg));
608}
609
610static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
611{
612 /* NOTE: Reads from unimplemented PMD registers return zero */
613 return ((u64)ia64_get_pmd(reg));
614}
615
616static inline unsigned long vrrtomrr(unsigned long val)
617{
618 union ia64_rr rr;
619 rr.val = val;
620 rr.rid = (rr.rid << 4) | 0xe;
621 if (rr.ps > PAGE_SHIFT)
622 rr.ps = PAGE_SHIFT;
623 rr.ve = 1;
624 return rr.val;
625}
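/*
 * Editor's worked example (illustrative, not part of the original
 * commit): vrrtomrr() turns a virtual rr into a machine rr by shifting
 * the rid left 4 bits and or-ing in 0xe, clamping the page size to the
 * host PAGE_SHIFT and forcing the VHPT walker on; a virtual rid of
 * 0x10 thus becomes machine rid 0x10e.
 */
static inline unsigned long vrrtomrr_example(void)
{
	union ia64_rr rr;

	rr.val = 0;
	rr.rid = 0x10;
	return vrrtomrr(rr.val);	/* result has rr.rid == 0x10e, rr.ve == 1 */
}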
626
627
628static inline int highest_bits(int *dat)
629{
630 u32 bits, bitnum;
631 int i;
632
633 /* loop for all 256 bits */
634 for (i = 7; i >= 0 ; i--) {
635 bits = dat[i];
636 if (bits) {
637 bitnum = fls(bits);
638 return i * 32 + bitnum - 1;
639 }
640 }
641 return NULL_VECTOR;
642}
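/*
 * Editor's worked example (illustrative, not part of the original
 * commit): highest_bits() scans the 256-bit bitmap as eight 32-bit
 * words from the top down and applies fls() to the first nonzero word,
 * so bit 77 (word 2, bit 13) yields vector 77.
 */
static inline int highest_bits_example(void)
{
	int dat[8] = { 0 };

	dat[2] = 1 << 13;		/* vector 77 == 2*32 + 13 */
	return highest_bits(dat);	/* returns 77 */
}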
643
644/*
645 * Return true if the pending irq has higher priority than the
646 * in-service one.
647 */
648static inline int is_higher_irq(int pending, int inservice)
649{
650 return ((pending > inservice)
651 || ((pending != NULL_VECTOR)
652 && (inservice == NULL_VECTOR)));
653}
654
655static inline int is_higher_class(int pending, int mic)
656{
657 return ((pending >> 4) > mic);
658}
659
660/*
661 * Return 0-255 for pending irq.
662 * NULL_VECTOR: when no pending.
663 */
664static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
665{
666 if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
667 return NMI_VECTOR;
668 if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
669 return ExtINT_VECTOR;
670
671 return highest_bits((int *)&VCPU(vcpu, irr[0]));
672}
673
674static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
675{
676 if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
677 return NMI_VECTOR;
678 if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
679 return ExtINT_VECTOR;
680
681 return highest_bits((int *)&(VMX(vcpu, insvc[0])));
682}
683
684extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
685 struct ia64_fpreg *val);
686extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
687 struct ia64_fpreg *val);
688extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
689extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
690extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
691extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
692extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
693extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
694extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
695 u64 itir, u64 va, int type);
696extern struct thash_data *vhpt_lookup(u64 va);
697extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
698extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
699extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
700extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
701extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
702 u64 itir, u64 ifa, int type);
703extern void thash_purge_all(struct kvm_vcpu *v);
704extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
705 u64 va, int is_data);
706extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
707 u64 ps, int is_data);
708
709extern void vcpu_increment_iip(struct kvm_vcpu *v);
710extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
711extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
712extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
713extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
714extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
715extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
716extern void nested_dtlb(struct kvm_vcpu *vcpu);
717extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
718extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);
719
720extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
721extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);
722
723extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
724extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
725extern void vmm_transition(struct kvm_vcpu *vcpu);
726extern void vmm_trampoline(union context *from, union context *to);
727extern int vmm_entry(void);
728extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);
729
730extern void vmm_reset_entry(void);
731void kvm_init_vtlb(struct kvm_vcpu *v);
732void kvm_init_vhpt(struct kvm_vcpu *v);
733void thash_init(struct thash_cb *hcb, u64 sz);
734
735void panic_vm(struct kvm_vcpu *v);
736
737extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
738 u64 arg4, u64 arg5, u64 arg6, u64 arg7);
739#endif
740#endif /* __KVM_VCPU_H__ */
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
new file mode 100644
index 00000000000..f6c5617e16a
--- /dev/null
+++ b/arch/ia64/kvm/vti.h
@@ -0,0 +1,290 @@
1/*
2 * vti.h: prototypes for the general VT-i related interfaces
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
6 * Fred Yang (fred.yang@intel.com)
7 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
8 *
9 * Copyright (c) 2007, Intel Corporation.
10 * Zhang xiantao <xiantao.zhang@intel.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License,
14 * version 2, as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
23 * Place - Suite 330, Boston, MA 02111-1307 USA.
24 */
25#ifndef _KVM_VT_I_H
26#define _KVM_VT_I_H
27
28#ifndef __ASSEMBLY__
29#include <asm/page.h>
30
31#include <linux/kvm_host.h>
32
33/* define itr.i and itr.d in ia64_itr function */
34#define ITR 0x01
35#define DTR 0x02
36#define IaDTR 0x03
37
38#define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
39#define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/
40
41#define RR6 (6UL<<61)
42#define RR7 (7UL<<61)
43
44
45/* config_options in pal_vp_init_env */
46#define VP_INITIALIZE 1UL
47#define VP_FR_PMC 1UL<<1
48#define VP_OPCODE 1UL<<8
49#define VP_CAUSE 1UL<<9
50#define VP_FW_ACC 1UL<<63
51
52/* init vp env, also initializing vm_buffer */
53#define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\
54 VP_OPCODE | VP_CAUSE | VP_FW_ACC)
55/* init vp env without initializing vm_buffer */
56#define VP_INIT_ENV VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC
57
58#define PAL_VP_CREATE 265
59/* Stacked Virt. Initializes a new VPD for the operation of
60 * a new virtual processor in the virtual environment.
61 */
62#define PAL_VP_ENV_INFO 266
63/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
64#define PAL_VP_EXIT_ENV 267
65/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
66#define PAL_VP_INIT_ENV 268
67/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
68#define PAL_VP_REGISTER 269
69/*Stacked Virt. Register a different host IVT for the virtual processor.*/
70#define PAL_VP_RESUME 270
71/* Renamed from PAL_VP_RESUME */
72#define PAL_VP_RESTORE 270
73/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
74#define PAL_VP_SUSPEND 271
75/* Renamed from PAL_VP_SUSPEND */
76#define PAL_VP_SAVE 271
77/* Stacked Virt. Suspends operation for the specified virtual processor on
78 * the logical processor.
79 */
80#define PAL_VP_TERMINATE 272
81/* Stacked Virt. Terminates operation for the specified virtual processor.*/
82
83union vac {
84 unsigned long value;
85 struct {
86 int a_int:1;
87 int a_from_int_cr:1;
88 int a_to_int_cr:1;
89 int a_from_psr:1;
90 int a_from_cpuid:1;
91 int a_cover:1;
92 int a_bsw:1;
93 long reserved:57;
94 };
95};
96
97union vdc {
98 unsigned long value;
99 struct {
100 int d_vmsw:1;
101 int d_extint:1;
102 int d_ibr_dbr:1;
103 int d_pmc:1;
104 int d_to_pmd:1;
105 int d_itm:1;
106 long reserved:58;
107 };
108};
109
110struct vpd {
111 union vac vac;
112 union vdc vdc;
113 unsigned long virt_env_vaddr;
114 unsigned long reserved1[29];
115 unsigned long vhpi;
116 unsigned long reserved2[95];
117 unsigned long vgr[16];
118 unsigned long vbgr[16];
119 unsigned long vnat;
120 unsigned long vbnat;
121 unsigned long vcpuid[5];
122 unsigned long reserved3[11];
123 unsigned long vpsr;
124 unsigned long vpr;
125 unsigned long reserved4[76];
126 union {
127 unsigned long vcr[128];
128 struct {
129 unsigned long dcr;
130 unsigned long itm;
131 unsigned long iva;
132 unsigned long rsv1[5];
133 unsigned long pta;
134 unsigned long rsv2[7];
135 unsigned long ipsr;
136 unsigned long isr;
137 unsigned long rsv3;
138 unsigned long iip;
139 unsigned long ifa;
140 unsigned long itir;
141 unsigned long iipa;
142 unsigned long ifs;
143 unsigned long iim;
144 unsigned long iha;
145 unsigned long rsv4[38];
146 unsigned long lid;
147 unsigned long ivr;
148 unsigned long tpr;
149 unsigned long eoi;
150 unsigned long irr[4];
151 unsigned long itv;
152 unsigned long pmv;
153 unsigned long cmcv;
154 unsigned long rsv5[5];
155 unsigned long lrr0;
156 unsigned long lrr1;
157 unsigned long rsv6[46];
158 };
159 };
160 unsigned long reserved5[128];
161 unsigned long reserved6[3456];
162 unsigned long vmm_avail[128];
163 unsigned long reserved7[4096];
164};
165
166#define PAL_PROC_VM_BIT (1UL << 40)
167#define PAL_PROC_VMSW_BIT (1UL << 54)
168
169static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
170 u64 *vp_env_info)
171{
172 struct ia64_pal_retval iprv;
173 PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
174 *buffer_size = iprv.v0;
175 *vp_env_info = iprv.v1;
176 return iprv.status;
177}
178
179static inline s64 ia64_pal_vp_exit_env(u64 iva)
180{
181 struct ia64_pal_retval iprv;
182
183 PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
184 return iprv.status;
185}
186
187static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
188 u64 vbase_addr, u64 *vsa_base)
189{
190 struct ia64_pal_retval iprv;
191
192 PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
193 vbase_addr);
194 *vsa_base = iprv.v0;
195
196 return iprv.status;
197}
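/*
 * Editor's illustrative sketch (not part of the original commit): a
 * hypothetical bring-up sequence for the VT-i environment; vm_buffer
 * is an assumed, suitably sized and aligned host buffer.
 */
static inline s64 vti_env_setup_example(u64 vm_buffer, u64 *vsa_base)
{
	u64 buf_size, vp_env_info;
	s64 status;

	status = ia64_pal_vp_env_info(&buf_size, &vp_env_info);
	if (status != 0)
		return status;
	/* the first logical cpu also initializes vm_buffer */
	return ia64_pal_vp_init_env(VP_INIT_ENV_INITALIZE,
				    __pa(vm_buffer), vm_buffer, vsa_base);
}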
198
199static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
200{
201 struct ia64_pal_retval iprv;
202
203 PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
204
205 return iprv.status;
206}
207
208static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
209{
210 struct ia64_pal_retval iprv;
211
212 PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
213
214 return iprv.status;
215}
216
217#endif
218
219/*VPD field offset*/
220#define VPD_VAC_START_OFFSET 0
221#define VPD_VDC_START_OFFSET 8
222#define VPD_VHPI_START_OFFSET 256
223#define VPD_VGR_START_OFFSET 1024
224#define VPD_VBGR_START_OFFSET 1152
225#define VPD_VNAT_START_OFFSET 1280
226#define VPD_VBNAT_START_OFFSET 1288
227#define VPD_VCPUID_START_OFFSET 1296
228#define VPD_VPSR_START_OFFSET 1424
229#define VPD_VPR_START_OFFSET 1432
230#define VPD_VRSE_CFLE_START_OFFSET 1440
231#define VPD_VCR_START_OFFSET 2048
232#define VPD_VTPR_START_OFFSET 2576
233#define VPD_VRR_START_OFFSET 3072
234#define VPD_VMM_VAIL_START_OFFSET 31744
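#ifndef __ASSEMBLY__
/*
 * Editor's note (illustrative, not part of the original commit): the
 * VPD_*_START_OFFSET constants mirror the layout of struct vpd above;
 * a hypothetical compile-time check (assuming BUILD_BUG_ON from
 * <linux/kernel.h>) would be:
 */
static inline void vpd_layout_check_example(void)
{
	BUILD_BUG_ON(offsetof(struct vpd, vhpi) != VPD_VHPI_START_OFFSET);
	BUILD_BUG_ON(offsetof(struct vpd, vgr) != VPD_VGR_START_OFFSET);
	BUILD_BUG_ON(offsetof(struct vpd, vpsr) != VPD_VPSR_START_OFFSET);
	BUILD_BUG_ON(offsetof(struct vpd, vcr) != VPD_VCR_START_OFFSET);
}
#endif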
235
236/*Virtualization faults*/
237
238#define EVENT_MOV_TO_AR 1
239#define EVENT_MOV_TO_AR_IMM 2
240#define EVENT_MOV_FROM_AR 3
241#define EVENT_MOV_TO_CR 4
242#define EVENT_MOV_FROM_CR 5
243#define EVENT_MOV_TO_PSR 6
244#define EVENT_MOV_FROM_PSR 7
245#define EVENT_ITC_D 8
246#define EVENT_ITC_I 9
247#define EVENT_MOV_TO_RR 10
248#define EVENT_MOV_TO_DBR 11
249#define EVENT_MOV_TO_IBR 12
250#define EVENT_MOV_TO_PKR 13
251#define EVENT_MOV_TO_PMC 14
252#define EVENT_MOV_TO_PMD 15
253#define EVENT_ITR_D 16
254#define EVENT_ITR_I 17
255#define EVENT_MOV_FROM_RR 18
256#define EVENT_MOV_FROM_DBR 19
257#define EVENT_MOV_FROM_IBR 20
258#define EVENT_MOV_FROM_PKR 21
259#define EVENT_MOV_FROM_PMC 22
260#define EVENT_MOV_FROM_CPUID 23
261#define EVENT_SSM 24
262#define EVENT_RSM 25
263#define EVENT_PTC_L 26
264#define EVENT_PTC_G 27
265#define EVENT_PTC_GA 28
266#define EVENT_PTR_D 29
267#define EVENT_PTR_I 30
268#define EVENT_THASH 31
269#define EVENT_TTAG 32
270#define EVENT_TPA 33
271#define EVENT_TAK 34
272#define EVENT_PTC_E 35
273#define EVENT_COVER 36
274#define EVENT_RFI 37
275#define EVENT_BSW_0 38
276#define EVENT_BSW_1 39
277#define EVENT_VMSW 40
278
279/* PAL virtual services offsets */
280#define PAL_VPS_RESUME_NORMAL 0x0000
281#define PAL_VPS_RESUME_HANDLER 0x0400
282#define PAL_VPS_SYNC_READ 0x0800
283#define PAL_VPS_SYNC_WRITE 0x0c00
284#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000
285#define PAL_VPS_THASH 0x1400
286#define PAL_VPS_TTAG 0x1800
287#define PAL_VPS_RESTORE 0x1c00
288#define PAL_VPS_SAVE 0x2000
289
290#endif /* _KVM_VT_I_H */