aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/include/asm/xen
diff options
context:
space:
mode:
authorIsaku Yamahata <yamahata@valinux.co.jp>2008-10-16 22:18:01 -0400
committerTony Luck <tony.luck@intel.com>2008-10-17 13:03:47 -0400
commitd65b503edd7361097974f909c05f26699aff4057 (patch)
tree3ccc712e496755e875ad339265eaa784f7e138fc /arch/ia64/include/asm/xen
parent4b83ce4367943aa3eb960df56759f45d70722a4c (diff)
ia64/pv_ops/xen: define xen paravirtualized instructions for hand written assembly code
define xen paravirtualized instructions for hand written assembly code. Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com> Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp> Cc: Akio Takebe <takebe_akio@jp.fujitsu.com> Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/include/asm/xen')
-rw-r--r--arch/ia64/include/asm/xen/inst.h447
1 files changed, 447 insertions, 0 deletions
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
new file mode 100644
index 000000000000..03895e985509
--- /dev/null
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -0,0 +1,447 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/xen/privop.h>
24
/*
 * Paravirtualized reads of privileged control registers for hand-written
 * assembly: instead of "mov reg = cr.<name>", the value is loaded from
 * the corresponding XSI_* slot (offsets from <asm/xen/privop.h>) in the
 * Xen shared area.  The ";;" stop separates materializing the address
 * from the load that consumes it.
 */
25#define MOV_FROM_IFA(reg) \
26 movl reg = XSI_IFA; \
27 ;; \
28 ld8 reg = [reg]
29
30#define MOV_FROM_ITIR(reg) \
31 movl reg = XSI_ITIR; \
32 ;; \
33 ld8 reg = [reg]
34
35#define MOV_FROM_ISR(reg) \
36 movl reg = XSI_ISR; \
37 ;; \
38 ld8 reg = [reg]
39
40#define MOV_FROM_IHA(reg) \
41 movl reg = XSI_IHA; \
42 ;; \
43 ld8 reg = [reg]
44
/* Predicated variant: both the movl and the ld8 are qualified by "pred". */
45#define MOV_FROM_IPSR(pred, reg) \
46(pred) movl reg = XSI_IPSR; \
47 ;; \
48(pred) ld8 reg = [reg]
49
50#define MOV_FROM_IIM(reg) \
51 movl reg = XSI_IIM; \
52 ;; \
53 ld8 reg = [reg]
54
55#define MOV_FROM_IIP(reg) \
56 movl reg = XSI_IIP; \
57 ;; \
58 ld8 reg = [reg]
59
/*
 * reg = cr.ivr, via the XEN_HYPER_GET_IVR hypercall, whose result comes
 * back in r8.  Three cases preserve the caller's r8:
 *   - \reg is r8:  the hypercall already left the result in place;
 *   - \clob is r8: r8 is a declared clobber, just copy the result out;
 *   - otherwise:   stash r8 in \clob, do the hypercall, copy the result
 *                  to \reg, restore r8 from \clob.
 */
60.macro __MOV_FROM_IVR reg, clob
61 .ifc "\reg", "r8"
62 XEN_HYPER_GET_IVR
63 .exitm
64 .endif
65 .ifc "\clob", "r8"
66 XEN_HYPER_GET_IVR
67 ;;
68 mov \reg = r8
69 .exitm
70 .endif
71
72 mov \clob = r8
73 ;;
74 XEN_HYPER_GET_IVR
75 ;;
76 mov \reg = r8
77 ;;
78 mov r8 = \clob
79.endm
80#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
81
/*
 * reg = psr, predicated on "pred", via the XEN_HYPER_GET_PSR hypercall
 * (result in r8).  Same r8-preservation scheme as __MOV_FROM_IVR above,
 * with every instruction qualified by (\pred).
 */
82.macro __MOV_FROM_PSR pred, reg, clob
83 .ifc "\reg", "r8"
84 (\pred) XEN_HYPER_GET_PSR;
85 .exitm
86 .endif
87 .ifc "\clob", "r8"
88 (\pred) XEN_HYPER_GET_PSR
89 ;;
90 (\pred) mov \reg = r8
91 .exitm
92 .endif
93
94 (\pred) mov \clob = r8
95 (\pred) XEN_HYPER_GET_PSR
96 ;;
97 (\pred) mov \reg = r8
98 (\pred) mov r8 = \clob
99.endm
100#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
101
102
/*
 * Paravirtualized writes of privileged control registers: instead of
 * "mov cr.<name> = reg", store the value into the XSI_* slot.  "clob"
 * is a scratch register used to hold the slot address.
 *
 * NOTE(review): the trailing '\' after the st8 in MOV_TO_IFA makes the
 * following blank line part of the macro — harmless today, but fragile
 * if code is ever added directly below.
 */
103#define MOV_TO_IFA(reg, clob) \
104 movl clob = XSI_IFA; \
105 ;; \
106 st8 [clob] = reg \
107
108#define MOV_TO_ITIR(pred, reg, clob) \
109(pred) movl clob = XSI_ITIR; \
110 ;; \
111(pred) st8 [clob] = reg
112
113#define MOV_TO_IHA(pred, reg, clob) \
114(pred) movl clob = XSI_IHA; \
115 ;; \
116(pred) st8 [clob] = reg
117
118#define MOV_TO_IPSR(pred, reg, clob) \
119(pred) movl clob = XSI_IPSR; \
120 ;; \
121(pred) st8 [clob] = reg; \
122 ;;
123
124#define MOV_TO_IFS(pred, reg, clob) \
125(pred) movl clob = XSI_IFS; \
126 ;; \
127(pred) st8 [clob] = reg; \
128 ;;
129
130#define MOV_TO_IIP(reg, clob) \
131 movl clob = XSI_IIP; \
132 ;; \
133 st8 [clob] = reg
134
/*
 * mov ar.kN = reg, via the XEN_HYPER_SET_KR hypercall, which takes the
 * kernel-register index in r8 and the value in r9.  The inner macro
 * saves/restores whichever of r8/r9 the caller did not hand over as a
 * clobber, and .error's out if the clobbers collide with the marshalling
 * registers in a way it cannot handle (clob0==r9 or clob1==r8).
 */
135.macro ____MOV_TO_KR kr, reg, clob0, clob1
136 .ifc "\clob0", "r9"
137 .error "clob0 \clob0 must not be r9"
138 .endif
139 .ifc "\clob1", "r8"
140 .error "clob1 \clob1 must not be r8"
141 .endif
142
143 .ifnc "\reg", "r9"
144 .ifnc "\clob1", "r9"
145 mov \clob1 = r9
146 .endif
147 mov r9 = \reg
148 .endif
149 .ifnc "\clob0", "r8"
150 mov \clob0 = r8
151 .endif
152 mov r8 = \kr
153 ;;
154 XEN_HYPER_SET_KR
155
156 .ifnc "\reg", "r9"
157 .ifnc "\clob1", "r9"
158 mov r9 = \clob1
159 .endif
160 .endif
161 .ifnc "\clob0", "r8"
162 mov r8 = \clob0
163 .endif
164.endm
165
/*
 * Wrapper that reorders the two clobbers so the inner macro's
 * constraints (clob0 != r9, clob1 != r8) are always met.
 */
166.macro __MOV_TO_KR kr, reg, clob0, clob1
167 .ifc "\clob0", "r9"
168 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
169 .exitm
170 .endif
171 .ifc "\clob1", "r8"
172 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
173 .exitm
174 .endif
175
176 ____MOV_TO_KR \kr, \reg, \clob0, \clob1
177.endm
178
/* kr is a bare suffix: MOV_TO_KR(CURRENT, ...) -> IA64_KR_CURRENT */
179#define MOV_TO_KR(kr, reg, clob0, clob1) \
180 __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
181
182
/*
 * itc.i reg (insert instruction TLB entry), predicated on "pred", via
 * the XEN_HYPER_ITC_I hypercall which takes its argument in r8.  The
 * three cases avoid corrupting the caller's r8 unless it was passed as
 * the clobber.
 */
183.macro __ITC_I pred, reg, clob
184 .ifc "\reg", "r8"
185 (\pred) XEN_HYPER_ITC_I
186 .exitm
187 .endif
188 .ifc "\clob", "r8"
189 (\pred) mov r8 = \reg
190 ;;
191 (\pred) XEN_HYPER_ITC_I
192 .exitm
193 .endif
194
195 (\pred) mov \clob = r8
196 (\pred) mov r8 = \reg
197 ;;
198 (\pred) XEN_HYPER_ITC_I
199 ;;
200 (\pred) mov r8 = \clob
201 ;;
202.endm
203#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
204
/*
 * itc.d reg (insert data TLB entry), predicated on "pred", via the
 * XEN_HYPER_ITC_D hypercall (argument in r8).  Mirrors __ITC_I above.
 */
205.macro __ITC_D pred, reg, clob
206 .ifc "\reg", "r8"
207 (\pred) XEN_HYPER_ITC_D
208 ;;
209 .exitm
210 .endif
211 .ifc "\clob", "r8"
212 (\pred) mov r8 = \reg
213 ;;
214 (\pred) XEN_HYPER_ITC_D
215 ;;
216 .exitm
217 .endif
218
219 (\pred) mov \clob = r8
220 (\pred) mov r8 = \reg
221 ;;
222 (\pred) XEN_HYPER_ITC_D
223 ;;
224 (\pred) mov r8 = \clob
225 ;;
226.endm
227#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
228
/*
 * Conditionally insert the same translation into the instruction and/or
 * data TLB: ITC_I under pred_i, ITC_D under pred_d, argument in r8.
 * Note the save/restore of r8 in the general path is unpredicated, since
 * either of the two hypercalls may fire.
 */
229.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
230 .ifc "\reg", "r8"
231 (\pred_i)XEN_HYPER_ITC_I
232 ;;
233 (\pred_d)XEN_HYPER_ITC_D
234 ;;
235 .exitm
236 .endif
237 .ifc "\clob", "r8"
238 mov r8 = \reg
239 ;;
240 (\pred_i)XEN_HYPER_ITC_I
241 ;;
242 (\pred_d)XEN_HYPER_ITC_D
243 ;;
244 .exitm
245 .endif
246
247 mov \clob = r8
248 mov r8 = \reg
249 ;;
250 (\pred_i)XEN_HYPER_ITC_I
251 ;;
252 (\pred_d)XEN_HYPER_ITC_D
253 ;;
254 mov r8 = \clob
255 ;;
256.endm
257#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
258 __ITC_I_AND_D pred_i, pred_d, reg, clob
259
/*
 * reg0 = thash(reg1), predicated on "pred", via the XEN_HYPER_THASH
 * hypercall: argument and result are both passed in r8.  The four cases
 * cover reg0/reg1/clob aliasing r8 so the caller's r8 survives unless it
 * was explicitly passed as a clobber or destination.
 */
260.macro __THASH pred, reg0, reg1, clob
261 .ifc "\reg0", "r8"
262 (\pred) mov r8 = \reg1
263 (\pred) XEN_HYPER_THASH
264 .exitm
/*
 * BUGFIX: this terminator was ".endc", which is not a GAS conditional
 * directive.  The first .ifc was then closed by the .endif intended for
 * the second .ifc, so the "\reg1 == r8" special case below was silently
 * skipped whenever \reg0 != r8 (the false branch skips to the first
 * balancing .endif).
 */
265 .endif
266 .ifc "\reg1", "r8"
267 (\pred) XEN_HYPER_THASH
268 ;;
269 (\pred) mov \reg0 = r8
270 ;;
271 .exitm
272 .endif
273 .ifc "\clob", "r8"
274 (\pred) mov r8 = \reg1
275 (\pred) XEN_HYPER_THASH
276 ;;
277 (\pred) mov \reg0 = r8
278 ;;
279 .exitm
280 .endif
281
282 (\pred) mov \clob = r8
283 (\pred) mov r8 = \reg1
284 (\pred) XEN_HYPER_THASH
285 ;;
286 (\pred) mov \reg0 = r8
287 (\pred) mov r8 = \clob
288 ;;
289.endm
290#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
291
/*
 * Virtual psr.ic manipulation: the interruption-collection bit lives in
 * a 4-byte word at XSI_PSR_IC in the shared area.  "ssm psr.ic" becomes
 * a st4 of 1, "rsm psr.ic" a st4 of r0 (zero).
 */
292#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
293 mov clob0 = 1; \
294 movl clob1 = XSI_PSR_IC; \
295 ;; \
296 st4 [clob1] = clob0 \
297 ;;
298
299#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
300 ;; \
301 srlz.d; \
302 mov clob1 = 1; \
303 movl clob0 = XSI_PSR_IC; \
304 ;; \
305 st4 [clob0] = clob1
306
307#define RSM_PSR_IC(clob) \
308 movl clob = XSI_PSR_IC; \
309 ;; \
310 st4 [clob] = r0; \
311 ;;
312
/*
 * Virtual "ssm psr.i": clear the guest's evtchn_upcall_mask byte
 * (address found by dereferencing XSI_PSR_I_ADDR), then check the
 * adjacent evtchn_upcall_pending byte — MASK_TO_PEND_OFS is the byte
 * offset from mask to pending — and only if an upcall is pending do the
 * XEN_HYPER_SSM_I hypercall to deliver it.
 */
313/* pred_clob will be clobbered; pred itself is only read */
314#define MASK_TO_PEND_OFS (-1)
315#define SSM_PSR_I(pred, pred_clob, clob) \
316(pred) movl clob = XSI_PSR_I_ADDR \
317 ;; \
318(pred) ld8 clob = [clob] \
319 ;; \
320 /* if (pred) vpsr.i = 1 */ \
321 /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
322(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
323 ;; \
324 /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
325(pred) ld1 clob = [clob] \
326 ;; \
327(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
328 ;; \
329(pred_clob)XEN_HYPER_SSM_I /* do a real ssm psr.i */
330
/*
 * Virtual "rsm psr.i": set the evtchn_upcall_mask byte to 1.  The
 * address load is unpredicated; only the final st1 is qualified by
 * "pred", so clob0/clob1 are always written.
 */
331#define RSM_PSR_I(pred, clob0, clob1) \
332 movl clob0 = XSI_PSR_I_ADDR; \
333 mov clob1 = 1; \
334 ;; \
335 ld8 clob0 = [clob0]; \
336 ;; \
337(pred) st1 [clob0] = clob1
338
/* Mask events AND clear virtual psr.ic in one go (unpredicated). */
339#define RSM_PSR_I_IC(clob0, clob1, clob2) \
340 movl clob0 = XSI_PSR_I_ADDR; \
341 movl clob1 = XSI_PSR_IC; \
342 ;; \
343 ld8 clob0 = [clob0]; \
344 mov clob2 = 1; \
345 ;; \
346 /* note: clears both vpsr.i and vpsr.ic! */ \
347 st1 [clob0] = clob2; \
348 st4 [clob1] = r0; \
349 ;;
350
/* psr.dt set/clear are plain hypercalls, no argument marshalling. */
351#define RSM_PSR_DT \
352 XEN_HYPER_RSM_PSR_DT
353
354#define SSM_PSR_DT_AND_SRLZ_I \
355 XEN_HYPER_SSM_PSR_DT
356
/*
 * Switch to register bank 0 ("bsw.0" replacement): spill the live bank1
 * r16-r31 into the XSI_BANK1_R16 save area using two interleaved
 * pointers (clob0/clob1, 16-byte strides), NaT-safely via st8.spill.
 * The collected NaT bits (ar.unat) are saved to XSI_B1NAT, the caller's
 * ar.unat (stashed in clob2 up front) is restored, and finally bank
 * number 0 is recorded at XSI_BANKNUM.
 */
357#define BSW_0(clob0, clob1, clob2) \
358 ;; \
359 /* r16-r31 all now hold bank1 values */ \
360 mov clob2 = ar.unat; \
361 movl clob0 = XSI_BANK1_R16; \
362 movl clob1 = XSI_BANK1_R16 + 8; \
363 ;; \
364.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
365.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
366 ;; \
367.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
368.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
369 ;; \
370.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
371.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
372 ;; \
373.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
374.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
375 ;; \
376.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
377.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
378 ;; \
379.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
380.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
381 ;; \
382.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
383.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
384 ;; \
385.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
386.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
387 ;; \
388 mov clob1 = ar.unat; \
389 movl clob0 = XSI_B1NAT; \
390 ;; \
391 st8 [clob0] = clob1; \
392 mov ar.unat = clob2; \
393 movl clob0 = XSI_BANKNUM; \
394 ;; \
395 st4 [clob0] = r0
396
397
/*
 * Switch to register bank 1 ("bsw.1" replacement): load the saved NaT
 * bits from XSI_B1NAT into ar.unat so the ld8.fill's reconstruct NaT
 * state, record bank number 1 at XSI_BANKNUM, then refill r16-r31 from
 * the XSI_BANK1_R16 area with two interleaved pointers (r30/r31, which
 * are therefore filled last).  The caller's ar.unat, stashed in "clob",
 * is restored at the end.  The FIXME below is the original author's:
 * r30/r31 double as pointers here, so their own NaT bits cannot be
 * carried through this sequence.
 */
398 /* FIXME: THIS CODE IS NOT NaT SAFE! */
399#define XEN_BSW_1(clob) \
400 mov clob = ar.unat; \
401 movl r30 = XSI_B1NAT; \
402 ;; \
403 ld8 r30 = [r30]; \
404 mov r31 = 1; \
405 ;; \
406 mov ar.unat = r30; \
407 movl r30 = XSI_BANKNUM; \
408 ;; \
409 st4 [r30] = r31; \
410 movl r30 = XSI_BANK1_R16; \
411 movl r31 = XSI_BANK1_R16+8; \
412 ;; \
413 ld8.fill r16 = [r30], 16; \
414 ld8.fill r17 = [r31], 16; \
415 ;; \
416 ld8.fill r18 = [r30], 16; \
417 ld8.fill r19 = [r31], 16; \
418 ;; \
419 ld8.fill r20 = [r30], 16; \
420 ld8.fill r21 = [r31], 16; \
421 ;; \
422 ld8.fill r22 = [r30], 16; \
423 ld8.fill r23 = [r31], 16; \
424 ;; \
425 ld8.fill r24 = [r30], 16; \
426 ld8.fill r25 = [r31], 16; \
427 ;; \
428 ld8.fill r26 = [r30], 16; \
429 ld8.fill r27 = [r31], 16; \
430 ;; \
431 ld8.fill r28 = [r30], 16; \
432 ld8.fill r29 = [r31], 16; \
433 ;; \
434 ld8.fill r30 = [r30]; \
435 ld8.fill r31 = [r31]; \
436 ;; \
437 mov ar.unat = clob
438
/* Native BSW_1 takes two clobbers; the Xen variant only needs one. */
439#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
440
441
/* "cover" and "rfi" become plain hypercalls. */
442#define COVER \
443 XEN_HYPER_COVER
444
/*
 * dv_serialize_data after the RFI hypercall — presumably guards a
 * data-dependency violation window around the return path; retained
 * as-is (NOTE(review): rationale not visible in this file).
 */
445#define RFI \
446 XEN_HYPER_RFI; \
447 dv_serialize_data