author    Avi Kivity <avi@qumranet.com>  2007-12-16 04:02:48 -0500
committer Avi Kivity <avi@qumranet.com>  2008-01-30 11:01:18 -0500
commit    edf884172e9828c6234b254208af04655855038d (patch)
tree      f5e5d1eecaed9737eced6ba60d09fe93149751c1 /include/asm-x86
parent    9584bf2c93f56656dba0de8f6c75b54ca7995143 (diff)
KVM: Move arch dependent files to new directory arch/x86/kvm/
This paves the way for multiple architecture support.  Note that while
ioapic.c could potentially be shared with ia64, it is also moved.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/kvm_host.h         601
-rw-r--r--  include/asm-x86/kvm_x86_emulate.h  186
2 files changed, 787 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
new file mode 100644
index 000000000000..28940e1a9713
--- /dev/null
+++ b/include/asm-x86/kvm_host.h
@@ -0,0 +1,601 @@
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/desc.h>

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)

#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_x86_emulate.h>

#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 * bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned access : 3;
	};
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;         /* More than one parent_pte? */
	int root_count;          /* Currently serving as active root */
	union {
		u64 *parent_pte;               /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};

struct kvm_vcpu_arch {
	u64 host_tsc;
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;                /* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic; /* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE 0
#define VCPU_MP_STATE_UNINITIALIZED 1
#define VCPU_MP_STATE_INIT_RECEIVED 2
#define VCPU_MP_STATE_SIPI_RECEIVED 3
#define VCPU_MP_STATE_HALTED 4
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;

	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		u8 nr;
		u32 error_code;
	} exception;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */
	struct x86_emulate_ctxt emulate_ctxt;
};

struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_arch {
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];

	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;

	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code);
	bool (*exception_injected)(struct kvm_vcpu *vcpu);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);

	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

enum emulation_result {
	EMULATE_DONE,       /* no further processing */
	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
	EMULATE_FAIL,       /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)":: "r" (image));
}

static inline void fpu_init(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif
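
The kvm_mmu_page_role union above is what lets the MMU key its shadow-page
hash: per the comment in struct kvm_mmu_page, a shadow page is looked up by
its gfn and reused only when the packed role.word also matches.  Below is a
minimal standalone sketch of that packing, using a trimmed copy of the union;
it assumes gcc's LSB-first bitfield layout on x86 (as the kernel does), and
the field values are invented for illustration:

#include <stdio.h>

/* Trimmed, illustration-only copy of union kvm_mmu_page_role. */
union mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;                 /* bits 0:3, guest paging levels */
		unsigned level : 4;                   /* bits 4:7, shadow level */
		unsigned quadrant : 2;                /* bits 8:9 */
		unsigned pad_for_nice_hex_output : 6; /* bits 10:15 */
		unsigned metaphysical : 1;            /* bit 16 */
		unsigned access : 3;                  /* bits 17:19 */
	};
};

int main(void)
{
	union mmu_page_role role = { .word = 0 };

	role.glevels = 4;	/* invented example: 4-level guest paging */
	role.level = 1;		/* leaf shadow page */
	role.access = 7;	/* all common access bits set */

	/* Two shadow pages are interchangeable only if this word is equal. */
	printf("role.word = %#x\n", role.word);	/* prints 0xe0014 */
	return 0;
}

The pad_for_nice_hex_output field does what its name says: it pushes
metaphysical and access into their own hex digits, so a dumped role.word is
readable at a glance.
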
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
new file mode 100644
index 000000000000..7db91b9bdcd4
--- /dev/null
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -0,0 +1,186 @@
/******************************************************************************
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef __X86_EMULATE_H__
#define __X86_EMULATE_H__

struct x86_emulate_ctxt;

/*
 * x86_emulate_ops:
 *
 * These operations represent the instruction emulator's interface to memory.
 * There are two categories of operation: those that act on ordinary memory
 * regions (*_std), and those that act on memory regions known to require
 * special treatment or emulation (*_emulated).
 *
 * The emulator assumes that an instruction accesses only one 'emulated memory'
 * location, that this location is the given linear faulting address (cr2), and
 * that this is one of the instruction's data operands.  Instruction fetches and
 * stack operations are assumed never to access emulated memory.  The emulator
 * automatically deduces which operand of a string-move operation is accessing
 * emulated memory, and assumes that the other operand accesses normal memory.
 *
 * NOTES:
 *  1. The emulator isn't very smart about emulated vs. standard memory.
 *     'Emulated memory' access addresses should be checked for sanity.
 *     'Normal memory' accesses may fault, and the caller must arrange to
 *     detect and handle reentrancy into the emulator via recursive faults.
 *     Accesses may be unaligned and may cross page boundaries.
 *  2. If the access fails (cannot emulate, or a standard access faults) then
 *     it is up to the memop to propagate the fault to the guest VM via
 *     some out-of-band mechanism, unknown to the emulator.  The memop signals
 *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
 *     then immediately bail.
 *  3. Valid access sizes are 1, 2, 4 and 8 bytes.  On x86/32 systems only
 *     cmpxchg8b_emulated need support 8-byte accesses.
 *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */

struct x86_emulate_ops {
	/*
	 * read_std: Read bytes of standard (non-emulated/special) memory.
	 *           Used for instruction fetch, stack operations, and others.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_std)(unsigned long addr, void *val,
			unsigned int bytes, struct kvm_vcpu *vcpu);

	/*
	 * read_emulated: Read bytes from emulated/special memory area.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_emulated)(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     struct kvm_vcpu *vcpu);

	/*
	 * write_emulated: Write bytes to emulated/special memory area.
	 *  @addr:  [IN ] Linear address to which to write.
	 *  @val:   [IN ] Value to write to memory (low-order bytes used as
	 *                required).
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_emulated)(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu);

	/*
	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
	 *                   emulated/special memory area.
	 *  @addr:  [IN ] Linear address to access.
	 *  @old:   [IN ] Value expected to be current at @addr.
	 *  @new:   [IN ] Value to write to @addr.
	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
	 */
	int (*cmpxchg_emulated)(unsigned long addr,
				const void *old,
				const void *new,
				unsigned int bytes,
				struct kvm_vcpu *vcpu);

};

/* Type, address-of, and value of an instruction's operand. */
struct operand {
	enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
	unsigned int bytes;
	unsigned long val, orig_val, *ptr;
};

struct fetch_cache {
	u8 data[15];
	unsigned long start;
	unsigned long end;
};

struct decode_cache {
	u8 twobyte;
	u8 b;
	u8 lock_prefix;
	u8 rep_prefix;
	u8 op_bytes;
	u8 ad_bytes;
	u8 rex_prefix;
	struct operand src;
	struct operand dst;
	unsigned long *override_base;
	unsigned int d;
	unsigned long regs[NR_VCPU_REGS];
	unsigned long eip;
	/* modrm */
	u8 modrm;
	u8 modrm_mod;
	u8 modrm_reg;
	u8 modrm_rm;
	u8 use_modrm_ea;
	unsigned long modrm_ea;
	unsigned long modrm_val;
	struct fetch_cache fetch;
};

struct x86_emulate_ctxt {
	/* Register state before/after emulation. */
	struct kvm_vcpu *vcpu;

	/* Emulated eflags value. */
	unsigned long eflags;

	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
	int mode;

	unsigned long cs_base;
	unsigned long ds_base;
	unsigned long es_base;
	unsigned long ss_base;
	unsigned long gs_base;
	unsigned long fs_base;

	/* decode cache */
	struct decode_cache decode;
};

/* Repeat String Operation Prefix */
#define REPE_PREFIX  1
#define REPNE_PREFIX 2

/* Execution mode, passed to the emulator. */
#define X86EMUL_MODE_REAL   0 /* Real mode.             */
#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode.    */

/* Host execution mode. */
#if defined(__i386__)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
		    struct x86_emulate_ops *ops);
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops);

#endif /* __X86_EMULATE_H__ */
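
The X86EMUL_* return codes near the top of this header are the whole contract
between the emulator and its memops: a callback returns X86EMUL_CONTINUE on
success, and X86EMUL_PROPAGATE_FAULT when the access should instead fault into
the guest, at which point the emulator bails immediately (note 2 above).  A
standalone sketch of that contract follows, with a flat buffer standing in for
guest memory; guest_mem and demo_read_std are invented names, and the real
read_std callback also takes a struct kvm_vcpu * argument:

#include <stdio.h>
#include <string.h>

#define X86EMUL_CONTINUE        0
#define X86EMUL_PROPAGATE_FAULT 2

static unsigned char guest_mem[4096];	/* stand-in for guest memory */

/* Shaped like x86_emulate_ops.read_std, minus the vcpu argument. */
static int demo_read_std(unsigned long addr, void *val, unsigned int bytes)
{
	if (addr + bytes > sizeof(guest_mem))
		return X86EMUL_PROPAGATE_FAULT;	/* emulator will bail */
	memcpy(val, guest_mem + addr, bytes);
	return X86EMUL_CONTINUE;
}

int main(void)
{
	unsigned long val = 0;

	guest_mem[0x10] = 0x42;
	if (demo_read_std(0x10, &val, 1) == X86EMUL_CONTINUE)
		printf("read %#lx\n", val);	/* prints 0x42 */
	if (demo_read_std(0xfff, &val, 8) == X86EMUL_PROPAGATE_FAULT)
		printf("out-of-range access propagates a fault\n");
	return 0;
}

The three names that share the value 2 (X86EMUL_PROPAGATE_FAULT,
X86EMUL_RETRY_INSTR, X86EMUL_CMPXCHG_FAILED) all mean "terminate emulation but
report success to the caller", which is why the header defines them together
under a single comment.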