Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--  arch/x86/include/asm/kvm_host.h  752
1 files changed, 752 insertions, 0 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
new file mode 100644
index 000000000000..65679d006337
--- /dev/null
+++ b/arch/x86/include/asm/kvm_host.h
@@ -0,0 +1,752 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This header defines architecture specific interfaces, x86 version
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. See
7 * the COPYING file in the top-level directory.
8 *
9 */
10
11#ifndef _ASM_X86_KVM_HOST_H
12#define _ASM_X86_KVM_HOST_H
13
14#include <linux/types.h>
15#include <linux/mm.h>
16#include <linux/mmu_notifier.h>
17
18#include <linux/kvm.h>
19#include <linux/kvm_para.h>
20#include <linux/kvm_types.h>
21
22#include <asm/pvclock-abi.h>
23#include <asm/desc.h>
24
25#define KVM_MAX_VCPUS 16
26#define KVM_MEMORY_SLOTS 32
27/* memory slots that are not exposed to userspace */
28#define KVM_PRIVATE_MEM_SLOTS 4
29
30#define KVM_PIO_PAGE_OFFSET 1
31#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
32
33#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
34#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
35#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
36 0xFFFFFF0000000000ULL)
37
38#define KVM_GUEST_CR0_MASK \
39 (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
40 | X86_CR0_NW | X86_CR0_CD)
41#define KVM_VM_CR0_ALWAYS_ON \
42 (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
43 | X86_CR0_MP)
44#define KVM_GUEST_CR4_MASK \
45 (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
46#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
47#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
48
49#define INVALID_PAGE (~(hpa_t)0)
50#define UNMAPPED_GVA (~(gpa_t)0)
51
52/* shadow tables are PAE even on non-PAE hosts */
53#define KVM_HPAGE_SHIFT 21
54#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
55#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
56
57#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
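/*
 * Illustrative sketch (not part of this header): with KVM_HPAGE_SHIFT == 21
 * a host huge page spans 2 MiB, so KVM_PAGES_PER_HPAGE == 512 small pages.
 * A gfn therefore starts a huge-page frame exactly when its low nine bits
 * are clear; the helper name below is made up for illustration.
 */
#if 0
static inline int example_gfn_is_hpage_aligned(gfn_t gfn)
{
	return (gfn & (KVM_PAGES_PER_HPAGE - 1)) == 0;
}
#endif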
58
59#define DE_VECTOR 0
60#define DB_VECTOR 1
61#define BP_VECTOR 3
62#define OF_VECTOR 4
63#define BR_VECTOR 5
64#define UD_VECTOR 6
65#define NM_VECTOR 7
66#define DF_VECTOR 8
67#define TS_VECTOR 10
68#define NP_VECTOR 11
69#define SS_VECTOR 12
70#define GP_VECTOR 13
71#define PF_VECTOR 14
72#define MF_VECTOR 16
73#define MC_VECTOR 18
74
75#define SELECTOR_TI_MASK (1 << 2)
76#define SELECTOR_RPL_MASK 0x03
77
78#define IOPL_SHIFT 12
79
80#define KVM_ALIAS_SLOTS 4
81
82#define KVM_PERMILLE_MMU_PAGES 20
83#define KVM_MIN_ALLOC_MMU_PAGES 64
84#define KVM_MMU_HASH_SHIFT 10
85#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
86#define KVM_MIN_FREE_MMU_PAGES 5
87#define KVM_REFILL_PAGES 25
88#define KVM_MAX_CPUID_ENTRIES 40
89#define KVM_NR_VAR_MTRR 8
90
91extern spinlock_t kvm_lock;
92extern struct list_head vm_list;
93
94struct kvm_vcpu;
95struct kvm;
96
97enum kvm_reg {
98 VCPU_REGS_RAX = 0,
99 VCPU_REGS_RCX = 1,
100 VCPU_REGS_RDX = 2,
101 VCPU_REGS_RBX = 3,
102 VCPU_REGS_RSP = 4,
103 VCPU_REGS_RBP = 5,
104 VCPU_REGS_RSI = 6,
105 VCPU_REGS_RDI = 7,
106#ifdef CONFIG_X86_64
107 VCPU_REGS_R8 = 8,
108 VCPU_REGS_R9 = 9,
109 VCPU_REGS_R10 = 10,
110 VCPU_REGS_R11 = 11,
111 VCPU_REGS_R12 = 12,
112 VCPU_REGS_R13 = 13,
113 VCPU_REGS_R14 = 14,
114 VCPU_REGS_R15 = 15,
115#endif
116 VCPU_REGS_RIP,
117 NR_VCPU_REGS
118};
119
120enum {
121 VCPU_SREG_ES,
122 VCPU_SREG_CS,
123 VCPU_SREG_SS,
124 VCPU_SREG_DS,
125 VCPU_SREG_FS,
126 VCPU_SREG_GS,
127 VCPU_SREG_TR,
128 VCPU_SREG_LDTR,
129};
130
131#include <asm/kvm_x86_emulate.h>
132
133#define KVM_NR_MEM_OBJS 40
134
135struct kvm_guest_debug {
136 int enabled;
137 unsigned long bp[4];
138 int singlestep;
139};
140
141/*
142 * We don't want allocation failures within the mmu code, so we preallocate
143 * enough memory for a single page fault in a cache.
144 */
145struct kvm_mmu_memory_cache {
146 int nobjs;
147 void *objects[KVM_NR_MEM_OBJS];
148};
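/*
 * Illustrative sketch (not part of this header): roughly how the mmu code
 * is expected to use such a cache.  It is topped up from a sleepable
 * context before the shadow page tables are walked, and then drained with
 * simple pointer pops while the fault is being handled, so the fault path
 * itself never sees an allocation failure.  Helper names and GFP flags
 * below are assumptions for illustration only.
 */
#if 0
static int example_topup_cache(struct kvm_mmu_memory_cache *mc,
			       struct kmem_cache *slab, int min)
{
	void *obj;

	while (mc->nobjs < min) {
		obj = kmem_cache_zalloc(slab, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;	/* may only fail outside the fault path */
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

static void *example_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);		/* topping up guarantees this */
	return mc->objects[--mc->nobjs];
}
#endif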
149
150#define NR_PTE_CHAIN_ENTRIES 5
151
152struct kvm_pte_chain {
153 u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
154 struct hlist_node link;
155};
156
157/*
158 * kvm_mmu_page_role, below, is defined as:
159 *
160 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
161 * bits 4:7 - page table level for this shadow (1-4)
162 * bits 8:9 - page table quadrant for 2-level guests
163 * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
164 * bits 17:19 - common access permissions for all ptes in this shadow page
165 */
166union kvm_mmu_page_role {
167 unsigned word;
168 struct {
169 unsigned glevels:4;
170 unsigned level:4;
171 unsigned quadrant:2;
172 unsigned pad_for_nice_hex_output:6;
173 unsigned metaphysical:1;
174 unsigned access:3;
175 unsigned invalid:1;
176 };
177};
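/*
 * Illustrative sketch (not part of this header): because the role packs
 * into a single word, two shadow pages describe the same guest translation
 * iff their gfn and role.word are equal, so a hash lookup can compare them
 * directly.  The bucket choice and helper name below are assumptions for
 * illustration; the real lookup lives in mmu.c.
 */
#if 0
static struct kvm_mmu_page *example_lookup_shadow_page(struct kvm *kvm,
					gfn_t gfn, union kvm_mmu_page_role role)
{
	struct hlist_head *bucket;
	struct hlist_node *node;
	struct kvm_mmu_page *sp;

	bucket = &kvm->arch.mmu_page_hash[gfn & (KVM_NUM_MMU_PAGES - 1)];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word)
			return sp;
	return NULL;
}
#endif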
178
179struct kvm_mmu_page {
180 struct list_head link;
181 struct hlist_node hash_link;
182
183 /*
184 * The following two entries are used to key the shadow page in the
185 * hash table.
186 */
187 gfn_t gfn;
188 union kvm_mmu_page_role role;
189
190 u64 *spt;
191 /* hold the gfn of each spte inside spt */
192 gfn_t *gfns;
193 unsigned long slot_bitmap; /* One bit set per slot which has memory
194 * in this shadow page.
195 */
196 int multimapped; /* More than one parent_pte? */
197 int root_count; /* Currently serving as active root */
198 bool unsync;
199 bool unsync_children;
200 union {
201 u64 *parent_pte; /* !multimapped */
202 struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
203 };
204 DECLARE_BITMAP(unsync_child_bitmap, 512);
205};
206
207struct kvm_pv_mmu_op_buffer {
208 void *ptr;
209 unsigned len;
210 unsigned processed;
211 char buf[512] __aligned(sizeof(long));
212};
213
214/*
215 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
216 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
217 * mode.
218 */
219struct kvm_mmu {
220 void (*new_cr3)(struct kvm_vcpu *vcpu);
221 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
222 void (*free)(struct kvm_vcpu *vcpu);
223 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
224 void (*prefetch_page)(struct kvm_vcpu *vcpu,
225 struct kvm_mmu_page *page);
226 int (*sync_page)(struct kvm_vcpu *vcpu,
227 struct kvm_mmu_page *sp);
228 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
229 hpa_t root_hpa;
230 int root_level;
231 int shadow_root_level;
232
233 u64 *pae_root;
234};
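/*
 * Illustrative sketch (not part of this header): callers do not need to
 * know which of the three paging modes is active; they just dispatch
 * through the callbacks above.  For example, a guest page fault would be
 * routed roughly like this (wrapper name is made up for illustration):
 */
#if 0
static int example_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code)
{
	/* page_fault was installed by the MMU setup for the current mode */
	return vcpu->arch.mmu.page_fault(vcpu, gva, error_code);
}
#endif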
235
236struct kvm_vcpu_arch {
237 u64 host_tsc;
238 int interrupt_window_open;
239 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
240 DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
241 /*
242 * rip and regs accesses must go through
243 * kvm_{register,rip}_{read,write} functions.
244 */
245 unsigned long regs[NR_VCPU_REGS];
246 u32 regs_avail;
247 u32 regs_dirty;
248
249 unsigned long cr0;
250 unsigned long cr2;
251 unsigned long cr3;
252 unsigned long cr4;
253 unsigned long cr8;
254 u64 pdptrs[4]; /* pae */
255 u64 shadow_efer;
256 u64 apic_base;
257 struct kvm_lapic *apic; /* kernel irqchip context */
258 int mp_state;
259 int sipi_vector;
260 u64 ia32_misc_enable_msr;
261 bool tpr_access_reporting;
262
263 struct kvm_mmu mmu;
264 /* only needed in kvm_pv_mmu_op() path, but it's hot so
265 * put it here to avoid allocation */
266 struct kvm_pv_mmu_op_buffer mmu_op_buffer;
267
268 struct kvm_mmu_memory_cache mmu_pte_chain_cache;
269 struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
270 struct kvm_mmu_memory_cache mmu_page_cache;
271 struct kvm_mmu_memory_cache mmu_page_header_cache;
272
273 gfn_t last_pt_write_gfn;
274 int last_pt_write_count;
275 u64 *last_pte_updated;
276 gfn_t last_pte_gfn;
277
278 struct {
279 gfn_t gfn; /* presumed gfn during guest pte update */
280 pfn_t pfn; /* pfn corresponding to that gfn */
281 int largepage;
282 unsigned long mmu_seq;
283 } update_pte;
284
285 struct i387_fxsave_struct host_fx_image;
286 struct i387_fxsave_struct guest_fx_image;
287
288 gva_t mmio_fault_cr2;
289 struct kvm_pio_request pio;
290 void *pio_data;
291
292 struct kvm_queued_exception {
293 bool pending;
294 bool has_error_code;
295 u8 nr;
296 u32 error_code;
297 } exception;
298
299 struct kvm_queued_interrupt {
300 bool pending;
301 u8 nr;
302 } interrupt;
303
304 struct {
305 int active;
306 u8 save_iopl;
307 struct kvm_save_segment {
308 u16 selector;
309 unsigned long base;
310 u32 limit;
311 u32 ar;
312 } tr, es, ds, fs, gs;
313 } rmode;
314 int halt_request; /* real mode on Intel only */
315
316 int cpuid_nent;
317 struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
318 /* emulate context */
319
320 struct x86_emulate_ctxt emulate_ctxt;
321
322 gpa_t time;
323 struct pvclock_vcpu_time_info hv_clock;
324 unsigned int hv_clock_tsc_khz;
325 unsigned int time_offset;
326 struct page *time_page;
327
328 bool nmi_pending;
329 bool nmi_injected;
330
331 u64 mtrr[0x100];
332};
333
334struct kvm_mem_alias {
335 gfn_t base_gfn;
336 unsigned long npages;
337 gfn_t target_gfn;
338};
339
340struct kvm_arch {
341 int naliases;
342 struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
343
344 unsigned int n_free_mmu_pages;
345 unsigned int n_requested_mmu_pages;
346 unsigned int n_alloc_mmu_pages;
347 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
348 /*
349 * Hash table of struct kvm_mmu_page.
350 */
351 struct list_head active_mmu_pages;
352 struct list_head assigned_dev_head;
353 struct dmar_domain *intel_iommu_domain;
354 struct kvm_pic *vpic;
355 struct kvm_ioapic *vioapic;
356 struct kvm_pit *vpit;
357 struct hlist_head irq_ack_notifier_list;
358
359 int round_robin_prev_vcpu;
360 unsigned int tss_addr;
361 struct page *apic_access_page;
362
363 gpa_t wall_clock;
364
365 struct page *ept_identity_pagetable;
366 bool ept_identity_pagetable_done;
367};
368
369struct kvm_vm_stat {
370 u32 mmu_shadow_zapped;
371 u32 mmu_pte_write;
372 u32 mmu_pte_updated;
373 u32 mmu_pde_zapped;
374 u32 mmu_flooded;
375 u32 mmu_recycled;
376 u32 mmu_cache_miss;
377 u32 mmu_unsync;
378 u32 remote_tlb_flush;
379 u32 lpages;
380};
381
382struct kvm_vcpu_stat {
383 u32 pf_fixed;
384 u32 pf_guest;
385 u32 tlb_flush;
386 u32 invlpg;
387
388 u32 exits;
389 u32 io_exits;
390 u32 mmio_exits;
391 u32 signal_exits;
392 u32 irq_window_exits;
393 u32 nmi_window_exits;
394 u32 halt_exits;
395 u32 halt_wakeup;
396 u32 request_irq_exits;
397 u32 irq_exits;
398 u32 host_state_reload;
399 u32 efer_reload;
400 u32 fpu_reload;
401 u32 insn_emulation;
402 u32 insn_emulation_fail;
403 u32 hypercalls;
404 u32 irq_injections;
405};
406
407struct descriptor_table {
408 u16 limit;
409 unsigned long base;
410} __attribute__((packed));
411
412struct kvm_x86_ops {
413 int (*cpu_has_kvm_support)(void); /* __init */
414 int (*disabled_by_bios)(void); /* __init */
415 void (*hardware_enable)(void *dummy); /* __init */
416 void (*hardware_disable)(void *dummy);
417 void (*check_processor_compatibility)(void *rtn);
418 int (*hardware_setup)(void); /* __init */
419 void (*hardware_unsetup)(void); /* __exit */
420 bool (*cpu_has_accelerated_tpr)(void);
421
422 /* Create, but do not attach this VCPU */
423 struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
424 void (*vcpu_free)(struct kvm_vcpu *vcpu);
425 int (*vcpu_reset)(struct kvm_vcpu *vcpu);
426
427 void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
428 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
429 void (*vcpu_put)(struct kvm_vcpu *vcpu);
430
431 int (*set_guest_debug)(struct kvm_vcpu *vcpu,
432 struct kvm_debug_guest *dbg);
433 void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
434 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
435 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
436 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
437 void (*get_segment)(struct kvm_vcpu *vcpu,
438 struct kvm_segment *var, int seg);
439 int (*get_cpl)(struct kvm_vcpu *vcpu);
440 void (*set_segment)(struct kvm_vcpu *vcpu,
441 struct kvm_segment *var, int seg);
442 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
443 void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
444 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
445 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
446 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
447 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
448 void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
449 void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
450 void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
451 void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
452 unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
453 void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
454 int *exception);
455 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
456 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
457 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
458
459 void (*tlb_flush)(struct kvm_vcpu *vcpu);
460
461 void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
462 int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
463 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
464 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
465 unsigned char *hypercall_addr);
466 int (*get_irq)(struct kvm_vcpu *vcpu);
467 void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
468 void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
469 bool has_error_code, u32 error_code);
470 bool (*exception_injected)(struct kvm_vcpu *vcpu);
471 void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
472 void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
473 struct kvm_run *run);
474
475 int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
476 int (*get_tdp_level)(void);
477};
478
479extern struct kvm_x86_ops *kvm_x86_ops;
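/*
 * Illustrative sketch (not part of this header): kvm_x86_ops is the vendor
 * dispatch table.  vmx.c and svm.c each fill in one of these and hand it to
 * the generic init code, which makes it the global kvm_x86_ops.  The table
 * below is heavily abridged and all example_* names, as well as the
 * registration call, are assumptions for illustration only.
 */
#if 0
static struct kvm_x86_ops example_x86_ops = {
	.cpu_has_kvm_support	= example_has_support,
	.hardware_setup		= example_hardware_setup,
	.vcpu_create		= example_vcpu_create,
	.run			= example_vcpu_run,
	.handle_exit		= example_handle_exit,
	/* ... one entry per callback declared above ... */
};

static int __init example_module_init(void)
{
	return example_register_with_kvm(&example_x86_ops);
}
#endif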
480
481int kvm_mmu_module_init(void);
482void kvm_mmu_module_exit(void);
483
484void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
485int kvm_mmu_create(struct kvm_vcpu *vcpu);
486int kvm_mmu_setup(struct kvm_vcpu *vcpu);
487void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
488void kvm_mmu_set_base_ptes(u64 base_pte);
489void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
490 u64 dirty_mask, u64 nx_mask, u64 x_mask);
491
492int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
493void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
494void kvm_mmu_zap_all(struct kvm *kvm);
495unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
496void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
497
498int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
499
500int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
501 const void *val, int bytes);
502int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
503 gpa_t addr, unsigned long *ret);
504
505extern bool tdp_enabled;
506
507enum emulation_result {
508 EMULATE_DONE, /* no further processing */
509 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
510 EMULATE_FAIL, /* can't emulate this instruction */
511};
512
513#define EMULTYPE_NO_DECODE (1 << 0)
514#define EMULTYPE_TRAP_UD (1 << 1)
515int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
516 unsigned long cr2, u16 error_code, int emulation_type);
517void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
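/*
 * Illustrative sketch (not part of this header): a typical exit handler
 * calls emulate_instruction() and acts on the emulation_result, either
 * resuming the guest, bouncing MMIO out to userspace via kvm_run, or
 * reporting the failure.  The handler and its return convention below are
 * assumptions for illustration only.
 */
#if 0
static int example_emulate(struct kvm_vcpu *vcpu, struct kvm_run *run,
			   unsigned long cr2)
{
	switch (emulate_instruction(vcpu, run, cr2, 0, 0)) {
	case EMULATE_DONE:
		return 1;	/* keep running the guest */
	case EMULATE_DO_MMIO:
		return 0;	/* kvm_run carries the mmio request to userspace */
	case EMULATE_FAIL:
	default:
		kvm_report_emulation_failure(vcpu, "example");
		return 0;
	}
}
#endif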
518void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
519void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
520void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
521 unsigned long *rflags);
522
523unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
524void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
525 unsigned long *rflags);
526void kvm_enable_efer_bits(u64);
527int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
528int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
529
530struct x86_emulate_ctxt;
531
532int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
533 int size, unsigned port);
534int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
535 int size, unsigned long count, int down,
536 gva_t address, int rep, unsigned port);
537void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
538int kvm_emulate_halt(struct kvm_vcpu *vcpu);
539int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
540int emulate_clts(struct kvm_vcpu *vcpu);
541int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
542 unsigned long *dest);
543int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
544 unsigned long value);
545
546void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
547int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
548 int type_bits, int seg);
549
550int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
551
552void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
553void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
554void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
555void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
556unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
557void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
558void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
559
560int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
561int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
562
563void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
564void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
565void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
566 u32 error_code);
567
568void kvm_pic_set_irq(void *opaque, int irq, int level);
569
570void kvm_inject_nmi(struct kvm_vcpu *vcpu);
571
572void fx_init(struct kvm_vcpu *vcpu);
573
574int emulator_read_std(unsigned long addr,
575 void *val,
576 unsigned int bytes,
577 struct kvm_vcpu *vcpu);
578int emulator_write_emulated(unsigned long addr,
579 const void *val,
580 unsigned int bytes,
581 struct kvm_vcpu *vcpu);
582
583unsigned long segment_base(u16 selector);
584
585void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
586void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
587 const u8 *new, int bytes);
588int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
589void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
590int kvm_mmu_load(struct kvm_vcpu *vcpu);
591void kvm_mmu_unload(struct kvm_vcpu *vcpu);
592void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
593
594int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
595
596int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
597
598int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
599void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
600
601void kvm_enable_tdp(void);
602void kvm_disable_tdp(void);
603
604int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
605int complete_pio(struct kvm_vcpu *vcpu);
606
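/*
 * page_header() below maps a shadow page table page back to the
 * kvm_mmu_page that tracks it: the page's host physical address is turned
 * into its struct page, whose private field was pointed at the
 * kvm_mmu_page when the shadow page was allocated.
 */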
607static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
608{
609 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
610
611 return (struct kvm_mmu_page *)page_private(page);
612}
613
614static inline u16 kvm_read_fs(void)
615{
616 u16 seg;
617 asm("mov %%fs, %0" : "=g"(seg));
618 return seg;
619}
620
621static inline u16 kvm_read_gs(void)
622{
623 u16 seg;
624 asm("mov %%gs, %0" : "=g"(seg));
625 return seg;
626}
627
628static inline u16 kvm_read_ldt(void)
629{
630 u16 ldt;
631 asm("sldt %0" : "=g"(ldt));
632 return ldt;
633}
634
635static inline void kvm_load_fs(u16 sel)
636{
637 asm("mov %0, %%fs" : : "rm"(sel));
638}
639
640static inline void kvm_load_gs(u16 sel)
641{
642 asm("mov %0, %%gs" : : "rm"(sel));
643}
644
645static inline void kvm_load_ldt(u16 sel)
646{
647 asm("lldt %0" : : "rm"(sel));
648}
649
650static inline void kvm_get_idt(struct descriptor_table *table)
651{
652 asm("sidt %0" : "=m"(*table));
653}
654
655static inline void kvm_get_gdt(struct descriptor_table *table)
656{
657 asm("sgdt %0" : "=m"(*table));
658}
659
660static inline unsigned long kvm_read_tr_base(void)
661{
662 u16 tr;
663 asm("str %0" : "=g"(tr));
664 return segment_base(tr);
665}
666
667#ifdef CONFIG_X86_64
668static inline unsigned long read_msr(unsigned long msr)
669{
670 u64 value;
671
672 rdmsrl(msr, value);
673 return value;
674}
675#endif
676
677static inline void kvm_fx_save(struct i387_fxsave_struct *image)
678{
679 asm("fxsave (%0)":: "r" (image));
680}
681
682static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
683{
684 asm("fxrstor (%0)":: "r" (image));
685}
686
687static inline void kvm_fx_finit(void)
688{
689 asm("finit");
690}
691
692static inline u32 get_rdx_init_val(void)
693{
694 return 0x600; /* P6 family */
695}
696
697static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
698{
699 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
700}
701
702#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
703#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
704#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
705#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
706#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
707#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
708#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
709#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
710#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
711#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
712#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
713
714#define MSR_IA32_TIME_STAMP_COUNTER 0x010
715
716#define TSS_IOPB_BASE_OFFSET 0x66
717#define TSS_BASE_SIZE 0x68
718#define TSS_IOPB_SIZE (65536 / 8)
719#define TSS_REDIRECTION_SIZE (256 / 8)
720#define RMODE_TSS_SIZE \
721 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
722
723enum {
724 TASK_SWITCH_CALL = 0,
725 TASK_SWITCH_IRET = 1,
726 TASK_SWITCH_JMP = 2,
727 TASK_SWITCH_GATE = 3,
728};
729
730/*
731 * Hardware virtualization extension instructions may fault if a
732 * reboot turns off virtualization while processes are running.
733 * Trap the fault and ignore the instruction if that happens.
734 */
735asmlinkage void kvm_handle_fault_on_reboot(void);
736
737#define __kvm_handle_fault_on_reboot(insn) \
738 "666: " insn "\n\t" \
739 ".pushsection .fixup, \"ax\" \n" \
740 "667: \n\t" \
741 __ASM_SIZE(push) " $666b \n\t" \
742 "jmp kvm_handle_fault_on_reboot \n\t" \
743 ".popsection \n\t" \
744 ".pushsection __ex_table, \"a\" \n\t" \
745 _ASM_PTR " 666b, 667b \n\t" \
746 ".popsection"
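/*
 * Illustrative sketch (not part of this header): vendor code can wrap a
 * raw virtualization opcode in the macro above, so that a fault raised
 * because virtualization was just turned off is redirected to
 * kvm_handle_fault_on_reboot() instead of oopsing.  The wrapper below is
 * only an illustration of that pattern.
 */
#if 0
static void example_vmclear(u64 *phys_addr)
{
	asm volatile(__kvm_handle_fault_on_reboot(ASM_VMX_VMCLEAR_RAX)
		     : : "a"(phys_addr), "m"(*phys_addr)
		     : "cc", "memory");
}
#endif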
747
748#define KVM_ARCH_WANT_MMU_NOTIFIER
749int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
750int kvm_age_hva(struct kvm *kvm, unsigned long hva);
751
752#endif /* _ASM_X86_KVM_HOST_H */