Diffstat (limited to 'drivers/kvm/kvm.h')

 drivers/kvm/kvm.h | 116 +++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 108 insertions(+), 8 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 152312c1fafa..a7c5e6bee034 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -10,6 +10,8 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/signal.h>
 
@@ -18,6 +20,7 @@
 #include <linux/kvm_para.h>
 
 #define CR0_PE_MASK (1ULL << 0)
+#define CR0_MP_MASK (1ULL << 1)
 #define CR0_TS_MASK (1ULL << 3)
 #define CR0_NE_MASK (1ULL << 5)
 #define CR0_WP_MASK (1ULL << 16)
@@ -42,7 +45,8 @@
 	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
 	 | CR0_NW_MASK | CR0_CD_MASK)
 #define KVM_VM_CR0_ALWAYS_ON \
-	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
+	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
+	 | CR0_MP_MASK)
 #define KVM_GUEST_CR4_MASK \
 	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -51,10 +55,10 @@
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
-#define KVM_MAX_VCPUS 1
+#define KVM_MAX_VCPUS 4
 #define KVM_ALIAS_SLOTS 4
 #define KVM_MEMORY_SLOTS 4
-#define KVM_NUM_MMU_PAGES 256
+#define KVM_NUM_MMU_PAGES 1024
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
@@ -80,6 +84,11 @@
 #define KVM_PIO_PAGE_OFFSET 1
 
 /*
+ * vcpu->requests bit members
+ */
+#define KVM_TLB_FLUSH 0
+
+/*
  * Address types:
  *
  * gva - guest virtual address
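
[Editor's note: KVM_TLB_FLUSH is the first bit index for the vcpu->requests word introduced later in this patch, with kvm_flush_remote_tlbs() (declared further down) as its producer. A minimal sketch of the intended producer/consumer pattern using standard Linux bitops; flush_guest_tlb() and the example_* names are hypothetical stand-ins, not part of this patch:

	/* Producer side, possibly running on another CPU: */
	static void example_request_tlb_flush(struct kvm_vcpu *vcpu)
	{
		set_bit(KVM_TLB_FLUSH, &vcpu->requests);
	}

	/* Consumer side, serviced on the vcpu's own entry path: */
	static void example_service_requests(struct kvm_vcpu *vcpu)
	{
		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
			flush_guest_tlb(vcpu);	/* hypothetical arch-specific flush */
	}
]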
@@ -137,7 +146,7 @@ struct kvm_mmu_page {
 	gfn_t gfn;
 	union kvm_mmu_page_role role;
 
-	hpa_t page_hpa;
+	u64 *spt;
 	unsigned long slot_bitmap; /* One bit set per slot which has memory
 				    * in this shadow page.
 				    */
@@ -232,6 +241,7 @@ struct kvm_pio_request {
 	struct page *guest_pages[2];
 	unsigned guest_page_offset;
 	int in;
+	int port;
 	int size;
 	int string;
 	int down;
@@ -252,8 +262,70 @@ struct kvm_stat {
 	u32 halt_exits;
 	u32 request_irq_exits;
 	u32 irq_exits;
+	u32 light_exits;
+	u32 efer_reload;
+};
+
+struct kvm_io_device {
+	void (*read)(struct kvm_io_device *this,
+		     gpa_t addr,
+		     int len,
+		     void *val);
+	void (*write)(struct kvm_io_device *this,
+		      gpa_t addr,
+		      int len,
+		      const void *val);
+	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+	void (*destructor)(struct kvm_io_device *this);
+
+	void *private;
+};
+
+static inline void kvm_iodevice_read(struct kvm_io_device *dev,
+				     gpa_t addr,
+				     int len,
+				     void *val)
+{
+	dev->read(dev, addr, len, val);
+}
+
+static inline void kvm_iodevice_write(struct kvm_io_device *dev,
+				      gpa_t addr,
+				      int len,
+				      const void *val)
+{
+	dev->write(dev, addr, len, val);
+}
+
+static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
+{
+	return dev->in_range(dev, addr);
+}
+
+static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+	if (dev->destructor)
+		dev->destructor(dev);
+}
+
+/*
+ * It would be nice to use something smarter than a linear search, TBD...
+ * Thankfully we dont expect many devices to register (famous last words :),
+ * so until then it will suffice.  At least its abstracted so we can change
+ * in one place.
+ */
+struct kvm_io_bus {
+	int dev_count;
+#define NR_IOBUS_DEVS 6
+	struct kvm_io_device *devs[NR_IOBUS_DEVS];
 };
 
+void kvm_io_bus_init(struct kvm_io_bus *bus);
+void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+			     struct kvm_io_device *dev);
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 	union {
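
[Editor's note: the comment above implies kvm_io_bus_find_dev() is a linear scan over devs[]. A plausible sketch consistent with the declarations in this hunk, not the committed body:

	struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
	{
		int i;

		for (i = 0; i < bus->dev_count; i++) {
			struct kvm_io_device *pos = bus->devs[i];

			if (kvm_iodevice_inrange(pos, addr))
				return pos;
		}
		return NULL;	/* no registered device claims addr */
	}
]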
@@ -266,6 +338,8 @@ struct kvm_vcpu {
 	u64 host_tsc;
 	struct kvm_run *run;
 	int interrupt_window_open;
+	int guest_mode;
+	unsigned long requests;
 	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
 #define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
 	unsigned long irq_pending[NR_IRQ_WORDS];
@@ -285,15 +359,20 @@ struct kvm_vcpu {
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
 	int nmsrs;
+	int save_nmsrs;
+	int msr_offset_efer;
+#ifdef CONFIG_X86_64
+	int msr_offset_kernel_gs_base;
+#endif
 	struct vmx_msr_entry *guest_msrs;
 	struct vmx_msr_entry *host_msrs;
 
-	struct list_head free_pages;
-	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
 	struct kvm_mmu mmu;
 
 	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
 	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+	struct kvm_mmu_memory_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
@@ -305,6 +384,11 @@ struct kvm_vcpu {
 	char *guest_fx_image;
 	int fpu_active;
 	int guest_fpu_loaded;
+	struct vmx_host_state {
+		int loaded;
+		u16 fs_sel, gs_sel, ldt_sel;
+		int fs_gs_ldt_reload_needed;
+	} vmx_host_state;
 
 	int mmio_needed;
 	int mmio_read_completed;
@@ -331,6 +415,7 @@ struct kvm_vcpu {
 		u32 ar;
 	} tr, es, ds, fs, gs;
 	} rmode;
+	int halt_request; /* real mode on Intel only */
 
 	int cpuid_nent;
 	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
@@ -362,12 +447,15 @@ struct kvm {
 	struct list_head active_mmu_pages;
 	int n_free_mmu_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+	int nvcpus;
 	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
 	int memory_config_version;
 	int busy;
 	unsigned long rmap_overflow;
 	struct list_head vm_list;
 	struct file *filp;
+	struct kvm_io_bus mmio_bus;
+	struct kvm_io_bus pio_bus;
 };
 
 struct descriptor_table {
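
[Editor's note: with mmio_bus and pio_bus embedded in struct kvm, an exit handler can try in-kernel devices before punting to userspace. An illustrative MMIO-write dispatch built only from the helpers declared in this patch; the example_* name and fallback convention are hypothetical:

	static int example_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr,
				      const void *val, int len)
	{
		struct kvm_io_device *dev;

		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
		if (!dev)
			return -EOPNOTSUPP;	/* no in-kernel device; exit to userspace */
		kvm_iodevice_write(dev, addr, len, val);
		return 0;
	}
]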
@@ -488,6 +576,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		  int size, unsigned long count, int string, int down,
 		  gva_t address, int rep, unsigned port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
 int emulate_clts(struct kvm_vcpu *vcpu);
 int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
@@ -511,6 +600,7 @@ void save_msrs(struct vmx_msr_entry *e, int n);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_flush_remote_tlbs(struct kvm *kvm);
 
 int kvm_read_guest(struct kvm_vcpu *vcpu,
 		   gva_t addr,
@@ -524,10 +614,12 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
 
 unsigned long segment_base(u16 selector);
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+		       const u8 *old, const u8 *new, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+int kvm_mmu_load(struct kvm_vcpu *vcpu);
+void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 
 int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
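[Editor's note: kvm_mmu_pte_write() replaces the pre/post write pair with a single hook that sees both the old and new guest PTE bytes. A hypothetical caller in the write-emulation path, assuming a host mapping of the guest page and bytes <= 8; names are illustrative, not from this patch:

	static void example_emulated_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					   void *host_va, const void *new, int bytes)
	{
		u8 old[8];			/* large enough for a 64-bit pte */

		memcpy(old, host_va, bytes);	/* snapshot before the store */
		memcpy(host_va, new, bytes);	/* perform the guest's write */
		kvm_mmu_pte_write(vcpu, gpa, old, new, bytes);
	}
]
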
@@ -539,6 +631,14 @@ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	return vcpu->mmu.page_fault(vcpu, gva, error_code);
 }
 
+static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
+{
+	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+		return 0;
+
+	return kvm_mmu_load(vcpu);
+}
+
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
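
[Editor's note: kvm_mmu_reload() keeps the common path to a single compare and defers the real rebuild to kvm_mmu_load() only when the shadow root has been torn down. An illustrative entry-path caller; the example_* name and comments are hypothetical:

	static int example_vcpu_enter(struct kvm_vcpu *vcpu)
	{
		int r;

		r = kvm_mmu_reload(vcpu);	/* no-op while mmu.root_hpa is valid */
		if (r)
			return r;		/* propagate e.g. allocation failure */
		/* ... load guest state and enter guest mode ... */
		return 0;
	}
]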