author     Zhang Xiantao <xiantao.zhang@intel.com>    2007-10-20 03:34:38 -0400
committer  Avi Kivity <avi@qumranet.com>              2008-01-30 10:52:54 -0500
commit     34c16eecf78ed4cf01f39ac7211f5b57942ec899 (patch)
tree       9a9d6192db411cc3c7ff665cc94a8797eb55aa80 /drivers/kvm/kvm.h
parent     8d4e1288ebb753d3140d81cb349f22b0a6829a4a (diff)
KVM: Portability: Split kvm_vcpu into arch dependent and independent parts (part 1)
First step to split kvm_vcpu. Currently, we just use a macro to define
the common fields in kvm_vcpu for all archs, and each arch needs to define
its own kvm_vcpu struct.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
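
To make the intent concrete, here is a rough sketch (not part of this patch) of how an architecture could declare its own kvm_vcpu on top of the shared macro after this split; the x86-specific members shown are illustrative examples taken from the fields this patch removes from the common struct:

/*
 * Illustrative only -- not from this commit. An arch-specific header
 * would define its own struct kvm_vcpu and pull in the shared fields
 * through KVM_VCPU_COMM (which already terminates each member with a
 * semicolon); the remaining members are examples of x86-only state.
 */
struct kvm_vcpu {
	KVM_VCPU_COMM		/* kvm, vcpu_id, mutex, run, stat, mmio_*, ... */

	/* example x86-only state */
	u64 host_tsc;
	unsigned long cr0, cr2, cr3, cr4, cr8;
	u64 shadow_efer;
	struct kvm_mmu mmu;
	struct x86_emulate_ctxt emulate_ctxt;
};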
Diffstat (limited to 'drivers/kvm/kvm.h')
-rw-r--r--  drivers/kvm/kvm.h | 154
1 files changed, 27 insertions, 127 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index eb006ed696c1..db18d278c1c0 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -308,93 +308,37 @@ struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
 void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                              struct kvm_io_device *dev);

-struct kvm_vcpu {
-        struct kvm *kvm;
-        struct preempt_notifier preempt_notifier;
-        int vcpu_id;
-        struct mutex mutex;
-        int cpu;
-        u64 host_tsc;
-        struct kvm_run *run;
-        int interrupt_window_open;
-        int guest_mode;
-        unsigned long requests;
-        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
-        DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
-        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
-        unsigned long rip;      /* needs vcpu_load_rsp_rip() */
-
-        unsigned long cr0;
-        unsigned long cr2;
-        unsigned long cr3;
-        unsigned long cr4;
-        unsigned long cr8;
-        u64 pdptrs[4]; /* pae */
-        u64 shadow_efer;
-        u64 apic_base;
-        struct kvm_lapic *apic;    /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE          0
-#define VCPU_MP_STATE_UNINITIALIZED    1
-#define VCPU_MP_STATE_INIT_RECEIVED    2
-#define VCPU_MP_STATE_SIPI_RECEIVED    3
-#define VCPU_MP_STATE_HALTED            4
-        int mp_state;
-        int sipi_vector;
-        u64 ia32_misc_enable_msr;
-
-        struct kvm_mmu mmu;
-
-        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
-        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
-        struct kvm_mmu_memory_cache mmu_page_cache;
-        struct kvm_mmu_memory_cache mmu_page_header_cache;
-
-        gfn_t last_pt_write_gfn;
-        int last_pt_write_count;
-        u64 *last_pte_updated;
-
-        struct kvm_guest_debug guest_debug;
-
-        struct i387_fxsave_struct host_fx_image;
-        struct i387_fxsave_struct guest_fx_image;
-        int fpu_active;
-        int guest_fpu_loaded;
-
-        int mmio_needed;
-        int mmio_read_completed;
-        int mmio_is_write;
-        int mmio_size;
-        unsigned char mmio_data[8];
+#ifdef CONFIG_HAS_IOMEM
+#define KVM_VCPU_MMIO                   \
+        int mmio_needed;                \
+        int mmio_read_completed;        \
+        int mmio_is_write;              \
+        int mmio_size;                  \
+        unsigned char mmio_data[8];     \
         gpa_t mmio_phys_addr;
-        gva_t mmio_fault_cr2;
-        struct kvm_pio_request pio;
-        void *pio_data;
-        wait_queue_head_t wq;

-        int sigset_active;
-        sigset_t sigset;
+#else
+#define KVM_VCPU_MMIO

-        struct kvm_stat stat;
+#endif

-        struct {
-                int active;
-                u8 save_iopl;
-                struct kvm_save_segment {
-                        u16 selector;
-                        unsigned long base;
-                        u32 limit;
-                        u32 ar;
-                } tr, es, ds, fs, gs;
-        } rmode;
-        int halt_request; /* real mode on Intel only */
-
-        int cpuid_nent;
-        struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
-
-        /* emulate context */
-
-        struct x86_emulate_ctxt emulate_ctxt;
-};
+#define KVM_VCPU_COMM                                   \
+        struct kvm *kvm;                                \
+        struct preempt_notifier preempt_notifier;       \
+        int vcpu_id;                                    \
+        struct mutex mutex;                             \
+        int cpu;                                        \
+        struct kvm_run *run;                            \
+        int guest_mode;                                 \
+        unsigned long requests;                         \
+        struct kvm_guest_debug guest_debug;             \
+        int fpu_active;                                 \
+        int guest_fpu_loaded;                           \
+        wait_queue_head_t wq;                           \
+        int sigset_active;                              \
+        sigset_t sigset;                                \
+        struct kvm_stat stat;                           \
+        KVM_VCPU_MMIO

 struct kvm_mem_alias {
         gfn_t base_gfn;
@@ -680,50 +624,6 @@ static inline void kvm_guest_exit(void)
         current->flags &= ~PF_VCPU;
 }

-static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-                                     u32 error_code)
-{
-        return vcpu->mmu.page_fault(vcpu, gva, error_code);
-}
-
-static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
-{
-        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
-                __kvm_mmu_free_some_pages(vcpu);
-}
-
-static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
-{
-        if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
-                return 0;
-
-        return kvm_mmu_load(vcpu);
-}
-
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-        return vcpu->shadow_efer & EFER_LME;
-#else
-        return 0;
-#endif
-}
-
-static inline int is_pae(struct kvm_vcpu *vcpu)
-{
-        return vcpu->cr4 & X86_CR4_PAE;
-}
-
-static inline int is_pse(struct kvm_vcpu *vcpu)
-{
-        return vcpu->cr4 & X86_CR4_PSE;
-}
-
-static inline int is_paging(struct kvm_vcpu *vcpu)
-{
-        return vcpu->cr0 & X86_CR0_PG;
-}
-
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
         return slot - kvm->memslots;