 drivers/kvm/kvm.h         | 32 +-
 drivers/kvm/kvm_main.c    | 58 +-
 drivers/kvm/lapic.c       |  3 +-
 drivers/kvm/mmu.c         | 10 +-
 drivers/kvm/paging_tmpl.h |  2 +-
 drivers/kvm/svm.c         | 48 +-
 drivers/kvm/svm.h         |  2 +-
 drivers/kvm/vmx.c         | 60 +-
 drivers/kvm/vmx.h         |  8 +-
 drivers/kvm/x86_emulate.c | 76 +-
 10 files changed, 151 insertions(+), 148 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 9f10c373b74c..ec5b498945ae 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -528,7 +528,7 @@ extern struct kvm_x86_ops *kvm_x86_ops;
 	if (printk_ratelimit()) \
 		printk(KERN_ERR "kvm: %i: cpu%i " fmt, \
 		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
-} while(0)
+} while (0)
 
 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
@@ -598,7 +598,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 
 struct x86_emulate_ctxt;
 
-int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		    int size, unsigned port);
 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 			   int size, unsigned long count, int down,
@@ -607,7 +607,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
 int emulate_clts(struct kvm_vcpu *vcpu);
-int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
+int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long *dest);
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
@@ -631,7 +631,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 
 int emulator_read_std(unsigned long addr,
-                      void *val,
-                      unsigned int bytes,
-                      struct kvm_vcpu *vcpu);
+		      void *val,
+		      unsigned int bytes,
+		      struct kvm_vcpu *vcpu);
 int emulator_write_emulated(unsigned long addr,
@@ -721,55 +721,55 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 static inline u16 read_fs(void)
 {
 	u16 seg;
-	asm ("mov %%fs, %0" : "=g"(seg));
+	asm("mov %%fs, %0" : "=g"(seg));
 	return seg;
 }
 
 static inline u16 read_gs(void)
 {
 	u16 seg;
-	asm ("mov %%gs, %0" : "=g"(seg));
+	asm("mov %%gs, %0" : "=g"(seg));
 	return seg;
 }
 
 static inline u16 read_ldt(void)
 {
 	u16 ldt;
-	asm ("sldt %0" : "=g"(ldt));
+	asm("sldt %0" : "=g"(ldt));
 	return ldt;
 }
 
 static inline void load_fs(u16 sel)
 {
-	asm ("mov %0, %%fs" : : "rm"(sel));
+	asm("mov %0, %%fs" : : "rm"(sel));
 }
 
 static inline void load_gs(u16 sel)
 {
-	asm ("mov %0, %%gs" : : "rm"(sel));
+	asm("mov %0, %%gs" : : "rm"(sel));
 }
 
 #ifndef load_ldt
 static inline void load_ldt(u16 sel)
 {
-	asm ("lldt %0" : : "rm"(sel));
+	asm("lldt %0" : : "rm"(sel));
 }
 #endif
 
 static inline void get_idt(struct descriptor_table *table)
 {
-	asm ("sidt %0" : "=m"(*table));
+	asm("sidt %0" : "=m"(*table));
 }
 
 static inline void get_gdt(struct descriptor_table *table)
 {
-	asm ("sgdt %0" : "=m"(*table));
+	asm("sgdt %0" : "=m"(*table));
 }
 
 static inline unsigned long read_tr_base(void)
 {
 	u16 tr;
-	asm ("str %0" : "=g"(tr));
+	asm("str %0" : "=g"(tr));
 	return segment_base(tr);
 }
 
@@ -785,17 +785,17 @@ static inline unsigned long read_msr(unsigned long msr)
 
 static inline void fx_save(struct i387_fxsave_struct *image)
 {
-	asm ("fxsave (%0)":: "r" (image));
+	asm("fxsave (%0)":: "r" (image));
 }
 
 static inline void fx_restore(struct i387_fxsave_struct *image)
 {
-	asm ("fxrstor (%0)":: "r" (image));
+	asm("fxrstor (%0)":: "r" (image));
 }
 
 static inline void fpu_init(void)
 {
-	asm ("finit");
+	asm("finit");
 }
 
 static inline u32 get_rdx_init_val(void)
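
A note on the kvm.h hunks above: the only change to these wrappers is dropping the space between `asm` and the opening parenthesis; the constraints and operands are untouched. For readers unfamiliar with the `"=g"` output constraint they all use, here is a minimal standalone sketch (userspace, gcc on x86-64; `my_read_fs` is a made-up name, not part of the patch):

#include <stdio.h>

static inline unsigned short my_read_fs(void)
{
	unsigned short seg;
	/* same pattern as read_fs() above: one output operand, no inputs */
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

int main(void)
{
	printf("fs selector: 0x%04x\n", my_read_fs());
	return 0;
}

The "g" constraint lets the compiler pick a register or memory slot for the store, which is why the same pattern works unchanged for `sldt` and `str` as well.
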
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 9ea9277014aa..a1983d2d5b8f 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -104,7 +104,7 @@ static struct dentry *debugfs_dir;
 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
 #ifdef CONFIG_X86_64
-// LDT or TSS descriptor in the GDT. 16 bytes.
+/* LDT or TSS descriptor in the GDT. 16 bytes. */
 struct segment_descriptor_64 {
 	struct segment_descriptor s;
 	u32 base_higher;
@@ -121,27 +121,27 @@ unsigned long segment_base(u16 selector)
 	struct descriptor_table gdt;
 	struct segment_descriptor *d;
 	unsigned long table_base;
-	typedef unsigned long ul;
 	unsigned long v;
 
 	if (selector == 0)
 		return 0;
 
-	asm ("sgdt %0" : "=m"(gdt));
+	asm("sgdt %0" : "=m"(gdt));
 	table_base = gdt.base;
 
 	if (selector & 4) {           /* from ldt */
 		u16 ldt_selector;
 
-		asm ("sldt %0" : "=g"(ldt_selector));
+		asm("sldt %0" : "=g"(ldt_selector));
 		table_base = segment_base(ldt_selector);
 	}
 	d = (struct segment_descriptor *)(table_base + (selector & ~7));
-	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
+	v = d->base_low | ((unsigned long)d->base_mid << 16) |
+		((unsigned long)d->base_high << 24);
 #ifdef CONFIG_X86_64
-	if (d->system == 0
-	    && (d->type == 2 || d->type == 9 || d->type == 11))
-		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
+	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+		v |= ((unsigned long) \
+			((struct segment_descriptor_64 *)d)->base_higher) << 32;
 #endif
 	return v;
 }
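
Worth spelling out what the reflowed `segment_base()` hunk computes: a descriptor's base address is scattered across three fields (four with the 64-bit high word), and each piece must be widened before shifting or the high bits are lost. A standalone sketch with made-up values (`seg_desc_fields` is illustrative, not the kernel struct):

#include <stdio.h>

struct seg_desc_fields {
	unsigned short base_low;	/* base bits 15:0  */
	unsigned char  base_mid;	/* base bits 23:16 */
	unsigned char  base_high;	/* base bits 31:24 */
};

int main(void)
{
	struct seg_desc_fields d = { 0x2000, 0x34, 0x12 };
	unsigned long v = d.base_low |
			  ((unsigned long)d.base_mid << 16) |
			  ((unsigned long)d.base_high << 24);
	printf("base = 0x%lx\n", v);	/* prints 0x12342000 */
	return 0;
}

Dropping the function-local `typedef unsigned long ul;` costs some column width, hence the line split, but spares readers a one-off alias.
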
@@ -721,7 +721,7 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 		if (!new.phys_mem)
 			goto out_unlock;
 
-		new.rmap = vmalloc(npages * sizeof(struct page*));
+		new.rmap = vmalloc(npages * sizeof(struct page *));
 
 		if (!new.rmap)
 			goto out_unlock;
@@ -904,17 +904,17 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_PIC_MASTER:
-		memcpy (&chip->chip.pic,
+		memcpy(&chip->chip.pic,
 			&pic_irqchip(kvm)->pics[0],
 			sizeof(struct kvm_pic_state));
 		break;
 	case KVM_IRQCHIP_PIC_SLAVE:
-		memcpy (&chip->chip.pic,
+		memcpy(&chip->chip.pic,
 			&pic_irqchip(kvm)->pics[1],
 			sizeof(struct kvm_pic_state));
 		break;
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy (&chip->chip.ioapic,
+		memcpy(&chip->chip.ioapic,
 			ioapic_irqchip(kvm),
 			sizeof(struct kvm_ioapic_state));
 		break;
@@ -932,17 +932,17 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_PIC_MASTER:
-		memcpy (&pic_irqchip(kvm)->pics[0],
+		memcpy(&pic_irqchip(kvm)->pics[0],
 			&chip->chip.pic,
 			sizeof(struct kvm_pic_state));
 		break;
 	case KVM_IRQCHIP_PIC_SLAVE:
-		memcpy (&pic_irqchip(kvm)->pics[1],
+		memcpy(&pic_irqchip(kvm)->pics[1],
 			&chip->chip.pic,
 			sizeof(struct kvm_pic_state));
 		break;
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy (ioapic_irqchip(kvm),
+		memcpy(ioapic_irqchip(kvm),
 			&chip->chip.ioapic,
 			sizeof(struct kvm_ioapic_state));
 		break;
@@ -1341,7 +1341,7 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 	return X86EMUL_CONTINUE;
 }
 
-int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
+int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
 
@@ -1934,7 +1934,7 @@ static void pio_string_write(struct kvm_io_device *pio_dev,
 	mutex_unlock(&vcpu->kvm->lock);
 }
 
-int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		  int size, unsigned port)
 {
 	struct kvm_io_device *pio_dev;
@@ -2089,7 +2089,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 
 	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
-		printk("vcpu %d received sipi with vector # %x\n",
+		pr_debug("vcpu %d received sipi with vector # %x\n",
 		       vcpu->vcpu_id, vcpu->sipi_vector);
 		kvm_lapic_reset(vcpu);
 		kvm_x86_ops->vcpu_reset(vcpu);
@@ -2363,7 +2363,8 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 		       sizeof sregs->interrupt_bitmap);
 		pending_vec = kvm_x86_ops->get_irq(vcpu);
 		if (pending_vec >= 0)
-			set_bit(pending_vec, (unsigned long *)sregs->interrupt_bitmap);
+			set_bit(pending_vec,
+				(unsigned long *)sregs->interrupt_bitmap);
 	} else
 		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
 		       sizeof sregs->interrupt_bitmap);
@@ -2436,7 +2437,8 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 		/* Only pending external irq is handled here */
 		if (pending_vec < max_bits) {
 			kvm_x86_ops->set_irq(vcpu, pending_vec);
-			printk("Set back pending irq %d\n", pending_vec);
+			pr_debug("Set back pending irq %d\n",
+				 pending_vec);
 		}
 	}
 
@@ -3155,8 +3157,7 @@ static long kvm_vm_ioctl(struct file *filp,
 				kvm->vpic = NULL;
 				goto out;
 			}
-		}
-		else
+		} else
 			goto out;
 		break;
 	case KVM_IRQ_LINE: {
@@ -3448,7 +3449,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 }
 
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
-                       void *v)
+		      void *v)
 {
 	if (val == SYS_RESTART) {
 		/*
@@ -3655,7 +3656,7 @@ int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
 
 	r = misc_register(&kvm_dev);
 	if (r) {
-		printk (KERN_ERR "kvm: misc device register failed\n");
+		printk(KERN_ERR "kvm: misc device register failed\n");
 		goto out_free;
 	}
 
@@ -3683,6 +3684,7 @@ out:
 	kvm_x86_ops = NULL;
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_init_x86);
 
 void kvm_exit_x86(void)
 {
@@ -3696,6 +3698,7 @@ void kvm_exit_x86(void)
 	kvm_x86_ops->hardware_unsetup();
 	kvm_x86_ops = NULL;
 }
+EXPORT_SYMBOL_GPL(kvm_exit_x86);
 
 static __init int kvm_init(void)
 {
@@ -3710,7 +3713,9 @@ static __init int kvm_init(void)
 
 	kvm_init_msr_list();
 
-	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
+	bad_page = alloc_page(GFP_KERNEL);
+
+	if (bad_page == NULL) {
 		r = -ENOMEM;
 		goto out;
 	}
@@ -3736,6 +3741,3 @@ static __exit void kvm_exit(void)
 
 module_init(kvm_init)
 module_exit(kvm_exit)
-
-EXPORT_SYMBOL_GPL(kvm_init_x86);
-EXPORT_SYMBOL_GPL(kvm_exit_x86);
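
Two conventions are being enforced at the tail of kvm_main.c: no assignment buried inside an `if` condition (the `bad_page` hunk), and `EXPORT_SYMBOL_GPL()` placed directly under the function it exports rather than collected at the bottom of the file. A skeletal kernel-side illustration of the latter (function name is a placeholder; builds only in a kernel tree):

#include <linux/module.h>

int my_exported_helper(void)		/* placeholder function */
{
	return 0;
}
EXPORT_SYMBOL_GPL(my_exported_helper);	/* export sits with its definition */

Keeping the export next to the definition means a reader deciding whether a function is module API never has to scroll to the end of the file.
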
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
index 554e73ad33f0..e15b42e48862 100644
--- a/drivers/kvm/lapic.c
+++ b/drivers/kvm/lapic.c
@@ -906,8 +906,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 	wait_queue_head_t *q = &apic->vcpu->wq;
 
 	atomic_inc(&apic->timer.pending);
-	if (waitqueue_active(q))
-	{
+	if (waitqueue_active(q)) {
 		apic->vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);
 	}
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 6cda1feb9a95..ece0aa4e4c9f 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -90,7 +90,8 @@ static int dbg = 1;
 
 #define PT32_DIR_PSE36_SIZE 4
 #define PT32_DIR_PSE36_SHIFT 13
-#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
+#define PT32_DIR_PSE36_MASK \
+	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
 
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
@@ -103,7 +104,7 @@ static int dbg = 1;
 #define PT64_LEVEL_BITS 9
 
 #define PT64_LEVEL_SHIFT(level) \
-		( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )
+		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
 
 #define PT64_LEVEL_MASK(level) \
 		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
@@ -115,7 +116,7 @@ static int dbg = 1;
 #define PT32_LEVEL_BITS 10
 
 #define PT32_LEVEL_SHIFT(level) \
-		( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )
+		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
 
 #define PT32_LEVEL_MASK(level) \
 		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
@@ -1489,7 +1490,8 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 			printk(KERN_ERR "xx audit error: (%s) levels %d"
 			       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
 			       audit_msg, vcpu->mmu.root_level,
-			       va, gpa, hpa, ent, is_shadow_present_pte(ent));
+			       va, gpa, hpa, ent,
+			       is_shadow_present_pte(ent));
 		else if (ent == shadow_notrap_nonpresent_pte
 			 && !is_error_hpa(hpa))
 			printk(KERN_ERR "audit: (%s) notrap shadow,"
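
The reflowed shift macros are easier to sanity-check once the arithmetic is written out: with 4 KiB pages (PAGE_SHIFT = 12), each 64-bit paging level indexes 9 further address bits. A userspace check using renamed copies of the macros (the MY_ prefixes are mine, to keep this self-contained):

#include <stdio.h>

#define MY_PAGE_SHIFT 12
#define MY_PT64_LEVEL_BITS 9
#define MY_PT64_LEVEL_SHIFT(level) \
	(MY_PAGE_SHIFT + ((level) - 1) * MY_PT64_LEVEL_BITS)

int main(void)
{
	int level;

	/* prints shifts 12, 21, 30, 39 for levels 1..4 */
	for (level = 1; level <= 4; level++)
		printf("level %d: shift %d\n", level,
		       MY_PT64_LEVEL_SHIFT(level));
	return 0;
}
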
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index fbe595f880af..447d2c31f0cb 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -163,7 +163,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 		walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
 		walker->table = kmap_atomic(walker->page, KM_USER0);
 		--walker->level;
-		walker->table_gfn[walker->level - 1 ] = table_gfn;
+		walker->table_gfn[walker->level - 1] = table_gfn;
 		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
 			 walker->level - 1, table_gfn);
 	}
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fb2e591d5397..7b21576b62bc 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -229,12 +229,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
 		return;
 	}
-	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) {
+	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
 		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
 		       __FUNCTION__,
 		       svm->vmcb->save.rip,
 		       svm->next_rip);
-	}
 
 	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
@@ -312,7 +311,7 @@ static void svm_hardware_enable(void *garbage)
 	svm_data->next_asid = svm_data->max_asid + 1;
 	svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
-	asm volatile ( "sgdt %0" : "=m"(gdt_descr) );
+	asm volatile ("sgdt %0" : "=m"(gdt_descr));
 	gdt = (struct desc_struct *)gdt_descr.address;
 	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
@@ -544,8 +543,7 @@ static void init_vmcb(struct vmcb *vmcb)
 	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
 
 	save->efer = MSR_EFER_SVME_MASK;
-
 	save->dr6 = 0xffff0ff0;
 	save->dr7 = 0x400;
 	save->rflags = 2;
 	save->rip = 0x0000fff0;
@@ -783,7 +781,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
 	}
 
-	if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
+	if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
 		vcpu->shadow_efer &= ~KVM_EFER_LMA;
 		svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 	}
@@ -1010,7 +1008,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
+	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
 	int size, down, in, string, rep;
 	unsigned port;
 
@@ -1316,7 +1314,7 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
-	svm_data->tss_desc->type = 9; //available 32/64-bit TSS
+	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
 	load_TR_desc();
 }
 
@@ -1434,9 +1432,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	 * Interrupts blocked.  Wait for unblock.
 	 */
 	if (!svm->vcpu.interrupt_window_open &&
-	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
+	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window))
 		control->intercept |= 1ULL << INTERCEPT_VINTR;
-	} else
+	else
 		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
 }
 
@@ -1581,23 +1579,23 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		:
 		: [svm]"a"(svm),
 		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
-		  [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])),
-		  [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])),
-		  [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])),
-		  [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])),
-		  [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])),
-		  [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP]))
+		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
+		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
+		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
+		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
+		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
+		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
 #ifdef CONFIG_X86_64
-		  ,[r8 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R8])),
-		  [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])),
-		  [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])),
-		  [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])),
-		  [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])),
-		  [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])),
-		  [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])),
-		  [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15]))
+		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
+		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
+		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
+		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
+		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
+		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
+		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
+		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
 #endif
-		: "cc", "memory" );
+		: "cc", "memory");
 
 	if ((svm->vmcb->save.dr7 & 0xff))
 		load_db_regs(svm->host_db_regs);
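
The long `offsetof(struct vcpu_svm, vcpu.regs[...])` operands this hunk re-spaces rely on `offsetof` being an integer constant expression, so it can be passed through an `"i"` constraint and folded straight into an addressing mode. A standalone demonstration of the same pattern (`struct demo_vcpu` is invented for the example; gcc on x86-64):

#include <stdio.h>
#include <stddef.h>

struct demo_vcpu {
	unsigned long pad;
	unsigned long regs[4];
};

int main(void)
{
	struct demo_vcpu v = { 0, { 1, 2, 3, 4 } };
	unsigned long val;

	/* load v.regs[2] via base register + compile-time constant offset;
	 * %c prints the constant without the immediate '$' prefix */
	asm("mov %c[off](%[base]), %[out]"
	    : [out]"=r"(val)
	    : [base]"r"(&v),
	      [off]"i"(offsetof(struct demo_vcpu, regs[2])));
	printf("regs[2] = %lu\n", val);	/* prints 3 */
	return 0;
}
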
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
index 3b1b0f35b6cb..5fa277c0187c 100644
--- a/drivers/kvm/svm.h
+++ b/drivers/kvm/svm.h
@@ -311,7 +311,7 @@ struct __attribute__ ((__packed__)) vmcb {
 
 #define SVM_EXIT_ERR		-1
 
-#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) // TS and MP
+#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */
 
 #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
 #define SVM_VMRUN  ".byte 0x0f, 0x01, 0xd8"
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 7b742901e783..6955580bb69e 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -62,7 +62,7 @@ struct vcpu_vmx {
 		int gs_ldt_reload_needed;
 		int fs_reload_needed;
 		int guest_efer_loaded;
-	}host_state;
+	} host_state;
 
 };
 
@@ -271,7 +271,7 @@ static void vmcs_writel(unsigned long field, unsigned long value)
 	u8 error;
 
 	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
-		       : "=q"(error) : "a"(value), "d"(field) : "cc" );
+		       : "=q"(error) : "a"(value), "d"(field) : "cc");
 	if (unlikely(error))
 		vmwrite_error(field, value);
 }
@@ -415,10 +415,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu)) {
+	if (is_long_mode(&vmx->vcpu))
 		save_msrs(vmx->host_msrs +
 			  vmx->msr_offset_kernel_gs_base, 1);
-	}
+
 #endif
 	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_transition_efer(vmx);
@@ -845,7 +845,7 @@ static int vmx_get_irq(struct kvm_vcpu *vcpu)
 		if (is_external_interrupt(idtv_info_field))
 			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
 		else
-			printk("pending exception: not handled yet\n");
+			printk(KERN_DEBUG "pending exception: not handled yet\n");
 	}
 	return -1;
 }
@@ -893,7 +893,7 @@ static void hardware_disable(void *garbage)
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
-				      u32 msr, u32* result)
+				      u32 msr, u32 *result)
 {
 	u32 vmx_msr_low, vmx_msr_high;
 	u32 ctl = ctl_min | ctl_opt;
@@ -1102,7 +1102,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
 }
 
-static gva_t rmode_tss_base(struct kvm* kvm)
+static gva_t rmode_tss_base(struct kvm *kvm)
 {
 	gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
 	return base_gfn << PAGE_SHIFT;
@@ -1385,7 +1385,7 @@ static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 	vmcs_writel(GUEST_GDTR_BASE, dt->base);
 }
 
-static int init_rmode_tss(struct kvm* kvm)
+static int init_rmode_tss(struct kvm *kvm)
 {
 	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
 	u16 data = 0;
@@ -1494,7 +1494,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(GUEST_RIP, 0);
 	vmcs_writel(GUEST_RSP, 0);
 
-	//todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
+	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
 	vmcs_writel(GUEST_DR7, 0x400);
 
 	vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -1561,7 +1561,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	get_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
 
-	asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
 	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
@@ -1613,7 +1613,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
 	vmx->vcpu.cr0 = 0x60000010;
-	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
 	vmx_set_efer(&vmx->vcpu, 0);
@@ -1644,7 +1644,7 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
 	u16 sp =  vmcs_readl(GUEST_RSP);
 	u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
 
-	if (sp > ss_limit || sp < 6 ) {
+	if (sp > ss_limit || sp < 6) {
 		vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
 			    __FUNCTION__,
 			    vmcs_readl(GUEST_RSP),
@@ -1664,15 +1664,18 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
 	ip =  vmcs_readl(GUEST_RIP);
 
 
-	if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
-	    emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
-	    emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
+	if (emulator_write_emulated(
+		    ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
+	    emulator_write_emulated(
+		    ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
+	    emulator_write_emulated(
+		    ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
 		vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
 		return;
 	}
 
 	vmcs_writel(GUEST_RFLAGS, flags &
-		    ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
+		    ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
 	vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
 	vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
 	vmcs_writel(GUEST_RIP, ent[0]);
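
For context on the calls being re-wrapped: inject_rmode_irq() mimics what a real-mode INT does in hardware, pushing FLAGS, CS and IP as 16-bit words below the current stack pointer and then vectoring through the IVT slot for the interrupt. A sketch of the address arithmetic only, with invented values:

#include <stdio.h>

int main(void)
{
	unsigned int ss_base = 0x20000, sp = 0x100, irq = 8;

	printf("FLAGS pushed at 0x%x\n", ss_base + sp - 2);
	printf("CS    pushed at 0x%x\n", ss_base + sp - 4);
	printf("IP    pushed at 0x%x\n", ss_base + sp - 6);
	/* new CS:IP come from the 4-byte IVT entry: ent[0] = IP, ent[1] = CS */
	printf("IVT entry read from 0x%x\n", irq * 4);
	return 0;
}
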
@@ -1777,10 +1780,9 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
-						!is_page_fault(intr_info)) {
+						!is_page_fault(intr_info))
 		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
 		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
-	}
 
 	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
 		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -1831,7 +1833,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		case EMULATE_DO_MMIO:
 			++vcpu->stat.mmio_exits;
 			return 0;
-		 case EMULATE_FAIL:
+		case EMULATE_FAIL:
 			kvm_report_emulation_failure(vcpu, "pagetable");
 			break;
 		default:
@@ -1849,7 +1851,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return 1;
 	}
 
-	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
+	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
+	    (INTR_TYPE_EXCEPTION | 1)) {
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		return 0;
 	}
@@ -2138,8 +2141,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
-				exit_reason != EXIT_REASON_EXCEPTION_NMI )
+	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+				exit_reason != EXIT_REASON_EXCEPTION_NMI)
 		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
 		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
 	if (exit_reason < kvm_vmx_max_exit_handlers
@@ -2238,7 +2241,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	 */
 	vmcs_writel(HOST_CR0, read_cr0());
 
-	asm (
+	asm(
 		/* Store host registers */
 #ifdef CONFIG_X86_64
 		"push %%rax; push %%rbx; push %%rdx;"
@@ -2342,8 +2345,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	      [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
 	      [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
 #ifdef CONFIG_X86_64
-	      [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
-	      [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
+	      [r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
+	      [r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
 	      [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
 	      [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
 	      [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
@@ -2352,11 +2355,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	      [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
 #endif
 	      [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
-	      : "cc", "memory" );
+	      : "cc", "memory");
 
-	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+	vcpu->interrupt_window_open =
+		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
-	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 	vmx->launched = 1;
 
 	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
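
The line split for vcpu->interrupt_window_open tests the low two bits of the VMCS guest-interruptibility field, which (per the Intel manuals) flag blocking by STI and blocking by MOV SS; the window is open only when both are clear. A trivial truth-table check of that predicate:

#include <stdio.h>

int main(void)
{
	unsigned int info;

	/* bit 0 = blocking by STI, bit 1 = blocking by MOV SS */
	for (info = 0; info <= 3; info++)
		printf("interruptibility %u -> window %s\n", info,
		       (info & 3) == 0 ? "open" : "closed");
	return 0;
}
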
diff --git a/drivers/kvm/vmx.h b/drivers/kvm/vmx.h
index fd4e14666088..270d477a2aa6 100644
--- a/drivers/kvm/vmx.h
+++ b/drivers/kvm/vmx.h
@@ -234,9 +234,9 @@ enum vmcs_field {
 /*
  * Exit Qualifications for MOV for Control Register Access
  */
-#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
+#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control reg.*/
 #define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
-#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
+#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose reg. */
 #define LMSW_SOURCE_DATA_SHIFT 16
 #define LMSW_SOURCE_DATA  (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
 #define REG_EAX                         (0 << 8)
@@ -259,11 +259,11 @@ enum vmcs_field {
 /*
  * Exit Qualifications for MOV for Debug Register Access
  */
-#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
+#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug reg. */
 #define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
 #define TYPE_MOV_TO_DR                  (0 << 4)
 #define TYPE_MOV_FROM_DR                (1 << 4)
-#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
+#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose reg. */
 
 
 /* segment AR */
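
Shortening these comments to fit 80 columns doesn't change how the masks are consumed: an exit qualification is decoded by masking and shifting the bit ranges the comments name. A standalone decoding sketch (the sample qualification value is made up):

#include <stdio.h>

#define CONTROL_REG_ACCESS_NUM	0x7	/* bits 2:0 */
#define CONTROL_REG_ACCESS_TYPE	0x30	/* bits 5:4 */
#define CONTROL_REG_ACCESS_REG	0xf00	/* bits 10:8 */

int main(void)
{
	unsigned long qual = 0x304;	/* invented: cr4, mov-to-cr, reg 3 */

	printf("cr   = %lu\n", qual & CONTROL_REG_ACCESS_NUM);
	printf("type = %lu\n", (qual & CONTROL_REG_ACCESS_TYPE) >> 4);
	printf("reg  = %lu\n", (qual & CONTROL_REG_ACCESS_REG) >> 8);
	return 0;
}
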
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index e294d8409571..75fd23bade9c 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -23,7 +23,7 @@
 #include <stdio.h>
 #include <stdint.h>
 #include <public/xen.h>
-#define DPRINTF(_f, _a ...) printf( _f , ## _a )
+#define DPRINTF(_f, _a ...) printf(_f , ## _a)
 #else
 #include "kvm.h"
 #define DPRINTF(x...) do {} while (0)
@@ -285,21 +285,21 @@ static u16 twobyte_table[256] = {
 	switch ((_dst).bytes) {					\
 	case 2:							\
 		__asm__ __volatile__ (				\
-			_PRE_EFLAGS("0","4","2")		\
+			_PRE_EFLAGS("0", "4", "2")		\
 			_op"w %"_wx"3,%1; "			\
-			_POST_EFLAGS("0","4","2")		\
+			_POST_EFLAGS("0", "4", "2")		\
 			: "=m" (_eflags), "=m" ((_dst).val),	\
 			  "=&r" (_tmp)				\
-			: _wy ((_src).val), "i" (EFLAGS_MASK) ); \
+			: _wy ((_src).val), "i" (EFLAGS_MASK));	\
 		break;						\
 	case 4:							\
 		__asm__ __volatile__ (				\
-			_PRE_EFLAGS("0","4","2")		\
+			_PRE_EFLAGS("0", "4", "2")		\
 			_op"l %"_lx"3,%1; "			\
-			_POST_EFLAGS("0","4","2")		\
+			_POST_EFLAGS("0", "4", "2")		\
 			: "=m" (_eflags), "=m" ((_dst).val),	\
 			  "=&r" (_tmp)				\
-			: _ly ((_src).val), "i" (EFLAGS_MASK) ); \
+			: _ly ((_src).val), "i" (EFLAGS_MASK));	\
 		break;						\
 	case 8:							\
 		__emulate_2op_8byte(_op, _src, _dst,		\
@@ -311,16 +311,15 @@ static u16 twobyte_table[256] = {
 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
 	do {							\
 		unsigned long _tmp;				\
-		switch ( (_dst).bytes )				\
-		{						\
+		switch ((_dst).bytes) {				\
 		case 1:						\
 			__asm__ __volatile__ (			\
-				_PRE_EFLAGS("0","4","2")	\
+				_PRE_EFLAGS("0", "4", "2")	\
 				_op"b %"_bx"3,%1; "		\
-				_POST_EFLAGS("0","4","2")	\
+				_POST_EFLAGS("0", "4", "2")	\
 				: "=m" (_eflags), "=m" ((_dst).val), \
 				  "=&r" (_tmp)			\
-				: _by ((_src).val), "i" (EFLAGS_MASK) ); \
+				: _by ((_src).val), "i" (EFLAGS_MASK)); \
 			break;					\
 		default:					\
 			__emulate_2op_nobyte(_op, _src, _dst, _eflags, \
@@ -349,34 +348,33 @@ static u16 twobyte_table[256] = {
 	do {							\
 		unsigned long _tmp;				\
 								\
-		switch ( (_dst).bytes )				\
-		{						\
+		switch ((_dst).bytes) {				\
 		case 1:						\
 			__asm__ __volatile__ (			\
-				_PRE_EFLAGS("0","3","2")	\
+				_PRE_EFLAGS("0", "3", "2")	\
 				_op"b %1; "			\
-				_POST_EFLAGS("0","3","2")	\
+				_POST_EFLAGS("0", "3", "2")	\
 				: "=m" (_eflags), "=m" ((_dst).val), \
 				  "=&r" (_tmp)			\
-				: "i" (EFLAGS_MASK) );		\
+				: "i" (EFLAGS_MASK));		\
 			break;					\
 		case 2:						\
 			__asm__ __volatile__ (			\
-				_PRE_EFLAGS("0","3","2")	\
+				_PRE_EFLAGS("0", "3", "2")	\
 				_op"w %1; "			\
-				_POST_EFLAGS("0","3","2")	\
+				_POST_EFLAGS("0", "3", "2")	\
 				: "=m" (_eflags), "=m" ((_dst).val), \
 				  "=&r" (_tmp)			\
-				: "i" (EFLAGS_MASK) );		\
+				: "i" (EFLAGS_MASK));		\
 			break;					\
 		case 4:						\
 			__asm__ __volatile__ (			\
-				_PRE_EFLAGS("0","3","2")	\
+				_PRE_EFLAGS("0", "3", "2")	\
 				_op"l %1; "			\
-				_POST_EFLAGS("0","3","2")	\
+				_POST_EFLAGS("0", "3", "2")	\
 				: "=m" (_eflags), "=m" ((_dst).val), \
 				  "=&r" (_tmp)			\
-				: "i" (EFLAGS_MASK) );		\
+				: "i" (EFLAGS_MASK));		\
 			break;					\
 		case 8:						\
 			__emulate_1op_8byte(_op, _dst, _eflags); \
@@ -389,21 +387,21 @@ static u16 twobyte_table[256] = {
 #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \
 	do {							\
 		__asm__ __volatile__ (				\
-			_PRE_EFLAGS("0","4","2")		\
+			_PRE_EFLAGS("0", "4", "2")		\
 			_op"q %"_qx"3,%1; "			\
-			_POST_EFLAGS("0","4","2")		\
+			_POST_EFLAGS("0", "4", "2")		\
 			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
-			: _qy ((_src).val), "i" (EFLAGS_MASK) ); \
+			: _qy ((_src).val), "i" (EFLAGS_MASK));	\
 	} while (0)
 
 #define __emulate_1op_8byte(_op, _dst, _eflags) \
 	do {							\
 		__asm__ __volatile__ (				\
-			_PRE_EFLAGS("0","3","2")		\
+			_PRE_EFLAGS("0", "3", "2")		\
 			_op"q %1; "				\
-			_POST_EFLAGS("0","3","2")		\
+			_POST_EFLAGS("0", "3", "2")		\
 			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
-			: "i" (EFLAGS_MASK) );			\
+			: "i" (EFLAGS_MASK));			\
 	} while (0)
 
 #elif defined(__i386__)
@@ -415,8 +413,8 @@ static u16 twobyte_table[256] = {
 #define insn_fetch(_type, _size, _eip) \
 ({	unsigned long _x; \
 	rc = ops->read_std((unsigned long)(_eip) + ctxt->cs_base, &_x, \
-				(_size), ctxt->vcpu); \
-	if ( rc != 0 ) \
+			   (_size), ctxt->vcpu); \
+	if (rc != 0) \
 		goto done; \
 	(_eip) += (_size); \
 	(_type)_x; \
@@ -780,7 +778,7 @@ done_prefixes:
 		}
 		if (c->ad_bytes != 8)
 			c->modrm_ea = (u32)c->modrm_ea;
-	modrm_done:
+modrm_done:
 		;
 	}
 
@@ -828,10 +826,9 @@ done_prefixes:
 		c->src.bytes = (c->d & ByteOp) ? 1 :
 							   c->op_bytes;
 		/* Don't fetch the address for invlpg: it could be unmapped. */
-		if (c->twobyte && c->b == 0x01
-		    && c->modrm_reg == 7)
+		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
 			break;
 	srcmem_common:
 		/*
 		 * For instructions with a ModR/M byte, switch to register
 		 * access if Mod = 3.
@@ -1175,10 +1172,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	if (c->src.type == OP_MEM) {
 		c->src.ptr = (unsigned long *)cr2;
 		c->src.val = 0;
-		if ((rc = ops->read_emulated((unsigned long)c->src.ptr,
-					     &c->src.val,
-					     c->src.bytes,
-					     ctxt->vcpu)) != 0)
+		rc = ops->read_emulated((unsigned long)c->src.ptr,
+					&c->src.val,
+					c->src.bytes,
+					ctxt->vcpu);
+		if (rc != 0)
 			goto done;
 		c->src.orig_val = c->src.val;
 	}
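
All the _PRE_EFLAGS/_POST_EFLAGS respacing above sits inside one technique: execute the instruction being emulated on the host ALU, then capture the host EFLAGS so the guest's arithmetic flags can be updated. A stripped-down sketch of the flag capture, not the patch's code (userspace, gcc on x86-64; the real macros additionally save, mask and merge flags via EFLAGS_MASK):

#include <stdio.h>

int main(void)
{
	unsigned long flags, dst = ~0UL, src = 1;

	/* do the 64-bit ADD on the host, then read EFLAGS off the stack */
	asm("addq %2, %0\n\t"
	    "pushfq\n\t"
	    "popq %1"
	    : "+r"(dst), "=r"(flags)
	    : "r"(src)
	    : "cc");
	printf("dst=%lu CF=%lu\n", dst, flags & 1);	/* dst=0 CF=1 */
	return 0;
}
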