commit    f076ab8d048f152b968bb1c6313fed88abb037fe
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-21 00:13:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-21 00:13:26 -0400
tree      fe9ff7bc40d04c5a12eb41a90cc0e1dbf8ae4e45
parent    db6d8c7a4027b48d797b369a53f8470aaeed7063
parent    597a5f551ec4cd0aa0966e4fff4684ecc8c31c0d
Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (70 commits)
KVM: Adjust smp_call_function_mask() callers to new requirements
KVM: MMU: Fix potential race setting upper shadow ptes on nonpae hosts
KVM: x86 emulator: emulate clflush
KVM: MMU: improve invalid shadow root page handling
KVM: MMU: nuke shadowed pgtable pages and ptes on memslot destruction
KVM: Prefix some x86 low level function with kvm_, to avoid namespace issues
KVM: check injected pic irq within valid pic irqs
KVM: x86 emulator: Fix HLT instruction
KVM: Apply the kernel sigmask to vcpus blocked due to being uninitialized
KVM: VMX: Add ept_sync_context in flush_tlb
KVM: mmu_shrink: kvm_mmu_zap_page requires slots_lock to be held
x86: KVM guest: make kvm_smp_prepare_boot_cpu() static
KVM: SVM: fix suspend/resume support
KVM: s390: rename private structures
KVM: s390: Set guest storage limit and offset to sane values
KVM: Fix memory leak on guest exit
KVM: s390: dont allocate dirty bitmap
KVM: move slots_lock acquision down to vapic_exit
KVM: VMX: Fake emulate Intel perfctr MSRs
KVM: VMX: Fix a wrong usage of vmcs_config
...
37 files changed, 1215 insertions(+), 520 deletions(-)
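A recurring change in the x86 and ia64 hunks below is the widened I/O-device lookup: the per-device in_range() callbacks and kvm_io_bus_find_dev() now receive the access length and direction, which the new coalesced MMIO code needs in order to decide whether it can buffer a write. A minimal sketch of that contract, with simplified types; the loop body is an assumption for illustration, not the exact virt/kvm source:

	/* Sketch: ask every registered device whether it claims this
	 * (addr, len, is_write) triple; the first match wins.  Mirrors the
	 * pit_in_range/picdev_in_range/apic_mmio_range changes below. */
	struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
						  gpa_t addr, int len, int is_write)
	{
		int i;

		for (i = 0; i < bus->dev_count; i++) {
			struct kvm_io_device *pos = bus->devs[i];

			if (pos->in_range(pos, addr, len, is_write))
				return pos;
		}
		return NULL;
	}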
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 112791dd2542..bf22fb9e6dcf 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -43,7 +43,8 @@ $(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/

-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
+		coalesced_mmio.o)

 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 68c978be9a51..2672f4d278ac 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -187,6 +187,9 @@ int kvm_dev_ioctl_check_extension(long ext)

 		r = 1;
 		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
 	default:
 		r = 0;
 	}
@@ -195,11 +198,11 @@ int kvm_dev_ioctl_check_extension(long ext)
 }

 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
-					gpa_t addr)
+					gpa_t addr, int len, int is_write)
 {
 	struct kvm_io_device *dev;

-	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);

 	return dev;
 }
@@ -231,7 +234,7 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	kvm_run->exit_reason = KVM_EXIT_MMIO;
 	return 0;
 mmio:
-	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
+	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
 	if (mmio_dev) {
 		if (!p->dir)
 			kvm_iodevice_write(mmio_dev, p->addr, p->size,
@@ -1035,14 +1038,6 @@ static void kvm_free_vmm_area(void)
 	}
 }

-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it. Leave it as blank for IA64.
- */
-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 }
@@ -1460,6 +1455,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }

+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}

 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg)
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index d0d358d367ec..04e3449e1f42 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -4,7 +4,7 @@

 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm

-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

 kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 777e0f34e0ea..53826a5f6c06 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -145,6 +145,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_USER_MEMORY:
 		r = 1;
 		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
 	default:
 		r = 0;
 		break;
@@ -167,6 +170,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }

+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
@@ -240,10 +247,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 }

-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                     struct kvm_debug_guest *dbg)
 {
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 84a7fed4cd4e..11230b0db957 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -31,7 +31,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 }

 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
-				      struct interrupt_info *inti)
+				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -91,7 +91,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 }

 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
-				      struct interrupt_info *inti)
+				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -111,7 +111,7 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 }

 static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
-				   struct interrupt_info *inti)
+				   struct kvm_s390_interrupt_info *inti)
 {
 	const unsigned short table[] = { 2, 4, 4, 6 };
 	int rc, exception = 0;
@@ -290,9 +290,9 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)

 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info *inti;
 	int rc = 0;

 	if (atomic_read(&li->active)) {
@@ -408,9 +408,9 @@ void kvm_s390_idle_wakeup(unsigned long data)

 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-	struct interrupt_info *n, *inti = NULL;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
 	int deliver;

 	__reset_intercept_indicators(vcpu);
@@ -465,8 +465,8 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)

 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_interrupt_info *inti;

 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
@@ -487,9 +487,9 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 int kvm_s390_inject_vm(struct kvm *kvm,
 		       struct kvm_s390_interrupt *s390int)
 {
-	struct local_interrupt *li;
-	struct float_interrupt *fi;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_interrupt_info *inti;
 	int sigcpu;

 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -544,8 +544,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 			 struct kvm_s390_interrupt *s390int)
 {
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;

 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6558b09ff579..1782cbcd2829 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -79,10 +79,6 @@ void kvm_arch_hardware_disable(void *garbage)
 {
 }

-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
 	return 0;
@@ -198,6 +194,7 @@ out_nokvm:
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	debug_unregister(kvm->arch.dbf);
+	kvm_free_physmem(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	kfree(kvm);
 	module_put(THIS_MODULE);
@@ -250,11 +247,16 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gbea = 1;
 }

+/* The current code can have up to 256 pages for virtio */
+#define VIRTIODESCSPACE (256ul * 4096ul)
+
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
-	vcpu->arch.sie_block->gmsor = 0x000000000000;
+	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
+				      vcpu->kvm->arch.guest_origin +
+				      VIRTIODESCSPACE - 1ul;
+	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
 	vcpu->arch.sie_block->ecb = 2;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
 	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
@@ -273,7 +275,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	if (!vcpu)
 		goto out_nomem;

-	vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
+	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
+		get_zeroed_page(GFP_KERNEL);

 	if (!vcpu->arch.sie_block)
 		goto out_free_cpu;
@@ -672,6 +675,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }

+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn;
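The new guest storage bounds in kvm_arch_vcpu_setup() above are plain arithmetic on the memslot, with 256 pages reserved above guest memory for virtio descriptors. A worked example with hypothetical values (a 128 MiB guest at origin 0), not taken from the commit:

	#define VIRTIODESCSPACE (256ul * 4096ul)	/* 1 MiB */

	unsigned long guest_origin  = 0;		/* arch.guest_origin, assumed */
	unsigned long guest_memsize = 128ul << 20;	/* arch.guest_memsize, assumed */

	/* gmsor: first valid guest address; gmslm: last valid guest address */
	unsigned long gmsor = guest_origin;
	unsigned long gmslm = guest_memsize + guest_origin + VIRTIODESCSPACE - 1ul;
	/* here: gmsor = 0, gmslm = 0x80FFFFF (128 MiB + 1 MiB - 1) */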
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c02286c6a931..2e2d2ffb6a07 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -199,7 +199,7 @@ out:

 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int cpus = 0;
 	int n;

diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 0a236acfb5f6..5a556114eaa5 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -45,7 +45,7 @@

 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int rc;

 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -71,9 +71,9 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)

 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;

 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -108,9 +108,9 @@ unlock:

 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;

 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -169,9 +169,9 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 			     u64 *reg)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;
 	u8 tmp;

diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 87edf1ceb1df..d02def06ca91 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -113,7 +113,7 @@ static void kvm_setup_secondary_clock(void)
 #endif

 #ifdef CONFIG_SMP
-void __init kvm_smp_prepare_boot_cpu(void)
+static void __init kvm_smp_prepare_boot_cpu(void)
 {
 	WARN_ON(kvm_register_clock("primary cpu clock"));
 	native_smp_prepare_boot_cpu();
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index c97d35c218db..d0e940bb6f40 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -2,7 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module
 #

-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
+		coalesced_mmio.o)
 ifeq ($(CONFIG_KVM_TRACE),y)
 common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
 endif
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 3829aa7b663f..c0f7872a9124 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -91,7 +91,7 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
 		c->gate = val;
 }

-int pit_get_gate(struct kvm *kvm, int channel)
+static int pit_get_gate(struct kvm *kvm, int channel)
 {
 	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

@@ -193,19 +193,16 @@ static void pit_latch_status(struct kvm *kvm, int channel)
 	}
 }

-int __pit_timer_fn(struct kvm_kpit_state *ps)
+static int __pit_timer_fn(struct kvm_kpit_state *ps)
 {
 	struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
 	struct kvm_kpit_timer *pt = &ps->pit_timer;

-	atomic_inc(&pt->pending);
-	smp_mb__after_atomic_inc();
-	if (vcpu0) {
-		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-		if (waitqueue_active(&vcpu0->wq)) {
-			vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			wake_up_interruptible(&vcpu0->wq);
-		}
+	if (!atomic_inc_and_test(&pt->pending))
+		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
+	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
+		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+		wake_up_interruptible(&vcpu0->wq);
 	}

 	pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
@@ -308,6 +305,7 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 		create_pit_timer(&ps->pit_timer, val, 0);
 		break;
 	case 2:
+	case 3:
 		create_pit_timer(&ps->pit_timer, val, 1);
 		break;
 	default:
@@ -459,7 +457,8 @@ static void pit_ioport_read(struct kvm_io_device *this,
 	mutex_unlock(&pit_state->lock);
 }

-static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
+static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
+			int len, int is_write)
 {
 	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
 		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
@@ -500,7 +499,8 @@ static void speaker_ioport_read(struct kvm_io_device *this,
 	mutex_unlock(&pit_state->lock);
 }

-static int speaker_in_range(struct kvm_io_device *this, gpa_t addr)
+static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
+			    int len, int is_write)
 {
 	return (addr == KVM_SPEAKER_BASE_ADDRESS);
 }
@@ -575,7 +575,7 @@ void kvm_free_pit(struct kvm *kvm)
 	}
 }

-void __inject_pit_timer_intr(struct kvm *kvm)
+static void __inject_pit_timer_intr(struct kvm *kvm)
 {
 	mutex_lock(&kvm->lock);
 	kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index ab29cf2def47..c31164e8aa46 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -130,8 +130,10 @@ void kvm_pic_set_irq(void *opaque, int irq, int level)
 {
 	struct kvm_pic *s = opaque;

-	pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
-	pic_update_irq(s);
+	if (irq >= 0 && irq < PIC_NUM_PINS) {
+		pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+		pic_update_irq(s);
+	}
 }

 /*
@@ -346,7 +348,8 @@ static u32 elcr_ioport_read(void *opaque, u32 addr1)
 	return s->elcr;
 }

-static int picdev_in_range(struct kvm_io_device *this, gpa_t addr)
+static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
+			   int len, int is_write)
 {
 	switch (addr) {
 	case 0x20:
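For context on the new bounds check in kvm_pic_set_irq() above: the two cascaded 8259 PICs expose 16 pins, and the pin decode only makes sense in that range. An illustrative sketch (not kernel source) of why out-of-range values are dangerous, using the PIC_NUM_PINS constant added in the irq.h hunk below:

	/* irq >> 3 selects pics[0] (master) or pics[1] (slave); irq & 7 is
	 * the pin within that PIC.  Any irq outside 0..15 would index past
	 * s->pics[2] - hence the range check. */
	if (irq >= 0 && irq < PIC_NUM_PINS) {
		int pic = irq >> 3;	/* 0 = master, 1 = slave */
		int pin = irq & 7;	/* pin within the selected PIC */
		/* ... deliver to pics[pic], pin ... */
	}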
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 2a15be2275c0..7ca47cbb48bb 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -30,6 +30,8 @@
 #include "ioapic.h"
 #include "lapic.h"

+#define PIC_NUM_PINS 16
+
 struct kvm;
 struct kvm_vcpu;

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ebc03f5ae162..73f43de69f67 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -356,8 +356,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_SMI:
 		printk(KERN_DEBUG "Ignoring guest SMI\n");
 		break;
+
 	case APIC_DM_NMI:
-		printk(KERN_DEBUG "Ignoring guest NMI\n");
+		kvm_inject_nmi(vcpu);
 		break;

 	case APIC_DM_INIT:
@@ -572,6 +573,8 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 {
 	u32 val = 0;

+	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
+
 	if (offset >= LAPIC_MMIO_LENGTH)
 		return 0;

@@ -695,6 +698,8 @@ static void apic_mmio_write(struct kvm_io_device *this,

 	offset &= 0xff0;

+	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
+
 	switch (offset) {
 	case APIC_ID:	/* Local APIC ID */
 		apic_set_reg(apic, APIC_ID, val);
@@ -780,7 +785,8 @@ static void apic_mmio_write(struct kvm_io_device *this,

 }

-static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
+static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr,
+			   int len, int size)
 {
 	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
 	int ret = 0;
@@ -939,8 +945,8 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 	int result = 0;
 	wait_queue_head_t *q = &apic->vcpu->wq;

-	atomic_inc(&apic->timer.pending);
-	set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
+	if(!atomic_inc_and_test(&apic->timer.pending))
+		set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
 	if (waitqueue_active(q)) {
 		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 676c396c9cee..81858881287e 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -31,6 +31,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
+u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);

 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7e7c3969f7a2..b0e4ddca6c18 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -66,7 +66,8 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 #endif

 #if defined(MMU_DEBUG) || defined(AUDIT)
-static int dbg = 1;
+static int dbg = 0;
+module_param(dbg, bool, 0644);
 #endif

 #ifndef MMU_DEBUG
@@ -776,6 +777,15 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 	BUG();
 }

+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *sp)
+{
+	int i;
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+		sp->spt[i] = shadow_trap_nonpresent_pte;
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
@@ -841,7 +851,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
-	vcpu->arch.mmu.prefetch_page(vcpu, sp);
+	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
+		vcpu->arch.mmu.prefetch_page(vcpu, sp);
+	else
+		nonpaging_prefetch_page(vcpu, sp);
 	return sp;
 }

@@ -917,14 +930,17 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	}
 	kvm_mmu_page_unlink_children(kvm, sp);
 	if (!sp->root_count) {
-		if (!sp->role.metaphysical)
+		if (!sp->role.metaphysical && !sp->role.invalid)
 			unaccount_shadowed(kvm, sp->gfn);
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else {
+		int invalid = sp->role.invalid;
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 		sp->role.invalid = 1;
 		kvm_reload_remote_mmus(kvm);
+		if (!sp->role.metaphysical && !invalid)
+			unaccount_shadowed(kvm, sp->gfn);
 	}
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
@@ -1103,7 +1119,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		mark_page_dirty(vcpu->kvm, gfn);

 	pgprintk("%s: setting spte %llx\n", __func__, spte);
-	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
 	set_shadow_pte(shadow_pte, spte);
@@ -1122,8 +1138,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		else
 			kvm_release_pfn_clean(pfn);
 	}
-	if (!ptwrite || !*ptwrite)
+	if (speculative) {
 		vcpu->arch.last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_gfn = gfn;
+	}
 }

 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1171,9 +1189,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			return -ENOMEM;
 		}

-		table[index] = __pa(new_table->spt)
-			| PT_PRESENT_MASK | PT_WRITABLE_MASK
-			| shadow_user_mask | shadow_x_mask;
+		set_shadow_pte(&table[index],
+			       __pa(new_table->spt)
+			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+			       | shadow_user_mask | shadow_x_mask);
 	}
 	table_addr = table[index] & PT64_BASE_ADDR_MASK;
 }
@@ -1211,15 +1230,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 }


-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
-				    struct kvm_mmu_page *sp)
-{
-	int i;
-
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-		sp->spt[i] = shadow_trap_nonpresent_pte;
-}
-
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
@@ -1671,6 +1681,18 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	vcpu->arch.update_pte.pfn = pfn;
 }

+static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u64 *spte = vcpu->arch.last_pte_updated;
+
+	if (spte
+	    && vcpu->arch.last_pte_gfn == gfn
+	    && shadow_accessed_mask
+	    && !(*spte & shadow_accessed_mask)
+	    && is_shadow_present_pte(*spte))
+		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
@@ -1694,6 +1716,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_access_page(vcpu, gfn);
 	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
@@ -1948,7 +1971,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	kvm_flush_remote_tlbs(kvm);
 }

-void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
 {
 	struct kvm_mmu_page *page;

@@ -1968,6 +1991,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int npages;

+		if (!down_read_trylock(&kvm->slots_lock))
+			continue;
 		spin_lock(&kvm->mmu_lock);
 		npages = kvm->arch.n_alloc_mmu_pages -
 			 kvm->arch.n_free_mmu_pages;
@@ -1980,6 +2005,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 			nr_to_scan--;

 		spin_unlock(&kvm->mmu_lock);
+		up_read(&kvm->slots_lock);
 	}
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1730757bbc7a..258e5d56298e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -15,7 +15,8 @@
 #define PT_USER_MASK (1ULL << 2)
 #define PT_PWT_MASK (1ULL << 3)
 #define PT_PCD_MASK (1ULL << 4)
-#define PT_ACCESSED_MASK (1ULL << 5)
+#define PT_ACCESSED_SHIFT 5
+#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
 #define PT_DIRTY_MASK (1ULL << 6)
 #define PT_PAGE_SIZE_MASK (1ULL << 7)
 #define PT_PAT_MASK (1ULL << 7)
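The reason PT_ACCESSED_SHIFT is split out of the mask becomes clear in the kvm_mmu_access_page() hunk above: set_bit() takes a bit number, not a mask. A minimal illustration of the equivalence (sketch, not kernel source):

	/* Both lines set the same bit; set_bit() does it atomically on the
	 * live shadow pte, which matters because hardware may update the
	 * pte concurrently. */
	static void mark_accessed(u64 *spte)
	{
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);  /* atomic */
		/* non-atomic equivalent: *spte |= PT_ACCESSED_MASK; */
	}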
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 934c7b619396..4d918220baeb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -460,8 +460,9 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp)
 {
-	int i, offset = 0, r = 0;
-	pt_element_t pt;
+	int i, j, offset, r;
+	pt_element_t pt[256 / sizeof(pt_element_t)];
+	gpa_t pte_gpa;

 	if (sp->role.metaphysical
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
@@ -469,19 +470,20 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		return;
 	}

-	if (PTTYPE == 32)
-		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
-		pte_gpa += (i+offset) * sizeof(pt_element_t);
-
-		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
-					  sizeof(pt_element_t));
-		if (r || is_present_pte(pt))
-			sp->spt[i] = shadow_trap_nonpresent_pte;
-		else
-			sp->spt[i] = shadow_notrap_nonpresent_pte;
+	pte_gpa = gfn_to_gpa(sp->gfn);
+	if (PTTYPE == 32) {
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+		pte_gpa += offset * sizeof(pt_element_t);
+	}
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
+		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
+		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
+		for (j = 0; j < ARRAY_SIZE(pt); ++j)
+			if (r || is_present_pte(pt[j]))
+				sp->spt[i+j] = shadow_trap_nonpresent_pte;
+			else
+				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
 	}
 }

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6b0d5fa5bab3..b756e876dce3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -27,6 +27,8 @@

 #include <asm/desc.h>

+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");

@@ -129,17 +131,17 @@ static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)

 static inline void clgi(void)
 {
-	asm volatile (SVM_CLGI);
+	asm volatile (__ex(SVM_CLGI));
 }

 static inline void stgi(void)
 {
-	asm volatile (SVM_STGI);
+	asm volatile (__ex(SVM_STGI));
 }

 static inline void invlpga(unsigned long addr, u32 asid)
 {
-	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
+	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
 }

 static inline unsigned long kvm_read_cr2(void)
@@ -270,19 +272,11 @@ static int has_svm(void)

 static void svm_hardware_disable(void *garbage)
 {
-	struct svm_cpu_data *svm_data
-		= per_cpu(svm_data, raw_smp_processor_id());
-
-	if (svm_data) {
-		uint64_t efer;
+	uint64_t efer;

-		wrmsrl(MSR_VM_HSAVE_PA, 0);
-		rdmsrl(MSR_EFER, efer);
-		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
-		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
-		__free_page(svm_data->save_area);
-		kfree(svm_data);
-	}
+	wrmsrl(MSR_VM_HSAVE_PA, 0);
+	rdmsrl(MSR_EFER, efer);
+	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
 }

 static void svm_hardware_enable(void *garbage)
@@ -321,6 +315,19 @@ static void svm_hardware_enable(void *garbage)
 		       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
 }

+static void svm_cpu_uninit(int cpu)
+{
+	struct svm_cpu_data *svm_data
+		= per_cpu(svm_data, raw_smp_processor_id());
+
+	if (!svm_data)
+		return;
+
+	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+	__free_page(svm_data->save_area);
+	kfree(svm_data);
+}
+
 static int svm_cpu_init(int cpu)
 {
 	struct svm_cpu_data *svm_data;
@@ -458,6 +465,11 @@ err:

 static __exit void svm_hardware_unsetup(void)
 {
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		svm_cpu_uninit(cpu);
+
 	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
 	iopm_base = 0;
 }
@@ -707,10 +719,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	rdtscll(vcpu->arch.host_tsc);
 }

-static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
-{
-}
-
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -949,7 +957,9 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)

 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	return to_svm(vcpu)->db_regs[dr];
+	unsigned long val = to_svm(vcpu)->db_regs[dr];
+	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
+	return val;
 }

 static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
@@ -1004,6 +1014,16 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)

 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
+
+	if (!npt_enabled)
+		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
+			    (u32)fault_address, (u32)(fault_address >> 32),
+			    handler);
+	else
+		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
+			    (u32)fault_address, (u32)(fault_address >> 32),
+			    handler);
+
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }

@@ -1081,6 +1101,19 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
 }

+static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	KVMTRACE_0D(NMI, &svm->vcpu, handler);
+	return 1;
+}
+
+static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	++svm->vcpu.stat.irq_exits;
+	KVMTRACE_0D(INTR, &svm->vcpu, handler);
+	return 1;
+}
+
 static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	return 1;
@@ -1219,6 +1252,9 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else {
+		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
+			    (u32)(data >> 32), handler);
+
 		svm->vmcb->save.rax = data & 0xffffffff;
 		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
@@ -1284,16 +1320,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	case MSR_K7_EVNTSEL1:
 	case MSR_K7_EVNTSEL2:
 	case MSR_K7_EVNTSEL3:
+	case MSR_K7_PERFCTR0:
+	case MSR_K7_PERFCTR1:
+	case MSR_K7_PERFCTR2:
+	case MSR_K7_PERFCTR3:
 		/*
-		 * only support writing 0 to the performance counters for now
-		 * to make Windows happy. Should be replaced by a real
-		 * performance counter emulation later.
+		 * Just discard all writes to the performance counters; this
+		 * should keep both older linux and windows 64-bit guests
+		 * happy
 		 */
-		if (data != 0)
-			goto unhandled;
+		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
+
 		break;
 	default:
-	unhandled:
 		return kvm_set_msr_common(vcpu, ecx, data);
 	}
 	return 0;
@@ -1304,6 +1343,10 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+
+	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
+		    handler);
+
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
@@ -1323,6 +1366,8 @@ static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int interrupt_window_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
+	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
+
 	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -1364,8 +1409,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
 	[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
 	[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
-	[SVM_EXIT_INTR] = nop_on_interception,
-	[SVM_EXIT_NMI] = nop_on_interception,
+	[SVM_EXIT_INTR] = intr_interception,
+	[SVM_EXIT_NMI] = nmi_interception,
 	[SVM_EXIT_SMI] = nop_on_interception,
 	[SVM_EXIT_INIT] = nop_on_interception,
 	[SVM_EXIT_VINTR] = interrupt_window_interception,
@@ -1397,6 +1442,9 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_code = svm->vmcb->control.exit_code;

+	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
+		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
+
 	if (npt_enabled) {
 		int mmu_reload = 0;
 		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -1470,6 +1518,8 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
 	struct vmcb_control_area *control;

+	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
+
 	control = &svm->vmcb->control;
 	control->int_vector = irq;
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -1660,9 +1710,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	sync_lapic_to_cr8(vcpu);

 	save_host_msrs(vcpu);
-	fs_selector = read_fs();
-	gs_selector = read_gs();
-	ldt_selector = read_ldt();
+	fs_selector = kvm_read_fs();
+	gs_selector = kvm_read_gs();
+	ldt_selector = kvm_read_ldt();
 	svm->host_cr2 = kvm_read_cr2();
 	svm->host_dr6 = read_dr6();
 	svm->host_dr7 = read_dr7();
@@ -1716,17 +1766,17 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		/* Enter guest mode */
 		"push %%rax \n\t"
 		"mov %c[vmcb](%[svm]), %%rax \n\t"
-		SVM_VMLOAD "\n\t"
-		SVM_VMRUN "\n\t"
-		SVM_VMSAVE "\n\t"
+		__ex(SVM_VMLOAD) "\n\t"
+		__ex(SVM_VMRUN) "\n\t"
+		__ex(SVM_VMSAVE) "\n\t"
 		"pop %%rax \n\t"
 #else
 		/* Enter guest mode */
 		"push %%eax \n\t"
 		"mov %c[vmcb](%[svm]), %%eax \n\t"
-		SVM_VMLOAD "\n\t"
-		SVM_VMRUN "\n\t"
-		SVM_VMSAVE "\n\t"
+		__ex(SVM_VMLOAD) "\n\t"
+		__ex(SVM_VMRUN) "\n\t"
+		__ex(SVM_VMSAVE) "\n\t"
 		"pop %%eax \n\t"
 #endif

@@ -1795,9 +1845,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	write_dr7(svm->host_dr7);
 	kvm_write_cr2(svm->host_cr2);

-	load_fs(fs_selector);
-	load_gs(gs_selector);
-	load_ldt(ldt_selector);
+	kvm_load_fs(fs_selector);
+	kvm_load_gs(gs_selector);
+	kvm_load_ldt(ldt_selector);
 	load_host_msrs(vcpu);

 	reload_tss(vcpu);
@@ -1889,7 +1939,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.prepare_guest_switch = svm_prepare_guest_switch,
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
-	.vcpu_decache = svm_vcpu_decache,

 	.set_guest_debug = svm_guest_debug,
 	.get_msr = svm_get_msr,
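Both the SVM hunks above and the VMX hunks below wrap the raw virtualization instructions in __ex(), i.e. __kvm_handle_fault_on_reboot(). If the instruction faults because virtualization was already disabled on the CPU (as can happen during reboot), an exception-table fixup diverts execution to a handler instead of oopsing. A hedged sketch of the pattern; the label names and fixup details are an assumed shape, not the kernel's exact macro text:

	/* Sketch: emit the instruction at a label, then register an
	 * __ex_table entry so a fault at that address jumps to a fixup
	 * stub that bails out via kvm_handle_fault_on_reboot(). */
	#define __kvm_handle_fault_on_reboot(insn)		\
		"666: " insn "\n\t"				\
		".pushsection .fixup, \"ax\"\n"			\
		"667: jmp kvm_handle_fault_on_reboot\n\t"	\
		".popsection\n\t"				\
		".pushsection __ex_table, \"a\"\n\t"		\
		_ASM_PTR " 666b, 667b\n\t"			\
		".popsection"

	#define __ex(x) __kvm_handle_fault_on_reboot(x)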
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 10ce6ee4c491..0cac63701719 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <asm/io.h> | 30 | #include <asm/io.h> |
31 | #include <asm/desc.h> | 31 | #include <asm/desc.h> |
32 | 32 | ||
33 | #define __ex(x) __kvm_handle_fault_on_reboot(x) | ||
34 | |||
33 | MODULE_AUTHOR("Qumranet"); | 35 | MODULE_AUTHOR("Qumranet"); |
34 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
35 | 37 | ||
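The new __ex() wrapper defers to __kvm_handle_fault_on_reboot(), whose definition lives in a header outside this merge. As a rough sketch of the idiom the name suggests, assuming the usual __ex_table fixup mechanism and a made-up handler symbol (kvm_spurious_fault appears nowhere in this diff), the macro could be shaped like this:

/* Hypothetical sketch, not the actual definition: wrap one instruction
 * so that a fault (say, VMX already torn down during reboot) is routed
 * through the kernel exception table to a bail-out stub instead of
 * oopsing.  Handler name and pointer width (.long) are assumptions. */
#define __kvm_handle_fault_on_reboot(insn)               \
        "666: " insn "\n\t"                              \
        ".pushsection .fixup, \"ax\"\n"                  \
        "667: jmp kvm_spurious_fault\n\t"                \
        ".popsection\n\t"                                \
        ".pushsection __ex_table, \"a\"\n\t"             \
        " .long 666b, 667b\n\t"  /* fault IP, fixup IP */ \
        ".popsection"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

Nearly every raw VMX and SVM instruction in the hunks that follow is wrapped this way, which is why so many asm statements are touched.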
@@ -53,6 +55,7 @@ struct vmcs { | |||
53 | 55 | ||
54 | struct vcpu_vmx { | 56 | struct vcpu_vmx { |
55 | struct kvm_vcpu vcpu; | 57 | struct kvm_vcpu vcpu; |
58 | struct list_head local_vcpus_link; | ||
56 | int launched; | 59 | int launched; |
57 | u8 fail; | 60 | u8 fail; |
58 | u32 idt_vectoring_info; | 61 | u32 idt_vectoring_info; |
@@ -88,9 +91,11 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | |||
88 | } | 91 | } |
89 | 92 | ||
90 | static int init_rmode(struct kvm *kvm); | 93 | static int init_rmode(struct kvm *kvm); |
94 | static u64 construct_eptp(unsigned long root_hpa); | ||
91 | 95 | ||
92 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | 96 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); |
93 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); | 97 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); |
98 | static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu); | ||
94 | 99 | ||
95 | static struct page *vmx_io_bitmap_a; | 100 | static struct page *vmx_io_bitmap_a; |
96 | static struct page *vmx_io_bitmap_b; | 101 | static struct page *vmx_io_bitmap_b; |
@@ -260,6 +265,11 @@ static inline int cpu_has_vmx_vpid(void) | |||
260 | SECONDARY_EXEC_ENABLE_VPID); | 265 | SECONDARY_EXEC_ENABLE_VPID); |
261 | } | 266 | } |
262 | 267 | ||
268 | static inline int cpu_has_virtual_nmis(void) | ||
269 | { | ||
270 | return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; | ||
271 | } | ||
272 | |||
263 | static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) | 273 | static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) |
264 | { | 274 | { |
265 | int i; | 275 | int i; |
@@ -278,7 +288,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) | |||
278 | u64 gva; | 288 | u64 gva; |
279 | } operand = { vpid, 0, gva }; | 289 | } operand = { vpid, 0, gva }; |
280 | 290 | ||
281 | asm volatile (ASM_VMX_INVVPID | 291 | asm volatile (__ex(ASM_VMX_INVVPID) |
282 | /* CF==1 or ZF==1 --> rc = -1 */ | 292 | /* CF==1 or ZF==1 --> rc = -1 */ |
283 | "; ja 1f ; ud2 ; 1:" | 293 | "; ja 1f ; ud2 ; 1:" |
284 | : : "a"(&operand), "c"(ext) : "cc", "memory"); | 294 | : : "a"(&operand), "c"(ext) : "cc", "memory"); |
@@ -290,7 +300,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa) | |||
290 | u64 eptp, gpa; | 300 | u64 eptp, gpa; |
291 | } operand = {eptp, gpa}; | 301 | } operand = {eptp, gpa}; |
292 | 302 | ||
293 | asm volatile (ASM_VMX_INVEPT | 303 | asm volatile (__ex(ASM_VMX_INVEPT) |
294 | /* CF==1 or ZF==1 --> rc = -1 */ | 304 | /* CF==1 or ZF==1 --> rc = -1 */ |
295 | "; ja 1f ; ud2 ; 1:\n" | 305 | "; ja 1f ; ud2 ; 1:\n" |
296 | : : "a" (&operand), "c" (ext) : "cc", "memory"); | 306 | : : "a" (&operand), "c" (ext) : "cc", "memory"); |
@@ -311,7 +321,7 @@ static void vmcs_clear(struct vmcs *vmcs) | |||
311 | u64 phys_addr = __pa(vmcs); | 321 | u64 phys_addr = __pa(vmcs); |
312 | u8 error; | 322 | u8 error; |
313 | 323 | ||
314 | asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0" | 324 | asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" |
315 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | 325 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) |
316 | : "cc", "memory"); | 326 | : "cc", "memory"); |
317 | if (error) | 327 | if (error) |
@@ -329,6 +339,9 @@ static void __vcpu_clear(void *arg) | |||
329 | if (per_cpu(current_vmcs, cpu) == vmx->vmcs) | 339 | if (per_cpu(current_vmcs, cpu) == vmx->vmcs) |
330 | per_cpu(current_vmcs, cpu) = NULL; | 340 | per_cpu(current_vmcs, cpu) = NULL; |
331 | rdtscll(vmx->vcpu.arch.host_tsc); | 341 | rdtscll(vmx->vcpu.arch.host_tsc); |
342 | list_del(&vmx->local_vcpus_link); | ||
343 | vmx->vcpu.cpu = -1; | ||
344 | vmx->launched = 0; | ||
332 | } | 345 | } |
333 | 346 | ||
334 | static void vcpu_clear(struct vcpu_vmx *vmx) | 347 | static void vcpu_clear(struct vcpu_vmx *vmx) |
@@ -336,7 +349,6 @@ static void vcpu_clear(struct vcpu_vmx *vmx) | |||
336 | if (vmx->vcpu.cpu == -1) | 349 | if (vmx->vcpu.cpu == -1) |
337 | return; | 350 | return; |
338 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); | 351 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); |
339 | vmx->launched = 0; | ||
340 | } | 352 | } |
341 | 353 | ||
342 | static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx) | 354 | static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx) |
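Taken together, local_vcpus_link, the per-cpu vcpus_on_cpu list and the extra work in __vcpu_clear() track which VMCSs are resident on each physical CPU so they can all be VMCLEARed when that CPU leaves VMX operation. A userspace model of the bookkeeping follows; a toy intrusive list stands in for <linux/list.h>, and while the function names mirror the patch, none of this is kernel code:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next; n->next->prev = n->prev;
}

struct vcpu {
        int id;
        int cpu;                     /* -1: not resident on any cpu */
        int launched;
        struct list_head link;       /* threads vcpus loaded on one cpu */
};

#define NR_CPUS 2
static struct list_head vcpus_on_cpu[NR_CPUS];

static void vcpu_load(struct vcpu *v, int cpu)
{
        v->cpu = cpu;
        list_add(&v->link, &vcpus_on_cpu[cpu]);
}

/* Models __vcpu_clear(): drop the cpu binding and the launched state. */
static void vcpu_clear(struct vcpu *v)
{
        list_del(&v->link);
        v->cpu = -1;
        v->launched = 0;
}

/* Models vmclear_local_vcpus(): flush every vcpu still on this cpu,
 * which is what hardware_disable() must now do before VMXOFF. */
static void clear_cpu(int cpu)
{
        while (vcpus_on_cpu[cpu].next != &vcpus_on_cpu[cpu]) {
                char *p = (char *)vcpus_on_cpu[cpu].next;
                struct vcpu *v = (struct vcpu *)(p - offsetof(struct vcpu, link));

                vcpu_clear(v);
                printf("cleared vcpu %d from cpu %d\n", v->id, cpu);
        }
}

int main(void)
{
        struct vcpu a = { .id = 0, .cpu = -1 }, b = { .id = 1, .cpu = -1 };
        int i;

        for (i = 0; i < NR_CPUS; i++)
                list_init(&vcpus_on_cpu[i]);
        vcpu_load(&a, 0);
        vcpu_load(&b, 0);
        clear_cpu(0);
        return 0;
}

In the real patch the list_add() happens under local_irq_disable() in vmx_vcpu_load(), since the same list is walked and modified from CPU-local teardown paths.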
@@ -378,7 +390,7 @@ static unsigned long vmcs_readl(unsigned long field) | |||
378 | { | 390 | { |
379 | unsigned long value; | 391 | unsigned long value; |
380 | 392 | ||
381 | asm volatile (ASM_VMX_VMREAD_RDX_RAX | 393 | asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX) |
382 | : "=a"(value) : "d"(field) : "cc"); | 394 | : "=a"(value) : "d"(field) : "cc"); |
383 | return value; | 395 | return value; |
384 | } | 396 | } |
@@ -413,7 +425,7 @@ static void vmcs_writel(unsigned long field, unsigned long value) | |||
413 | { | 425 | { |
414 | u8 error; | 426 | u8 error; |
415 | 427 | ||
416 | asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0" | 428 | asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" |
417 | : "=q"(error) : "a"(value), "d"(field) : "cc"); | 429 | : "=q"(error) : "a"(value), "d"(field) : "cc"); |
418 | if (unlikely(error)) | 430 | if (unlikely(error)) |
419 | vmwrite_error(field, value); | 431 | vmwrite_error(field, value); |
@@ -431,10 +443,8 @@ static void vmcs_write32(unsigned long field, u32 value) | |||
431 | 443 | ||
432 | static void vmcs_write64(unsigned long field, u64 value) | 444 | static void vmcs_write64(unsigned long field, u64 value) |
433 | { | 445 | { |
434 | #ifdef CONFIG_X86_64 | ||
435 | vmcs_writel(field, value); | ||
436 | #else | ||
437 | vmcs_writel(field, value); | 446 | vmcs_writel(field, value); |
447 | #ifndef CONFIG_X86_64 | ||
438 | asm volatile (""); | 448 | asm volatile (""); |
439 | vmcs_writel(field+1, value >> 32); | 449 | vmcs_writel(field+1, value >> 32); |
440 | #endif | 450 | #endif |
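The vmcs_write64() cleanup folds the two #ifdef arms together: the low word always goes through vmcs_writel(), and only 32-bit hosts issue a second write of the high word to field+1, with the empty asm volatile acting as a compiler barrier between the halves. A standalone model, with a printing stub standing in for vmcs_writel():

#include <stdint.h>
#include <stdio.h>

/* Stub for vmcs_writel(); unsigned long is the host word size. */
static void vmcs_writel_stub(unsigned long field, unsigned long value)
{
        printf("vmwrite %#lx <- %#lx\n", field, value);
}

static void vmcs_write64_model(unsigned long field, uint64_t value)
{
        vmcs_writel_stub(field, value);      /* whole value on 64-bit hosts */
        if (sizeof(unsigned long) == 4)      /* the #ifndef CONFIG_X86_64 arm */
                vmcs_writel_stub(field + 1, value >> 32);
}

int main(void)
{
        vmcs_write64_model(0x2800, 0x1122334455667788ULL);
        return 0;
}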
@@ -474,7 +484,7 @@ static void reload_tss(void) | |||
474 | struct descriptor_table gdt; | 484 | struct descriptor_table gdt; |
475 | struct desc_struct *descs; | 485 | struct desc_struct *descs; |
476 | 486 | ||
477 | get_gdt(&gdt); | 487 | kvm_get_gdt(&gdt); |
478 | descs = (void *)gdt.base; | 488 | descs = (void *)gdt.base; |
479 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ | 489 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ |
480 | load_TR_desc(); | 490 | load_TR_desc(); |
@@ -530,9 +540,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
530 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not | 540 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not |
531 | * allow segment selectors with cpl > 0 or ti == 1. | 541 | * allow segment selectors with cpl > 0 or ti == 1. |
532 | */ | 542 | */ |
533 | vmx->host_state.ldt_sel = read_ldt(); | 543 | vmx->host_state.ldt_sel = kvm_read_ldt(); |
534 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; | 544 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; |
535 | vmx->host_state.fs_sel = read_fs(); | 545 | vmx->host_state.fs_sel = kvm_read_fs(); |
536 | if (!(vmx->host_state.fs_sel & 7)) { | 546 | if (!(vmx->host_state.fs_sel & 7)) { |
537 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); | 547 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); |
538 | vmx->host_state.fs_reload_needed = 0; | 548 | vmx->host_state.fs_reload_needed = 0; |
@@ -540,7 +550,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
540 | vmcs_write16(HOST_FS_SELECTOR, 0); | 550 | vmcs_write16(HOST_FS_SELECTOR, 0); |
541 | vmx->host_state.fs_reload_needed = 1; | 551 | vmx->host_state.fs_reload_needed = 1; |
542 | } | 552 | } |
543 | vmx->host_state.gs_sel = read_gs(); | 553 | vmx->host_state.gs_sel = kvm_read_gs(); |
544 | if (!(vmx->host_state.gs_sel & 7)) | 554 | if (!(vmx->host_state.gs_sel & 7)) |
545 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); | 555 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); |
546 | else { | 556 | else { |
@@ -576,15 +586,15 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
576 | ++vmx->vcpu.stat.host_state_reload; | 586 | ++vmx->vcpu.stat.host_state_reload; |
577 | vmx->host_state.loaded = 0; | 587 | vmx->host_state.loaded = 0; |
578 | if (vmx->host_state.fs_reload_needed) | 588 | if (vmx->host_state.fs_reload_needed) |
579 | load_fs(vmx->host_state.fs_sel); | 589 | kvm_load_fs(vmx->host_state.fs_sel); |
580 | if (vmx->host_state.gs_ldt_reload_needed) { | 590 | if (vmx->host_state.gs_ldt_reload_needed) { |
581 | load_ldt(vmx->host_state.ldt_sel); | 591 | kvm_load_ldt(vmx->host_state.ldt_sel); |
582 | /* | 592 | /* |
583 | * If we have to reload gs, we must take care to | 593 | * If we have to reload gs, we must take care to |
584 | * preserve our gs base. | 594 | * preserve our gs base. |
585 | */ | 595 | */ |
586 | local_irq_save(flags); | 596 | local_irq_save(flags); |
587 | load_gs(vmx->host_state.gs_sel); | 597 | kvm_load_gs(vmx->host_state.gs_sel); |
588 | #ifdef CONFIG_X86_64 | 598 | #ifdef CONFIG_X86_64 |
589 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | 599 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); |
590 | #endif | 600 | #endif |
@@ -617,13 +627,17 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
617 | vcpu_clear(vmx); | 627 | vcpu_clear(vmx); |
618 | kvm_migrate_timers(vcpu); | 628 | kvm_migrate_timers(vcpu); |
619 | vpid_sync_vcpu_all(vmx); | 629 | vpid_sync_vcpu_all(vmx); |
630 | local_irq_disable(); | ||
631 | list_add(&vmx->local_vcpus_link, | ||
632 | &per_cpu(vcpus_on_cpu, cpu)); | ||
633 | local_irq_enable(); | ||
620 | } | 634 | } |
621 | 635 | ||
622 | if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { | 636 | if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { |
623 | u8 error; | 637 | u8 error; |
624 | 638 | ||
625 | per_cpu(current_vmcs, cpu) = vmx->vmcs; | 639 | per_cpu(current_vmcs, cpu) = vmx->vmcs; |
626 | asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0" | 640 | asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" |
627 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | 641 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) |
628 | : "cc"); | 642 | : "cc"); |
629 | if (error) | 643 | if (error) |
@@ -640,8 +654,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
640 | * Linux uses per-cpu TSS and GDT, so set these when switching | 654 | * Linux uses per-cpu TSS and GDT, so set these when switching |
641 | * processors. | 655 | * processors. |
642 | */ | 656 | */ |
643 | vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */ | 657 | vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ |
644 | get_gdt(&dt); | 658 | kvm_get_gdt(&dt); |
645 | vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */ | 659 | vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */ |
646 | 660 | ||
647 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); | 661 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); |
@@ -684,11 +698,6 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) | |||
684 | update_exception_bitmap(vcpu); | 698 | update_exception_bitmap(vcpu); |
685 | } | 699 | } |
686 | 700 | ||
687 | static void vmx_vcpu_decache(struct kvm_vcpu *vcpu) | ||
688 | { | ||
689 | vcpu_clear(to_vmx(vcpu)); | ||
690 | } | ||
691 | |||
692 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) | 701 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) |
693 | { | 702 | { |
694 | return vmcs_readl(GUEST_RFLAGS); | 703 | return vmcs_readl(GUEST_RFLAGS); |
@@ -913,6 +922,18 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
913 | case MSR_IA32_TIME_STAMP_COUNTER: | 922 | case MSR_IA32_TIME_STAMP_COUNTER: |
914 | guest_write_tsc(data); | 923 | guest_write_tsc(data); |
915 | break; | 924 | break; |
925 | case MSR_P6_PERFCTR0: | ||
926 | case MSR_P6_PERFCTR1: | ||
927 | case MSR_P6_EVNTSEL0: | ||
928 | case MSR_P6_EVNTSEL1: | ||
929 | /* | ||
930 | * Just discard all writes to the performance counters; this | ||
931 | * should keep both older linux and windows 64-bit guests | ||
932 | * happy | ||
933 | */ | ||
934 | pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data); | ||
935 | |||
936 | break; | ||
916 | default: | 937 | default: |
917 | vmx_load_host_state(vmx); | 938 | vmx_load_host_state(vmx); |
918 | msr = find_msr_entry(vmx, msr_index); | 939 | msr = find_msr_entry(vmx, msr_index); |
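Faking the perfctr MSRs amounts to adding accepted-but-ignored cases to the wrmsr dispatch. A sketch of that shape; the numeric indices are the architectural P6 values behind the named constants (0xc1/0xc2 for the counters, 0x186/0x187 for the event selects), and set_perfctr_msr is an illustrative name, not a function from the patch:

/* Accept and discard guest writes to the P6 performance counters so a
 * guest that pokes them at boot doesn't take an unexpected #GP. */
static int set_perfctr_msr(unsigned msr, unsigned long long data)
{
        switch (msr) {
        case 0xc1:  case 0xc2:          /* MSR_P6_PERFCTR0/1 */
        case 0x186: case 0x187:         /* MSR_P6_EVNTSEL0/1 */
                (void)data;             /* log it, then drop it */
                return 0;
        }
        return -1;                      /* not handled here */
}

int main(void)
{
        return set_perfctr_msr(0xc1, 0) == 0 ? 0 : 1;
}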
@@ -1022,6 +1043,7 @@ static void hardware_enable(void *garbage) | |||
1022 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); | 1043 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); |
1023 | u64 old; | 1044 | u64 old; |
1024 | 1045 | ||
1046 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); | ||
1025 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); | 1047 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); |
1026 | if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED | | 1048 | if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED | |
1027 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | 1049 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) |
@@ -1032,13 +1054,25 @@ static void hardware_enable(void *garbage) | |||
1032 | MSR_IA32_FEATURE_CONTROL_LOCKED | | 1054 | MSR_IA32_FEATURE_CONTROL_LOCKED | |
1033 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED); | 1055 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED); |
1034 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ | 1056 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ |
1035 | asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr) | 1057 | asm volatile (ASM_VMX_VMXON_RAX |
1058 | : : "a"(&phys_addr), "m"(phys_addr) | ||
1036 | : "memory", "cc"); | 1059 | : "memory", "cc"); |
1037 | } | 1060 | } |
1038 | 1061 | ||
1062 | static void vmclear_local_vcpus(void) | ||
1063 | { | ||
1064 | int cpu = raw_smp_processor_id(); | ||
1065 | struct vcpu_vmx *vmx, *n; | ||
1066 | |||
1067 | list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu), | ||
1068 | local_vcpus_link) | ||
1069 | __vcpu_clear(vmx); | ||
1070 | } | ||
1071 | |||
1039 | static void hardware_disable(void *garbage) | 1072 | static void hardware_disable(void *garbage) |
1040 | { | 1073 | { |
1041 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); | 1074 | vmclear_local_vcpus(); |
1075 | asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); | ||
1042 | write_cr4(read_cr4() & ~X86_CR4_VMXE); | 1076 | write_cr4(read_cr4() & ~X86_CR4_VMXE); |
1043 | } | 1077 | } |
1044 | 1078 | ||
@@ -1072,7 +1106,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |||
1072 | u32 _vmentry_control = 0; | 1106 | u32 _vmentry_control = 0; |
1073 | 1107 | ||
1074 | min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; | 1108 | min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; |
1075 | opt = 0; | 1109 | opt = PIN_BASED_VIRTUAL_NMIS; |
1076 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, | 1110 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, |
1077 | &_pin_based_exec_control) < 0) | 1111 | &_pin_based_exec_control) < 0) |
1078 | return -EIO; | 1112 | return -EIO; |
@@ -1389,6 +1423,8 @@ static void exit_lmode(struct kvm_vcpu *vcpu) | |||
1389 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) | 1423 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) |
1390 | { | 1424 | { |
1391 | vpid_sync_vcpu_all(to_vmx(vcpu)); | 1425 | vpid_sync_vcpu_all(to_vmx(vcpu)); |
1426 | if (vm_need_ept()) | ||
1427 | ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); | ||
1392 | } | 1428 | } |
1393 | 1429 | ||
1394 | static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | 1430 | static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) |
@@ -1420,7 +1456,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, | |||
1420 | if (!(cr0 & X86_CR0_PG)) { | 1456 | if (!(cr0 & X86_CR0_PG)) { |
1421 | /* From paging/starting to nonpaging */ | 1457 | /* From paging/starting to nonpaging */ |
1422 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, | 1458 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, |
1423 | vmcs_config.cpu_based_exec_ctrl | | 1459 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | |
1424 | (CPU_BASED_CR3_LOAD_EXITING | | 1460 | (CPU_BASED_CR3_LOAD_EXITING | |
1425 | CPU_BASED_CR3_STORE_EXITING)); | 1461 | CPU_BASED_CR3_STORE_EXITING)); |
1426 | vcpu->arch.cr0 = cr0; | 1462 | vcpu->arch.cr0 = cr0; |
@@ -1430,7 +1466,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, | |||
1430 | } else if (!is_paging(vcpu)) { | 1466 | } else if (!is_paging(vcpu)) { |
1431 | /* From nonpaging to paging */ | 1467 | /* From nonpaging to paging */ |
1432 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, | 1468 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, |
1433 | vmcs_config.cpu_based_exec_ctrl & | 1469 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & |
1434 | ~(CPU_BASED_CR3_LOAD_EXITING | | 1470 | ~(CPU_BASED_CR3_LOAD_EXITING | |
1435 | CPU_BASED_CR3_STORE_EXITING)); | 1471 | CPU_BASED_CR3_STORE_EXITING)); |
1436 | vcpu->arch.cr0 = cr0; | 1472 | vcpu->arch.cr0 = cr0; |
@@ -1821,7 +1857,7 @@ static void allocate_vpid(struct vcpu_vmx *vmx) | |||
1821 | spin_unlock(&vmx_vpid_lock); | 1857 | spin_unlock(&vmx_vpid_lock); |
1822 | } | 1858 | } |
1823 | 1859 | ||
1824 | void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr) | 1860 | static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr) |
1825 | { | 1861 | { |
1826 | void *va; | 1862 | void *va; |
1827 | 1863 | ||
@@ -1907,8 +1943,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
1907 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | 1943 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ |
1908 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 1944 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
1909 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 1945 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
1910 | vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */ | 1946 | vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ |
1911 | vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */ | 1947 | vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ |
1912 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | 1948 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
1913 | #ifdef CONFIG_X86_64 | 1949 | #ifdef CONFIG_X86_64 |
1914 | rdmsrl(MSR_FS_BASE, a); | 1950 | rdmsrl(MSR_FS_BASE, a); |
@@ -1922,7 +1958,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
1922 | 1958 | ||
1923 | vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ | 1959 | vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ |
1924 | 1960 | ||
1925 | get_idt(&dt); | 1961 | kvm_get_idt(&dt); |
1926 | vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ | 1962 | vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ |
1927 | 1963 | ||
1928 | asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); | 1964 | asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); |
@@ -2114,6 +2150,13 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) | |||
2114 | irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); | 2150 | irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); |
2115 | } | 2151 | } |
2116 | 2152 | ||
2153 | static void vmx_inject_nmi(struct kvm_vcpu *vcpu) | ||
2154 | { | ||
2155 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
2156 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); | ||
2157 | vcpu->arch.nmi_pending = 0; | ||
2158 | } | ||
2159 | |||
2117 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | 2160 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) |
2118 | { | 2161 | { |
2119 | int word_index = __ffs(vcpu->arch.irq_summary); | 2162 | int word_index = __ffs(vcpu->arch.irq_summary); |
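vmx_inject_nmi() programs the VM-entry interruption-information field, which encodes the vector in bits 7:0, the event type in bits 10:8 (type 2 is NMI) and a valid flag in bit 31. Worked out numerically:

#include <stdint.h>
#include <stdio.h>

#define NMI_VECTOR           2u
#define INTR_TYPE_NMI_INTR   (2u << 8)        /* event type 2 = NMI */
#define INTR_INFO_VALID_MASK 0x80000000u      /* bit 31 */

int main(void)
{
        uint32_t info = INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;

        printf("%#x\n", info);  /* 0x80000202: vector 2, type NMI, valid */
        return 0;
}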
@@ -2554,8 +2597,6 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2554 | exit_qualification = vmcs_read64(EXIT_QUALIFICATION); | 2597 | exit_qualification = vmcs_read64(EXIT_QUALIFICATION); |
2555 | offset = exit_qualification & 0xffful; | 2598 | offset = exit_qualification & 0xffful; |
2556 | 2599 | ||
2557 | KVMTRACE_1D(APIC_ACCESS, vcpu, (u32)offset, handler); | ||
2558 | |||
2559 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 2600 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); |
2560 | 2601 | ||
2561 | if (er != EMULATE_DONE) { | 2602 | if (er != EMULATE_DONE) { |
@@ -2639,6 +2680,19 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2639 | return 1; | 2680 | return 1; |
2640 | } | 2681 | } |
2641 | 2682 | ||
2683 | static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2684 | { | ||
2685 | u32 cpu_based_vm_exec_control; | ||
2686 | |||
2687 | /* clear pending NMI */ | ||
2688 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2689 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; | ||
2690 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2691 | ++vcpu->stat.nmi_window_exits; | ||
2692 | |||
2693 | return 1; | ||
2694 | } | ||
2695 | |||
2642 | /* | 2696 | /* |
2643 | * The exit handlers return 1 if the exit was handled fully and guest execution | 2697 | * The exit handlers return 1 if the exit was handled fully and guest execution |
2644 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs | 2698 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs |
@@ -2649,6 +2703,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
2649 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, | 2703 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, |
2650 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | 2704 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
2651 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, | 2705 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, |
2706 | [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, | ||
2652 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, | 2707 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, |
2653 | [EXIT_REASON_CR_ACCESS] = handle_cr, | 2708 | [EXIT_REASON_CR_ACCESS] = handle_cr, |
2654 | [EXIT_REASON_DR_ACCESS] = handle_dr, | 2709 | [EXIT_REASON_DR_ACCESS] = handle_dr, |
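handle_nmi_window() is the flip side of the enable_nmi_window() added in the next hunk: entry code sets CPU_BASED_VIRTUAL_NMI_PENDING to request an exit (reason 8, NMI_WINDOW) as soon as NMI injection becomes legal, and the handler clears the bit again so the guest can make progress before the pending NMI goes in on the next entry. A minimal model of that request/acknowledge pair:

#include <stdint.h>
#include <stdio.h>

#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000u

static uint32_t exec_ctrl;      /* models CPU_BASED_VM_EXEC_CONTROL */

/* Request an NMI-window exit. */
static void enable_nmi_window_model(void)
{
        exec_ctrl |= CPU_BASED_VIRTUAL_NMI_PENDING;
}

/* On EXIT_REASON_NMI_WINDOW, drop the request again. */
static void handle_nmi_window_model(void)
{
        exec_ctrl &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
}

int main(void)
{
        enable_nmi_window_model();
        printf("ctrl=%#x\n", exec_ctrl);
        handle_nmi_window_model();
        printf("ctrl=%#x\n", exec_ctrl);
        return 0;
}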
@@ -2736,17 +2791,52 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) | |||
2736 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | 2791 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
2737 | } | 2792 | } |
2738 | 2793 | ||
2794 | static void enable_nmi_window(struct kvm_vcpu *vcpu) | ||
2795 | { | ||
2796 | u32 cpu_based_vm_exec_control; | ||
2797 | |||
2798 | if (!cpu_has_virtual_nmis()) | ||
2799 | return; | ||
2800 | |||
2801 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2802 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; | ||
2803 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2804 | } | ||
2805 | |||
2806 | static int vmx_nmi_enabled(struct kvm_vcpu *vcpu) | ||
2807 | { | ||
2808 | u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | ||
2809 | return !(guest_intr & (GUEST_INTR_STATE_NMI | | ||
2810 | GUEST_INTR_STATE_MOV_SS | | ||
2811 | GUEST_INTR_STATE_STI)); | ||
2812 | } | ||
2813 | |||
2814 | static int vmx_irq_enabled(struct kvm_vcpu *vcpu) | ||
2815 | { | ||
2816 | u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | ||
2817 | return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS | | ||
2818 | GUEST_INTR_STATE_STI)) && | ||
2819 | (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)); | ||
2820 | } | ||
2821 | |||
2822 | static void enable_intr_window(struct kvm_vcpu *vcpu) | ||
2823 | { | ||
2824 | if (vcpu->arch.nmi_pending) | ||
2825 | enable_nmi_window(vcpu); | ||
2826 | else if (kvm_cpu_has_interrupt(vcpu)) | ||
2827 | enable_irq_window(vcpu); | ||
2828 | } | ||
2829 | |||
2739 | static void vmx_intr_assist(struct kvm_vcpu *vcpu) | 2830 | static void vmx_intr_assist(struct kvm_vcpu *vcpu) |
2740 | { | 2831 | { |
2741 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2832 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2742 | u32 idtv_info_field, intr_info_field; | 2833 | u32 idtv_info_field, intr_info_field, exit_intr_info_field; |
2743 | int has_ext_irq, interrupt_window_open; | ||
2744 | int vector; | 2834 | int vector; |
2745 | 2835 | ||
2746 | update_tpr_threshold(vcpu); | 2836 | update_tpr_threshold(vcpu); |
2747 | 2837 | ||
2748 | has_ext_irq = kvm_cpu_has_interrupt(vcpu); | ||
2749 | intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD); | 2838 | intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD); |
2839 | exit_intr_info_field = vmcs_read32(VM_EXIT_INTR_INFO); | ||
2750 | idtv_info_field = vmx->idt_vectoring_info; | 2840 | idtv_info_field = vmx->idt_vectoring_info; |
2751 | if (intr_info_field & INTR_INFO_VALID_MASK) { | 2841 | if (intr_info_field & INTR_INFO_VALID_MASK) { |
2752 | if (idtv_info_field & INTR_INFO_VALID_MASK) { | 2842 | if (idtv_info_field & INTR_INFO_VALID_MASK) { |
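The new predicates read the guest interruptibility state: NMIs are blocked by an in-flight NMI or by an STI/MOV-SS shadow, while external interrupts also need EFLAGS.IF but do not care about NMI blocking, and enable_intr_window() gives a pending NMI priority over a pending IRQ. Restated self-contained, using the flag values from the vmx.h hunk further down:

#include <stdint.h>
#include <stdio.h>

#define GUEST_INTR_STATE_STI    0x00000001u
#define GUEST_INTR_STATE_MOV_SS 0x00000002u
#define GUEST_INTR_STATE_NMI    0x00000008u
#define X86_EFLAGS_IF           0x00000200u

static int nmi_injectable(uint32_t intr_state)
{
        return !(intr_state & (GUEST_INTR_STATE_NMI |
                               GUEST_INTR_STATE_MOV_SS |
                               GUEST_INTR_STATE_STI));
}

static int irq_injectable(uint32_t intr_state, uint32_t rflags)
{
        return !(intr_state & (GUEST_INTR_STATE_MOV_SS |
                               GUEST_INTR_STATE_STI)) &&
               (rflags & X86_EFLAGS_IF);
}

int main(void)
{
        /* An STI shadow blocks both; NMI blocking alone still lets IRQs in. */
        printf("%d %d\n", nmi_injectable(GUEST_INTR_STATE_STI),
               irq_injectable(GUEST_INTR_STATE_NMI, X86_EFLAGS_IF));
        return 0;
}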
@@ -2754,8 +2844,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) | |||
2754 | if (printk_ratelimit()) | 2844 | if (printk_ratelimit()) |
2755 | printk(KERN_ERR "Fault when IDT_Vectoring\n"); | 2845 | printk(KERN_ERR "Fault when IDT_Vectoring\n"); |
2756 | } | 2846 | } |
2757 | if (has_ext_irq) | 2847 | enable_intr_window(vcpu); |
2758 | enable_irq_window(vcpu); | ||
2759 | return; | 2848 | return; |
2760 | } | 2849 | } |
2761 | if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { | 2850 | if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { |
@@ -2765,30 +2854,56 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) | |||
2765 | u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; | 2854 | u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; |
2766 | 2855 | ||
2767 | vmx_inject_irq(vcpu, vect); | 2856 | vmx_inject_irq(vcpu, vect); |
2768 | if (unlikely(has_ext_irq)) | 2857 | enable_intr_window(vcpu); |
2769 | enable_irq_window(vcpu); | ||
2770 | return; | 2858 | return; |
2771 | } | 2859 | } |
2772 | 2860 | ||
2773 | KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler); | 2861 | KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler); |
2774 | 2862 | ||
2775 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); | 2863 | /* |
2864 | * SDM 3: 25.7.1.2 | ||
2865 | * Clear bit "block by NMI" before VM entry if a NMI delivery | ||
2866 | * faulted. | ||
2867 | */ | ||
2868 | if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) | ||
2869 | == INTR_TYPE_NMI_INTR && cpu_has_virtual_nmis()) | ||
2870 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | ||
2871 | vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & | ||
2872 | ~GUEST_INTR_STATE_NMI); | ||
2873 | |||
2874 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field | ||
2875 | & ~INTR_INFO_RESVD_BITS_MASK); | ||
2776 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | 2876 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, |
2777 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); | 2877 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); |
2778 | 2878 | ||
2779 | if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK)) | 2879 | if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK)) |
2780 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, | 2880 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, |
2781 | vmcs_read32(IDT_VECTORING_ERROR_CODE)); | 2881 | vmcs_read32(IDT_VECTORING_ERROR_CODE)); |
2782 | if (unlikely(has_ext_irq)) | 2882 | enable_intr_window(vcpu); |
2783 | enable_irq_window(vcpu); | ||
2784 | return; | 2883 | return; |
2785 | } | 2884 | } |
2786 | if (!has_ext_irq) | 2885 | if (cpu_has_virtual_nmis()) { |
2886 | /* | ||
2887 | * SDM 3: 25.7.1.2 | ||
2888 | * Re-set bit "block by NMI" before VM entry if vmexit caused by | ||
2889 | * a guest IRET fault. | ||
2890 | */ | ||
2891 | if ((exit_intr_info_field & INTR_INFO_UNBLOCK_NMI) && | ||
2892 | (exit_intr_info_field & INTR_INFO_VECTOR_MASK) != 8) | ||
2893 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | ||
2894 | vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) | | ||
2895 | GUEST_INTR_STATE_NMI); | ||
2896 | else if (vcpu->arch.nmi_pending) { | ||
2897 | if (vmx_nmi_enabled(vcpu)) | ||
2898 | vmx_inject_nmi(vcpu); | ||
2899 | enable_intr_window(vcpu); | ||
2900 | return; | ||
2901 | } | ||
2902 | |||
2903 | } | ||
2904 | if (!kvm_cpu_has_interrupt(vcpu)) | ||
2787 | return; | 2905 | return; |
2788 | interrupt_window_open = | 2906 | if (vmx_irq_enabled(vcpu)) { |
2789 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
2790 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | ||
2791 | if (interrupt_window_open) { | ||
2792 | vector = kvm_cpu_get_interrupt(vcpu); | 2907 | vector = kvm_cpu_get_interrupt(vcpu); |
2793 | vmx_inject_irq(vcpu, vector); | 2908 | vmx_inject_irq(vcpu, vector); |
2794 | kvm_timer_intr_post(vcpu, vector); | 2909 | kvm_timer_intr_post(vcpu, vector); |
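The SDM 25.7.1.2 logic is the subtle part of this hunk. Read as a decision procedure: if the exit reports that a guest IRET had already unblocked NMIs (bit 12 of the exit interruption info) but the exit, for example a fault on that IRET, kept it from completing, the block-by-NMI bit must be put back; the vector-8 case (#DF, double fault) is excluded, as in the code above. A sketch of just that predicate (must_reblock_nmi is an illustrative name):

#include <stdint.h>

#define INTR_INFO_UNBLOCK_NMI 0x1000u   /* bit 12, from the vmx.h hunk */
#define INTR_INFO_VECTOR_MASK 0xffu

/* True when "blocked by NMI" must be restored before the next entry:
 * the guest's IRET dropped the blocking and the exit kept the IRET
 * from completing.  Vector 8 (#DF) is the exception to the rule. */
static int must_reblock_nmi(uint32_t exit_intr_info)
{
        return (exit_intr_info & INTR_INFO_UNBLOCK_NMI) &&
               (exit_intr_info & INTR_INFO_VECTOR_MASK) != 8;
}

int main(void)
{
        return must_reblock_nmi(0x1002) ? 0 : 1;   /* unblocked, vector 2 */
}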
@@ -2838,7 +2953,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2838 | "push %%edx; push %%ebp;" | 2953 | "push %%edx; push %%ebp;" |
2839 | "push %%ecx \n\t" | 2954 | "push %%ecx \n\t" |
2840 | #endif | 2955 | #endif |
2841 | ASM_VMX_VMWRITE_RSP_RDX "\n\t" | 2956 | __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" |
2842 | /* Check if vmlaunch or vmresume is needed */ | 2957 | /* Check if vmlaunch or vmresume is needed */ |
2843 | "cmpl $0, %c[launched](%0) \n\t" | 2958 | "cmpl $0, %c[launched](%0) \n\t" |
2844 | /* Load guest registers. Don't clobber flags. */ | 2959 | /* Load guest registers. Don't clobber flags. */ |
@@ -2873,9 +2988,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2873 | #endif | 2988 | #endif |
2874 | /* Enter guest mode */ | 2989 | /* Enter guest mode */ |
2875 | "jne .Llaunched \n\t" | 2990 | "jne .Llaunched \n\t" |
2876 | ASM_VMX_VMLAUNCH "\n\t" | 2991 | __ex(ASM_VMX_VMLAUNCH) "\n\t" |
2877 | "jmp .Lkvm_vmx_return \n\t" | 2992 | "jmp .Lkvm_vmx_return \n\t" |
2878 | ".Llaunched: " ASM_VMX_VMRESUME "\n\t" | 2993 | ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" |
2879 | ".Lkvm_vmx_return: " | 2994 | ".Lkvm_vmx_return: " |
2880 | /* Save guest registers, load host registers, keep flags */ | 2995 | /* Save guest registers, load host registers, keep flags */ |
2881 | #ifdef CONFIG_X86_64 | 2996 | #ifdef CONFIG_X86_64 |
@@ -2949,7 +3064,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2949 | fixup_rmode_irq(vmx); | 3064 | fixup_rmode_irq(vmx); |
2950 | 3065 | ||
2951 | vcpu->arch.interrupt_window_open = | 3066 | vcpu->arch.interrupt_window_open = |
2952 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; | 3067 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & |
3068 | (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0; | ||
2953 | 3069 | ||
2954 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); | 3070 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); |
2955 | vmx->launched = 1; | 3071 | vmx->launched = 1; |
@@ -2957,7 +3073,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2957 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | 3073 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
2958 | 3074 | ||
2959 | /* We need to handle NMIs before interrupts are enabled */ | 3075 | /* We need to handle NMIs before interrupts are enabled */ |
2960 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */ | 3076 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200 && |
3077 | (intr_info & INTR_INFO_VALID_MASK)) { | ||
2961 | KVMTRACE_0D(NMI, vcpu, handler); | 3078 | KVMTRACE_0D(NMI, vcpu, handler); |
2962 | asm("int $2"); | 3079 | asm("int $2"); |
2963 | } | 3080 | } |
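The NMI check fix: the old test looked only at the type field, so stale exit-info contents could masquerade as an NMI and trigger a spurious int $2. The corrected predicate, spelled out:

#include <stdint.h>

#define INTR_INFO_INTR_TYPE_MASK 0x700u
#define INTR_INFO_VALID_MASK     0x80000000u

/* The interruption info is only meaningful when bit 31 is set; 0x200
 * in the type field (bits 10:8) is INTR_TYPE_NMI_INTR. */
static int exit_was_nmi(uint32_t intr_info)
{
        return (intr_info & INTR_INFO_VALID_MASK) &&
               (intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200;
}

int main(void)
{
        return exit_was_nmi(0x80000202) ? 0 : 1;
}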
@@ -2968,7 +3085,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | |||
2968 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3085 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2969 | 3086 | ||
2970 | if (vmx->vmcs) { | 3087 | if (vmx->vmcs) { |
2971 | on_each_cpu(__vcpu_clear, vmx, 1); | 3088 | vcpu_clear(vmx); |
2972 | free_vmcs(vmx->vmcs); | 3089 | free_vmcs(vmx->vmcs); |
2973 | vmx->vmcs = NULL; | 3090 | vmx->vmcs = NULL; |
2974 | } | 3091 | } |
@@ -3095,7 +3212,6 @@ static struct kvm_x86_ops vmx_x86_ops = { | |||
3095 | .prepare_guest_switch = vmx_save_host_state, | 3212 | .prepare_guest_switch = vmx_save_host_state, |
3096 | .vcpu_load = vmx_vcpu_load, | 3213 | .vcpu_load = vmx_vcpu_load, |
3097 | .vcpu_put = vmx_vcpu_put, | 3214 | .vcpu_put = vmx_vcpu_put, |
3098 | .vcpu_decache = vmx_vcpu_decache, | ||
3099 | 3215 | ||
3100 | .set_guest_debug = set_guest_debug, | 3216 | .set_guest_debug = set_guest_debug, |
3101 | .guest_debug_pre = kvm_guest_debug_pre, | 3217 | .guest_debug_pre = kvm_guest_debug_pre, |
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index 79d94c610dfe..425a13436b3f 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h | |||
@@ -40,6 +40,7 @@ | |||
40 | #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 | 40 | #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 |
41 | #define CPU_BASED_CR8_STORE_EXITING 0x00100000 | 41 | #define CPU_BASED_CR8_STORE_EXITING 0x00100000 |
42 | #define CPU_BASED_TPR_SHADOW 0x00200000 | 42 | #define CPU_BASED_TPR_SHADOW 0x00200000 |
43 | #define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 | ||
43 | #define CPU_BASED_MOV_DR_EXITING 0x00800000 | 44 | #define CPU_BASED_MOV_DR_EXITING 0x00800000 |
44 | #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 | 45 | #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 |
45 | #define CPU_BASED_USE_IO_BITMAPS 0x02000000 | 46 | #define CPU_BASED_USE_IO_BITMAPS 0x02000000 |
@@ -216,7 +217,7 @@ enum vmcs_field { | |||
216 | #define EXIT_REASON_TRIPLE_FAULT 2 | 217 | #define EXIT_REASON_TRIPLE_FAULT 2 |
217 | 218 | ||
218 | #define EXIT_REASON_PENDING_INTERRUPT 7 | 219 | #define EXIT_REASON_PENDING_INTERRUPT 7 |
219 | | 220 | #define EXIT_REASON_NMI_WINDOW 8 |
220 | #define EXIT_REASON_TASK_SWITCH 9 | 221 | #define EXIT_REASON_TASK_SWITCH 9 |
221 | #define EXIT_REASON_CPUID 10 | 222 | #define EXIT_REASON_CPUID 10 |
222 | #define EXIT_REASON_HLT 12 | 223 | #define EXIT_REASON_HLT 12 |
@@ -251,7 +252,9 @@ enum vmcs_field { | |||
251 | #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ | 252 | #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ |
252 | #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ | 253 | #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ |
253 | #define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ | 254 | #define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ |
255 | #define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */ | ||
254 | #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ | 256 | #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ |
257 | #define INTR_INFO_RESVD_BITS_MASK 0x7ffff000 | ||
255 | 258 | ||
256 | #define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK | 259 | #define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK |
257 | #define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK | 260 | #define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK |
@@ -259,9 +262,16 @@ enum vmcs_field { | |||
259 | #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK | 262 | #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK |
260 | 263 | ||
261 | #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ | 264 | #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ |
265 | #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ | ||
262 | #define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ | 266 | #define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ |
263 | #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ | 267 | #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ |
264 | 268 | ||
269 | /* GUEST_INTERRUPTIBILITY_INFO flags. */ | ||
270 | #define GUEST_INTR_STATE_STI 0x00000001 | ||
271 | #define GUEST_INTR_STATE_MOV_SS 0x00000002 | ||
272 | #define GUEST_INTR_STATE_SMI 0x00000004 | ||
273 | #define GUEST_INTR_STATE_NMI 0x00000008 | ||
274 | |||
265 | /* | 275 | /* |
266 | * Exit Qualifications for MOV for Control Register Access | 276 | * Exit Qualifications for MOV for Control Register Access |
267 | */ | 277 | */ |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0faa2546b1cd..9f1cdb011cff 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -72,6 +72,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
72 | { "mmio_exits", VCPU_STAT(mmio_exits) }, | 72 | { "mmio_exits", VCPU_STAT(mmio_exits) }, |
73 | { "signal_exits", VCPU_STAT(signal_exits) }, | 73 | { "signal_exits", VCPU_STAT(signal_exits) }, |
74 | { "irq_window", VCPU_STAT(irq_window_exits) }, | 74 | { "irq_window", VCPU_STAT(irq_window_exits) }, |
75 | { "nmi_window", VCPU_STAT(nmi_window_exits) }, | ||
75 | { "halt_exits", VCPU_STAT(halt_exits) }, | 76 | { "halt_exits", VCPU_STAT(halt_exits) }, |
76 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 77 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
77 | { "hypercalls", VCPU_STAT(hypercalls) }, | 78 | { "hypercalls", VCPU_STAT(hypercalls) }, |
@@ -173,6 +174,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, | |||
173 | kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); | 174 | kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); |
174 | } | 175 | } |
175 | 176 | ||
177 | void kvm_inject_nmi(struct kvm_vcpu *vcpu) | ||
178 | { | ||
179 | vcpu->arch.nmi_pending = 1; | ||
180 | } | ||
181 | EXPORT_SYMBOL_GPL(kvm_inject_nmi); | ||
182 | |||
176 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) | 183 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) |
177 | { | 184 | { |
178 | WARN_ON(vcpu->arch.exception.pending); | 185 | WARN_ON(vcpu->arch.exception.pending); |
@@ -604,6 +611,38 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) | |||
604 | mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); | 611 | mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); |
605 | } | 612 | } |
606 | 613 | ||
614 | static bool msr_mtrr_valid(unsigned msr) | ||
615 | { | ||
616 | switch (msr) { | ||
617 | case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: | ||
618 | case MSR_MTRRfix64K_00000: | ||
619 | case MSR_MTRRfix16K_80000: | ||
620 | case MSR_MTRRfix16K_A0000: | ||
621 | case MSR_MTRRfix4K_C0000: | ||
622 | case MSR_MTRRfix4K_C8000: | ||
623 | case MSR_MTRRfix4K_D0000: | ||
624 | case MSR_MTRRfix4K_D8000: | ||
625 | case MSR_MTRRfix4K_E0000: | ||
626 | case MSR_MTRRfix4K_E8000: | ||
627 | case MSR_MTRRfix4K_F0000: | ||
628 | case MSR_MTRRfix4K_F8000: | ||
629 | case MSR_MTRRdefType: | ||
630 | case MSR_IA32_CR_PAT: | ||
631 | return true; | ||
632 | case 0x2f8: | ||
633 | return true; | ||
634 | } | ||
635 | return false; | ||
636 | } | ||
637 | |||
638 | static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) | ||
639 | { | ||
640 | if (!msr_mtrr_valid(msr)) | ||
641 | return 1; | ||
642 | |||
643 | vcpu->arch.mtrr[msr - 0x200] = data; | ||
644 | return 0; | ||
645 | } | ||
607 | 646 | ||
608 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 647 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
609 | { | 648 | { |
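msr_mtrr_valid()/set_msr_mtrr() give the guest real, readable-back MTRR state instead of the old write-and-forget handling. A standalone model follows; KVM_NR_VAR_MTRR is assumed to be 8 (its kvm_host.h value is not shown in this merge), and the numeric indices are the architectural ones behind the named MSR constants:

#include <stdint.h>

#define KVM_NR_VAR_MTRR 8
static uint64_t mtrr_state[0x100];      /* covers MSRs 0x200..0x2ff */

static int mtrr_msr_valid(unsigned msr)
{
        /* variable-range PHYSBASEn/PHYSMASKn pairs */
        if (msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)
                return 1;
        switch (msr) {
        case 0x250:                     /* MTRRfix64K_00000 */
        case 0x258: case 0x259:         /* MTRRfix16K_{80000,A0000} */
        case 0x268 ... 0x26f:           /* MTRRfix4K_C0000..F8000 (gcc range) */
        case 0x277:                     /* IA32_CR_PAT */
        case 0x2ff:                     /* MTRRdefType */
                return 1;
        }
        return 0;
}

static int set_mtrr(unsigned msr, uint64_t data)
{
        if (!mtrr_msr_valid(msr))
                return 1;               /* caller turns this into a fault */
        mtrr_state[msr - 0x200] = data;
        return 0;
}

static int get_mtrr(unsigned msr, uint64_t *pdata)
{
        if (!mtrr_msr_valid(msr))
                return 1;
        *pdata = mtrr_state[msr - 0x200];
        return 0;
}

int main(void)
{
        uint64_t v = 0;

        set_mtrr(0x2ff, 0xc06);   /* MTRRdefType: enable + fixed + WB default */
        get_mtrr(0x2ff, &v);
        return v == 0xc06 ? 0 : 1;
}

The patch additionally whitelists MSR 0x2f8, which falls outside the named set modeled above.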
@@ -625,8 +664,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
625 | break; | 664 | break; |
626 | case MSR_IA32_UCODE_REV: | 665 | case MSR_IA32_UCODE_REV: |
627 | case MSR_IA32_UCODE_WRITE: | 666 | case MSR_IA32_UCODE_WRITE: |
628 | case 0x200 ... 0x2ff: /* MTRRs */ | ||
629 | break; | 667 | break; |
668 | case 0x200 ... 0x2ff: | ||
669 | return set_msr_mtrr(vcpu, msr, data); | ||
630 | case MSR_IA32_APICBASE: | 670 | case MSR_IA32_APICBASE: |
631 | kvm_set_apic_base(vcpu, data); | 671 | kvm_set_apic_base(vcpu, data); |
632 | break; | 672 | break; |
@@ -684,6 +724,15 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
684 | return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); | 724 | return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); |
685 | } | 725 | } |
686 | 726 | ||
727 | static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | ||
728 | { | ||
729 | if (!msr_mtrr_valid(msr)) | ||
730 | return 1; | ||
731 | |||
732 | *pdata = vcpu->arch.mtrr[msr - 0x200]; | ||
733 | return 0; | ||
734 | } | ||
735 | |||
687 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | 736 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) |
688 | { | 737 | { |
689 | u64 data; | 738 | u64 data; |
@@ -705,11 +754,13 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |||
705 | case MSR_IA32_MC0_MISC+16: | 754 | case MSR_IA32_MC0_MISC+16: |
706 | case MSR_IA32_UCODE_REV: | 755 | case MSR_IA32_UCODE_REV: |
707 | case MSR_IA32_EBL_CR_POWERON: | 756 | case MSR_IA32_EBL_CR_POWERON: |
708 | /* MTRR registers */ | ||
709 | case 0xfe: | ||
710 | case 0x200 ... 0x2ff: | ||
711 | data = 0; | 757 | data = 0; |
712 | break; | 758 | break; |
759 | case MSR_MTRRcap: | ||
760 | data = 0x500 | KVM_NR_VAR_MTRR; | ||
761 | break; | ||
762 | case 0x200 ... 0x2ff: | ||
763 | return get_msr_mtrr(vcpu, msr, pdata); | ||
713 | case 0xcd: /* fsb frequency */ | 764 | case 0xcd: /* fsb frequency */ |
714 | data = 3; | 765 | data = 3; |
715 | break; | 766 | break; |
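The synthesized MTRRcap value packs the variable-range count into bits 7:0 and advertises fixed-range and write-combining support in bits 8 and 10, so 0x500 | KVM_NR_VAR_MTRR decodes as below (again assuming KVM_NR_VAR_MTRR is 8):

#include <stdio.h>

#define KVM_NR_VAR_MTRR 8               /* assumed, as above */

int main(void)
{
        unsigned cap = 0x500 | KVM_NR_VAR_MTRR;

        printf("VCNT=%u FIX=%d WC=%d\n",
               cap & 0xff,              /* variable-range count */
               !!(cap & 0x100),         /* fixed-range MTRRs supported */
               !!(cap & 0x400));        /* write-combining supported */
        return 0;                       /* prints VCNT=8 FIX=1 WC=1 */
}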
@@ -817,41 +868,6 @@ out: | |||
817 | return r; | 868 | return r; |
818 | } | 869 | } |
819 | 870 | ||
820 | /* | ||
821 | * Make sure that a cpu that is being hot-unplugged does not have any vcpus | ||
822 | * cached on it. | ||
823 | */ | ||
824 | void decache_vcpus_on_cpu(int cpu) | ||
825 | { | ||
826 | struct kvm *vm; | ||
827 | struct kvm_vcpu *vcpu; | ||
828 | int i; | ||
829 | |||
830 | spin_lock(&kvm_lock); | ||
831 | list_for_each_entry(vm, &vm_list, vm_list) | ||
832 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | ||
833 | vcpu = vm->vcpus[i]; | ||
834 | if (!vcpu) | ||
835 | continue; | ||
836 | /* | ||
837 | * If the vcpu is locked, then it is running on some | ||
838 | * other cpu and therefore it is not cached on the | ||
839 | * cpu in question. | ||
840 | * | ||
841 | * If it's not locked, check the last cpu it executed | ||
842 | * on. | ||
843 | */ | ||
844 | if (mutex_trylock(&vcpu->mutex)) { | ||
845 | if (vcpu->cpu == cpu) { | ||
846 | kvm_x86_ops->vcpu_decache(vcpu); | ||
847 | vcpu->cpu = -1; | ||
848 | } | ||
849 | mutex_unlock(&vcpu->mutex); | ||
850 | } | ||
851 | } | ||
852 | spin_unlock(&kvm_lock); | ||
853 | } | ||
854 | |||
855 | int kvm_dev_ioctl_check_extension(long ext) | 871 | int kvm_dev_ioctl_check_extension(long ext) |
856 | { | 872 | { |
857 | int r; | 873 | int r; |
@@ -869,6 +885,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
869 | case KVM_CAP_MP_STATE: | 885 | case KVM_CAP_MP_STATE: |
870 | r = 1; | 886 | r = 1; |
871 | break; | 887 | break; |
888 | case KVM_CAP_COALESCED_MMIO: | ||
889 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | ||
890 | break; | ||
872 | case KVM_CAP_VAPIC: | 891 | case KVM_CAP_VAPIC: |
873 | r = !kvm_x86_ops->cpu_has_accelerated_tpr(); | 892 | r = !kvm_x86_ops->cpu_has_accelerated_tpr(); |
874 | break; | 893 | break; |
@@ -1781,13 +1800,14 @@ static void kvm_init_msr_list(void) | |||
1781 | * Only the apic needs an MMIO device hook, so shortcut now.. | 1800 | * Only the apic needs an MMIO device hook, so shortcut now.. |
1782 | */ | 1801 | */ |
1783 | static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu, | 1802 | static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu, |
1784 | gpa_t addr) | 1803 | gpa_t addr, int len, |
1804 | int is_write) | ||
1785 | { | 1805 | { |
1786 | struct kvm_io_device *dev; | 1806 | struct kvm_io_device *dev; |
1787 | 1807 | ||
1788 | if (vcpu->arch.apic) { | 1808 | if (vcpu->arch.apic) { |
1789 | dev = &vcpu->arch.apic->dev; | 1809 | dev = &vcpu->arch.apic->dev; |
1790 | if (dev->in_range(dev, addr)) | 1810 | if (dev->in_range(dev, addr, len, is_write)) |
1791 | return dev; | 1811 | return dev; |
1792 | } | 1812 | } |
1793 | return NULL; | 1813 | return NULL; |
@@ -1795,13 +1815,15 @@ static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu, | |||
1795 | 1815 | ||
1796 | 1816 | ||
1797 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, | 1817 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, |
1798 | gpa_t addr) | 1818 | gpa_t addr, int len, |
1819 | int is_write) | ||
1799 | { | 1820 | { |
1800 | struct kvm_io_device *dev; | 1821 | struct kvm_io_device *dev; |
1801 | 1822 | ||
1802 | dev = vcpu_find_pervcpu_dev(vcpu, addr); | 1823 | dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write); |
1803 | if (dev == NULL) | 1824 | if (dev == NULL) |
1804 | dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); | 1825 | dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, |
1826 | is_write); | ||
1805 | return dev; | 1827 | return dev; |
1806 | } | 1828 | } |
1807 | 1829 | ||
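Threading len and is_write through every device lookup lets an in_range() implementation accept or refuse an access based on its full span and its direction, which is exactly what the new coalesced-MMIO device needs, since it handles writes only. A toy bus lookup with that shape (the names here are illustrative, not the kvm_io_device API):

#include <stddef.h>
#include <stdint.h>

struct io_device {
        uint64_t base, size;
        int readable, writable;
};

/* The range hook now sees the whole access, not just its start. */
static int dev_in_range(const struct io_device *d, uint64_t addr,
                        int len, int is_write)
{
        if (addr < d->base || addr + len > d->base + d->size)
                return 0;
        return is_write ? d->writable : d->readable;
}

static struct io_device *bus_find_dev(struct io_device *devs, size_t n,
                                      uint64_t addr, int len, int is_write)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (dev_in_range(&devs[i], addr, len, is_write))
                        return &devs[i];
        return NULL;            /* fall back to a userspace exit */
}

int main(void)
{
        struct io_device mmio = { 0x1000, 0x100, 0, 1 };   /* write-only */

        return bus_find_dev(&mmio, 1, 0x1000, 4, 1) ? 0 : 1;
}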
@@ -1869,7 +1891,7 @@ mmio: | |||
1869 | * Is this MMIO handled locally? | 1891 | * Is this MMIO handled locally? |
1870 | */ | 1892 | */ |
1871 | mutex_lock(&vcpu->kvm->lock); | 1893 | mutex_lock(&vcpu->kvm->lock); |
1872 | mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); | 1894 | mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0); |
1873 | if (mmio_dev) { | 1895 | if (mmio_dev) { |
1874 | kvm_iodevice_read(mmio_dev, gpa, bytes, val); | 1896 | kvm_iodevice_read(mmio_dev, gpa, bytes, val); |
1875 | mutex_unlock(&vcpu->kvm->lock); | 1897 | mutex_unlock(&vcpu->kvm->lock); |
@@ -1924,7 +1946,7 @@ mmio: | |||
1924 | * Is this MMIO handled locally? | 1946 | * Is this MMIO handled locally? |
1925 | */ | 1947 | */ |
1926 | mutex_lock(&vcpu->kvm->lock); | 1948 | mutex_lock(&vcpu->kvm->lock); |
1927 | mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); | 1949 | mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1); |
1928 | if (mmio_dev) { | 1950 | if (mmio_dev) { |
1929 | kvm_iodevice_write(mmio_dev, gpa, bytes, val); | 1951 | kvm_iodevice_write(mmio_dev, gpa, bytes, val); |
1930 | mutex_unlock(&vcpu->kvm->lock); | 1952 | mutex_unlock(&vcpu->kvm->lock); |
@@ -2020,6 +2042,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) | |||
2020 | 2042 | ||
2021 | int emulate_clts(struct kvm_vcpu *vcpu) | 2043 | int emulate_clts(struct kvm_vcpu *vcpu) |
2022 | { | 2044 | { |
2045 | KVMTRACE_0D(CLTS, vcpu, handler); | ||
2023 | kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS); | 2046 | kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS); |
2024 | return X86EMUL_CONTINUE; | 2047 | return X86EMUL_CONTINUE; |
2025 | } | 2048 | } |
@@ -2053,21 +2076,19 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) | |||
2053 | 2076 | ||
2054 | void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) | 2077 | void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) |
2055 | { | 2078 | { |
2056 | static int reported; | ||
2057 | u8 opcodes[4]; | 2079 | u8 opcodes[4]; |
2058 | unsigned long rip = vcpu->arch.rip; | 2080 | unsigned long rip = vcpu->arch.rip; |
2059 | unsigned long rip_linear; | 2081 | unsigned long rip_linear; |
2060 | 2082 | ||
2061 | rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); | 2083 | if (!printk_ratelimit()) |
2062 | |||
2063 | if (reported) | ||
2064 | return; | 2084 | return; |
2065 | 2085 | ||
2086 | rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); | ||
2087 | |||
2066 | emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu); | 2088 | emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu); |
2067 | 2089 | ||
2068 | printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", | 2090 | printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", |
2069 | context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); | 2091 | context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); |
2070 | reported = 1; | ||
2071 | } | 2092 | } |
2072 | EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); | 2093 | EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); |
2073 | 2094 | ||
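Before this change a static reported flag logged only the first emulation failure per module lifetime; switching to printk_ratelimit() keeps later failures visible without flooding the log. A crude userspace stand-in for the same idea, at most one message per second:

#include <stdio.h>
#include <time.h>

static int ratelimit(void)
{
        static time_t last;
        time_t now = time(NULL);

        if (now == last)
                return 0;
        last = now;
        return 1;
}

static void report_failure(const char *context)
{
        if (!ratelimit())
                return;
        fprintf(stderr, "emulation failed (%s)\n", context);
}

int main(void)
{
        report_failure("first");        /* printed */
        report_failure("second");       /* suppressed within the same second */
        return 0;
}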
@@ -2105,27 +2126,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu, | |||
2105 | ? X86EMUL_MODE_PROT64 : cs_db | 2126 | ? X86EMUL_MODE_PROT64 : cs_db |
2106 | ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; | 2127 | ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; |
2107 | 2128 | ||
2108 | if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) { | ||
2109 | vcpu->arch.emulate_ctxt.cs_base = 0; | ||
2110 | vcpu->arch.emulate_ctxt.ds_base = 0; | ||
2111 | vcpu->arch.emulate_ctxt.es_base = 0; | ||
2112 | vcpu->arch.emulate_ctxt.ss_base = 0; | ||
2113 | } else { | ||
2114 | vcpu->arch.emulate_ctxt.cs_base = | ||
2115 | get_segment_base(vcpu, VCPU_SREG_CS); | ||
2116 | vcpu->arch.emulate_ctxt.ds_base = | ||
2117 | get_segment_base(vcpu, VCPU_SREG_DS); | ||
2118 | vcpu->arch.emulate_ctxt.es_base = | ||
2119 | get_segment_base(vcpu, VCPU_SREG_ES); | ||
2120 | vcpu->arch.emulate_ctxt.ss_base = | ||
2121 | get_segment_base(vcpu, VCPU_SREG_SS); | ||
2122 | } | ||
2123 | |||
2124 | vcpu->arch.emulate_ctxt.gs_base = | ||
2125 | get_segment_base(vcpu, VCPU_SREG_GS); | ||
2126 | vcpu->arch.emulate_ctxt.fs_base = | ||
2127 | get_segment_base(vcpu, VCPU_SREG_FS); | ||
2128 | |||
2129 | r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops); | 2129 | r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops); |
2130 | 2130 | ||
2131 | /* Reject the instructions other than VMCALL/VMMCALL when | 2131 | /* Reject the instructions other than VMCALL/VMMCALL when |
@@ -2300,9 +2300,10 @@ static void pio_string_write(struct kvm_io_device *pio_dev, | |||
2300 | } | 2300 | } |
2301 | 2301 | ||
2302 | static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu, | 2302 | static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu, |
2303 | gpa_t addr) | 2303 | gpa_t addr, int len, |
2304 | int is_write) | ||
2304 | { | 2305 | { |
2305 | return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr); | 2306 | return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write); |
2306 | } | 2307 | } |
2307 | 2308 | ||
2308 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 2309 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, |
@@ -2331,11 +2332,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | |||
2331 | 2332 | ||
2332 | kvm_x86_ops->cache_regs(vcpu); | 2333 | kvm_x86_ops->cache_regs(vcpu); |
2333 | memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4); | 2334 | memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4); |
2334 | kvm_x86_ops->decache_regs(vcpu); | ||
2335 | 2335 | ||
2336 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 2336 | kvm_x86_ops->skip_emulated_instruction(vcpu); |
2337 | 2337 | ||
2338 | pio_dev = vcpu_find_pio_dev(vcpu, port); | 2338 | pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in); |
2339 | if (pio_dev) { | 2339 | if (pio_dev) { |
2340 | kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data); | 2340 | kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data); |
2341 | complete_pio(vcpu); | 2341 | complete_pio(vcpu); |
@@ -2417,7 +2417,9 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | |||
2417 | } | 2417 | } |
2418 | } | 2418 | } |
2419 | 2419 | ||
2420 | pio_dev = vcpu_find_pio_dev(vcpu, port); | 2420 | pio_dev = vcpu_find_pio_dev(vcpu, port, |
2421 | vcpu->arch.pio.cur_count, | ||
2422 | !vcpu->arch.pio.in); | ||
2421 | if (!vcpu->arch.pio.in) { | 2423 | if (!vcpu->arch.pio.in) { |
2422 | /* string PIO write */ | 2424 | /* string PIO write */ |
2423 | ret = pio_copy_data(vcpu); | 2425 | ret = pio_copy_data(vcpu); |
@@ -2600,27 +2602,41 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, | |||
2600 | 2602 | ||
2601 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) | 2603 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) |
2602 | { | 2604 | { |
2605 | unsigned long value; | ||
2606 | |||
2603 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); | 2607 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); |
2604 | switch (cr) { | 2608 | switch (cr) { |
2605 | case 0: | 2609 | case 0: |
2606 | return vcpu->arch.cr0; | 2610 | value = vcpu->arch.cr0; |
2611 | break; | ||
2607 | case 2: | 2612 | case 2: |
2608 | return vcpu->arch.cr2; | 2613 | value = vcpu->arch.cr2; |
2614 | break; | ||
2609 | case 3: | 2615 | case 3: |
2610 | return vcpu->arch.cr3; | 2616 | value = vcpu->arch.cr3; |
2617 | break; | ||
2611 | case 4: | 2618 | case 4: |
2612 | return vcpu->arch.cr4; | 2619 | value = vcpu->arch.cr4; |
2620 | break; | ||
2613 | case 8: | 2621 | case 8: |
2614 | return kvm_get_cr8(vcpu); | 2622 | value = kvm_get_cr8(vcpu); |
2623 | break; | ||
2615 | default: | 2624 | default: |
2616 | vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); | 2625 | vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); |
2617 | return 0; | 2626 | return 0; |
2618 | } | 2627 | } |
2628 | KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value, | ||
2629 | (u32)((u64)value >> 32), handler); | ||
2630 | |||
2631 | return value; | ||
2619 | } | 2632 | } |
2620 | 2633 | ||
2621 | void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, | 2634 | void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, |
2622 | unsigned long *rflags) | 2635 | unsigned long *rflags) |
2623 | { | 2636 | { |
2637 | KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val, | ||
2638 | (u32)((u64)val >> 32), handler); | ||
2639 | |||
2624 | switch (cr) { | 2640 | switch (cr) { |
2625 | case 0: | 2641 | case 0: |
2626 | kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); | 2642 | kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); |
@@ -2771,8 +2787,10 @@ static void vapic_exit(struct kvm_vcpu *vcpu) | |||
2771 | if (!apic || !apic->vapic_addr) | 2787 | if (!apic || !apic->vapic_addr) |
2772 | return; | 2788 | return; |
2773 | 2789 | ||
2790 | down_read(&vcpu->kvm->slots_lock); | ||
2774 | kvm_release_page_dirty(apic->vapic_page); | 2791 | kvm_release_page_dirty(apic->vapic_page); |
2775 | mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); | 2792 | mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); |
2793 | up_read(&vcpu->kvm->slots_lock); | ||
2776 | } | 2794 | } |
2777 | 2795 | ||
2778 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2796 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
@@ -2928,9 +2946,7 @@ out: | |||
2928 | 2946 | ||
2929 | post_kvm_run_save(vcpu, kvm_run); | 2947 | post_kvm_run_save(vcpu, kvm_run); |
2930 | 2948 | ||
2931 | down_read(&vcpu->kvm->slots_lock); | ||
2932 | vapic_exit(vcpu); | 2949 | vapic_exit(vcpu); |
2933 | up_read(&vcpu->kvm->slots_lock); | ||
2934 | 2950 | ||
2935 | return r; | 2951 | return r; |
2936 | } | 2952 | } |
@@ -2942,15 +2958,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2942 | 2958 | ||
2943 | vcpu_load(vcpu); | 2959 | vcpu_load(vcpu); |
2944 | 2960 | ||
2961 | if (vcpu->sigset_active) | ||
2962 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
2963 | |||
2945 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { | 2964 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { |
2946 | kvm_vcpu_block(vcpu); | 2965 | kvm_vcpu_block(vcpu); |
2947 | vcpu_put(vcpu); | 2966 | r = -EAGAIN; |
2948 | return -EAGAIN; | 2967 | goto out; |
2949 | } | 2968 | } |
2950 | 2969 | ||
2951 | if (vcpu->sigset_active) | ||
2952 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
2953 | |||
2954 | /* re-sync apic's tpr */ | 2970 | /* re-sync apic's tpr */ |
2955 | if (!irqchip_in_kernel(vcpu->kvm)) | 2971 | if (!irqchip_in_kernel(vcpu->kvm)) |
2956 | kvm_set_cr8(vcpu, kvm_run->cr8); | 2972 | kvm_set_cr8(vcpu, kvm_run->cr8); |
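The reordering matters because kvm_vcpu_block() can sleep for a long time: the caller's signal mask must already be installed when the uninitialized vcpu blocks, and it must be restored on the -EAGAIN path too, hence the goto out. A userspace model of the control flow (run_vcpu is an illustrative name):

#include <signal.h>
#include <stddef.h>

static int run_vcpu(const sigset_t *user_set, int uninitialized)
{
        sigset_t saved;
        int r = 0;

        if (user_set)
                sigprocmask(SIG_SETMASK, user_set, &saved);

        if (uninitialized) {
                /* the block-until-initialized wait happens here, with
                 * user_set in effect, as the guest expects */
                r = -1;                 /* stands in for -EAGAIN */
                goto out;
        }

        /* ... guest entry loop would run here ... */
out:
        if (user_set)
                sigprocmask(SIG_SETMASK, &saved, NULL);
        return r;
}

int main(void)
{
        return run_vcpu(NULL, 1) == -1 ? 0 : 1;
}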
@@ -3070,8 +3086,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
3070 | return 0; | 3086 | return 0; |
3071 | } | 3087 | } |
3072 | 3088 | ||
3073 | static void get_segment(struct kvm_vcpu *vcpu, | 3089 | void kvm_get_segment(struct kvm_vcpu *vcpu, |
3074 | struct kvm_segment *var, int seg) | 3090 | struct kvm_segment *var, int seg) |
3075 | { | 3091 | { |
3076 | kvm_x86_ops->get_segment(vcpu, var, seg); | 3092 | kvm_x86_ops->get_segment(vcpu, var, seg); |
3077 | } | 3093 | } |
@@ -3080,7 +3096,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) | |||
3080 | { | 3096 | { |
3081 | struct kvm_segment cs; | 3097 | struct kvm_segment cs; |
3082 | 3098 | ||
3083 | get_segment(vcpu, &cs, VCPU_SREG_CS); | 3099 | kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); |
3084 | *db = cs.db; | 3100 | *db = cs.db; |
3085 | *l = cs.l; | 3101 | *l = cs.l; |
3086 | } | 3102 | } |
@@ -3094,15 +3110,15 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |||
3094 | 3110 | ||
3095 | vcpu_load(vcpu); | 3111 | vcpu_load(vcpu); |
3096 | 3112 | ||
3097 | get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); | 3113 | kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); |
3098 | get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); | 3114 | kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); |
3099 | get_segment(vcpu, &sregs->es, VCPU_SREG_ES); | 3115 | kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); |
3100 | get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); | 3116 | kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); |
3101 | get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); | 3117 | kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); |
3102 | get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); | 3118 | kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); |
3103 | 3119 | ||
3104 | get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); | 3120 | kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); |
3105 | get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); | 3121 | kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); |
3106 | 3122 | ||
3107 | kvm_x86_ops->get_idt(vcpu, &dt); | 3123 | kvm_x86_ops->get_idt(vcpu, &dt); |
3108 | sregs->idt.limit = dt.limit; | 3124 | sregs->idt.limit = dt.limit; |
@@ -3154,7 +3170,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |||
3154 | return 0; | 3170 | return 0; |
3155 | } | 3171 | } |
3156 | 3172 | ||
3157 | static void set_segment(struct kvm_vcpu *vcpu, | 3173 | static void kvm_set_segment(struct kvm_vcpu *vcpu, |
3158 | struct kvm_segment *var, int seg) | 3174 | struct kvm_segment *var, int seg) |
3159 | { | 3175 | { |
3160 | kvm_x86_ops->set_segment(vcpu, var, seg); | 3176 | kvm_x86_ops->set_segment(vcpu, var, seg); |
@@ -3191,7 +3207,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu, | |||
3191 | if (selector & 1 << 2) { | 3207 | if (selector & 1 << 2) { |
3192 | struct kvm_segment kvm_seg; | 3208 | struct kvm_segment kvm_seg; |
3193 | 3209 | ||
3194 | get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR); | 3210 | kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR); |
3195 | 3211 | ||
3196 | if (kvm_seg.unusable) | 3212 | if (kvm_seg.unusable) |
3197 | dtable->limit = 0; | 3213 | dtable->limit = 0; |
@@ -3297,7 +3313,7 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) | |||
3297 | { | 3313 | { |
3298 | struct kvm_segment kvm_seg; | 3314 | struct kvm_segment kvm_seg; |
3299 | 3315 | ||
3300 | get_segment(vcpu, &kvm_seg, seg); | 3316 | kvm_get_segment(vcpu, &kvm_seg, seg); |
3301 | return kvm_seg.selector; | 3317 | return kvm_seg.selector; |
3302 | } | 3318 | } |
3303 | 3319 | ||
@@ -3313,8 +3329,8 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu, | |||
3313 | return 0; | 3329 | return 0; |
3314 | } | 3330 | } |
3315 | 3331 | ||
3316 | static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | 3332 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, |
3317 | int type_bits, int seg) | 3333 | int type_bits, int seg) |
3318 | { | 3334 | { |
3319 | struct kvm_segment kvm_seg; | 3335 | struct kvm_segment kvm_seg; |
3320 | 3336 | ||
@@ -3327,7 +3343,7 @@ static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | |||
3327 | if (!kvm_seg.s) | 3343 | if (!kvm_seg.s) |
3328 | kvm_seg.unusable = 1; | 3344 | kvm_seg.unusable = 1; |
3329 | 3345 | ||
3330 | set_segment(vcpu, &kvm_seg, seg); | 3346 | kvm_set_segment(vcpu, &kvm_seg, seg); |
3331 | return 0; | 3347 | return 0; |
3332 | } | 3348 | } |
3333 | 3349 | ||
@@ -3373,25 +3389,25 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu, | |||
3373 | vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi; | 3389 | vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi; |
3374 | vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi; | 3390 | vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi; |
3375 | 3391 | ||
3376 | if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR)) | 3392 | if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR)) |
3377 | return 1; | 3393 | return 1; |
3378 | 3394 | ||
3379 | if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) | 3395 | if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) |
3380 | return 1; | 3396 | return 1; |
3381 | 3397 | ||
3382 | if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) | 3398 | if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) |
3383 | return 1; | 3399 | return 1; |
3384 | 3400 | ||
3385 | if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) | 3401 | if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) |
3386 | return 1; | 3402 | return 1; |
3387 | 3403 | ||
3388 | if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) | 3404 | if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) |
3389 | return 1; | 3405 | return 1; |
3390 | 3406 | ||
3391 | if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS)) | 3407 | if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS)) |
3392 | return 1; | 3408 | return 1; |
3393 | 3409 | ||
3394 | if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS)) | 3410 | if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS)) |
3395 | return 1; | 3411 | return 1; |
3396 | return 0; | 3412 | return 0; |
3397 | } | 3413 | } |
@@ -3432,24 +3448,24 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu, | |||
3432 | vcpu->arch.regs[VCPU_REGS_RSI] = tss->si; | 3448 | vcpu->arch.regs[VCPU_REGS_RSI] = tss->si; |
3433 | vcpu->arch.regs[VCPU_REGS_RDI] = tss->di; | 3449 | vcpu->arch.regs[VCPU_REGS_RDI] = tss->di; |
3434 | 3450 | ||
3435 | if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR)) | 3451 | if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR)) |
3436 | return 1; | 3452 | return 1; |
3437 | 3453 | ||
3438 | if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) | 3454 | if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES)) |
3439 | return 1; | 3455 | return 1; |
3440 | 3456 | ||
3441 | if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) | 3457 | if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS)) |
3442 | return 1; | 3458 | return 1; |
3443 | 3459 | ||
3444 | if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) | 3460 | if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS)) |
3445 | return 1; | 3461 | return 1; |
3446 | 3462 | ||
3447 | if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) | 3463 | if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS)) |
3448 | return 1; | 3464 | return 1; |
3449 | return 0; | 3465 | return 0; |
3450 | } | 3466 | } |
3451 | 3467 | ||
3452 | int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, | 3468 | static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, |
3453 | struct desc_struct *cseg_desc, | 3469 | struct desc_struct *cseg_desc, |
3454 | struct desc_struct *nseg_desc) | 3470 | struct desc_struct *nseg_desc) |
3455 | { | 3471 | { |
@@ -3472,7 +3488,7 @@ out: | |||
3472 | return ret; | 3488 | return ret; |
3473 | } | 3489 | } |
3474 | 3490 | ||
3475 | int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, | 3491 | static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, |
3476 | struct desc_struct *cseg_desc, | 3492 | struct desc_struct *cseg_desc, |
3477 | struct desc_struct *nseg_desc) | 3493 | struct desc_struct *nseg_desc) |
3478 | { | 3494 | { |
@@ -3502,7 +3518,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
3502 | struct desc_struct nseg_desc; | 3518 | struct desc_struct nseg_desc; |
3503 | int ret = 0; | 3519 | int ret = 0; |
3504 | 3520 | ||
3505 | get_segment(vcpu, &tr_seg, VCPU_SREG_TR); | 3521 | kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR); |
3506 | 3522 | ||
3507 | if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc)) | 3523 | if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc)) |
3508 | goto out; | 3524 | goto out; |
@@ -3561,7 +3577,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
3561 | kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS); | 3577 | kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS); |
3562 | seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg); | 3578 | seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg); |
3563 | tr_seg.type = 11; | 3579 | tr_seg.type = 11; |
3564 | set_segment(vcpu, &tr_seg, VCPU_SREG_TR); | 3580 | kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR); |
3565 | out: | 3581 | out: |
3566 | kvm_x86_ops->decache_regs(vcpu); | 3582 | kvm_x86_ops->decache_regs(vcpu); |
3567 | return ret; | 3583 | return ret; |
@@ -3628,15 +3644,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
3628 | } | 3644 | } |
3629 | } | 3645 | } |
3630 | 3646 | ||
3631 | set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); | 3647 | kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); |
3632 | set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); | 3648 | kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); |
3633 | set_segment(vcpu, &sregs->es, VCPU_SREG_ES); | 3649 | kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); |
3634 | set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); | 3650 | kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); |
3635 | set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); | 3651 | kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); |
3636 | set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); | 3652 | kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); |
3637 | 3653 | ||
3638 | set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); | 3654 | kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); |
3639 | set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); | 3655 | kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); |
3640 | 3656 | ||
3641 | vcpu_put(vcpu); | 3657 | vcpu_put(vcpu); |
3642 | 3658 | ||
@@ -3751,14 +3767,14 @@ void fx_init(struct kvm_vcpu *vcpu) | |||
3751 | * allocate ram with GFP_KERNEL. | 3767 | * allocate ram with GFP_KERNEL. |
3752 | */ | 3768 | */ |
3753 | if (!used_math()) | 3769 | if (!used_math()) |
3754 | fx_save(&vcpu->arch.host_fx_image); | 3770 | kvm_fx_save(&vcpu->arch.host_fx_image); |
3755 | 3771 | ||
3756 | /* Initialize guest FPU by resetting ours and saving into guest's */ | 3772 | /* Initialize guest FPU by resetting ours and saving into guest's */ |
3757 | preempt_disable(); | 3773 | preempt_disable(); |
3758 | fx_save(&vcpu->arch.host_fx_image); | 3774 | kvm_fx_save(&vcpu->arch.host_fx_image); |
3759 | fx_finit(); | 3775 | kvm_fx_finit(); |
3760 | fx_save(&vcpu->arch.guest_fx_image); | 3776 | kvm_fx_save(&vcpu->arch.guest_fx_image); |
3761 | fx_restore(&vcpu->arch.host_fx_image); | 3777 | kvm_fx_restore(&vcpu->arch.host_fx_image); |
3762 | preempt_enable(); | 3778 | preempt_enable(); |
3763 | 3779 | ||
3764 | vcpu->arch.cr0 |= X86_CR0_ET; | 3780 | vcpu->arch.cr0 |= X86_CR0_ET; |
@@ -3775,8 +3791,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) | |||
3775 | return; | 3791 | return; |
3776 | 3792 | ||
3777 | vcpu->guest_fpu_loaded = 1; | 3793 | vcpu->guest_fpu_loaded = 1; |
3778 | fx_save(&vcpu->arch.host_fx_image); | 3794 | kvm_fx_save(&vcpu->arch.host_fx_image); |
3779 | fx_restore(&vcpu->arch.guest_fx_image); | 3795 | kvm_fx_restore(&vcpu->arch.guest_fx_image); |
3780 | } | 3796 | } |
3781 | EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); | 3797 | EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); |
3782 | 3798 | ||
@@ -3786,8 +3802,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) | |||
3786 | return; | 3802 | return; |
3787 | 3803 | ||
3788 | vcpu->guest_fpu_loaded = 0; | 3804 | vcpu->guest_fpu_loaded = 0; |
3789 | fx_save(&vcpu->arch.guest_fx_image); | 3805 | kvm_fx_save(&vcpu->arch.guest_fx_image); |
3790 | fx_restore(&vcpu->arch.host_fx_image); | 3806 | kvm_fx_restore(&vcpu->arch.host_fx_image); |
3791 | ++vcpu->stat.fpu_reload; | 3807 | ++vcpu->stat.fpu_reload; |
3792 | } | 3808 | } |
3793 | EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); | 3809 | EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); |
@@ -4016,6 +4032,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm, | |||
4016 | return 0; | 4032 | return 0; |
4017 | } | 4033 | } |
4018 | 4034 | ||
4035 | void kvm_arch_flush_shadow(struct kvm *kvm) | ||
4036 | { | ||
4037 | kvm_mmu_zap_all(kvm); | ||
4038 | } | ||
4039 | |||
4019 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 4040 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
4020 | { | 4041 | { |
4021 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE | 4042 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE |
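
kvm_arch_flush_shadow() gives generic code an arch hook for dropping every cached guest translation; on x86 it is simply kvm_mmu_zap_all(), matching the changelog entry about nuking shadowed page tables on memslot destruction. A simplified sketch of the caller side (the real logic in virt/kvm/kvm_main.c around kvm_set_memory_region() is more involved):

```c
/*
 * Sketch, not kernel code: when a memory slot disappears, flush the
 * shadow MMU so no shadow pte can still point at the freed pages.
 */
static void example_destroy_memslot(struct kvm *kvm,
				    struct kvm_memory_slot *slot)
{
	slot->npages = 0;		/* slot is going away */
	kvm_arch_flush_shadow(kvm);	/* zap all cached translations */
}
```
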
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 932f216d890c..f2f90468f8b1 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c | |||
@@ -121,7 +121,7 @@ static u16 opcode_table[256] = { | |||
121 | 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , | 121 | 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , |
122 | 0, 0, 0, 0, | 122 | 0, 0, 0, 0, |
123 | /* 0x68 - 0x6F */ | 123 | /* 0x68 - 0x6F */ |
124 | 0, 0, ImplicitOps | Mov | Stack, 0, | 124 | SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0, |
125 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */ | 125 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */ |
126 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */ | 126 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */ |
127 | /* 0x70 - 0x77 */ | 127 | /* 0x70 - 0x77 */ |
@@ -138,9 +138,11 @@ static u16 opcode_table[256] = { | |||
138 | /* 0x88 - 0x8F */ | 138 | /* 0x88 - 0x8F */ |
139 | ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, | 139 | ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, |
140 | ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | 140 | ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, |
141 | 0, ModRM | DstReg, 0, Group | Group1A, | 141 | DstMem | SrcReg | ModRM | Mov, ModRM | DstReg, |
142 | /* 0x90 - 0x9F */ | 142 | DstReg | SrcMem | ModRM | Mov, Group | Group1A, |
143 | 0, 0, 0, 0, 0, 0, 0, 0, | 143 | /* 0x90 - 0x97 */ |
144 | DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, | ||
145 | /* 0x98 - 0x9F */ | ||
144 | 0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0, | 146 | 0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0, |
145 | /* 0xA0 - 0xA7 */ | 147 | /* 0xA0 - 0xA7 */ |
146 | ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs, | 148 | ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs, |
@@ -152,7 +154,8 @@ static u16 opcode_table[256] = { | |||
152 | ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String, | 154 | ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String, |
153 | ByteOp | ImplicitOps | String, ImplicitOps | String, | 155 | ByteOp | ImplicitOps | String, ImplicitOps | String, |
154 | /* 0xB0 - 0xBF */ | 156 | /* 0xB0 - 0xBF */ |
155 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 157 | 0, 0, 0, 0, 0, 0, 0, 0, |
158 | DstReg | SrcImm | Mov, 0, 0, 0, 0, 0, 0, 0, | ||
156 | /* 0xC0 - 0xC7 */ | 159 | /* 0xC0 - 0xC7 */ |
157 | ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, | 160 | ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, |
158 | 0, ImplicitOps | Stack, 0, 0, | 161 | 0, ImplicitOps | Stack, 0, 0, |
@@ -168,7 +171,8 @@ static u16 opcode_table[256] = { | |||
168 | /* 0xE0 - 0xE7 */ | 171 | /* 0xE0 - 0xE7 */ |
169 | 0, 0, 0, 0, 0, 0, 0, 0, | 172 | 0, 0, 0, 0, 0, 0, 0, 0, |
170 | /* 0xE8 - 0xEF */ | 173 | /* 0xE8 - 0xEF */ |
171 | ImplicitOps | Stack, SrcImm|ImplicitOps, 0, SrcImmByte|ImplicitOps, | 174 | ImplicitOps | Stack, SrcImm | ImplicitOps, |
175 | ImplicitOps, SrcImmByte | ImplicitOps, | ||
172 | 0, 0, 0, 0, | 176 | 0, 0, 0, 0, |
173 | /* 0xF0 - 0xF7 */ | 177 | /* 0xF0 - 0xF7 */ |
174 | 0, 0, 0, 0, | 178 | 0, 0, 0, 0, |
@@ -215,7 +219,7 @@ static u16 twobyte_table[256] = { | |||
215 | /* 0xA0 - 0xA7 */ | 219 | /* 0xA0 - 0xA7 */ |
216 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0, | 220 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0, |
217 | /* 0xA8 - 0xAF */ | 221 | /* 0xA8 - 0xAF */ |
218 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0, | 222 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, ModRM, 0, |
219 | /* 0xB0 - 0xB7 */ | 223 | /* 0xB0 - 0xB7 */ |
220 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0, | 224 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0, |
221 | DstMem | SrcReg | ModRM | BitOp, | 225 | DstMem | SrcReg | ModRM | BitOp, |
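
These table hunks switch on previously unhandled opcodes: push imm/imm8 (0x68/0x6a), mov to/from a segment register (0x8c/0x8e), xchg reg,rax (0x90-0x97), mov r,imm (0xb8), and clflush (0f ae). Each entry is a flag word that drives the generic decoder; a toy reading of one entry, with flag values matching the defines at the top of x86_emulate.c as I understand them (treat the exact values as assumptions):

```c
#include <stdio.h>

#define ByteOp	(1 << 0)	/* 8-bit operands                     */
#define ModRM	(1 << 6)	/* instruction carries a ModRM byte   */
#define Mov	(1 << 7)	/* destination is written, never read */

int main(void)
{
	unsigned int d = ByteOp | ModRM | Mov;	/* e.g. 0x88: mov r/m8, r8 */

	if (d == 0)
		puts("entry is 0: cannot emulate this opcode");
	if (d & ModRM)
		puts("fetch and decode a ModRM byte");
	printf("operand size: %s\n", (d & ByteOp) ? "1 byte" : "op_bytes");
	return 0;
}
```
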
@@ -518,6 +522,39 @@ static inline void jmp_rel(struct decode_cache *c, int rel) | |||
518 | register_address_increment(c, &c->eip, rel); | 522 | register_address_increment(c, &c->eip, rel); |
519 | } | 523 | } |
520 | 524 | ||
525 | static void set_seg_override(struct decode_cache *c, int seg) | ||
526 | { | ||
527 | c->has_seg_override = true; | ||
528 | c->seg_override = seg; | ||
529 | } | ||
530 | |||
531 | static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) | ||
532 | { | ||
533 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) | ||
534 | return 0; | ||
535 | |||
536 | return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg); | ||
537 | } | ||
538 | |||
539 | static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt, | ||
540 | struct decode_cache *c) | ||
541 | { | ||
542 | if (!c->has_seg_override) | ||
543 | return 0; | ||
544 | |||
545 | return seg_base(ctxt, c->seg_override); | ||
546 | } | ||
547 | |||
548 | static unsigned long es_base(struct x86_emulate_ctxt *ctxt) | ||
549 | { | ||
550 | return seg_base(ctxt, VCPU_SREG_ES); | ||
551 | } | ||
552 | |||
553 | static unsigned long ss_base(struct x86_emulate_ctxt *ctxt) | ||
554 | { | ||
555 | return seg_base(ctxt, VCPU_SREG_SS); | ||
556 | } | ||
557 | |||
521 | static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, | 558 | static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, |
522 | struct x86_emulate_ops *ops, | 559 | struct x86_emulate_ops *ops, |
523 | unsigned long linear, u8 *dest) | 560 | unsigned long linear, u8 *dest) |
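
The new seg_base() helper encodes the x86-64 flat-memory rule: in long mode the bases of ES, CS, SS and DS are forced to zero and only FS and GS contribute, which is why `seg < VCPU_SREG_FS` works once the enum is reordered (see the kvm_host.h hunk further down). A standalone toy showing the linear-address computation the emulator performs:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the reordered enum: ES=0, CS=1, SS=2, DS=3, FS=4, GS=5. */
enum seg { SEG_ES, SEG_CS, SEG_SS, SEG_DS, SEG_FS, SEG_GS };

/* Toy seg_base(): flat in long mode except for FS/GS ("base" stands in
 * for whatever the descriptor or MSR would supply). */
static uint64_t seg_base(int long_mode, enum seg s, uint64_t base)
{
	if (long_mode && s < SEG_FS)
		return 0;
	return base;
}

int main(void)
{
	uint64_t ea = 0x1000;

	printf("DS:0x1000 -> linear %#llx\n",
	       (unsigned long long)(seg_base(1, SEG_DS, 0xdead0000) + ea));
	printf("GS:0x1000 -> linear %#llx\n",
	       (unsigned long long)(seg_base(1, SEG_GS, 0xdead0000) + ea));
	return 0;
}
```
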
@@ -660,7 +697,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
660 | { | 697 | { |
661 | struct decode_cache *c = &ctxt->decode; | 698 | struct decode_cache *c = &ctxt->decode; |
662 | u8 sib; | 699 | u8 sib; |
663 | int index_reg = 0, base_reg = 0, scale, rip_relative = 0; | 700 | int index_reg = 0, base_reg = 0, scale; |
664 | int rc = 0; | 701 | int rc = 0; |
665 | 702 | ||
666 | if (c->rex_prefix) { | 703 | if (c->rex_prefix) { |
@@ -731,47 +768,28 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
731 | } | 768 | } |
732 | if (c->modrm_rm == 2 || c->modrm_rm == 3 || | 769 | if (c->modrm_rm == 2 || c->modrm_rm == 3 || |
733 | (c->modrm_rm == 6 && c->modrm_mod != 0)) | 770 | (c->modrm_rm == 6 && c->modrm_mod != 0)) |
734 | if (!c->override_base) | 771 | if (!c->has_seg_override) |
735 | c->override_base = &ctxt->ss_base; | 772 | set_seg_override(c, VCPU_SREG_SS); |
736 | c->modrm_ea = (u16)c->modrm_ea; | 773 | c->modrm_ea = (u16)c->modrm_ea; |
737 | } else { | 774 | } else { |
738 | /* 32/64-bit ModR/M decode. */ | 775 | /* 32/64-bit ModR/M decode. */ |
739 | switch (c->modrm_rm) { | 776 | if ((c->modrm_rm & 7) == 4) { |
740 | case 4: | ||
741 | case 12: | ||
742 | sib = insn_fetch(u8, 1, c->eip); | 777 | sib = insn_fetch(u8, 1, c->eip); |
743 | index_reg |= (sib >> 3) & 7; | 778 | index_reg |= (sib >> 3) & 7; |
744 | base_reg |= sib & 7; | 779 | base_reg |= sib & 7; |
745 | scale = sib >> 6; | 780 | scale = sib >> 6; |
746 | 781 | ||
747 | switch (base_reg) { | 782 | if ((base_reg & 7) == 5 && c->modrm_mod == 0) |
748 | case 5: | 783 | c->modrm_ea += insn_fetch(s32, 4, c->eip); |
749 | if (c->modrm_mod != 0) | 784 | else |
750 | c->modrm_ea += c->regs[base_reg]; | ||
751 | else | ||
752 | c->modrm_ea += | ||
753 | insn_fetch(s32, 4, c->eip); | ||
754 | break; | ||
755 | default: | ||
756 | c->modrm_ea += c->regs[base_reg]; | 785 | c->modrm_ea += c->regs[base_reg]; |
757 | } | 786 | if (index_reg != 4) |
758 | switch (index_reg) { | ||
759 | case 4: | ||
760 | break; | ||
761 | default: | ||
762 | c->modrm_ea += c->regs[index_reg] << scale; | 787 | c->modrm_ea += c->regs[index_reg] << scale; |
763 | } | 788 | } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) { |
764 | break; | 789 | if (ctxt->mode == X86EMUL_MODE_PROT64) |
765 | case 5: | 790 | c->rip_relative = 1; |
766 | if (c->modrm_mod != 0) | 791 | } else |
767 | c->modrm_ea += c->regs[c->modrm_rm]; | ||
768 | else if (ctxt->mode == X86EMUL_MODE_PROT64) | ||
769 | rip_relative = 1; | ||
770 | break; | ||
771 | default: | ||
772 | c->modrm_ea += c->regs[c->modrm_rm]; | 792 | c->modrm_ea += c->regs[c->modrm_rm]; |
773 | break; | ||
774 | } | ||
775 | switch (c->modrm_mod) { | 793 | switch (c->modrm_mod) { |
776 | case 0: | 794 | case 0: |
777 | if (c->modrm_rm == 5) | 795 | if (c->modrm_rm == 5) |
@@ -785,22 +803,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
785 | break; | 803 | break; |
786 | } | 804 | } |
787 | } | 805 | } |
788 | if (rip_relative) { | ||
789 | c->modrm_ea += c->eip; | ||
790 | switch (c->d & SrcMask) { | ||
791 | case SrcImmByte: | ||
792 | c->modrm_ea += 1; | ||
793 | break; | ||
794 | case SrcImm: | ||
795 | if (c->d & ByteOp) | ||
796 | c->modrm_ea += 1; | ||
797 | else | ||
798 | if (c->op_bytes == 8) | ||
799 | c->modrm_ea += 4; | ||
800 | else | ||
801 | c->modrm_ea += c->op_bytes; | ||
802 | } | ||
803 | } | ||
804 | done: | 806 | done: |
805 | return rc; | 807 | return rc; |
806 | } | 808 | } |
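
The rewritten 32/64-bit ModR/M decode replaces the nested switches with the architectural rules stated directly: an rm of 4 (masked to the low three bits so REX-extended encodings match too) means a SIB byte follows; a SIB base of 5 with mod==0 means "no base register, disp32 instead"; a SIB index of 4 means "no index"; and rm==5 with mod==0 is RIP-relative in long mode. A self-contained decoder for the SIB part, following the same arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* Toy SIB effective-address computation, same rules as decode_modrm():
 * regs[] is a stand-in register file, disp32 the displacement that
 * would otherwise be fetched from the instruction stream. */
static uint32_t sib_ea(uint8_t mod, uint8_t sib, const uint32_t regs[8],
		       int32_t disp32)
{
	uint8_t scale = sib >> 6;
	uint8_t index = (sib >> 3) & 7;
	uint8_t base  = sib & 7;
	uint32_t ea   = 0;

	if (base == 5 && mod == 0)
		ea += (uint32_t)disp32;		/* no base register, disp32 */
	else
		ea += regs[base];
	if (index != 4)				/* index 4 encodes "none"   */
		ea += regs[index] << scale;
	return ea;
}

int main(void)
{
	uint32_t regs[8] = { [0] = 0x1000, [1] = 0x20 };

	/* sib 0x48: scale=1, index=1 (ecx), base=0 (eax) */
	printf("ea = %#x\n", sib_ea(1, 0x48, regs, 0));	/* 0x1000 + 0x20*2 */
	return 0;
}
```
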
@@ -838,6 +840,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
838 | 840 | ||
839 | memset(c, 0, sizeof(struct decode_cache)); | 841 | memset(c, 0, sizeof(struct decode_cache)); |
840 | c->eip = ctxt->vcpu->arch.rip; | 842 | c->eip = ctxt->vcpu->arch.rip; |
843 | ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS); | ||
841 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); | 844 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); |
842 | 845 | ||
843 | switch (mode) { | 846 | switch (mode) { |
@@ -876,23 +879,15 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
876 | /* switch between 2/4 bytes */ | 879 | /* switch between 2/4 bytes */ |
877 | c->ad_bytes = def_ad_bytes ^ 6; | 880 | c->ad_bytes = def_ad_bytes ^ 6; |
878 | break; | 881 | break; |
882 | case 0x26: /* ES override */ | ||
879 | case 0x2e: /* CS override */ | 883 | case 0x2e: /* CS override */ |
880 | c->override_base = &ctxt->cs_base; | 884 | case 0x36: /* SS override */ |
881 | break; | ||
882 | case 0x3e: /* DS override */ | 885 | case 0x3e: /* DS override */ |
883 | c->override_base = &ctxt->ds_base; | 886 | set_seg_override(c, (c->b >> 3) & 3); |
884 | break; | ||
885 | case 0x26: /* ES override */ | ||
886 | c->override_base = &ctxt->es_base; | ||
887 | break; | 887 | break; |
888 | case 0x64: /* FS override */ | 888 | case 0x64: /* FS override */ |
889 | c->override_base = &ctxt->fs_base; | ||
890 | break; | ||
891 | case 0x65: /* GS override */ | 889 | case 0x65: /* GS override */ |
892 | c->override_base = &ctxt->gs_base; | 890 | set_seg_override(c, c->b & 7); |
893 | break; | ||
894 | case 0x36: /* SS override */ | ||
895 | c->override_base = &ctxt->ss_base; | ||
896 | break; | 891 | break; |
897 | case 0x40 ... 0x4f: /* REX */ | 892 | case 0x40 ... 0x4f: /* REX */ |
898 | if (mode != X86EMUL_MODE_PROT64) | 893 | if (mode != X86EMUL_MODE_PROT64) |
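
Collapsing the six override cases into two works because the legacy prefixes 0x26/0x2e/0x36/0x3e carry the segment number in bits 4:3 in exactly the ES, CS, SS, DS order the VCPU_SREG enum now uses, and 0x64/0x65 land on FS/GS via the low three bits. A quick check of the arithmetic:

```c
#include <stdio.h>

int main(void)
{
	static const char *names[] = { "ES", "CS", "SS", "DS", "FS", "GS" };
	unsigned char legacy[] = { 0x26, 0x2e, 0x36, 0x3e };
	int i;

	for (i = 0; i < 4; i++)			/* (b >> 3) & 3 */
		printf("prefix %#04x -> %s\n", legacy[i],
		       names[(legacy[i] >> 3) & 3]);
	printf("prefix 0x64 -> %s\n", names[0x64 & 7]);	/* FS */
	printf("prefix 0x65 -> %s\n", names[0x65 & 7]);	/* GS */
	return 0;
}
```
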
@@ -964,15 +959,11 @@ done_prefixes: | |||
964 | if (rc) | 959 | if (rc) |
965 | goto done; | 960 | goto done; |
966 | 961 | ||
967 | if (!c->override_base) | 962 | if (!c->has_seg_override) |
968 | c->override_base = &ctxt->ds_base; | 963 | set_seg_override(c, VCPU_SREG_DS); |
969 | if (mode == X86EMUL_MODE_PROT64 && | ||
970 | c->override_base != &ctxt->fs_base && | ||
971 | c->override_base != &ctxt->gs_base) | ||
972 | c->override_base = NULL; | ||
973 | 964 | ||
974 | if (c->override_base) | 965 | if (!(!c->twobyte && c->b == 0x8d)) |
975 | c->modrm_ea += *c->override_base; | 966 | c->modrm_ea += seg_override_base(ctxt, c); |
976 | 967 | ||
977 | if (c->ad_bytes != 8) | 968 | if (c->ad_bytes != 8) |
978 | c->modrm_ea = (u32)c->modrm_ea; | 969 | c->modrm_ea = (u32)c->modrm_ea; |
@@ -1049,6 +1040,7 @@ done_prefixes: | |||
1049 | break; | 1040 | break; |
1050 | case DstMem: | 1041 | case DstMem: |
1051 | if ((c->d & ModRM) && c->modrm_mod == 3) { | 1042 | if ((c->d & ModRM) && c->modrm_mod == 3) { |
1043 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1052 | c->dst.type = OP_REG; | 1044 | c->dst.type = OP_REG; |
1053 | c->dst.val = c->dst.orig_val = c->modrm_val; | 1045 | c->dst.val = c->dst.orig_val = c->modrm_val; |
1054 | c->dst.ptr = c->modrm_ptr; | 1046 | c->dst.ptr = c->modrm_ptr; |
@@ -1058,6 +1050,9 @@ done_prefixes: | |||
1058 | break; | 1050 | break; |
1059 | } | 1051 | } |
1060 | 1052 | ||
1053 | if (c->rip_relative) | ||
1054 | c->modrm_ea += c->eip; | ||
1055 | |||
1061 | done: | 1056 | done: |
1062 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; | 1057 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; |
1063 | } | 1058 | } |
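
This is the other half of the rip_relative rework: the removed hunk earlier had to guess how many immediate bytes were still unfetched to locate the end of the instruction, while the new code simply defers the fixup until decode has consumed everything, at which point c->eip already holds the next instruction's address, which is precisely what RIP-relative addressing is defined against. In toy form:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t insn_start = 0x400000;
	uint64_t insn_len   = 7;	/* e.g. a mov rax,[rip+disp32] */
	int32_t  disp32     = 0x20;

	/* Deferred fixup: target = disp + address of the *next* insn. */
	uint64_t ea = insn_start + insn_len + (int64_t)disp32;

	printf("target = %#llx\n", (unsigned long long)ea);	/* 0x400027 */
	return 0;
}
```
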
@@ -1070,7 +1065,7 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt) | |||
1070 | c->dst.bytes = c->op_bytes; | 1065 | c->dst.bytes = c->op_bytes; |
1071 | c->dst.val = c->src.val; | 1066 | c->dst.val = c->src.val; |
1072 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes); | 1067 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes); |
1073 | c->dst.ptr = (void *) register_address(c, ctxt->ss_base, | 1068 | c->dst.ptr = (void *) register_address(c, ss_base(ctxt), |
1074 | c->regs[VCPU_REGS_RSP]); | 1069 | c->regs[VCPU_REGS_RSP]); |
1075 | } | 1070 | } |
1076 | 1071 | ||
@@ -1080,7 +1075,7 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, | |||
1080 | struct decode_cache *c = &ctxt->decode; | 1075 | struct decode_cache *c = &ctxt->decode; |
1081 | int rc; | 1076 | int rc; |
1082 | 1077 | ||
1083 | rc = ops->read_std(register_address(c, ctxt->ss_base, | 1078 | rc = ops->read_std(register_address(c, ss_base(ctxt), |
1084 | c->regs[VCPU_REGS_RSP]), | 1079 | c->regs[VCPU_REGS_RSP]), |
1085 | &c->dst.val, c->dst.bytes, ctxt->vcpu); | 1080 | &c->dst.val, c->dst.bytes, ctxt->vcpu); |
1086 | if (rc != 0) | 1081 | if (rc != 0) |
@@ -1402,11 +1397,11 @@ special_insn: | |||
1402 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], | 1397 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], |
1403 | -c->op_bytes); | 1398 | -c->op_bytes); |
1404 | c->dst.ptr = (void *) register_address( | 1399 | c->dst.ptr = (void *) register_address( |
1405 | c, ctxt->ss_base, c->regs[VCPU_REGS_RSP]); | 1400 | c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]); |
1406 | break; | 1401 | break; |
1407 | case 0x58 ... 0x5f: /* pop reg */ | 1402 | case 0x58 ... 0x5f: /* pop reg */ |
1408 | pop_instruction: | 1403 | pop_instruction: |
1409 | if ((rc = ops->read_std(register_address(c, ctxt->ss_base, | 1404 | if ((rc = ops->read_std(register_address(c, ss_base(ctxt), |
1410 | c->regs[VCPU_REGS_RSP]), c->dst.ptr, | 1405 | c->regs[VCPU_REGS_RSP]), c->dst.ptr, |
1411 | c->op_bytes, ctxt->vcpu)) != 0) | 1406 | c->op_bytes, ctxt->vcpu)) != 0) |
1412 | goto done; | 1407 | goto done; |
@@ -1420,9 +1415,8 @@ special_insn: | |||
1420 | goto cannot_emulate; | 1415 | goto cannot_emulate; |
1421 | c->dst.val = (s32) c->src.val; | 1416 | c->dst.val = (s32) c->src.val; |
1422 | break; | 1417 | break; |
1418 | case 0x68: /* push imm */ | ||
1423 | case 0x6a: /* push imm8 */ | 1419 | case 0x6a: /* push imm8 */ |
1424 | c->src.val = 0L; | ||
1425 | c->src.val = insn_fetch(s8, 1, c->eip); | ||
1426 | emulate_push(ctxt); | 1420 | emulate_push(ctxt); |
1427 | break; | 1421 | break; |
1428 | case 0x6c: /* insb */ | 1422 | case 0x6c: /* insb */ |
@@ -1433,7 +1427,7 @@ special_insn: | |||
1433 | c->rep_prefix ? | 1427 | c->rep_prefix ? |
1434 | address_mask(c, c->regs[VCPU_REGS_RCX]) : 1, | 1428 | address_mask(c, c->regs[VCPU_REGS_RCX]) : 1, |
1435 | (ctxt->eflags & EFLG_DF), | 1429 | (ctxt->eflags & EFLG_DF), |
1436 | register_address(c, ctxt->es_base, | 1430 | register_address(c, es_base(ctxt), |
1437 | c->regs[VCPU_REGS_RDI]), | 1431 | c->regs[VCPU_REGS_RDI]), |
1438 | c->rep_prefix, | 1432 | c->rep_prefix, |
1439 | c->regs[VCPU_REGS_RDX]) == 0) { | 1433 | c->regs[VCPU_REGS_RDX]) == 0) { |
@@ -1449,9 +1443,8 @@ special_insn: | |||
1449 | c->rep_prefix ? | 1443 | c->rep_prefix ? |
1450 | address_mask(c, c->regs[VCPU_REGS_RCX]) : 1, | 1444 | address_mask(c, c->regs[VCPU_REGS_RCX]) : 1, |
1451 | (ctxt->eflags & EFLG_DF), | 1445 | (ctxt->eflags & EFLG_DF), |
1452 | register_address(c, c->override_base ? | 1446 | register_address(c, |
1453 | *c->override_base : | 1447 | seg_override_base(ctxt, c), |
1454 | ctxt->ds_base, | ||
1455 | c->regs[VCPU_REGS_RSI]), | 1448 | c->regs[VCPU_REGS_RSI]), |
1456 | c->rep_prefix, | 1449 | c->rep_prefix, |
1457 | c->regs[VCPU_REGS_RDX]) == 0) { | 1450 | c->regs[VCPU_REGS_RDX]) == 0) { |
@@ -1490,6 +1483,7 @@ special_insn: | |||
1490 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); | 1483 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); |
1491 | break; | 1484 | break; |
1492 | case 0x86 ... 0x87: /* xchg */ | 1485 | case 0x86 ... 0x87: /* xchg */ |
1486 | xchg: | ||
1493 | /* Write back the register source. */ | 1487 | /* Write back the register source. */ |
1494 | switch (c->dst.bytes) { | 1488 | switch (c->dst.bytes) { |
1495 | case 1: | 1489 | case 1: |
@@ -1514,14 +1508,60 @@ special_insn: | |||
1514 | break; | 1508 | break; |
1515 | case 0x88 ... 0x8b: /* mov */ | 1509 | case 0x88 ... 0x8b: /* mov */ |
1516 | goto mov; | 1510 | goto mov; |
1511 | case 0x8c: { /* mov r/m, sreg */ | ||
1512 | struct kvm_segment segreg; | ||
1513 | |||
1514 | if (c->modrm_reg <= 5) | ||
1515 | kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg); | ||
1516 | else { | ||
1517 | printk(KERN_INFO "0x8c: Invalid segreg in modrm byte 0x%02x\n", | ||
1518 | c->modrm); | ||
1519 | goto cannot_emulate; | ||
1520 | } | ||
1521 | c->dst.val = segreg.selector; | ||
1522 | break; | ||
1523 | } | ||
1517 | case 0x8d: /* lea r16/r32, m */ | 1524 | case 0x8d: /* lea r16/r32, m */ |
1518 | c->dst.val = c->modrm_ea; | 1525 | c->dst.val = c->modrm_ea; |
1519 | break; | 1526 | break; |
1527 | case 0x8e: { /* mov seg, r/m16 */ | ||
1528 | uint16_t sel; | ||
1529 | int type_bits; | ||
1530 | int err; | ||
1531 | |||
1532 | sel = c->src.val; | ||
1533 | if (c->modrm_reg <= 5) { | ||
1534 | type_bits = (c->modrm_reg == 1) ? 9 : 1; | ||
1535 | err = kvm_load_segment_descriptor(ctxt->vcpu, sel, | ||
1536 | type_bits, c->modrm_reg); | ||
1537 | } else { | ||
1538 | printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n", | ||
1539 | c->modrm); | ||
1540 | goto cannot_emulate; | ||
1541 | } | ||
1542 | |||
1543 | if (err < 0) | ||
1544 | goto cannot_emulate; | ||
1545 | |||
1546 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
1547 | break; | ||
1548 | } | ||
1520 | case 0x8f: /* pop (sole member of Grp1a) */ | 1549 | case 0x8f: /* pop (sole member of Grp1a) */ |
1521 | rc = emulate_grp1a(ctxt, ops); | 1550 | rc = emulate_grp1a(ctxt, ops); |
1522 | if (rc != 0) | 1551 | if (rc != 0) |
1523 | goto done; | 1552 | goto done; |
1524 | break; | 1553 | break; |
1554 | case 0x90: /* nop / xchg r8,rax */ | ||
1555 | if (!(c->rex_prefix & 1)) { /* nop */ | ||
1556 | c->dst.type = OP_NONE; | ||
1557 | break; | ||
1558 | } | ||
1559 | case 0x91 ... 0x97: /* xchg reg,rax */ | ||
1560 | c->src.type = c->dst.type = OP_REG; | ||
1561 | c->src.bytes = c->dst.bytes = c->op_bytes; | ||
1562 | c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX]; | ||
1563 | c->src.val = *(c->src.ptr); | ||
1564 | goto xchg; | ||
1525 | case 0x9c: /* pushf */ | 1565 | case 0x9c: /* pushf */ |
1526 | c->src.val = (unsigned long) ctxt->eflags; | 1566 | c->src.val = (unsigned long) ctxt->eflags; |
1527 | emulate_push(ctxt); | 1567 | emulate_push(ctxt); |
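
The 0x90 special case is subtle: plain 0x90 is architecturally `xchg eax,eax`, a no-op, but with REX.B set the register operand becomes r8 and the exchange must really happen; hence the rex_prefix bit test before falling through into the shared xchg path. Minimal illustration:

```c
#include <stdio.h>

/* Opcode 0x90 is nop unless REX.B (bit 0 of the REX prefix) redirects
 * the register operand to r8. */
static const char *decode_0x90(unsigned char rex_prefix)
{
	return (rex_prefix & 1) ? "xchg r8, rax" : "nop";
}

int main(void)
{
	printf("90    -> %s\n", decode_0x90(0x00));
	printf("41 90 -> %s\n", decode_0x90(0x41));	/* REX.B set */
	return 0;
}
```
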
@@ -1540,11 +1580,10 @@ special_insn: | |||
1540 | c->dst.type = OP_MEM; | 1580 | c->dst.type = OP_MEM; |
1541 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | 1581 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; |
1542 | c->dst.ptr = (unsigned long *)register_address(c, | 1582 | c->dst.ptr = (unsigned long *)register_address(c, |
1543 | ctxt->es_base, | 1583 | es_base(ctxt), |
1544 | c->regs[VCPU_REGS_RDI]); | 1584 | c->regs[VCPU_REGS_RDI]); |
1545 | if ((rc = ops->read_emulated(register_address(c, | 1585 | if ((rc = ops->read_emulated(register_address(c, |
1546 | c->override_base ? *c->override_base : | 1586 | seg_override_base(ctxt, c), |
1547 | ctxt->ds_base, | ||
1548 | c->regs[VCPU_REGS_RSI]), | 1587 | c->regs[VCPU_REGS_RSI]), |
1549 | &c->dst.val, | 1588 | &c->dst.val, |
1550 | c->dst.bytes, ctxt->vcpu)) != 0) | 1589 | c->dst.bytes, ctxt->vcpu)) != 0) |
@@ -1560,8 +1599,7 @@ special_insn: | |||
1560 | c->src.type = OP_NONE; /* Disable writeback. */ | 1599 | c->src.type = OP_NONE; /* Disable writeback. */ |
1561 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | 1600 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; |
1562 | c->src.ptr = (unsigned long *)register_address(c, | 1601 | c->src.ptr = (unsigned long *)register_address(c, |
1563 | c->override_base ? *c->override_base : | 1602 | seg_override_base(ctxt, c), |
1564 | ctxt->ds_base, | ||
1565 | c->regs[VCPU_REGS_RSI]); | 1603 | c->regs[VCPU_REGS_RSI]); |
1566 | if ((rc = ops->read_emulated((unsigned long)c->src.ptr, | 1604 | if ((rc = ops->read_emulated((unsigned long)c->src.ptr, |
1567 | &c->src.val, | 1605 | &c->src.val, |
@@ -1572,7 +1610,7 @@ special_insn: | |||
1572 | c->dst.type = OP_NONE; /* Disable writeback. */ | 1610 | c->dst.type = OP_NONE; /* Disable writeback. */ |
1573 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | 1611 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; |
1574 | c->dst.ptr = (unsigned long *)register_address(c, | 1612 | c->dst.ptr = (unsigned long *)register_address(c, |
1575 | ctxt->es_base, | 1613 | es_base(ctxt), |
1576 | c->regs[VCPU_REGS_RDI]); | 1614 | c->regs[VCPU_REGS_RDI]); |
1577 | if ((rc = ops->read_emulated((unsigned long)c->dst.ptr, | 1615 | if ((rc = ops->read_emulated((unsigned long)c->dst.ptr, |
1578 | &c->dst.val, | 1616 | &c->dst.val, |
@@ -1596,7 +1634,7 @@ special_insn: | |||
1596 | c->dst.type = OP_MEM; | 1634 | c->dst.type = OP_MEM; |
1597 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | 1635 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; |
1598 | c->dst.ptr = (unsigned long *)register_address(c, | 1636 | c->dst.ptr = (unsigned long *)register_address(c, |
1599 | ctxt->es_base, | 1637 | es_base(ctxt), |
1600 | c->regs[VCPU_REGS_RDI]); | 1638 | c->regs[VCPU_REGS_RDI]); |
1601 | c->dst.val = c->regs[VCPU_REGS_RAX]; | 1639 | c->dst.val = c->regs[VCPU_REGS_RAX]; |
1602 | register_address_increment(c, &c->regs[VCPU_REGS_RDI], | 1640 | register_address_increment(c, &c->regs[VCPU_REGS_RDI], |
@@ -1608,8 +1646,7 @@ special_insn: | |||
1608 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | 1646 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; |
1609 | c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; | 1647 | c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; |
1610 | if ((rc = ops->read_emulated(register_address(c, | 1648 | if ((rc = ops->read_emulated(register_address(c, |
1611 | c->override_base ? *c->override_base : | 1649 | seg_override_base(ctxt, c), |
1612 | ctxt->ds_base, | ||
1613 | c->regs[VCPU_REGS_RSI]), | 1650 | c->regs[VCPU_REGS_RSI]), |
1614 | &c->dst.val, | 1651 | &c->dst.val, |
1615 | c->dst.bytes, | 1652 | c->dst.bytes, |
@@ -1622,6 +1659,8 @@ special_insn: | |||
1622 | case 0xae ... 0xaf: /* scas */ | 1659 | case 0xae ... 0xaf: /* scas */ |
1623 | DPRINTF("Urk! I don't handle SCAS.\n"); | 1660 | DPRINTF("Urk! I don't handle SCAS.\n"); |
1624 | goto cannot_emulate; | 1661 | goto cannot_emulate; |
1662 | case 0xb8: /* mov r, imm */ | ||
1663 | goto mov; | ||
1625 | case 0xc0 ... 0xc1: | 1664 | case 0xc0 ... 0xc1: |
1626 | emulate_grp2(ctxt); | 1665 | emulate_grp2(ctxt); |
1627 | break; | 1666 | break; |
@@ -1660,13 +1699,39 @@ special_insn: | |||
1660 | break; | 1699 | break; |
1661 | } | 1700 | } |
1662 | case 0xe9: /* jmp rel */ | 1701 | case 0xe9: /* jmp rel */ |
1663 | case 0xeb: /* jmp rel short */ | 1702 | goto jmp; |
1703 | case 0xea: /* jmp far */ { | ||
1704 | uint32_t eip; | ||
1705 | uint16_t sel; | ||
1706 | |||
1707 | switch (c->op_bytes) { | ||
1708 | case 2: | ||
1709 | eip = insn_fetch(u16, 2, c->eip); | ||
1710 | break; | ||
1711 | case 4: | ||
1712 | eip = insn_fetch(u32, 4, c->eip); | ||
1713 | break; | ||
1714 | default: | ||
1715 | DPRINTF("jmp far: Invalid op_bytes\n"); | ||
1716 | goto cannot_emulate; | ||
1717 | } | ||
1718 | sel = insn_fetch(u16, 2, c->eip); | ||
1719 | if (kvm_load_segment_descriptor(ctxt->vcpu, sel, 9, VCPU_SREG_CS) < 0) { | ||
1720 | DPRINTF("jmp far: Failed to load CS descriptor\n"); | ||
1721 | goto cannot_emulate; | ||
1722 | } | ||
1723 | |||
1724 | c->eip = eip; | ||
1725 | break; | ||
1726 | } | ||
1727 | case 0xeb: | ||
1728 | jmp: /* jmp rel short */ | ||
1664 | jmp_rel(c, c->src.val); | 1729 | jmp_rel(c, c->src.val); |
1665 | c->dst.type = OP_NONE; /* Disable writeback. */ | 1730 | c->dst.type = OP_NONE; /* Disable writeback. */ |
1666 | break; | 1731 | break; |
1667 | case 0xf4: /* hlt */ | 1732 | case 0xf4: /* hlt */ |
1668 | ctxt->vcpu->arch.halt_request = 1; | 1733 | ctxt->vcpu->arch.halt_request = 1; |
1669 | goto done; | 1734 | break; |
1670 | case 0xf5: /* cmc */ | 1735 | case 0xf5: /* cmc */ |
1671 | /* complement carry flag from eflags reg */ | 1736 | /* complement carry flag from eflags reg */ |
1672 | ctxt->eflags ^= EFLG_CF; | 1737 | ctxt->eflags ^= EFLG_CF; |
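
Two fixes share this hunk. Far jmp (0xea) embeds a full pointer in the instruction, a 16- or 32-bit offset followed by a 16-bit selector, and the selector has to be loaded into CS through kvm_load_segment_descriptor() before eip is replaced. The hlt change from `goto done` to `break` lets the instruction complete writeback, so rip advances past the hlt before the vcpu halts (the "Fix HLT instruction" changelog entry). A toy parse of the far-pointer encoding, assuming a little-endian host just as insn_fetch assumes for the guest:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* ea 78 56 34 12 2b 00  ==  jmp 0x002b:0x12345678 (op_bytes = 4) */
	unsigned char insn[] = { 0xea, 0x78, 0x56, 0x34, 0x12, 0x2b, 0x00 };
	uint32_t eip;
	uint16_t sel;

	memcpy(&eip, insn + 1, 4);	/* the offset comes first...      */
	memcpy(&sel, insn + 5, 2);	/* ...then the 16-bit CS selector */
	printf("jmp %#06x:%#010x\n", sel, eip);
	return 0;
}
```
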
@@ -1882,6 +1947,8 @@ twobyte_insn: | |||
1882 | c->src.val &= (c->dst.bytes << 3) - 1; | 1947 | c->src.val &= (c->dst.bytes << 3) - 1; |
1883 | emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); | 1948 | emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); |
1884 | break; | 1949 | break; |
1950 | case 0xae: /* clflush */ | ||
1951 | break; | ||
1885 | case 0xb0 ... 0xb1: /* cmpxchg */ | 1952 | case 0xb0 ... 0xb1: /* cmpxchg */ |
1886 | /* | 1953 | /* |
1887 | * Save real source value, then compare EAX against | 1954 | * Save real source value, then compare EAX against |
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h index c082c208c1f3..1efe513a9941 100644 --- a/include/asm-ia64/kvm_host.h +++ b/include/asm-ia64/kvm_host.h | |||
@@ -38,6 +38,7 @@ | |||
38 | /* memory slots that does not exposed to userspace */ | 38 | /* memory slots that does not exposed to userspace */ |
39 | #define KVM_PRIVATE_MEM_SLOTS 4 | 39 | #define KVM_PRIVATE_MEM_SLOTS 4 |
40 | 40 | ||
41 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
41 | 42 | ||
42 | /* define exit reasons from vmm to kvm*/ | 43 | /* define exit reasons from vmm to kvm*/ |
43 | #define EXIT_REASON_VM_PANIC 0 | 44 | #define EXIT_REASON_VM_PANIC 0 |
@@ -521,4 +522,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu); | |||
521 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); | 522 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); |
522 | void kvm_sal_emul(struct kvm_vcpu *vcpu); | 523 | void kvm_sal_emul(struct kvm_vcpu *vcpu); |
523 | 524 | ||
525 | static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {} | ||
526 | |||
524 | #endif | 527 | #endif |
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h index 81a69d711017..2655e2a4831e 100644 --- a/include/asm-powerpc/kvm_host.h +++ b/include/asm-powerpc/kvm_host.h | |||
@@ -31,6 +31,8 @@ | |||
31 | /* memory slots that does not exposed to userspace */ | 31 | /* memory slots that does not exposed to userspace */ |
32 | #define KVM_PRIVATE_MEM_SLOTS 4 | 32 | #define KVM_PRIVATE_MEM_SLOTS 4 |
33 | 33 | ||
34 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
35 | |||
34 | /* We don't currently support large pages. */ | 36 | /* We don't currently support large pages. */ |
35 | #define KVM_PAGES_PER_HPAGE (1<<31) | 37 | #define KVM_PAGES_PER_HPAGE (1<<31) |
36 | 38 | ||
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h index 18cbd8a39796..3234dd5b3511 100644 --- a/include/asm-s390/kvm_host.h +++ b/include/asm-s390/kvm_host.h | |||
@@ -62,7 +62,7 @@ struct sca_block { | |||
62 | #define CPUSTAT_J 0x00000002 | 62 | #define CPUSTAT_J 0x00000002 |
63 | #define CPUSTAT_P 0x00000001 | 63 | #define CPUSTAT_P 0x00000001 |
64 | 64 | ||
65 | struct sie_block { | 65 | struct kvm_s390_sie_block { |
66 | atomic_t cpuflags; /* 0x0000 */ | 66 | atomic_t cpuflags; /* 0x0000 */ |
67 | __u32 prefix; /* 0x0004 */ | 67 | __u32 prefix; /* 0x0004 */ |
68 | __u8 reserved8[32]; /* 0x0008 */ | 68 | __u8 reserved8[32]; /* 0x0008 */ |
@@ -140,14 +140,14 @@ struct kvm_vcpu_stat { | |||
140 | u32 diagnose_44; | 140 | u32 diagnose_44; |
141 | }; | 141 | }; |
142 | 142 | ||
143 | struct io_info { | 143 | struct kvm_s390_io_info { |
144 | __u16 subchannel_id; /* 0x0b8 */ | 144 | __u16 subchannel_id; /* 0x0b8 */ |
145 | __u16 subchannel_nr; /* 0x0ba */ | 145 | __u16 subchannel_nr; /* 0x0ba */ |
146 | __u32 io_int_parm; /* 0x0bc */ | 146 | __u32 io_int_parm; /* 0x0bc */ |
147 | __u32 io_int_word; /* 0x0c0 */ | 147 | __u32 io_int_word; /* 0x0c0 */ |
148 | }; | 148 | }; |
149 | 149 | ||
150 | struct ext_info { | 150 | struct kvm_s390_ext_info { |
151 | __u32 ext_params; | 151 | __u32 ext_params; |
152 | __u64 ext_params2; | 152 | __u64 ext_params2; |
153 | }; | 153 | }; |
@@ -160,22 +160,22 @@ struct ext_info { | |||
160 | #define PGM_SPECIFICATION 0x06 | 160 | #define PGM_SPECIFICATION 0x06 |
161 | #define PGM_DATA 0x07 | 161 | #define PGM_DATA 0x07 |
162 | 162 | ||
163 | struct pgm_info { | 163 | struct kvm_s390_pgm_info { |
164 | __u16 code; | 164 | __u16 code; |
165 | }; | 165 | }; |
166 | 166 | ||
167 | struct prefix_info { | 167 | struct kvm_s390_prefix_info { |
168 | __u32 address; | 168 | __u32 address; |
169 | }; | 169 | }; |
170 | 170 | ||
171 | struct interrupt_info { | 171 | struct kvm_s390_interrupt_info { |
172 | struct list_head list; | 172 | struct list_head list; |
173 | u64 type; | 173 | u64 type; |
174 | union { | 174 | union { |
175 | struct io_info io; | 175 | struct kvm_s390_io_info io; |
176 | struct ext_info ext; | 176 | struct kvm_s390_ext_info ext; |
177 | struct pgm_info pgm; | 177 | struct kvm_s390_pgm_info pgm; |
178 | struct prefix_info prefix; | 178 | struct kvm_s390_prefix_info prefix; |
179 | }; | 179 | }; |
180 | }; | 180 | }; |
181 | 181 | ||
@@ -183,35 +183,35 @@ struct interrupt_info { | |||
183 | #define ACTION_STORE_ON_STOP 1 | 183 | #define ACTION_STORE_ON_STOP 1 |
184 | #define ACTION_STOP_ON_STOP 2 | 184 | #define ACTION_STOP_ON_STOP 2 |
185 | 185 | ||
186 | struct local_interrupt { | 186 | struct kvm_s390_local_interrupt { |
187 | spinlock_t lock; | 187 | spinlock_t lock; |
188 | struct list_head list; | 188 | struct list_head list; |
189 | atomic_t active; | 189 | atomic_t active; |
190 | struct float_interrupt *float_int; | 190 | struct kvm_s390_float_interrupt *float_int; |
191 | int timer_due; /* event indicator for waitqueue below */ | 191 | int timer_due; /* event indicator for waitqueue below */ |
192 | wait_queue_head_t wq; | 192 | wait_queue_head_t wq; |
193 | atomic_t *cpuflags; | 193 | atomic_t *cpuflags; |
194 | unsigned int action_bits; | 194 | unsigned int action_bits; |
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct float_interrupt { | 197 | struct kvm_s390_float_interrupt { |
198 | spinlock_t lock; | 198 | spinlock_t lock; |
199 | struct list_head list; | 199 | struct list_head list; |
200 | atomic_t active; | 200 | atomic_t active; |
201 | int next_rr_cpu; | 201 | int next_rr_cpu; |
202 | unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)]; | 202 | unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)]; |
203 | struct local_interrupt *local_int[64]; | 203 | struct kvm_s390_local_interrupt *local_int[64]; |
204 | }; | 204 | }; |
205 | 205 | ||
206 | 206 | ||
207 | struct kvm_vcpu_arch { | 207 | struct kvm_vcpu_arch { |
208 | struct sie_block *sie_block; | 208 | struct kvm_s390_sie_block *sie_block; |
209 | unsigned long guest_gprs[16]; | 209 | unsigned long guest_gprs[16]; |
210 | s390_fp_regs host_fpregs; | 210 | s390_fp_regs host_fpregs; |
211 | unsigned int host_acrs[NUM_ACRS]; | 211 | unsigned int host_acrs[NUM_ACRS]; |
212 | s390_fp_regs guest_fpregs; | 212 | s390_fp_regs guest_fpregs; |
213 | unsigned int guest_acrs[NUM_ACRS]; | 213 | unsigned int guest_acrs[NUM_ACRS]; |
214 | struct local_interrupt local_int; | 214 | struct kvm_s390_local_interrupt local_int; |
215 | struct timer_list ckc_timer; | 215 | struct timer_list ckc_timer; |
216 | union { | 216 | union { |
217 | cpuid_t cpu_id; | 217 | cpuid_t cpu_id; |
@@ -228,8 +228,8 @@ struct kvm_arch{ | |||
228 | unsigned long guest_memsize; | 228 | unsigned long guest_memsize; |
229 | struct sca_block *sca; | 229 | struct sca_block *sca; |
230 | debug_info_t *dbf; | 230 | debug_info_t *dbf; |
231 | struct float_interrupt float_int; | 231 | struct kvm_s390_float_interrupt float_int; |
232 | }; | 232 | }; |
233 | 233 | ||
234 | extern int sie64a(struct sie_block *, __u64 *); | 234 | extern int sie64a(struct kvm_s390_sie_block *, __u64 *); |
235 | #endif | 235 | #endif |
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h index 80eefef2cc76..6f1840812e59 100644 --- a/include/asm-x86/kvm.h +++ b/include/asm-x86/kvm.h | |||
@@ -228,5 +228,6 @@ struct kvm_pit_state { | |||
228 | #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) | 228 | #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) |
229 | #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) | 229 | #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) |
230 | #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) | 230 | #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) |
231 | #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) | ||
231 | 232 | ||
232 | #endif | 233 | #endif |
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 844f2a89afbc..f995783b1fdb 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #define KVM_PRIVATE_MEM_SLOTS 4 | 27 | #define KVM_PRIVATE_MEM_SLOTS 4 |
28 | 28 | ||
29 | #define KVM_PIO_PAGE_OFFSET 1 | 29 | #define KVM_PIO_PAGE_OFFSET 1 |
30 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 | ||
30 | 31 | ||
31 | #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) | 32 | #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) |
32 | #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) | 33 | #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) |
@@ -79,6 +80,7 @@ | |||
79 | #define KVM_MIN_FREE_MMU_PAGES 5 | 80 | #define KVM_MIN_FREE_MMU_PAGES 5 |
80 | #define KVM_REFILL_PAGES 25 | 81 | #define KVM_REFILL_PAGES 25 |
81 | #define KVM_MAX_CPUID_ENTRIES 40 | 82 | #define KVM_MAX_CPUID_ENTRIES 40 |
83 | #define KVM_NR_VAR_MTRR 8 | ||
82 | 84 | ||
83 | extern spinlock_t kvm_lock; | 85 | extern spinlock_t kvm_lock; |
84 | extern struct list_head vm_list; | 86 | extern struct list_head vm_list; |
@@ -109,12 +111,12 @@ enum { | |||
109 | }; | 111 | }; |
110 | 112 | ||
111 | enum { | 113 | enum { |
114 | VCPU_SREG_ES, | ||
112 | VCPU_SREG_CS, | 115 | VCPU_SREG_CS, |
116 | VCPU_SREG_SS, | ||
113 | VCPU_SREG_DS, | 117 | VCPU_SREG_DS, |
114 | VCPU_SREG_ES, | ||
115 | VCPU_SREG_FS, | 118 | VCPU_SREG_FS, |
116 | VCPU_SREG_GS, | 119 | VCPU_SREG_GS, |
117 | VCPU_SREG_SS, | ||
118 | VCPU_SREG_TR, | 120 | VCPU_SREG_TR, |
119 | VCPU_SREG_LDTR, | 121 | VCPU_SREG_LDTR, |
120 | }; | 122 | }; |
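
The reordering is not cosmetic: it makes VCPU_SREG_* numerically equal to the hardware segment-register encoding (ES=0, CS=1, SS=2, DS=3, FS=4, GS=5), so the emulator's new 0x8c/0x8e handlers can pass a ModRM reg field straight through after a `<= 5` bounds check, and seg_base() can test `seg < VCPU_SREG_FS`. A build-time guard would make the dependency explicit; this one is hypothetical, not part of the patch:

```c
/* Hypothetical compile-time check of the encoding the emulator relies on. */
#define CHECK(cond) ((void)sizeof(char[1 - 2 * !(cond)]))

static inline void check_sreg_encoding(void)
{
	CHECK(VCPU_SREG_ES == 0);
	CHECK(VCPU_SREG_CS == 1);
	CHECK(VCPU_SREG_SS == 2);
	CHECK(VCPU_SREG_DS == 3);
	CHECK(VCPU_SREG_FS == 4);
	CHECK(VCPU_SREG_GS == 5);
}
```
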
@@ -243,6 +245,7 @@ struct kvm_vcpu_arch { | |||
243 | gfn_t last_pt_write_gfn; | 245 | gfn_t last_pt_write_gfn; |
244 | int last_pt_write_count; | 246 | int last_pt_write_count; |
245 | u64 *last_pte_updated; | 247 | u64 *last_pte_updated; |
248 | gfn_t last_pte_gfn; | ||
246 | 249 | ||
247 | struct { | 250 | struct { |
248 | gfn_t gfn; /* presumed gfn during guest pte update */ | 251 | gfn_t gfn; /* presumed gfn during guest pte update */ |
@@ -287,6 +290,10 @@ struct kvm_vcpu_arch { | |||
287 | unsigned int hv_clock_tsc_khz; | 290 | unsigned int hv_clock_tsc_khz; |
288 | unsigned int time_offset; | 291 | unsigned int time_offset; |
289 | struct page *time_page; | 292 | struct page *time_page; |
293 | |||
294 | bool nmi_pending; | ||
295 | |||
296 | u64 mtrr[0x100]; | ||
290 | }; | 297 | }; |
291 | 298 | ||
292 | struct kvm_mem_alias { | 299 | struct kvm_mem_alias { |
@@ -344,6 +351,7 @@ struct kvm_vcpu_stat { | |||
344 | u32 mmio_exits; | 351 | u32 mmio_exits; |
345 | u32 signal_exits; | 352 | u32 signal_exits; |
346 | u32 irq_window_exits; | 353 | u32 irq_window_exits; |
354 | u32 nmi_window_exits; | ||
347 | u32 halt_exits; | 355 | u32 halt_exits; |
348 | u32 halt_wakeup; | 356 | u32 halt_wakeup; |
349 | u32 request_irq_exits; | 357 | u32 request_irq_exits; |
@@ -379,7 +387,6 @@ struct kvm_x86_ops { | |||
379 | void (*prepare_guest_switch)(struct kvm_vcpu *vcpu); | 387 | void (*prepare_guest_switch)(struct kvm_vcpu *vcpu); |
380 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); | 388 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
381 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | 389 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
382 | void (*vcpu_decache)(struct kvm_vcpu *vcpu); | ||
383 | 390 | ||
384 | int (*set_guest_debug)(struct kvm_vcpu *vcpu, | 391 | int (*set_guest_debug)(struct kvm_vcpu *vcpu, |
385 | struct kvm_debug_guest *dbg); | 392 | struct kvm_debug_guest *dbg); |
@@ -497,6 +504,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, | |||
497 | int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, | 504 | int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, |
498 | unsigned long value); | 505 | unsigned long value); |
499 | 506 | ||
507 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); | ||
508 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | ||
509 | int type_bits, int seg); | ||
510 | |||
500 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); | 511 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); |
501 | 512 | ||
502 | void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | 513 | void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); |
@@ -515,6 +526,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | |||
515 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, | 526 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, |
516 | u32 error_code); | 527 | u32 error_code); |
517 | 528 | ||
529 | void kvm_inject_nmi(struct kvm_vcpu *vcpu); | ||
530 | |||
518 | void fx_init(struct kvm_vcpu *vcpu); | 531 | void fx_init(struct kvm_vcpu *vcpu); |
519 | 532 | ||
520 | int emulator_read_std(unsigned long addr, | 533 | int emulator_read_std(unsigned long addr, |
@@ -554,55 +567,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | |||
554 | return (struct kvm_mmu_page *)page_private(page); | 567 | return (struct kvm_mmu_page *)page_private(page); |
555 | } | 568 | } |
556 | 569 | ||
557 | static inline u16 read_fs(void) | 570 | static inline u16 kvm_read_fs(void) |
558 | { | 571 | { |
559 | u16 seg; | 572 | u16 seg; |
560 | asm("mov %%fs, %0" : "=g"(seg)); | 573 | asm("mov %%fs, %0" : "=g"(seg)); |
561 | return seg; | 574 | return seg; |
562 | } | 575 | } |
563 | 576 | ||
564 | static inline u16 read_gs(void) | 577 | static inline u16 kvm_read_gs(void) |
565 | { | 578 | { |
566 | u16 seg; | 579 | u16 seg; |
567 | asm("mov %%gs, %0" : "=g"(seg)); | 580 | asm("mov %%gs, %0" : "=g"(seg)); |
568 | return seg; | 581 | return seg; |
569 | } | 582 | } |
570 | 583 | ||
571 | static inline u16 read_ldt(void) | 584 | static inline u16 kvm_read_ldt(void) |
572 | { | 585 | { |
573 | u16 ldt; | 586 | u16 ldt; |
574 | asm("sldt %0" : "=g"(ldt)); | 587 | asm("sldt %0" : "=g"(ldt)); |
575 | return ldt; | 588 | return ldt; |
576 | } | 589 | } |
577 | 590 | ||
578 | static inline void load_fs(u16 sel) | 591 | static inline void kvm_load_fs(u16 sel) |
579 | { | 592 | { |
580 | asm("mov %0, %%fs" : : "rm"(sel)); | 593 | asm("mov %0, %%fs" : : "rm"(sel)); |
581 | } | 594 | } |
582 | 595 | ||
583 | static inline void load_gs(u16 sel) | 596 | static inline void kvm_load_gs(u16 sel) |
584 | { | 597 | { |
585 | asm("mov %0, %%gs" : : "rm"(sel)); | 598 | asm("mov %0, %%gs" : : "rm"(sel)); |
586 | } | 599 | } |
587 | 600 | ||
588 | #ifndef load_ldt | 601 | static inline void kvm_load_ldt(u16 sel) |
589 | static inline void load_ldt(u16 sel) | ||
590 | { | 602 | { |
591 | asm("lldt %0" : : "rm"(sel)); | 603 | asm("lldt %0" : : "rm"(sel)); |
592 | } | 604 | } |
593 | #endif | ||
594 | 605 | ||
595 | static inline void get_idt(struct descriptor_table *table) | 606 | static inline void kvm_get_idt(struct descriptor_table *table) |
596 | { | 607 | { |
597 | asm("sidt %0" : "=m"(*table)); | 608 | asm("sidt %0" : "=m"(*table)); |
598 | } | 609 | } |
599 | 610 | ||
600 | static inline void get_gdt(struct descriptor_table *table) | 611 | static inline void kvm_get_gdt(struct descriptor_table *table) |
601 | { | 612 | { |
602 | asm("sgdt %0" : "=m"(*table)); | 613 | asm("sgdt %0" : "=m"(*table)); |
603 | } | 614 | } |
604 | 615 | ||
605 | static inline unsigned long read_tr_base(void) | 616 | static inline unsigned long kvm_read_tr_base(void) |
606 | { | 617 | { |
607 | u16 tr; | 618 | u16 tr; |
608 | asm("str %0" : "=g"(tr)); | 619 | asm("str %0" : "=g"(tr)); |
@@ -619,17 +630,17 @@ static inline unsigned long read_msr(unsigned long msr) | |||
619 | } | 630 | } |
620 | #endif | 631 | #endif |
621 | 632 | ||
622 | static inline void fx_save(struct i387_fxsave_struct *image) | 633 | static inline void kvm_fx_save(struct i387_fxsave_struct *image) |
623 | { | 634 | { |
624 | asm("fxsave (%0)":: "r" (image)); | 635 | asm("fxsave (%0)":: "r" (image)); |
625 | } | 636 | } |
626 | 637 | ||
627 | static inline void fx_restore(struct i387_fxsave_struct *image) | 638 | static inline void kvm_fx_restore(struct i387_fxsave_struct *image) |
628 | { | 639 | { |
629 | asm("fxrstor (%0)":: "r" (image)); | 640 | asm("fxrstor (%0)":: "r" (image)); |
630 | } | 641 | } |
631 | 642 | ||
632 | static inline void fx_finit(void) | 643 | static inline void kvm_fx_finit(void) |
633 | { | 644 | { |
634 | asm("finit"); | 645 | asm("finit"); |
635 | } | 646 | } |
@@ -691,4 +702,28 @@ enum { | |||
691 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | 702 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ |
692 | vcpu, 0, 0, 0, 0, 0, 0) | 703 | vcpu, 0, 0, 0, 0, 0, 0) |
693 | 704 | ||
705 | #ifdef CONFIG_64BIT | ||
706 | #define KVM_EX_ENTRY ".quad" | ||
707 | #else | ||
708 | #define KVM_EX_ENTRY ".long" | ||
709 | #endif | ||
710 | |||
711 | /* | ||
712 | * Hardware virtualization extension instructions may fault if a | ||
713 | * reboot turns off virtualization while processes are running. | ||
714 | * Trap the fault and ignore the instruction if that happens. | ||
715 | */ | ||
716 | asmlinkage void kvm_handle_fault_on_reboot(void); | ||
717 | |||
718 | #define __kvm_handle_fault_on_reboot(insn) \ | ||
719 | "666: " insn "\n\t" \ | ||
720 | ".pushsection .text.fixup, \"ax\" \n" \ | ||
721 | "667: \n\t" \ | ||
722 | "push $666b \n\t" \ | ||
723 | "jmp kvm_handle_fault_on_reboot \n\t" \ | ||
724 | ".popsection \n\t" \ | ||
725 | ".pushsection __ex_table, \"a\" \n\t" \ | ||
726 | KVM_EX_ENTRY " 666b, 667b \n\t" \ | ||
727 | ".popsection" | ||
728 | |||
694 | #endif | 729 | #endif |
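
The fixup machinery above places the privileged instruction at local label 666, a recovery stub at 667 that pushes the faulting address and jumps to the common handler, and an __ex_table entry pairing the two, so a VMX/SVM instruction that faults because a reboot already disabled virtualization is quietly skipped. Vendor code wraps each such instruction; a sketch of how that looks (the "vmxoff" mnemonic is illustrative, the real call sites use pre-encoded opcode macros for old assemblers):

```c
/* Sketch: wrapping a hardware-virtualization instruction so that a
 * fault during reboot is swallowed instead of oopsing. */
static inline void example_vmxoff(void)
{
	asm volatile(__kvm_handle_fault_on_reboot("vmxoff") : : : "cc");
}
```
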
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h index b877bbd2d3a7..4e8c1e48d91d 100644 --- a/include/asm-x86/kvm_x86_emulate.h +++ b/include/asm-x86/kvm_x86_emulate.h | |||
@@ -124,7 +124,8 @@ struct decode_cache { | |||
124 | u8 rex_prefix; | 124 | u8 rex_prefix; |
125 | struct operand src; | 125 | struct operand src; |
126 | struct operand dst; | 126 | struct operand dst; |
127 | unsigned long *override_base; | 127 | bool has_seg_override; |
128 | u8 seg_override; | ||
128 | unsigned int d; | 129 | unsigned int d; |
129 | unsigned long regs[NR_VCPU_REGS]; | 130 | unsigned long regs[NR_VCPU_REGS]; |
130 | unsigned long eip; | 131 | unsigned long eip; |
@@ -134,6 +135,7 @@ struct decode_cache { | |||
134 | u8 modrm_reg; | 135 | u8 modrm_reg; |
135 | u8 modrm_rm; | 136 | u8 modrm_rm; |
136 | u8 use_modrm_ea; | 137 | u8 use_modrm_ea; |
138 | bool rip_relative; | ||
137 | unsigned long modrm_ea; | 139 | unsigned long modrm_ea; |
138 | void *modrm_ptr; | 140 | void *modrm_ptr; |
139 | unsigned long modrm_val; | 141 | unsigned long modrm_val; |
@@ -150,12 +152,7 @@ struct x86_emulate_ctxt { | |||
150 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | 152 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ |
151 | int mode; | 153 | int mode; |
152 | 154 | ||
153 | unsigned long cs_base; | 155 | u32 cs_base; |
154 | unsigned long ds_base; | ||
155 | unsigned long es_base; | ||
156 | unsigned long ss_base; | ||
157 | unsigned long gs_base; | ||
158 | unsigned long fs_base; | ||
159 | 156 | ||
160 | /* decode cache */ | 157 | /* decode cache */ |
161 | 158 | ||
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index a281afeddfbb..0ea064cbfbc8 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -173,6 +173,30 @@ struct kvm_run { | |||
173 | }; | 173 | }; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | /* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */ | ||
177 | |||
178 | struct kvm_coalesced_mmio_zone { | ||
179 | __u64 addr; | ||
180 | __u32 size; | ||
181 | __u32 pad; | ||
182 | }; | ||
183 | |||
184 | struct kvm_coalesced_mmio { | ||
185 | __u64 phys_addr; | ||
186 | __u32 len; | ||
187 | __u32 pad; | ||
188 | __u8 data[8]; | ||
189 | }; | ||
190 | |||
191 | struct kvm_coalesced_mmio_ring { | ||
192 | __u32 first, last; | ||
193 | struct kvm_coalesced_mmio coalesced_mmio[0]; | ||
194 | }; | ||
195 | |||
196 | #define KVM_COALESCED_MMIO_MAX \ | ||
197 | ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \ | ||
198 | sizeof(struct kvm_coalesced_mmio)) | ||
199 | |||
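Editor's note: the ring lives in a single page that userspace maps from the vcpu fd at page offset KVM_COALESCED_MMIO_PAGE_OFFSET. The kernel produces entries at 'last', userspace consumes at 'first', and one slot is always left empty, so first == last means empty while (last + 1) % KVM_COALESCED_MMIO_MAX == first means full. A hedged consumer sketch; mmio_dispatch() is a placeholder for the emulator's write handler:

    extern void mmio_dispatch(__u64 phys_addr, const __u8 *data, __u32 len);

    static void drain_coalesced_mmio(struct kvm_coalesced_mmio_ring *ring)
    {
            while (ring->first != ring->last) {
                    struct kvm_coalesced_mmio *m =
                            &ring->coalesced_mmio[ring->first];

                    /* Pairs with the kernel's smp_wmb(): 'last' is
                     * observed before the entry it publishes. */
                    mmio_dispatch(m->phys_addr, m->data, m->len);
                    ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
            }
    }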
176 | /* for KVM_TRANSLATE */ | 200 | /* for KVM_TRANSLATE */ |
177 | struct kvm_translation { | 201 | struct kvm_translation { |
178 | /* in */ | 202 | /* in */ |
@@ -294,14 +318,14 @@ struct kvm_trace_rec { | |||
294 | __u32 vcpu_id; | 318 | __u32 vcpu_id; |
295 | union { | 319 | union { |
296 | struct { | 320 | struct { |
297 | __u32 cycle_lo, cycle_hi; | 321 | __u64 cycle_u64; |
298 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; | 322 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; |
299 | } cycle; | 323 | } cycle; |
300 | struct { | 324 | struct { |
301 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; | 325 | __u32 extra_u32[KVM_TRC_EXTRA_MAX]; |
302 | } nocycle; | 326 | } nocycle; |
303 | } u; | 327 | } u; |
304 | }; | 328 | } __attribute__((packed)); |
305 | 329 | ||
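Editor's note: widening cycle_lo/cycle_hi into a single __u64 and marking the record __attribute__((packed)) keeps the trace record layout byte-identical between 32-bit and 64-bit kernels; without packing, the embedded 64-bit field would introduce architecture-dependent alignment padding and break userspace trace readers.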
306 | #define KVMIO 0xAE | 330 | #define KVMIO 0xAE |
307 | 331 | ||
@@ -346,6 +370,7 @@ struct kvm_trace_rec { | |||
346 | #define KVM_CAP_NOP_IO_DELAY 12 | 370 | #define KVM_CAP_NOP_IO_DELAY 12 |
347 | #define KVM_CAP_PV_MMU 13 | 371 | #define KVM_CAP_PV_MMU 13 |
348 | #define KVM_CAP_MP_STATE 14 | 372 | #define KVM_CAP_MP_STATE 14 |
373 | #define KVM_CAP_COALESCED_MMIO 15 | ||
349 | 374 | ||
350 | /* | 375 | /* |
351 | * ioctls for VM fds | 376 | * ioctls for VM fds |
@@ -371,6 +396,10 @@ struct kvm_trace_rec { | |||
371 | #define KVM_CREATE_PIT _IO(KVMIO, 0x64) | 396 | #define KVM_CREATE_PIT _IO(KVMIO, 0x64) |
372 | #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) | 397 | #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) |
373 | #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) | 398 | #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) |
399 | #define KVM_REGISTER_COALESCED_MMIO \ | ||
400 | _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) | ||
401 | #define KVM_UNREGISTER_COALESCED_MMIO \ | ||
402 | _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) | ||
374 | 403 | ||
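Editor's note: a hedged userspace sketch of registering a coalesced zone on a VM fd (vm_fd and the zone address are illustrative; needs <sys/ioctl.h>, <stdio.h> and <linux/kvm.h>):

    struct kvm_coalesced_mmio_zone zone = {
            .addr = 0xa0000,        /* e.g. a framebuffer the guest writes often */
            .size = 0x20000,
    };

    if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
            perror("KVM_REGISTER_COALESCED_MMIO");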
375 | /* | 404 | /* |
376 | * ioctls for vcpu fds | 405 | * ioctls for vcpu fds |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index de9d1df4bba2..07d68a8ae8e9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -52,7 +52,8 @@ struct kvm_io_bus { | |||
52 | 52 | ||
53 | void kvm_io_bus_init(struct kvm_io_bus *bus); | 53 | void kvm_io_bus_init(struct kvm_io_bus *bus); |
54 | void kvm_io_bus_destroy(struct kvm_io_bus *bus); | 54 | void kvm_io_bus_destroy(struct kvm_io_bus *bus); |
55 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr); | 55 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, |
56 | gpa_t addr, int len, int is_write); | ||
56 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, | 57 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, |
57 | struct kvm_io_device *dev); | 58 | struct kvm_io_device *dev); |
58 | 59 | ||
@@ -116,6 +117,10 @@ struct kvm { | |||
116 | struct kvm_vm_stat stat; | 117 | struct kvm_vm_stat stat; |
117 | struct kvm_arch arch; | 118 | struct kvm_arch arch; |
118 | atomic_t users_count; | 119 | atomic_t users_count; |
120 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
121 | struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; | ||
122 | struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; | ||
123 | #endif | ||
119 | }; | 124 | }; |
120 | 125 | ||
121 | /* The guest did something we don't support. */ | 126 | /* The guest did something we don't support. */ |
@@ -135,9 +140,6 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); | |||
135 | void vcpu_load(struct kvm_vcpu *vcpu); | 140 | void vcpu_load(struct kvm_vcpu *vcpu); |
136 | void vcpu_put(struct kvm_vcpu *vcpu); | 141 | void vcpu_put(struct kvm_vcpu *vcpu); |
137 | 142 | ||
138 | void decache_vcpus_on_cpu(int cpu); | ||
139 | |||
140 | |||
141 | int kvm_init(void *opaque, unsigned int vcpu_size, | 143 | int kvm_init(void *opaque, unsigned int vcpu_size, |
142 | struct module *module); | 144 | struct module *module); |
143 | void kvm_exit(void); | 145 | void kvm_exit(void); |
@@ -166,6 +168,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm, | |||
166 | struct kvm_userspace_memory_region *mem, | 168 | struct kvm_userspace_memory_region *mem, |
167 | struct kvm_memory_slot old, | 169 | struct kvm_memory_slot old, |
168 | int user_alloc); | 170 | int user_alloc); |
171 | void kvm_arch_flush_shadow(struct kvm *kvm); | ||
169 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); | 172 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); |
170 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | 173 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
171 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); | 174 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c new file mode 100644 index 000000000000..5ae620d32fac --- /dev/null +++ b/virt/kvm/coalesced_mmio.c | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * KVM coalesced MMIO | ||
3 | * | ||
4 | * Copyright (c) 2008 Bull S.A.S. | ||
5 | * | ||
6 | * Author: Laurent Vivier <Laurent.Vivier@bull.net> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include "iodev.h" | ||
11 | |||
12 | #include <linux/kvm_host.h> | ||
13 | #include <linux/kvm.h> | ||
14 | |||
15 | #include "coalesced_mmio.h" | ||
16 | |||
17 | static int coalesced_mmio_in_range(struct kvm_io_device *this, | ||
18 | gpa_t addr, int len, int is_write) | ||
19 | { | ||
20 | struct kvm_coalesced_mmio_dev *dev = | ||
21 | (struct kvm_coalesced_mmio_dev *)this->private; | ||
22 | struct kvm_coalesced_mmio_zone *zone; | ||
23 | int next; | ||
24 | int i; | ||
25 | |||
26 | if (!is_write) | ||
27 | return 0; | ||
28 | |||
29 | /* kvm->lock is taken by the caller and must not be released | ||
30 | * before dev.read/write | ||
31 | */ | ||
32 | |||
33 | /* Are we able to batch it? */ | ||
34 | |||
35 | /* last is the first free entry; | ||
36 | * check that we do not run into the first used entry | ||
37 | * (there is always one unused entry in the buffer) | ||
38 | */ | ||
39 | |||
40 | next = (dev->kvm->coalesced_mmio_ring->last + 1) % | ||
41 | KVM_COALESCED_MMIO_MAX; | ||
42 | if (next == dev->kvm->coalesced_mmio_ring->first) { | ||
43 | /* full */ | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | /* is it in a batchable area? */ | ||
48 | |||
49 | for (i = 0; i < dev->nb_zones; i++) { | ||
50 | zone = &dev->zone[i]; | ||
51 | |||
52 | /* (addr, len) is fully included in | ||
53 | * (zone->addr, zone->size) | ||
54 | */ | ||
55 | |||
56 | if (zone->addr <= addr && | ||
57 | addr + len <= zone->addr + zone->size) | ||
58 | return 1; | ||
59 | } | ||
60 | return 0; | ||
61 | } | ||
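Editor's note, a worked example of the full check: with KVM_COALESCED_MMIO_MAX == 4, first == 2 and last == 1, next computes to 2 == first, so the write is rejected as unbatchable even though slot 1 is still free. Sacrificing that one slot is what lets first == last unambiguously mean an empty ring.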
62 | |||
63 | static void coalesced_mmio_write(struct kvm_io_device *this, | ||
64 | gpa_t addr, int len, const void *val) | ||
65 | { | ||
66 | struct kvm_coalesced_mmio_dev *dev = | ||
67 | (struct kvm_coalesced_mmio_dev *)this->private; | ||
68 | struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; | ||
69 | |||
70 | /* kvm->lock must be taken by the caller before the call to in_range() */ | ||
71 | |||
72 | /* copy data in first free entry of the ring */ | ||
73 | |||
74 | ring->coalesced_mmio[ring->last].phys_addr = addr; | ||
75 | ring->coalesced_mmio[ring->last].len = len; | ||
76 | memcpy(ring->coalesced_mmio[ring->last].data, val, len); | ||
77 | smp_wmb(); | ||
78 | ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; | ||
79 | } | ||
80 | |||
81 | static void coalesced_mmio_destructor(struct kvm_io_device *this) | ||
82 | { | ||
83 | kfree(this); | ||
84 | } | ||
85 | |||
86 | int kvm_coalesced_mmio_init(struct kvm *kvm) | ||
87 | { | ||
88 | struct kvm_coalesced_mmio_dev *dev; | ||
89 | |||
90 | dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL); | ||
91 | if (!dev) | ||
92 | return -ENOMEM; | ||
93 | dev->dev.write = coalesced_mmio_write; | ||
94 | dev->dev.in_range = coalesced_mmio_in_range; | ||
95 | dev->dev.destructor = coalesced_mmio_destructor; | ||
96 | dev->dev.private = dev; | ||
97 | dev->kvm = kvm; | ||
98 | kvm->coalesced_mmio_dev = dev; | ||
99 | kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, | ||
105 | struct kvm_coalesced_mmio_zone *zone) | ||
106 | { | ||
107 | struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev; | ||
108 | |||
109 | if (dev == NULL) | ||
110 | return -EINVAL; | ||
111 | |||
112 | mutex_lock(&kvm->lock); | ||
113 | if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) { | ||
114 | mutex_unlock(&kvm->lock); | ||
115 | return -ENOBUFS; | ||
116 | } | ||
117 | |||
118 | dev->zone[dev->nb_zones] = *zone; | ||
119 | dev->nb_zones++; | ||
120 | |||
121 | mutex_unlock(&kvm->lock); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, | ||
126 | struct kvm_coalesced_mmio_zone *zone) | ||
127 | { | ||
128 | int i; | ||
129 | struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev; | ||
130 | struct kvm_coalesced_mmio_zone *z; | ||
131 | |||
132 | if (dev == NULL) | ||
133 | return -EINVAL; | ||
134 | |||
135 | mutex_lock(&kvm->lock); | ||
136 | |||
137 | i = dev->nb_zones; | ||
138 | while (i) { | ||
139 | z = &dev->zone[i - 1]; | ||
140 | |||
141 | /* unregister all zones | ||
142 | * included in (zone->addr, zone->size) | ||
143 | */ | ||
144 | |||
145 | if (zone->addr <= z->addr && | ||
146 | z->addr + z->size <= zone->addr + zone->size) { | ||
147 | dev->nb_zones--; | ||
148 | *z = dev->zone[dev->nb_zones]; | ||
149 | } | ||
150 | i--; | ||
151 | } | ||
152 | |||
153 | mutex_unlock(&kvm->lock); | ||
154 | |||
155 | return 0; | ||
156 | } | ||
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h new file mode 100644 index 000000000000..5ac0ec628461 --- /dev/null +++ b/virt/kvm/coalesced_mmio.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * KVM coalesced MMIO | ||
3 | * | ||
4 | * Copyright (c) 2008 Bull S.A.S. | ||
5 | * | ||
6 | * Author: Laurent Vivier <Laurent.Vivier@bull.net> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #define KVM_COALESCED_MMIO_ZONE_MAX 100 | ||
11 | |||
12 | struct kvm_coalesced_mmio_dev { | ||
13 | struct kvm_io_device dev; | ||
14 | struct kvm *kvm; | ||
15 | int nb_zones; | ||
16 | struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX]; | ||
17 | }; | ||
18 | |||
19 | int kvm_coalesced_mmio_init(struct kvm *kvm); | ||
20 | int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, | ||
21 | struct kvm_coalesced_mmio_zone *zone); | ||
22 | int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, | ||
23 | struct kvm_coalesced_mmio_zone *zone); | ||
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 44589088941f..c0d22870ee9c 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -146,6 +146,11 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic, | |||
146 | return kvm_apic_set_irq(vcpu, vector, trig_mode); | 146 | return kvm_apic_set_irq(vcpu, vector, trig_mode); |
147 | } | 147 | } |
148 | 148 | ||
149 | static void ioapic_inj_nmi(struct kvm_vcpu *vcpu) | ||
150 | { | ||
151 | kvm_inject_nmi(vcpu); | ||
152 | } | ||
153 | |||
149 | static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, | 154 | static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, |
150 | u8 dest_mode) | 155 | u8 dest_mode) |
151 | { | 156 | { |
@@ -239,8 +244,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
239 | } | 244 | } |
240 | } | 245 | } |
241 | break; | 246 | break; |
242 | 247 | case IOAPIC_NMI: | |
243 | /* TODO: NMI */ | 248 | for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) { |
249 | if (!(deliver_bitmask & (1 << vcpu_id))) | ||
250 | continue; | ||
251 | deliver_bitmask &= ~(1 << vcpu_id); | ||
252 | vcpu = ioapic->kvm->vcpus[vcpu_id]; | ||
253 | if (vcpu) | ||
254 | ioapic_inj_nmi(vcpu); | ||
255 | else | ||
256 | ioapic_debug("NMI to vcpu %d failed\n", | ||
257 | vcpu->vcpu_id); | ||
258 | } | ||
259 | break; | ||
244 | default: | 260 | default: |
245 | printk(KERN_WARNING "Unsupported delivery mode %d\n", | 261 | printk(KERN_WARNING "Unsupported delivery mode %d\n", |
246 | delivery_mode); | 262 | delivery_mode); |
@@ -291,7 +307,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector) | |||
291 | __kvm_ioapic_update_eoi(ioapic, i); | 307 | __kvm_ioapic_update_eoi(ioapic, i); |
292 | } | 308 | } |
293 | 309 | ||
294 | static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr) | 310 | static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr, |
311 | int len, int is_write) | ||
295 | { | 312 | { |
296 | struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private; | 313 | struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private; |
297 | 314 | ||
diff --git a/virt/kvm/iodev.h b/virt/kvm/iodev.h index c14e642027b2..55e8846ac3a6 100644 --- a/virt/kvm/iodev.h +++ b/virt/kvm/iodev.h | |||
@@ -27,7 +27,8 @@ struct kvm_io_device { | |||
27 | gpa_t addr, | 27 | gpa_t addr, |
28 | int len, | 28 | int len, |
29 | const void *val); | 29 | const void *val); |
30 | int (*in_range)(struct kvm_io_device *this, gpa_t addr); | 30 | int (*in_range)(struct kvm_io_device *this, gpa_t addr, int len, |
31 | int is_write); | ||
31 | void (*destructor)(struct kvm_io_device *this); | 32 | void (*destructor)(struct kvm_io_device *this); |
32 | 33 | ||
33 | void *private; | 34 | void *private; |
@@ -49,9 +50,10 @@ static inline void kvm_iodevice_write(struct kvm_io_device *dev, | |||
49 | dev->write(dev, addr, len, val); | 50 | dev->write(dev, addr, len, val); |
50 | } | 51 | } |
51 | 52 | ||
52 | static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr) | 53 | static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, |
54 | gpa_t addr, int len, int is_write) | ||
53 | { | 55 | { |
54 | return dev->in_range(dev, addr); | 56 | return dev->in_range(dev, addr, len, is_write); |
55 | } | 57 | } |
56 | 58 | ||
57 | static inline void kvm_iodevice_destructor(struct kvm_io_device *dev) | 59 | static inline void kvm_iodevice_destructor(struct kvm_io_device *dev) |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index d4eae6af0738..904d7b7bd780 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -47,6 +47,10 @@ | |||
47 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
48 | #include <asm/pgtable.h> | 48 | #include <asm/pgtable.h> |
49 | 49 | ||
50 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
51 | #include "coalesced_mmio.h" | ||
52 | #endif | ||
53 | |||
50 | MODULE_AUTHOR("Qumranet"); | 54 | MODULE_AUTHOR("Qumranet"); |
51 | MODULE_LICENSE("GPL"); | 55 | MODULE_LICENSE("GPL"); |
52 | 56 | ||
@@ -65,6 +69,8 @@ struct dentry *kvm_debugfs_dir; | |||
65 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, | 69 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, |
66 | unsigned long arg); | 70 | unsigned long arg); |
67 | 71 | ||
72 | bool kvm_rebooting; | ||
73 | |||
68 | static inline int valid_vcpu(int n) | 74 | static inline int valid_vcpu(int n) |
69 | { | 75 | { |
70 | return likely(n >= 0 && n < KVM_MAX_VCPUS); | 76 | return likely(n >= 0 && n < KVM_MAX_VCPUS); |
@@ -99,10 +105,11 @@ static void ack_flush(void *_completed) | |||
99 | 105 | ||
100 | void kvm_flush_remote_tlbs(struct kvm *kvm) | 106 | void kvm_flush_remote_tlbs(struct kvm *kvm) |
101 | { | 107 | { |
102 | int i, cpu; | 108 | int i, cpu, me; |
103 | cpumask_t cpus; | 109 | cpumask_t cpus; |
104 | struct kvm_vcpu *vcpu; | 110 | struct kvm_vcpu *vcpu; |
105 | 111 | ||
112 | me = get_cpu(); | ||
106 | cpus_clear(cpus); | 113 | cpus_clear(cpus); |
107 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 114 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
108 | vcpu = kvm->vcpus[i]; | 115 | vcpu = kvm->vcpus[i]; |
@@ -111,21 +118,24 @@ void kvm_flush_remote_tlbs(struct kvm *kvm) | |||
111 | if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | 118 | if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) |
112 | continue; | 119 | continue; |
113 | cpu = vcpu->cpu; | 120 | cpu = vcpu->cpu; |
114 | if (cpu != -1 && cpu != raw_smp_processor_id()) | 121 | if (cpu != -1 && cpu != me) |
115 | cpu_set(cpu, cpus); | 122 | cpu_set(cpu, cpus); |
116 | } | 123 | } |
117 | if (cpus_empty(cpus)) | 124 | if (cpus_empty(cpus)) |
118 | return; | 125 | goto out; |
119 | ++kvm->stat.remote_tlb_flush; | 126 | ++kvm->stat.remote_tlb_flush; |
120 | smp_call_function_mask(cpus, ack_flush, NULL, 1); | 127 | smp_call_function_mask(cpus, ack_flush, NULL, 1); |
128 | out: | ||
129 | put_cpu(); | ||
121 | } | 130 | } |
122 | 131 | ||
123 | void kvm_reload_remote_mmus(struct kvm *kvm) | 132 | void kvm_reload_remote_mmus(struct kvm *kvm) |
124 | { | 133 | { |
125 | int i, cpu; | 134 | int i, cpu, me; |
126 | cpumask_t cpus; | 135 | cpumask_t cpus; |
127 | struct kvm_vcpu *vcpu; | 136 | struct kvm_vcpu *vcpu; |
128 | 137 | ||
138 | me = get_cpu(); | ||
129 | cpus_clear(cpus); | 139 | cpus_clear(cpus); |
130 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 140 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
131 | vcpu = kvm->vcpus[i]; | 141 | vcpu = kvm->vcpus[i]; |
@@ -134,12 +144,14 @@ void kvm_reload_remote_mmus(struct kvm *kvm) | |||
134 | if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) | 144 | if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) |
135 | continue; | 145 | continue; |
136 | cpu = vcpu->cpu; | 146 | cpu = vcpu->cpu; |
137 | if (cpu != -1 && cpu != raw_smp_processor_id()) | 147 | if (cpu != -1 && cpu != me) |
138 | cpu_set(cpu, cpus); | 148 | cpu_set(cpu, cpus); |
139 | } | 149 | } |
140 | if (cpus_empty(cpus)) | 150 | if (cpus_empty(cpus)) |
141 | return; | 151 | goto out; |
142 | smp_call_function_mask(cpus, ack_flush, NULL, 1); | 152 | smp_call_function_mask(cpus, ack_flush, NULL, 1); |
153 | out: | ||
154 | put_cpu(); | ||
143 | } | 155 | } |
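Editor's note: the get_cpu()/put_cpu() pairs added to both helpers disable preemption across the send, since smp_call_function_mask() now requires preemption to be off; pinning also keeps the cached 'me' value valid for the cpu != me comparison.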
144 | 156 | ||
145 | 157 | ||
@@ -183,10 +195,23 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); | |||
183 | static struct kvm *kvm_create_vm(void) | 195 | static struct kvm *kvm_create_vm(void) |
184 | { | 196 | { |
185 | struct kvm *kvm = kvm_arch_create_vm(); | 197 | struct kvm *kvm = kvm_arch_create_vm(); |
198 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
199 | struct page *page; | ||
200 | #endif | ||
186 | 201 | ||
187 | if (IS_ERR(kvm)) | 202 | if (IS_ERR(kvm)) |
188 | goto out; | 203 | goto out; |
189 | 204 | ||
205 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
206 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
207 | if (!page) { | ||
208 | kfree(kvm); | ||
209 | return ERR_PTR(-ENOMEM); | ||
210 | } | ||
211 | kvm->coalesced_mmio_ring = | ||
212 | (struct kvm_coalesced_mmio_ring *)page_address(page); | ||
213 | #endif | ||
214 | |||
190 | kvm->mm = current->mm; | 215 | kvm->mm = current->mm; |
191 | atomic_inc(&kvm->mm->mm_count); | 216 | atomic_inc(&kvm->mm->mm_count); |
192 | spin_lock_init(&kvm->mmu_lock); | 217 | spin_lock_init(&kvm->mmu_lock); |
@@ -198,6 +223,9 @@ static struct kvm *kvm_create_vm(void) | |||
198 | spin_lock(&kvm_lock); | 223 | spin_lock(&kvm_lock); |
199 | list_add(&kvm->vm_list, &vm_list); | 224 | list_add(&kvm->vm_list, &vm_list); |
200 | spin_unlock(&kvm_lock); | 225 | spin_unlock(&kvm_lock); |
226 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
227 | kvm_coalesced_mmio_init(kvm); | ||
228 | #endif | ||
201 | out: | 229 | out: |
202 | return kvm; | 230 | return kvm; |
203 | } | 231 | } |
@@ -240,6 +268,10 @@ static void kvm_destroy_vm(struct kvm *kvm) | |||
240 | spin_unlock(&kvm_lock); | 268 | spin_unlock(&kvm_lock); |
241 | kvm_io_bus_destroy(&kvm->pio_bus); | 269 | kvm_io_bus_destroy(&kvm->pio_bus); |
242 | kvm_io_bus_destroy(&kvm->mmio_bus); | 270 | kvm_io_bus_destroy(&kvm->mmio_bus); |
271 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
272 | if (kvm->coalesced_mmio_ring != NULL) | ||
273 | free_page((unsigned long)kvm->coalesced_mmio_ring); | ||
274 | #endif | ||
243 | kvm_arch_destroy_vm(kvm); | 275 | kvm_arch_destroy_vm(kvm); |
244 | mmdrop(mm); | 276 | mmdrop(mm); |
245 | } | 277 | } |
@@ -333,6 +365,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
333 | r = -ENOMEM; | 365 | r = -ENOMEM; |
334 | 366 | ||
335 | /* Allocate if a slot is being created */ | 367 | /* Allocate if a slot is being created */ |
368 | #ifndef CONFIG_S390 | ||
336 | if (npages && !new.rmap) { | 369 | if (npages && !new.rmap) { |
337 | new.rmap = vmalloc(npages * sizeof(struct page *)); | 370 | new.rmap = vmalloc(npages * sizeof(struct page *)); |
338 | 371 | ||
@@ -373,10 +406,14 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
373 | goto out_free; | 406 | goto out_free; |
374 | memset(new.dirty_bitmap, 0, dirty_bytes); | 407 | memset(new.dirty_bitmap, 0, dirty_bytes); |
375 | } | 408 | } |
409 | #endif /* not defined CONFIG_S390 */ | ||
376 | 410 | ||
377 | if (mem->slot >= kvm->nmemslots) | 411 | if (mem->slot >= kvm->nmemslots) |
378 | kvm->nmemslots = mem->slot + 1; | 412 | kvm->nmemslots = mem->slot + 1; |
379 | 413 | ||
414 | if (!npages) | ||
415 | kvm_arch_flush_shadow(kvm); | ||
416 | |||
380 | *memslot = new; | 417 | *memslot = new; |
381 | 418 | ||
382 | r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); | 419 | r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); |
@@ -532,6 +569,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) | |||
532 | struct page *page[1]; | 569 | struct page *page[1]; |
533 | unsigned long addr; | 570 | unsigned long addr; |
534 | int npages; | 571 | int npages; |
572 | pfn_t pfn; | ||
535 | 573 | ||
536 | might_sleep(); | 574 | might_sleep(); |
537 | 575 | ||
@@ -544,19 +582,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) | |||
544 | npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page, | 582 | npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page, |
545 | NULL); | 583 | NULL); |
546 | 584 | ||
547 | if (npages != 1) { | 585 | if (unlikely(npages != 1)) { |
548 | get_page(bad_page); | 586 | struct vm_area_struct *vma; |
549 | return page_to_pfn(bad_page); | ||
550 | } | ||
551 | 587 | ||
552 | return page_to_pfn(page[0]); | 588 | vma = find_vma(current->mm, addr); |
589 | if (vma == NULL || addr < vma->vm_start || | ||
590 | !(vma->vm_flags & VM_PFNMAP)) { | ||
591 | get_page(bad_page); | ||
592 | return page_to_pfn(bad_page); | ||
593 | } | ||
594 | |||
595 | pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | ||
596 | BUG_ON(pfn_valid(pfn)); | ||
597 | } else | ||
598 | pfn = page_to_pfn(page[0]); | ||
599 | |||
600 | return pfn; | ||
553 | } | 601 | } |
554 | 602 | ||
555 | EXPORT_SYMBOL_GPL(gfn_to_pfn); | 603 | EXPORT_SYMBOL_GPL(gfn_to_pfn); |
556 | 604 | ||
557 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) | 605 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) |
558 | { | 606 | { |
559 | return pfn_to_page(gfn_to_pfn(kvm, gfn)); | 607 | pfn_t pfn; |
608 | |||
609 | pfn = gfn_to_pfn(kvm, gfn); | ||
610 | if (pfn_valid(pfn)) | ||
611 | return pfn_to_page(pfn); | ||
612 | |||
613 | WARN_ON(!pfn_valid(pfn)); | ||
614 | |||
615 | get_page(bad_page); | ||
616 | return bad_page; | ||
560 | } | 617 | } |
561 | 618 | ||
562 | EXPORT_SYMBOL_GPL(gfn_to_page); | 619 | EXPORT_SYMBOL_GPL(gfn_to_page); |
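Editor's note: the pfn_valid() guards added to the helpers below are the other half of this change. Once gfn_to_pfn() can return a raw PFN from a VM_PFNMAP mapping, there may be no struct page behind it, so anything that manipulates page state must skip such PFNs.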
@@ -569,7 +626,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean); | |||
569 | 626 | ||
570 | void kvm_release_pfn_clean(pfn_t pfn) | 627 | void kvm_release_pfn_clean(pfn_t pfn) |
571 | { | 628 | { |
572 | put_page(pfn_to_page(pfn)); | 629 | if (pfn_valid(pfn)) |
630 | put_page(pfn_to_page(pfn)); | ||
573 | } | 631 | } |
574 | EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); | 632 | EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); |
575 | 633 | ||
@@ -594,21 +652,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty); | |||
594 | 652 | ||
595 | void kvm_set_pfn_dirty(pfn_t pfn) | 653 | void kvm_set_pfn_dirty(pfn_t pfn) |
596 | { | 654 | { |
597 | struct page *page = pfn_to_page(pfn); | 655 | if (pfn_valid(pfn)) { |
598 | if (!PageReserved(page)) | 656 | struct page *page = pfn_to_page(pfn); |
599 | SetPageDirty(page); | 657 | if (!PageReserved(page)) |
658 | SetPageDirty(page); | ||
659 | } | ||
600 | } | 660 | } |
601 | EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); | 661 | EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); |
602 | 662 | ||
603 | void kvm_set_pfn_accessed(pfn_t pfn) | 663 | void kvm_set_pfn_accessed(pfn_t pfn) |
604 | { | 664 | { |
605 | mark_page_accessed(pfn_to_page(pfn)); | 665 | if (pfn_valid(pfn)) |
666 | mark_page_accessed(pfn_to_page(pfn)); | ||
606 | } | 667 | } |
607 | EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); | 668 | EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); |
608 | 669 | ||
609 | void kvm_get_pfn(pfn_t pfn) | 670 | void kvm_get_pfn(pfn_t pfn) |
610 | { | 671 | { |
611 | get_page(pfn_to_page(pfn)); | 672 | if (pfn_valid(pfn)) |
673 | get_page(pfn_to_page(pfn)); | ||
612 | } | 674 | } |
613 | EXPORT_SYMBOL_GPL(kvm_get_pfn); | 675 | EXPORT_SYMBOL_GPL(kvm_get_pfn); |
614 | 676 | ||
@@ -799,6 +861,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
799 | else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) | 861 | else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) |
800 | page = virt_to_page(vcpu->arch.pio_data); | 862 | page = virt_to_page(vcpu->arch.pio_data); |
801 | #endif | 863 | #endif |
864 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
865 | else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) | ||
866 | page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); | ||
867 | #endif | ||
802 | else | 868 | else |
803 | return VM_FAULT_SIGBUS; | 869 | return VM_FAULT_SIGBUS; |
804 | get_page(page); | 870 | get_page(page); |
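Editor's note: userspace reaches that page through the regular vcpu mmap. A hedged sketch, with error checking elided (KVM_GET_VCPU_MMAP_SIZE now includes the extra page, as the dev ioctl change further below shows):

    long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
    struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);
    struct kvm_coalesced_mmio_ring *ring =
            (struct kvm_coalesced_mmio_ring *)((char *)run +
                    KVM_COALESCED_MMIO_PAGE_OFFSET * sysconf(_SC_PAGESIZE));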
@@ -1121,6 +1187,32 @@ static long kvm_vm_ioctl(struct file *filp, | |||
1121 | goto out; | 1187 | goto out; |
1122 | break; | 1188 | break; |
1123 | } | 1189 | } |
1190 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
1191 | case KVM_REGISTER_COALESCED_MMIO: { | ||
1192 | struct kvm_coalesced_mmio_zone zone; | ||
1193 | r = -EFAULT; | ||
1194 | if (copy_from_user(&zone, argp, sizeof zone)) | ||
1195 | goto out; | ||
1196 | r = -ENXIO; | ||
1197 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); | ||
1198 | if (r) | ||
1199 | goto out; | ||
1200 | r = 0; | ||
1201 | break; | ||
1202 | } | ||
1203 | case KVM_UNREGISTER_COALESCED_MMIO: { | ||
1204 | struct kvm_coalesced_mmio_zone zone; | ||
1205 | r = -EFAULT; | ||
1206 | if (copy_from_user(&zone, argp, sizeof zone)) | ||
1207 | goto out; | ||
1208 | r = -ENXIO; | ||
1209 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); | ||
1210 | if (r) | ||
1211 | goto out; | ||
1212 | r = 0; | ||
1213 | break; | ||
1214 | } | ||
1215 | #endif | ||
1124 | default: | 1216 | default: |
1125 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); | 1217 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
1126 | } | 1218 | } |
@@ -1179,7 +1271,6 @@ static int kvm_dev_ioctl_create_vm(void) | |||
1179 | static long kvm_dev_ioctl(struct file *filp, | 1271 | static long kvm_dev_ioctl(struct file *filp, |
1180 | unsigned int ioctl, unsigned long arg) | 1272 | unsigned int ioctl, unsigned long arg) |
1181 | { | 1273 | { |
1182 | void __user *argp = (void __user *)arg; | ||
1183 | long r = -EINVAL; | 1274 | long r = -EINVAL; |
1184 | 1275 | ||
1185 | switch (ioctl) { | 1276 | switch (ioctl) { |
@@ -1196,7 +1287,7 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1196 | r = kvm_dev_ioctl_create_vm(); | 1287 | r = kvm_dev_ioctl_create_vm(); |
1197 | break; | 1288 | break; |
1198 | case KVM_CHECK_EXTENSION: | 1289 | case KVM_CHECK_EXTENSION: |
1199 | r = kvm_dev_ioctl_check_extension((long)argp); | 1290 | r = kvm_dev_ioctl_check_extension(arg); |
1200 | break; | 1291 | break; |
1201 | case KVM_GET_VCPU_MMAP_SIZE: | 1292 | case KVM_GET_VCPU_MMAP_SIZE: |
1202 | r = -EINVAL; | 1293 | r = -EINVAL; |
@@ -1206,6 +1297,9 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1206 | #ifdef CONFIG_X86 | 1297 | #ifdef CONFIG_X86 |
1207 | r += PAGE_SIZE; /* pio data page */ | 1298 | r += PAGE_SIZE; /* pio data page */ |
1208 | #endif | 1299 | #endif |
1300 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | ||
1301 | r += PAGE_SIZE; /* coalesced mmio ring page */ | ||
1302 | #endif | ||
1209 | break; | 1303 | break; |
1210 | case KVM_TRACE_ENABLE: | 1304 | case KVM_TRACE_ENABLE: |
1211 | case KVM_TRACE_PAUSE: | 1305 | case KVM_TRACE_PAUSE: |
@@ -1247,7 +1341,6 @@ static void hardware_disable(void *junk) | |||
1247 | if (!cpu_isset(cpu, cpus_hardware_enabled)) | 1341 | if (!cpu_isset(cpu, cpus_hardware_enabled)) |
1248 | return; | 1342 | return; |
1249 | cpu_clear(cpu, cpus_hardware_enabled); | 1343 | cpu_clear(cpu, cpus_hardware_enabled); |
1250 | decache_vcpus_on_cpu(cpu); | ||
1251 | kvm_arch_hardware_disable(NULL); | 1344 | kvm_arch_hardware_disable(NULL); |
1252 | } | 1345 | } |
1253 | 1346 | ||
@@ -1277,6 +1370,18 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
1277 | return NOTIFY_OK; | 1370 | return NOTIFY_OK; |
1278 | } | 1371 | } |
1279 | 1372 | ||
1373 | |||
1374 | asmlinkage void kvm_handle_fault_on_reboot(void) | ||
1375 | { | ||
1376 | if (kvm_rebooting) | ||
1377 | /* spin while reset goes on */ | ||
1378 | while (true) | ||
1379 | ; | ||
1380 | /* Fault while not rebooting. We want the trace. */ | ||
1381 | BUG(); | ||
1382 | } | ||
1383 | EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot); | ||
1384 | |||
1280 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | 1385 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
1281 | void *v) | 1386 | void *v) |
1282 | { | 1387 | { |
@@ -1286,6 +1391,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | |||
1286 | * in vmx root mode. | 1391 | * in vmx root mode. |
1287 | */ | 1392 | */ |
1288 | printk(KERN_INFO "kvm: exiting hardware virtualization\n"); | 1393 | printk(KERN_INFO "kvm: exiting hardware virtualization\n"); |
1394 | kvm_rebooting = true; | ||
1289 | on_each_cpu(hardware_disable, NULL, 1); | 1395 | on_each_cpu(hardware_disable, NULL, 1); |
1290 | } | 1396 | } |
1291 | return NOTIFY_OK; | 1397 | return NOTIFY_OK; |
@@ -1312,14 +1418,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus) | |||
1312 | } | 1418 | } |
1313 | } | 1419 | } |
1314 | 1420 | ||
1315 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) | 1421 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, |
1422 | gpa_t addr, int len, int is_write) | ||
1316 | { | 1423 | { |
1317 | int i; | 1424 | int i; |
1318 | 1425 | ||
1319 | for (i = 0; i < bus->dev_count; i++) { | 1426 | for (i = 0; i < bus->dev_count; i++) { |
1320 | struct kvm_io_device *pos = bus->devs[i]; | 1427 | struct kvm_io_device *pos = bus->devs[i]; |
1321 | 1428 | ||
1322 | if (pos->in_range(pos, addr)) | 1429 | if (pos->in_range(pos, addr, len, is_write)) |
1323 | return pos; | 1430 | return pos; |
1324 | } | 1431 | } |
1325 | 1432 | ||
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c index 0e495470788d..58141f31ea8f 100644 --- a/virt/kvm/kvm_trace.c +++ b/virt/kvm/kvm_trace.c | |||
@@ -72,11 +72,7 @@ static void kvm_add_trace(void *probe_private, void *call_data, | |||
72 | rec.cycle_in = p->cycle_in; | 72 | rec.cycle_in = p->cycle_in; |
73 | 73 | ||
74 | if (rec.cycle_in) { | 74 | if (rec.cycle_in) { |
75 | u64 cycle = 0; | 75 | rec.u.cycle.cycle_u64 = get_cycles(); |
76 | |||
77 | cycle = get_cycles(); | ||
78 | rec.u.cycle.cycle_lo = (u32)cycle; | ||
79 | rec.u.cycle.cycle_hi = (u32)(cycle >> 32); | ||
80 | 76 | ||
81 | for (i = 0; i < rec.extra_u32; i++) | 77 | for (i = 0; i < rec.extra_u32; i++) |
82 | rec.u.cycle.extra_u32[i] = va_arg(*args, u32); | 78 | rec.u.cycle.extra_u32[i] = va_arg(*args, u32); |
@@ -114,8 +110,18 @@ static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, | |||
114 | { | 110 | { |
115 | struct kvm_trace *kt; | 111 | struct kvm_trace *kt; |
116 | 112 | ||
117 | if (!relay_buf_full(buf)) | 113 | if (!relay_buf_full(buf)) { |
114 | if (!prev_subbuf) { | ||
115 | /* | ||
116 | * executed only once when the channel is opened | ||
117 | * save metadata as first record | ||
118 | */ | ||
119 | subbuf_start_reserve(buf, sizeof(u32)); | ||
120 | *(u32 *)subbuf = 0x12345678; | ||
121 | } | ||
122 | |||
118 | return 1; | 123 | return 1; |
124 | } | ||
119 | 125 | ||
120 | kt = buf->chan->private_data; | 126 | kt = buf->chan->private_data; |
121 | atomic_inc(&kt->lost_records); | 127 | atomic_inc(&kt->lost_records); |
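Editor's note: the 0x12345678 word reserved at the head of the first sub-buffer lets a trace consumer sanity-check the stream. A hedged reader-side sketch (trace_fd is assumed to be the relay file opened from debugfs; needs <stdint.h>, <stdio.h>, <unistd.h>):

    uint32_t magic;

    /* Reading 0x78563412 instead would indicate a byte-swapped stream. */
    if (read(trace_fd, &magic, sizeof(magic)) == sizeof(magic) &&
        magic != 0x12345678)
            fprintf(stderr, "kvm trace stream is byte-swapped or corrupt\n");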