author		Jeremy Erickson <jerickso@cs.unc.edu>	2014-04-18 17:06:00 -0400
committer	Jeremy Erickson <jerickso@cs.unc.edu>	2014-04-18 17:06:00 -0400
commit		a215aa7b9ab3759c047201199fba64d3042d7f13 (patch)
tree		bca37493d9b2233450e6d3ffced1261d0e4f71fe /arch/x86/kvm
parent		d31199a77ef606f1d06894385f1852181ba6136b (diff)

Update 2.6.36 to 2.6.36.4 (branch wip-dissipation2-jerickso)
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/i8259.c	|  2
-rw-r--r--	arch/x86/kvm/mmu.c	| 10
-rw-r--r--	arch/x86/kvm/svm.c	| 43
-rw-r--r--	arch/x86/kvm/vmx.c	| 24
-rw-r--r--	arch/x86/kvm/x86.c	| 25
-rw-r--r--	arch/x86/kvm/x86.h	|  5
6 files changed, 77 insertions, 32 deletions
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 4b7b73ce2098..9f163e61283c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -570,6 +570,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
 	s->pics[1].elcr_mask = 0xde;
 	s->pics[0].pics_state = s;
 	s->pics[1].pics_state = s;
+	s->pics[0].isr_ack = 0xff;
+	s->pics[1].isr_ack = 0xff;
 
 	/*
 	 * Initialize PIO device
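The two added lines initialize isr_ack to all-ones when the PIC is created, the same state a PIC reset establishes; without them a freshly created PIC starts with isr_ack clear, as if every line still owed an acknowledgment. A minimal sketch of the convention as I read it (names and comments are illustrative, not the kernel's own code):

#include <stdio.h>

/* Illustrative model: a set bit in isr_ack means "this IRQ line has been
 * acked and may be delivered again", so 0xff marks all eight lines ready
 * at creation time, mirroring a reset. */
struct pic_sketch {
	unsigned char isr;      /* lines currently in service */
	unsigned char isr_ack;  /* lines acked since last delivery */
};

static void pic_sketch_init(struct pic_sketch *s)
{
	s->isr = 0;
	s->isr_ack = 0xff;      /* no line is stuck waiting for an ack */
}

int main(void)
{
	struct pic_sketch s;
	pic_sketch_init(&s);
	printf("isr_ack at creation: %#x\n", s.isr_ack);
	return 0;
}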
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 311f6dad8951..7fed5b793faf 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2254,6 +2254,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 	direct = !is_paging(vcpu);
+
+	if (mmu_check_root(vcpu, root_gfn))
+		return 1;
+
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -2265,13 +2269,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 				continue;
 			}
 			root_gfn = pdptr >> PAGE_SHIFT;
+			if (mmu_check_root(vcpu, root_gfn))
+				return 1;
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
-		if (mmu_check_root(vcpu, root_gfn))
-			return 1;
 		if (tdp_enabled) {
 			direct = 1;
-			root_gfn = i << 30;
+			root_gfn = i << (30 - PAGE_SHIFT);
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
 		kvm_mmu_free_some_pages(vcpu);
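Two things happen in these mmu.c hunks: mmu_check_root() now runs once up front on the CR3-derived gfn and on each PDPTE-derived gfn where it is produced, rather than once per quadrant on a value the tdp branch may be about to overwrite; and the tdp quadrant base is computed as a page-frame number. Each of the four PAE quadrants covers 1 GiB, so its base expressed as a gfn is (i * 2^30) >> PAGE_SHIFT, i.e. i << (30 - PAGE_SHIFT), which is i << 18 with 4 KiB pages. The old i << 30 produced a byte address, far outside the valid gfn range. A standalone check of the arithmetic:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	for (unsigned long i = 0; i < 4; ++i) {
		unsigned long byte_addr = i << 30;          /* quadrant base, in bytes */
		unsigned long gfn = i << (30 - PAGE_SHIFT); /* same base, in page frames */
		printf("quadrant %lu: base %#lx -> gfn %#lx\n", i, byte_addr, gfn);
	}
	return 0;
}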
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8a3f9f64f86f..e7c3f3bd08fc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -88,6 +88,14 @@ struct nested_state {
 	/* A VMEXIT is required but not yet emulated */
 	bool exit_required;
 
+	/*
+	 * If we vmexit during an instruction emulation we need this to restore
+	 * the l1 guest rip after the emulation
+	 */
+	unsigned long vmexit_rip;
+	unsigned long vmexit_rsp;
+	unsigned long vmexit_rax;
+
 	/* cache for intercepts of the guest */
 	u16 intercept_cr_read;
 	u16 intercept_cr_write;
@@ -1206,8 +1214,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (old == new) {
 			/* cr0 write with ts and mp unchanged */
 			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
+				svm->nested.vmexit_rip = kvm_rip_read(vcpu);
+				svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+				svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
 				return;
+			}
 		}
 	}
 
@@ -2399,6 +2411,23 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
+static int cr0_write_interception(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	int r;
+
+	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+
+	if (svm->nested.vmexit_rip) {
+		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
+		kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
+		kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
+		svm->nested.vmexit_rip = 0;
+	}
+
+	return r == EMULATE_DONE;
+}
+
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
@@ -2672,7 +2701,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
 	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
@@ -3252,6 +3281,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	load_host_msrs(vcpu);
+	kvm_load_ldt(ldt_selector);
 	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
 	load_gs_index(gs_selector);
@@ -3259,7 +3289,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #else
 	loadsegment(gs, gs_selector);
 #endif
-	kvm_load_ldt(ldt_selector);
 
 	reload_tss(vcpu);
 
@@ -3354,6 +3383,14 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
+	case 0x00000001:
+		/* Mask out xsave bit as long as it is not supported by SVM */
+		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
+		break;
+	case 0x80000001:
+		if (nested)
+			entry->ecx |= (1 << 2); /* Set SVM bit */
+		break;
 	case 0x8000000A:
 		entry->eax = 1; /* SVM revision 1 */
 		entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
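The core of the svm.c changes is the rip/rsp/rax stash: when a selective CR0 write intercept is reflected to the L1 hypervisor, svm_set_cr0() records the registers at intercept time, and the new cr0_write_interception() handler puts them back after emulate_instruction() runs, so L1 observes the vmexit at the intercepted instruction rather than one instruction past it. A toy model of that stash/restore pattern (field and function names here are illustrative; the kernel uses vmexit_rip != 0 itself as the pending flag rather than a separate bool):

#include <stdbool.h>

struct regs { unsigned long rip, rsp, rax; };
struct nested_stash { bool pending; struct regs saved; };

static void on_intercept(struct nested_stash *n, const struct regs *r)
{
	n->saved = *r;          /* capture rip/rsp/rax before emulation */
	n->pending = true;
}

static void after_emulation(struct nested_stash *n, struct regs *r)
{
	if (n->pending) {
		*r = n->saved;  /* undo the emulator's register advance */
		n->pending = false;
	}
}

int main(void)
{
	struct nested_stash n = { 0 };
	struct regs r = { .rip = 0x1000, .rsp = 0x8000, .rax = 7 };

	on_intercept(&n, &r);
	r.rip += 3;                     /* emulator advances past the insn */
	after_emulation(&n, &r);
	return r.rip == 0x1000 ? 0 : 1; /* restored to the intercept point */
}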
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7bddfab12013..b3986fec7e68 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -828,10 +828,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu)) {
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	if (is_long_mode(&vmx->vcpu))
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-	}
 #endif
 	for (i = 0; i < vmx->save_nmsrs; ++i)
 		kvm_set_shared_msr(vmx->guest_msrs[i].index,
@@ -846,23 +845,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 
 	++vmx->vcpu.stat.host_state_reload;
 	vmx->host_state.loaded = 0;
-	if (vmx->host_state.fs_reload_needed)
-		loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+	if (is_long_mode(&vmx->vcpu))
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
 	if (vmx->host_state.gs_ldt_reload_needed) {
 		kvm_load_ldt(vmx->host_state.ldt_sel);
 #ifdef CONFIG_X86_64
 		load_gs_index(vmx->host_state.gs_sel);
-		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
 #else
 		loadsegment(gs, vmx->host_state.gs_sel);
 #endif
 	}
+	if (vmx->host_state.fs_reload_needed)
+		loadsegment(fs, vmx->host_state.fs_sel);
 	reload_tss();
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu)) {
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
-	}
+	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
 	if (current_thread_info()->status & TS_USEDFPU)
 		clts();
@@ -4249,11 +4248,6 @@ static int vmx_get_lpage_level(void)
 		return PT_PDPE_LEVEL;
 }
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
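The vmx.c reshuffle tightens the MSR_KERNEL_GS_BASE handling: the host value is saved unconditionally on entry (the MSR exists whenever CONFIG_X86_64 does, regardless of guest mode), the guest's value is read back on exit before any segment reloads can disturb it, and the host value is restored unconditionally at the end; the old write of current->thread.gs is dropped. The underlying discipline is a plain save/swap/restore pairing, modeled standalone below (msr is a stand-in variable, not the real MSR):

#include <stdio.h>

static unsigned long msr;               /* models MSR_KERNEL_GS_BASE */

struct msr_ctx { unsigned long host, guest; };

static void enter_guest(struct msr_ctx *c)
{
	c->host = msr;                  /* save the host value unconditionally */
	msr = c->guest;                 /* expose the guest's value */
}

static void exit_guest(struct msr_ctx *c)
{
	c->guest = msr;                 /* first capture what the guest left behind */
	msr = c->host;                  /* then restore the host unconditionally */
}

int main(void)
{
	struct msr_ctx c = { 0, 0xdead0000 };
	msr = 0x1000;                   /* host's kernel GS base */
	enter_guest(&c);
	msr = 0xbeef0000;               /* guest updates it while running */
	exit_guest(&c);
	printf("host restored: %#lx, guest preserved: %#lx\n", msr, c.guest);
	return 0;
}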
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3a09c625d526..a5746de6f402 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -153,11 +153,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 u64 __read_mostly host_xcr0;
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -1994,9 +1989,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
 	/* cpuid 0x80000001.ecx */
 	const u32 kvm_supported_word6_x86_features =
-		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
 		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
+		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
 		0 /* SKINIT */ | 0 /* WDT */;
 
 	/* all calls to cpuid_count() should be made on the same cpu */
@@ -2305,6 +2300,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2318,12 +2314,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2366,6 +2364,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -2849,6 +2848,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 	       sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
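The pad = 0 assignments and memset() calls in the hunks above (and the kvm_clock_data one further down) all close the same hole: these structures are built in kernel memory and copied to userspace whole, so any padding or reserved bytes left uninitialized leak kernel stack contents through the ioctl. A self-contained illustration of the problem class, with a hypothetical struct:

#include <string.h>

/* Field-by-field assignment leaves compiler-inserted padding and the
 * reserved[] array holding whatever was on the stack, and a wholesale
 * copy to userspace would hand those bytes over verbatim.  Zeroing the
 * pad/reserved members -- or the whole struct up front -- plugs it. */
struct events_sketch {
	unsigned char injected;   /* padding bytes typically follow here */
	unsigned int  error_code;
	unsigned int  reserved[9];
};

static void fill_events_safely(struct events_sketch *e)
{
	memset(e, 0, sizeof(*e)); /* clears fields, padding, and reserved */
	e->injected = 1;
	e->error_code = 13;
}

int main(void)
{
	struct events_sketch e;
	fill_events_safely(&e);
	return e.reserved[0];     /* provably zero */
}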
@@ -2912,10 +2912,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -2937,6 +2933,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
@@ -3229,6 +3229,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		now_ns = timespec_to_ns(&now);
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -5111,6 +5112,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+	if (sregs->cr4 & X86_CR4_OSXSAVE)
+		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
 		load_pdptrs(vcpu, vcpu->arch.cr3);
 		mmu_reset_needed = 1;
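The last x86.c hunk ties CR4 writes from userspace to the guest's CPUID view: when set_sregs installs a CR4 with OSXSAVE set, update_cpuid() recomputes the guest CPUID so that CPUID.1:ECX.OSXSAVE mirrors CR4.OSXSAVE, as the architecture requires. The mirroring itself reduces to the following (a sketch with illustrative names; bit positions are the architectural ones, CR4 bit 18 and CPUID.1:ECX bit 27):

#define CR4_OSXSAVE        (1ul << 18)
#define CPUID1_ECX_OSXSAVE (1u << 27)

static unsigned int mirror_osxsave(unsigned long cr4, unsigned int ecx)
{
	if (cr4 & CR4_OSXSAVE)
		ecx |= CPUID1_ECX_OSXSAVE;   /* guest has enabled XSAVE */
	else
		ecx &= ~CPUID1_ECX_OSXSAVE;  /* hide it until CR4 says otherwise */
	return ecx;
}

int main(void)
{
	unsigned int ecx = mirror_osxsave(CR4_OSXSAVE, 0);
	return ecx == CPUID1_ECX_OSXSAVE ? 0 : 1;
}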
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b7a404722d2b..0bf327453499 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -65,6 +65,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
+static inline u32 bit(int bitno)
+{
+	return 1 << (bitno & 31);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 
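x86.h now hosts the bit() helper that the earlier hunks deleted from vmx.c and x86.c, making it available to svm.c's new XSAVE masking as well. The & 31 is what lets it accept the kernel's X86_FEATURE_* constants, which encode word * 32 + bit across the cpufeature words; masking with 31 recovers the position within the 32-bit register. A quick standalone check (the XSAVE value shown is my reading of the 2.6.36-era cpufeature.h, word 4 bit 26):

#include <assert.h>

static inline unsigned int bit(int bitno)
{
	return 1u << (bitno & 31);
}

/* X86_FEATURE_* constants pack a register-word index and a bit position. */
#define X86_FEATURE_XSAVE (4 * 32 + 26)

int main(void)
{
	/* bit() discards the word index and keeps the in-word position */
	assert(bit(X86_FEATURE_XSAVE) == 1u << 26);
	return 0;
}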