author    Alexander Graf <agraf@suse.de>  2013-02-14 19:12:59 -0500
committer Alexander Graf <agraf@suse.de>  2013-02-14 19:12:59 -0500
commit    899f7b26bc4549cd5da8ac7688ed7e9d017f21b5
tree      b321cb79f7ba6802d24015b85a08b2567dad081f
parent    011da8996263f799a469a761ee15c998d7ef1acb
parent    cbd29cb6e38af6119df2cdac0c58acf0e85c177e
Merge commit 'origin/next' into kvm-ppc-next
 arch/s390/kvm/interrupt.c  | 18
 arch/x86/kvm/emulate.c     | 13
 arch/x86/kvm/mmu.c         | 55
 arch/x86/kvm/paging_tmpl.h |  9
 arch/x86/kvm/vmx.c         | 19
 arch/x86/kvm/x86.c         | 37
 include/linux/kvm_host.h   |  1
 virt/kvm/kvm_main.c        |  1
 8 files changed, 66 insertions(+), 87 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9a128357fd15..2f6ccb065c4a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -55,6 +55,13 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static u64 int_word_to_isc_bits(u32 int_word)
+{
+	u8 isc = (int_word & 0x38000000) >> 27;
+
+	return (0x80 >> isc) << 24;
+}
+
 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 				      struct kvm_s390_interrupt_info *inti)
 {
@@ -96,7 +103,8 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
 		if (psw_ioint_disabled(vcpu))
 			return 0;
-		if (vcpu->arch.sie_block->gcr[6] & inti->io.io_int_word)
+		if (vcpu->arch.sie_block->gcr[6] &
+		    int_word_to_isc_bits(inti->io.io_int_word))
 			return 1;
 		return 0;
 	default:
@@ -724,7 +732,8 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 	list_for_each_entry(iter, &fi->list, list) {
 		if (!is_ioint(iter->type))
 			continue;
-		if (cr6 && ((cr6 & iter->io.io_int_word) == 0))
+		if (cr6 &&
+		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
 			continue;
 		if (schid) {
 			if (((schid & 0x00000000ffff0000) >> 16) !=
@@ -811,11 +820,14 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	if (!is_ioint(inti->type))
 		list_add_tail(&inti->list, &fi->list);
 	else {
+		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+
 		/* Keep I/O interrupts sorted in isc order. */
 		list_for_each_entry(iter, &fi->list, list) {
 			if (!is_ioint(iter->type))
 				continue;
-			if (iter->io.io_int_word <= inti->io.io_int_word)
+			if (int_word_to_isc_bits(iter->io.io_int_word)
+			    <= isc_bits)
 				continue;
 			break;
 		}
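
An illustrative, standalone sketch (not part of the patch): the new int_word_to_isc_bits() helper extracts the interruption subclass (ISC, bits 27-29 of the I/O interruption word) and converts it into the one-bit-per-ISC mask layout that the hunks above compare against CR6, so pending I/O interrupts are matched against the guest's ISC mask rather than against the raw interruption word. The sample interruption words below are made up.

/* Illustrative sketch only: mirrors int_word_to_isc_bits() from the diff above. */
#include <stdio.h>
#include <stdint.h>

static uint64_t int_word_to_isc_bits(uint32_t int_word)
{
	uint8_t isc = (int_word & 0x38000000) >> 27;	/* ISC lives in bits 27-29 */

	return ((uint64_t)(0x80 >> isc)) << 24;		/* one bit per ISC, CR6-style */
}

int main(void)
{
	/* Hypothetical interruption words covering ISC 0 and ISC 7. */
	uint32_t words[] = { 0x00000000, 0x38000000 };

	for (int i = 0; i < 2; i++)
		printf("int_word=%08x -> isc_bits=%08llx\n", words[i],
		       (unsigned long long)int_word_to_isc_bits(words[i]));
	return 0;
}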
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2b11318151a4..a335cc6cde72 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2995,14 +2995,11 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
 
 	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
 
-	ctxt->eflags &= ~(X86_EFLAGS_PF | X86_EFLAGS_SF | X86_EFLAGS_ZF);
-
-	if (!al)
-		ctxt->eflags |= X86_EFLAGS_ZF;
-	if (!(al & 1))
-		ctxt->eflags |= X86_EFLAGS_PF;
-	if (al & 0x80)
-		ctxt->eflags |= X86_EFLAGS_SF;
+	/* Set PF, ZF, SF */
+	ctxt->src.type = OP_IMM;
+	ctxt->src.val = 0;
+	ctxt->src.bytes = 1;
+	fastop(ctxt, em_or);
 
 	return X86EMUL_CONTINUE;
 }
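
An illustrative, standalone sketch (not part of the patch): em_aad() now drives the flag update through the emulator's fastop() path by ORing AL with a zero immediate, so PF, ZF and SF are produced by a real ALU operation instead of being open-coded. Note that the removed code derived PF from bit 0 of AL only, while the architectural parity flag covers all eight bits of the low byte. The sketch below states those three architectural flag definitions for an 8-bit result; the helper name is made up for this example.

/* Illustrative sketch: architectural PF/ZF/SF for an 8-bit result. */
#include <stdio.h>
#include <stdint.h>

#define X86_EFLAGS_PF 0x0004	/* low byte has an even number of set bits */
#define X86_EFLAGS_ZF 0x0040	/* result is zero */
#define X86_EFLAGS_SF 0x0080	/* sign bit (bit 7 for a byte op) is set */

static unsigned int flags_for_byte(uint8_t al)
{
	unsigned int flags = 0;

	if (al == 0)
		flags |= X86_EFLAGS_ZF;
	if (al & 0x80)
		flags |= X86_EFLAGS_SF;
	if ((__builtin_popcount(al) & 1) == 0)	/* even parity over all 8 bits */
		flags |= X86_EFLAGS_PF;
	return flags;
}

int main(void)
{
	uint8_t samples[] = { 0x00, 0x03, 0x80 };

	for (int i = 0; i < 3; i++)
		printf("al=%02x -> flags=%04x\n", samples[i], flags_for_byte(samples[i]));
	return 0;
}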
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0242a8a1b2e2..1cda1f332654 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -832,8 +832,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	if (host_level == PT_PAGE_TABLE_LEVEL)
 		return host_level;
 
-	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
-		kvm_x86_ops->get_lpage_level() : host_level;
+	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
 	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
 		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
@@ -1106,8 +1105,7 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 
 /*
  * Write-protect on the specified @sptep, @pt_protect indicates whether
- * spte writ-protection is caused by protecting shadow page table.
- * @flush indicates whether tlb need be flushed.
+ * spte write-protection is caused by protecting shadow page table.
  *
  * Note: write protection is difference between drity logging and spte
  * protection:
@@ -1116,10 +1114,9 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
  * - for spte protection, the spte can be writable only after unsync-ing
  *   shadow page.
  *
- * Return true if the spte is dropped.
+ * Return true if tlb need be flushed.
  */
-static bool
-spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
+static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
 {
 	u64 spte = *sptep;
 
@@ -1129,17 +1126,11 @@ spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
 
 	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
 
-	if (__drop_large_spte(kvm, sptep)) {
-		*flush |= true;
-		return true;
-	}
-
 	if (pt_protect)
 		spte &= ~SPTE_MMU_WRITEABLE;
 	spte = spte & ~PT_WRITABLE_MASK;
 
-	*flush |= mmu_spte_update(sptep, spte);
-	return false;
+	return mmu_spte_update(sptep, spte);
 }
 
 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
@@ -1151,11 +1142,8 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
 
 	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
 		BUG_ON(!(*sptep & PT_PRESENT_MASK));
-		if (spte_write_protect(kvm, sptep, &flush, pt_protect)) {
-			sptep = rmap_get_first(*rmapp, &iter);
-			continue;
-		}
 
+		flush |= spte_write_protect(kvm, sptep, pt_protect);
 		sptep = rmap_get_next(&iter);
 	}
 
@@ -1959,9 +1947,9 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
 {
 	u64 spte;
 
-	spte = __pa(sp->spt)
-		| PT_PRESENT_MASK | PT_ACCESSED_MASK
-		| PT_WRITABLE_MASK | PT_USER_MASK;
+	spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
+
 	mmu_spte_set(sptep, spte);
 }
 
@@ -2400,16 +2388,15 @@ done:
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-			 unsigned pt_access, unsigned pte_access,
-			 int write_fault, int *emulate, int level, gfn_t gfn,
-			 pfn_t pfn, bool speculative, bool host_writable)
+			 unsigned pte_access, int write_fault, int *emulate,
+			 int level, gfn_t gfn, pfn_t pfn, bool speculative,
+			 bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
 
-	pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n",
-		 __func__, *sptep, pt_access,
-		 write_fault, gfn);
+	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
+		 *sptep, write_fault, gfn);
 
 	if (is_rmap_spte(*sptep)) {
 		/*
@@ -2525,7 +2512,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		return -1;
 
 	for (i = 0; i < ret; i++, gfn++, start++)
-		mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL,
+		mmu_set_spte(vcpu, start, access, 0, NULL,
 			     sp->role.level, gfn, page_to_pfn(pages[i]),
 			     true, true);
 
@@ -2586,9 +2573,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			unsigned pte_access = ACC_ALL;
-
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
 				     write, &emulate, level, gfn, pfn,
 				     prefault, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);
@@ -2596,6 +2581,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			break;
 		}
 
+		drop_large_spte(vcpu, iterator.sptep);
+
 		if (!is_shadow_present_pte(*iterator.sptep)) {
 			u64 base_addr = iterator.addr;
 
@@ -2605,11 +2592,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 					      iterator.level - 1,
 					      1, ACC_ALL, iterator.sptep);
 
-			mmu_spte_set(iterator.sptep,
-				     __pa(sp->spt)
-				     | PT_PRESENT_MASK | PT_WRITABLE_MASK
-				     | shadow_user_mask | shadow_x_mask
-				     | shadow_accessed_mask);
+			link_shadow_page(iterator.sptep, sp);
 		}
 	}
 	return emulate;
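
An illustrative, standalone sketch (not part of the patch): after this series of hunks, spte_write_protect() no longer takes a *flush out-parameter or drops large sptes itself; it simply reports whether the spte changed in a way that requires a TLB flush, __rmap_write_protect() OR-accumulates that result, and large-spte dropping moves into __direct_map()'s walk via drop_large_spte(). The sketch below shows only the accumulate-then-flush-once calling pattern, with hypothetical names and a made-up "writable" bit.

/* Illustrative sketch of the "accumulate, flush once" pattern; names are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for spte_write_protect(): returns true when the caller must flush. */
static bool protect_one(unsigned long *spte)
{
	bool was_writable = *spte & 0x2;

	*spte &= ~0x2UL;		/* clear the hypothetical writable bit */
	return was_writable;		/* a real change means a TLB flush is needed */
}

int main(void)
{
	unsigned long sptes[] = { 0x1, 0x3, 0x3 };
	bool flush = false;

	for (int i = 0; i < 3; i++)
		flush |= protect_one(&sptes[i]);	/* OR-accumulate, as in __rmap_write_protect() */

	if (flush)
		printf("flush the TLB once, after the whole walk\n");
	return 0;
}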
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 34c5c99323f4..105dd5bd550e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -326,8 +326,8 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * we call mmu_set_spte() with host_writable = true because
 	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
-	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0,
-		     NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);
+	mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
+		     gfn, pfn, true, true);
 
 	return true;
 }
@@ -470,9 +470,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	}
 
 	clear_sp_write_flooding_count(it.sptep);
-	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
-		     write_fault, &emulate, it.level,
-		     gw->gfn, pfn, prefault, map_writable);
+	mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
+		     it.level, gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return emulate;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fe9a9cfadbd6..6667042714cc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,8 +84,7 @@ module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
-static bool __read_mostly enable_apicv_reg_vid = 1;
-module_param(enable_apicv_reg_vid, bool, S_IRUGO);
+static bool __read_mostly enable_apicv_reg_vid;
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
@@ -95,12 +94,8 @@ module_param(enable_apicv_reg_vid, bool, S_IRUGO);
 static bool __read_mostly nested = 0;
 module_param(nested, bool, S_IRUGO);
 
-#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
-	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
-#define KVM_GUEST_CR0_MASK						\
-	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST				\
-	(X86_CR0_WP | X86_CR0_NE)
+#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
 #define KVM_VM_CR0_ALWAYS_ON						\
 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_CR4_GUEST_OWNED_BITS				      \
@@ -3137,11 +3132,11 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long hw_cr0;
 
+	hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
 	if (enable_unrestricted_guest)
-		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
-			| KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
 	else {
-		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
+		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
 
 		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
 			enter_pmode(vcpu);
@@ -5925,7 +5920,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
 	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
 	gpa_t bitmap;
 
-	if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS))
+	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
 		return 1;
 
 	/*
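
An illustrative, standalone sketch (not part of the patch): with the trimmed mask definitions, vmx_set_cr0() now always starts from the guest cr0 with only the bits in the new KVM_GUEST_CR0_MASK (CD and NW) cleared, then ORs in the always-on bits for the current mode. The sketch below evaluates that computation with the new #defines from the diff and the architectural CR0 bit values; the sample guest cr0 is made up.

/* Illustrative sketch of the new hw_cr0 computation; the sample cr0 is made up. */
#include <stdio.h>

#define X86_CR0_PE 0x00000001UL
#define X86_CR0_NE 0x00000020UL
#define X86_CR0_WP 0x00010000UL
#define X86_CR0_NW 0x20000000UL
#define X86_CR0_CD 0x40000000UL
#define X86_CR0_PG 0x80000000UL

#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)

int main(void)
{
	unsigned long cr0 = X86_CR0_PE | X86_CR0_CD;	/* hypothetical guest value */
	unsigned long hw_cr0 = cr0 & ~KVM_GUEST_CR0_MASK;	/* CD/NW masked out */

	printf("unrestricted guest: hw_cr0=%08lx\n",
	       hw_cr0 | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST);
	printf("restricted guest:   hw_cr0=%08lx\n",
	       hw_cr0 | KVM_VM_CR0_ALWAYS_ON);
	return 0;
}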
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 373e17a0d398..3c5bb6fe5280 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6897,33 +6897,28 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				bool user_alloc)
 {
 	int npages = memslot->npages;
-	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
 
-	/* Prevent internal slot pages from being moved by fork()/COW. */
-	if (memslot->id >= KVM_USER_MEM_SLOTS)
-		map_flags = MAP_SHARED | MAP_ANONYMOUS;
-
-	/*To keep backward compatibility with older userspace,
-	 *x86 needs to handle !user_alloc case.
+	/*
+	 * Only private memory slots need to be mapped here since
+	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
 	 */
-	if (!user_alloc) {
-		if (npages && !old.npages) {
-			unsigned long userspace_addr;
+	if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+		unsigned long userspace_addr;
 
-			userspace_addr = vm_mmap(NULL, 0,
-						 npages * PAGE_SIZE,
-						 PROT_READ | PROT_WRITE,
-						 map_flags,
-						 0);
+		/*
+		 * MAP_SHARED to prevent internal slot pages from being moved
+		 * by fork()/COW.
+		 */
+		userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+					 PROT_READ | PROT_WRITE,
+					 MAP_SHARED | MAP_ANONYMOUS, 0);
 
-			if (IS_ERR((void *)userspace_addr))
-				return PTR_ERR((void *)userspace_addr);
+		if (IS_ERR((void *)userspace_addr))
+			return PTR_ERR((void *)userspace_addr);
 
-			memslot->userspace_addr = userspace_addr;
-		}
+		memslot->userspace_addr = userspace_addr;
 	}
 
-
 	return 0;
 }
 
@@ -6935,7 +6930,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
-	if (!user_alloc && !old.user_alloc && old.npages && !npages) {
+	if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
 		int ret;
 
 		ret = vm_munmap(old.userspace_addr,
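
An illustrative, standalone sketch (not part of the patch): the prepare/commit pair above now distinguishes internal slots by their id (>= KVM_USER_MEM_SLOTS) instead of the removed user_alloc flag; prepare maps MAP_SHARED anonymous memory when such a slot is created, and commit unmaps it when the slot is deleted. The userspace-flavoured sketch below only illustrates that map-on-create / unmap-on-delete lifecycle, with mmap()/munmap() standing in for the in-kernel vm_mmap()/vm_munmap().

/* Illustrative userspace sketch of the map-on-create / unmap-on-delete pairing. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t npages = 4, page_size = 4096;

	/*
	 * "prepare": a new internal slot gets anonymous MAP_SHARED backing so
	 * fork()/COW cannot move its pages.
	 */
	void *addr = mmap(NULL, npages * page_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("slot backing at %p\n", addr);

	/* "commit" on deletion: the backing is torn down again. */
	if (munmap(addr, npages * page_size))
		perror("munmap");
	return 0;
}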
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0350e0d5e031..722cae78bbc4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -273,7 +273,6 @@ struct kvm_memory_slot {
 	unsigned long userspace_addr;
 	u32 flags;
 	short id;
-	bool user_alloc;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2e93630b4add..adc68feb5c5a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -839,7 +839,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	r = -ENOMEM;
 	if (change == KVM_MR_CREATE) {
-		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 
 		if (kvm_arch_create_memslot(&new, npages))