aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2010-11-19 04:04:03 -0500
committerAvi Kivity <avi@redhat.com>2011-01-12 04:29:48 -0500
commita4a8e6f76ecf963fa7e4d74b3635655a2033a27b (patch)
tree19be96f9c253b47e22600ae0034c2b228d77f6e8 /arch/x86
parent9bdbba13b8868815198e4fdbd484769ef76392d9 (diff)
KVM: MMU: remove 'clear_unsync' parameter
Remove it, since we can judge it by using sp->unsync.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/kvm/mmu.c8
-rw-r--r--arch/x86/kvm/paging_tmpl.h5
3 files changed, 7 insertions, 8 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3cc80c478003..14524781de13 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -250,7 +250,7 @@ struct kvm_mmu {
250 void (*prefetch_page)(struct kvm_vcpu *vcpu, 250 void (*prefetch_page)(struct kvm_vcpu *vcpu,
251 struct kvm_mmu_page *page); 251 struct kvm_mmu_page *page);
252 int (*sync_page)(struct kvm_vcpu *vcpu, 252 int (*sync_page)(struct kvm_vcpu *vcpu,
253 struct kvm_mmu_page *sp, bool clear_unsync); 253 struct kvm_mmu_page *sp);
254 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva); 254 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
255 hpa_t root_hpa; 255 hpa_t root_hpa;
256 int root_level; 256 int root_level;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 59104927c582..3db0cd4b13d7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1156,7 +1156,7 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1156} 1156}
1157 1157
1158static int nonpaging_sync_page(struct kvm_vcpu *vcpu, 1158static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1159 struct kvm_mmu_page *sp, bool clear_unsync) 1159 struct kvm_mmu_page *sp)
1160{ 1160{
1161 return 1; 1161 return 1;
1162} 1162}
@@ -1286,7 +1286,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1286 if (clear_unsync) 1286 if (clear_unsync)
1287 kvm_unlink_unsync_page(vcpu->kvm, sp); 1287 kvm_unlink_unsync_page(vcpu->kvm, sp);
1288 1288
1289 if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) { 1289 if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1290 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); 1290 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1291 return 1; 1291 return 1;
1292 } 1292 }
@@ -1327,12 +1327,12 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1327 continue; 1327 continue;
1328 1328
1329 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); 1329 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1330 kvm_unlink_unsync_page(vcpu->kvm, s);
1330 if ((s->role.cr4_pae != !!is_pae(vcpu)) || 1331 if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
1331 (vcpu->arch.mmu.sync_page(vcpu, s, true))) { 1332 (vcpu->arch.mmu.sync_page(vcpu, s))) {
1332 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); 1333 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
1333 continue; 1334 continue;
1334 } 1335 }
1335 kvm_unlink_unsync_page(vcpu->kvm, s);
1336 flush = true; 1336 flush = true;
1337 } 1337 }
1338 1338
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 57619ed4beee..60f00dbe327a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -740,8 +740,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
740 * - The spte has a reference to the struct page, so the pfn for a given gfn 740 * - The spte has a reference to the struct page, so the pfn for a given gfn
741 * can't change unless all sptes pointing to it are nuked first. 741 * can't change unless all sptes pointing to it are nuked first.
742 */ 742 */
743static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 743static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
744 bool clear_unsync)
745{ 744{
746 int i, offset, nr_present; 745 int i, offset, nr_present;
747 bool host_writable; 746 bool host_writable;
@@ -781,7 +780,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
781 u64 nonpresent; 780 u64 nonpresent;
782 781
783 if (rsvd_bits_set || is_present_gpte(gpte) || 782 if (rsvd_bits_set || is_present_gpte(gpte) ||
784 !clear_unsync) 783 sp->unsync)
785 nonpresent = shadow_trap_nonpresent_pte; 784 nonpresent = shadow_trap_nonpresent_pte;
786 else 785 else
787 nonpresent = shadow_notrap_nonpresent_pte; 786 nonpresent = shadow_notrap_nonpresent_pte;