about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author	Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>	2012-01-17 05:52:15 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:52:43 -0500
commit	e4b35cc960bf216548516d8e39f5e364cfbbc86b (patch)
tree	00c9039593840cc511ad67dc378d52546aeb91f5 /arch
parent	9373e2c0576ee15b13e93bc5c5b3ef31d0612992 (diff)
KVM: MMU: Remove unused kvm parameter from rmap_next()
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	26
-rw-r--r--	arch/x86/kvm/mmu_audit.c	4
2 files changed, 15 insertions, 15 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 75b8f579b2a6..ae76cc3392e1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -988,7 +988,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	return pte_list_add(vcpu, spte, rmapp);
 }
 
-static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
+static u64 *rmap_next(unsigned long *rmapp, u64 *spte)
 {
 	return pte_list_next(rmapp, spte);
 }
@@ -1019,7 +1019,7 @@ int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
 	int i, write_protected = 0;
 
 	rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
-	spte = rmap_next(kvm, rmapp, NULL);
+	spte = rmap_next(rmapp, NULL);
 	while (spte) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
@@ -1027,14 +1027,14 @@ int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
 			mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
 			write_protected = 1;
 		}
-		spte = rmap_next(kvm, rmapp, spte);
+		spte = rmap_next(rmapp, spte);
 	}
 
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		rmapp = __gfn_to_rmap(gfn, i, slot);
-		spte = rmap_next(kvm, rmapp, NULL);
+		spte = rmap_next(rmapp, NULL);
 		while (spte) {
 			BUG_ON(!(*spte & PT_PRESENT_MASK));
 			BUG_ON(!is_large_pte(*spte));
@@ -1045,7 +1045,7 @@ int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
 				spte = NULL;
 				write_protected = 1;
 			}
-			spte = rmap_next(kvm, rmapp, spte);
+			spte = rmap_next(rmapp, spte);
 		}
 	}
 
@@ -1066,7 +1066,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	u64 *spte;
 	int need_tlb_flush = 0;
 
-	while ((spte = rmap_next(kvm, rmapp, NULL))) {
+	while ((spte = rmap_next(rmapp, NULL))) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
 		drop_spte(kvm, spte);
@@ -1085,14 +1085,14 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 	WARN_ON(pte_huge(*ptep));
 	new_pfn = pte_pfn(*ptep);
-	spte = rmap_next(kvm, rmapp, NULL);
+	spte = rmap_next(rmapp, NULL);
 	while (spte) {
 		BUG_ON(!is_shadow_present_pte(*spte));
 		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
 		need_flush = 1;
 		if (pte_write(*ptep)) {
 			drop_spte(kvm, spte);
-			spte = rmap_next(kvm, rmapp, NULL);
+			spte = rmap_next(rmapp, NULL);
 		} else {
 			new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
 			new_spte |= (u64)new_pfn << PAGE_SHIFT;
@@ -1102,7 +1102,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			new_spte &= ~shadow_accessed_mask;
 			mmu_spte_clear_track_bits(spte);
 			mmu_spte_set(spte, new_spte);
-			spte = rmap_next(kvm, rmapp, spte);
+			spte = rmap_next(rmapp, spte);
 		}
 	}
 	if (need_flush)
@@ -1176,7 +1176,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	if (!shadow_accessed_mask)
 		return kvm_unmap_rmapp(kvm, rmapp, data);
 
-	spte = rmap_next(kvm, rmapp, NULL);
+	spte = rmap_next(rmapp, NULL);
 	while (spte) {
 		int _young;
 		u64 _spte = *spte;
@@ -1186,7 +1186,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			young = 1;
 			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
 		}
-		spte = rmap_next(kvm, rmapp, spte);
+		spte = rmap_next(rmapp, spte);
 	}
 	return young;
 }
@@ -1205,7 +1205,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	if (!shadow_accessed_mask)
 		goto out;
 
-	spte = rmap_next(kvm, rmapp, NULL);
+	spte = rmap_next(rmapp, NULL);
 	while (spte) {
 		u64 _spte = *spte;
 		BUG_ON(!(_spte & PT_PRESENT_MASK));
@@ -1214,7 +1214,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			young = 1;
 			break;
 		}
-		spte = rmap_next(kvm, rmapp, spte);
+		spte = rmap_next(rmapp, spte);
 	}
 out:
 	return young;
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index fe15dcc07a6b..6eabae3d77ff 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -200,13 +200,13 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	slot = gfn_to_memslot(kvm, sp->gfn);
 	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
 
-	spte = rmap_next(kvm, rmapp, NULL);
+	spte = rmap_next(rmapp, NULL);
 	while (spte) {
 		if (is_writable_pte(*spte))
 			audit_printk(kvm, "shadow page has writable "
 				     "mappings: gfn %llx role %x\n",
 				     sp->gfn, sp->role.word);
-		spte = rmap_next(kvm, rmapp, spte);
+		spte = rmap_next(rmapp, spte);
 	}
 }
 