authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2010-06-11 09:34:04 -0400
committerAvi Kivity <avi@redhat.com>2010-08-01 03:46:44 -0400
commit7a8f1a74e4193d21e55b35928197486f2c047efb (patch)
tree53895b4e086b1acba775ad1f36cbd73b0c7e1901 /arch/x86
parentebdea638df04ae6293a9a5414d98ad843c69e82f (diff)
KVM: MMU: clear unsync_child_bitmap completely
In the current code, some pages' unsync_child_bitmap is not cleared completely in mmu_sync_children(); for example, if two PDPEs share one PDT, the unsync_child_bitmap of one of the PDPEs is not cleared. At present this does no harm beyond a little extra overhead, but it is preparatory work for a later patch.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/mmu.c  53
1 files changed, 29 insertions, 24 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 41e801b53064..ab12be4eb105 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1149,33 +1149,38 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 	int i, ret, nr_unsync_leaf = 0;
 
 	for_each_unsync_children(sp->unsync_child_bitmap, i) {
+		struct kvm_mmu_page *child;
 		u64 ent = sp->spt[i];
 
-		if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
-			struct kvm_mmu_page *child;
-			child = page_header(ent & PT64_BASE_ADDR_MASK);
-
-			if (child->unsync_children) {
-				if (mmu_pages_add(pvec, child, i))
-					return -ENOSPC;
-
-				ret = __mmu_unsync_walk(child, pvec);
-				if (!ret) {
-					__clear_bit(i, sp->unsync_child_bitmap);
-					sp->unsync_children--;
-					WARN_ON((int)sp->unsync_children < 0);
-				} else if (ret > 0)
-					nr_unsync_leaf += ret;
-				else
-					return ret;
-			}
+		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
+			goto clear_child_bitmap;
+
+		child = page_header(ent & PT64_BASE_ADDR_MASK);
+
+		if (child->unsync_children) {
+			if (mmu_pages_add(pvec, child, i))
+				return -ENOSPC;
+
+			ret = __mmu_unsync_walk(child, pvec);
+			if (!ret)
+				goto clear_child_bitmap;
+			else if (ret > 0)
+				nr_unsync_leaf += ret;
+			else
+				return ret;
+		} else if (child->unsync) {
+			nr_unsync_leaf++;
+			if (mmu_pages_add(pvec, child, i))
+				return -ENOSPC;
+		} else
+			goto clear_child_bitmap;
 
-			if (child->unsync) {
-				nr_unsync_leaf++;
-				if (mmu_pages_add(pvec, child, i))
-					return -ENOSPC;
-			}
-		}
+		continue;
+
+clear_child_bitmap:
+		__clear_bit(i, sp->unsync_child_bitmap);
+		sp->unsync_children--;
+		WARN_ON((int)sp->unsync_children < 0);
 	}
 
 
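For readability, here is the walk loop as it reads with this patch applied, reconstructed from the hunk above; the comments are added here for explanation and are not part of the commit. Every path that does not recurse into an unsynced child now falls through to clear_child_bitmap, which is what makes the bitmap clearing complete.

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		/* Non-present and large PTEs have no child page table, so
		 * their stale bit is now cleared instead of being skipped. */
		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
			goto clear_child_bitmap;

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret)
				goto clear_child_bitmap;	/* nothing unsync below */
			else if (ret > 0)
				nr_unsync_leaf += ret;
			else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			goto clear_child_bitmap;	/* child already in sync */

		continue;

clear_child_bitmap:
		__clear_bit(i, sp->unsync_child_bitmap);
		sp->unsync_children--;
		WARN_ON((int)sp->unsync_children < 0);
	}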