author     Shaohua Li <shaohua.li@intel.com>        2007-07-23 02:51:39 -0400
committer  Avi Kivity <avi@qumranet.com>            2007-10-13 04:18:19 -0400
commit     fe5518819463d57ed032bc12458ed681bc790609 (patch)
tree       15d13f175f2b77970eb7d84d442689426cafd283 /drivers/kvm
parent     9ae0448f53324b3c476f68bd134d97ac4ec27e0c (diff)
KVM: Move gfn_to_page out of kmap/unmap pairs
gfn_to_page might sleep with swap support. Move it out of the kmap calls.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
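
The ordering problem being fixed, as a minimal sketch (illustrative only,
not verbatim kernel code): kmap_atomic() disables preemption, so anything
that can sleep -- such as gfn_to_page(), reached in the walker via
safe_gpa_to_hpa(), once swap support lands -- must run before the mapping
is taken, not under it.

        /* Before: the next guest page was resolved while an atomic
         * mapping was live; a sleeping allocation or swap-in inside
         * safe_gpa_to_hpa() would be illegal here. */
        table = kmap_atomic(page, KM_USER0);
        paddr = safe_gpa_to_hpa(vcpu, gpa);     /* may sleep: bug */
        kunmap_atomic(table, KM_USER0);

        /* After: drop the mapping, resolve the page (sleeping is fine),
         * then take a fresh atomic mapping for the actual access. */
        kunmap_atomic(table, KM_USER0);
        paddr = safe_gpa_to_hpa(vcpu, gpa);     /* safe to sleep here */
        page = pfn_to_page(paddr >> PAGE_SHIFT);
        table = kmap_atomic(page, KM_USER0);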
Diffstat (limited to 'drivers/kvm')
 -rw-r--r--  drivers/kvm/kvm.h         |  2
 -rw-r--r--  drivers/kvm/kvm_main.c    |  7
 -rw-r--r--  drivers/kvm/mmu.c         |  2
 -rw-r--r--  drivers/kvm/paging_tmpl.h | 80
 4 files changed, 52 insertions, 39 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index cec5f057f3bf..57504ae93dbc 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -599,7 +599,7 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
 unsigned long segment_base(u16 selector);
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                       const u8 *old, const u8 *new, int bytes);
+                       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 093cea36194b..80ee427754d2 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1076,7 +1076,6 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
         struct page *page;
         void *virt;
-        unsigned offset = offset_in_page(gpa);
 
         if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
                 return 0;
@@ -1085,7 +1084,7 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                 return 0;
         mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
         virt = kmap_atomic(page, KM_USER0);
-        kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
+        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
         memcpy(virt + offset_in_page(gpa), val, bytes);
         kunmap_atomic(virt, KM_USER0);
         return 1;
@@ -1455,7 +1454,7 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
 
         mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
         para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
-        para_state = kmap_atomic(para_state_page, KM_USER0);
+        para_state = kmap(para_state_page);
 
         printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
         printk(KERN_DEBUG ".... size: %d\n", para_state->size);
@@ -1491,7 +1490,7 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
 
         para_state->ret = 0;
 err_kunmap_skip:
-        kunmap_atomic(para_state, KM_USER0);
+        kunmap(para_state_page);
         return 0;
 err_gp:
         return 1;
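
The vcpu_register_para() hunks switch from kmap_atomic() to plain kmap()
for the same reason: code running while para_state is mapped may now
sleep. A sketch of the distinction (do_sleepy_setup() is a hypothetical
stand-in, not a real kernel function):

        /* kmap_atomic(): cheap per-CPU slot, but enters atomic
         * context -- nothing under the mapping may sleep. */
        v = kmap_atomic(page, KM_USER0);
        memcpy(dst, v, len);            /* non-sleeping work only */
        kunmap_atomic(v, KM_USER0);

        /* kmap(): may itself sleep and is slower, but the mapping
         * stays valid across schedule(), so sleeping callees are
         * allowed between map and unmap. */
        v = kmap(page);
        do_sleepy_setup(v);             /* hypothetical; may sleep */
        kunmap(page);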
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 75faef4fb086..5437de2aa2d8 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1124,7 +1124,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                       const u8 *old, const u8 *new, int bytes)
+                       const u8 *new, int bytes)
 {
         gfn_t gfn = gpa >> PAGE_SHIFT;
         struct kvm_mmu_page *page;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 01901ec3fe80..660243b39d84 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -58,7 +58,10 @@ struct guest_walker {
         int level;
         gfn_t table_gfn[PT_MAX_FULL_LEVELS];
         pt_element_t *table;
+        pt_element_t pte;
         pt_element_t *ptep;
+        struct page *page;
+        int index;
         pt_element_t inherited_ar;
         gfn_t gfn;
         u32 error_code;
@@ -80,11 +83,14 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
         pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
         walker->level = vcpu->mmu.root_level;
         walker->table = NULL;
+        walker->page = NULL;
+        walker->ptep = NULL;
         root = vcpu->cr3;
 #if PTTYPE == 64
         if (!is_long_mode(vcpu)) {
                 walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
                 root = *walker->ptep;
+                walker->pte = root;
                 if (!(root & PT_PRESENT_MASK))
                         goto not_present;
                 --walker->level;
@@ -96,7 +102,8 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
                  walker->level - 1, table_gfn);
         slot = gfn_to_memslot(vcpu->kvm, table_gfn);
         hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
-        walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
+        walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
+        walker->table = kmap_atomic(walker->page, KM_USER0);
 
         ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
                (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
@@ -108,6 +115,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
                 hpa_t paddr;
 
                 ptep = &walker->table[index];
+                walker->index = index;
                 ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
                        ((unsigned long)ptep & PAGE_MASK));
 
@@ -148,16 +156,20 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 
                 walker->inherited_ar &= walker->table[index];
                 table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
-                paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
                 kunmap_atomic(walker->table, KM_USER0);
-                walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
-                                            KM_USER0);
+                paddr = safe_gpa_to_hpa(vcpu, table_gfn << PAGE_SHIFT);
+                walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
+                walker->table = kmap_atomic(walker->page, KM_USER0);
                 --walker->level;
                 walker->table_gfn[walker->level - 1 ] = table_gfn;
                 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                          walker->level - 1, table_gfn);
         }
-        walker->ptep = ptep;
+        walker->pte = *ptep;
+        if (walker->page)
+                walker->ptep = NULL;
+        if (walker->table)
+                kunmap_atomic(walker->table, KM_USER0);
         pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
         return 1;
 
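The tail of walk_addr() above is the heart of the patch: the guest entry
is snapshotted by value into walker->pte, the page and index are kept for
a later write-back, and the atomic mapping is dropped before returning so
callers are free to sleep. Condensed, with explanatory comments added (a
paraphrase of the hunk, not new code):

        walker->pte = *ptep;            /* copy the entry out by value */
        if (walker->page)
                walker->ptep = NULL;    /* pointer dies with the mapping */
        if (walker->table)
                kunmap_atomic(walker->table, KM_USER0); /* leave atomic context */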
@@ -175,13 +187,9 @@ err:
                 walker->error_code |= PFERR_USER_MASK;
         if (fetch_fault)
                 walker->error_code |= PFERR_FETCH_MASK;
-        return 0;
-}
-
-static void FNAME(release_walker)(struct guest_walker *walker)
-{
         if (walker->table)
                 kunmap_atomic(walker->table, KM_USER0);
+        return 0;
 }
 
 static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
@@ -193,7 +201,7 @@ static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
 static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
                                   u64 *shadow_pte,
                                   gpa_t gaddr,
-                                  pt_element_t *gpte,
+                                  pt_element_t gpte,
                                   u64 access_bits,
                                   int user_fault,
                                   int write_fault,
@@ -202,23 +210,34 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
                                   gfn_t gfn)
 {
         hpa_t paddr;
-        int dirty = *gpte & PT_DIRTY_MASK;
+        int dirty = gpte & PT_DIRTY_MASK;
         u64 spte = *shadow_pte;
         int was_rmapped = is_rmap_pte(spte);
 
         pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
                  " user_fault %d gfn %lx\n",
-                 __FUNCTION__, spte, (u64)*gpte, access_bits,
+                 __FUNCTION__, spte, (u64)gpte, access_bits,
                  write_fault, user_fault, gfn);
 
         if (write_fault && !dirty) {
-                *gpte |= PT_DIRTY_MASK;
+                pt_element_t *guest_ent, *tmp = NULL;
+
+                if (walker->ptep)
+                        guest_ent = walker->ptep;
+                else {
+                        tmp = kmap_atomic(walker->page, KM_USER0);
+                        guest_ent = &tmp[walker->index];
+                }
+
+                *guest_ent |= PT_DIRTY_MASK;
+                if (!walker->ptep)
+                        kunmap_atomic(tmp, KM_USER0);
                 dirty = 1;
                 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
         }
 
         spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
-        spte |= *gpte & PT64_NX_MASK;
+        spte |= gpte & PT64_NX_MASK;
         if (!dirty)
                 access_bits &= ~PT_WRITABLE_MASK;
 
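Because the walker no longer holds a mapping, setting the guest dirty bit
needs a brief re-map using the page/index saved during the walk; when
walker->ptep is non-NULL it points at ordinary kernel memory (the cached
PDPTRs), so no mapping is needed on that path. The new code above,
annotated (comments are mine):

        if (walker->ptep)               /* plain pointer, no map needed */
                guest_ent = walker->ptep;
        else {
                tmp = kmap_atomic(walker->page, KM_USER0); /* brief remap */
                guest_ent = &tmp[walker->index];
        }
        *guest_ent |= PT_DIRTY_MASK;    /* read-modify-write the guest PTE */
        if (!walker->ptep)
                kunmap_atomic(tmp, KM_USER0);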
@@ -273,13 +292,13 @@ unshadowed:
                 rmap_add(vcpu, shadow_pte);
 }
 
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
+static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
                            u64 *shadow_pte, u64 access_bits,
                            int user_fault, int write_fault, int *ptwrite,
                            struct guest_walker *walker, gfn_t gfn)
 {
-        access_bits &= *gpte;
-        FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
+        access_bits &= gpte;
+        FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
                               gpte, access_bits, user_fault, write_fault,
                               ptwrite, walker, gfn);
 }
@@ -295,22 +314,22 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
         if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
                 return;
         pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
-        FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
+        FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
                        0, NULL, NULL,
                        (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
 }
 
-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
+static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
                            u64 *shadow_pte, u64 access_bits,
                            int user_fault, int write_fault, int *ptwrite,
                            struct guest_walker *walker, gfn_t gfn)
 {
         gpa_t gaddr;
 
-        access_bits &= *gpde;
+        access_bits &= gpde;
         gaddr = (gpa_t)gfn << PAGE_SHIFT;
         if (PTTYPE == 32 && is_cpuid_PSE36())
-                gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
+                gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
                         (32 - PT32_DIR_PSE36_SHIFT);
         FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
                               gpde, access_bits, user_fault, write_fault,
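A knock-on effect, visible in the set_pte()/set_pde()/set_pte_common()
hunks: guest entries now travel by value instead of by pointer, since
after walk_addr() returns there is no mapping left for a gpte pointer to
point into. Schematically (remaining parameters unchanged):

        /* before: callee dereferenced a pointer into a live kmap */
        static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde, ...);

        /* after: the walker's snapshot is passed by value */
        static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde, ...);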
@@ -328,9 +347,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
         int level;
         u64 *shadow_ent;
         u64 *prev_shadow_ent = NULL;
-        pt_element_t *guest_ent = walker->ptep;
 
-        if (!is_present_pte(*guest_ent))
+        if (!is_present_pte(walker->pte))
                 return NULL;
 
         shadow_addr = vcpu->mmu.root_hpa;
@@ -364,12 +382,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 if (level - 1 == PT_PAGE_TABLE_LEVEL
                     && walker->level == PT_DIRECTORY_LEVEL) {
                         metaphysical = 1;
-                        hugepage_access = *guest_ent;
+                        hugepage_access = walker->pte;
                         hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
-                        if (*guest_ent & PT64_NX_MASK)
+                        if (walker->pte & PT64_NX_MASK)
                                 hugepage_access |= (1 << 2);
                         hugepage_access >>= PT_WRITABLE_SHIFT;
-                        table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
+                        table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
                                 >> PAGE_SHIFT;
                 } else {
                         metaphysical = 0;
@@ -386,12 +404,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
         }
 
         if (walker->level == PT_DIRECTORY_LEVEL) {
-                FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
+                FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
                                walker->inherited_ar, user_fault, write_fault,
                                ptwrite, walker, walker->gfn);
         } else {
                 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
-                FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
+                FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
                                walker->inherited_ar, user_fault, write_fault,
                                ptwrite, walker, walker->gfn);
         }
@@ -442,7 +460,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         if (!r) {
                 pgprintk("%s: guest page fault\n", __FUNCTION__);
                 inject_page_fault(vcpu, addr, walker.error_code);
-                FNAME(release_walker)(&walker);
                 vcpu->last_pt_write_count = 0; /* reset fork detector */
                 return 0;
         }
@@ -452,8 +469,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
                  shadow_pte, *shadow_pte, write_pt);
 
-        FNAME(release_walker)(&walker);
-
         if (!write_pt)
                 vcpu->last_pt_write_count = 0; /* reset fork detector */
 
@@ -482,7 +497,6 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
                 gpa |= vaddr & ~PAGE_MASK;
         }
 
-        FNAME(release_walker)(&walker);
         return gpa;
 }
 