Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 141
1 file changed, 74 insertions(+), 67 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 67785f635399..d2fec9c12d22 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -27,7 +27,8 @@
 #define guest_walker guest_walker64
 #define FNAME(name) paging##64_##name
 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
-#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
+#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
+#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
 #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
 #define PT_LEVEL_BITS PT64_LEVEL_BITS
@@ -43,7 +44,8 @@
 #define guest_walker guest_walker32
 #define FNAME(name) paging##32_##name
 #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
-#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
+#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
+#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
 #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
 #define PT_LEVEL_BITS PT32_LEVEL_BITS
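
The PT_LVL_ADDR_MASK/PT_LVL_OFFSET_MASK pair parameterizes by paging level what the old PT_DIR_BASE_ADDR_MASK hard-coded for the directory level: one mask keeps the bits that hold a page's physical base at a given level, the other keeps the bits that select the 4KB page inside a large page. A minimal stand-alone model of the two shapes, assuming 4KB pages and 9 index bits per 64-bit level (the real PT64_LVL_*/PT32_LVL_* macros live elsewhere in the KVM MMU code and may differ in detail):

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12                  /* 4KB pages */
#define MODEL_LEVEL_BITS 9                   /* 512 entries per 64-bit table */

/* bits holding the physical base of a page mapped at 'lvl' (cf. PT_LVL_ADDR_MASK) */
static uint64_t model_lvl_addr_mask(int lvl)
{
        uint64_t low = MODEL_PAGE_SHIFT + (uint64_t)(lvl - 1) * MODEL_LEVEL_BITS;
        return ~((1ULL << low) - 1) & ((1ULL << 52) - 1);
}

/* bits selecting the 4KB page inside a large page at 'lvl' (cf. PT_LVL_OFFSET_MASK) */
static uint64_t model_lvl_offset_mask(int lvl)
{
        uint64_t low = MODEL_PAGE_SHIFT + (uint64_t)(lvl - 1) * MODEL_LEVEL_BITS;
        return ((1ULL << low) - 1) & ~((1ULL << MODEL_PAGE_SHIFT) - 1);
}

int main(void)
{
        /* level 2 (2MB): base mask clears bits 0-20, offset mask keeps bits 12-20 */
        printf("addr mask %#llx, offset mask %#llx\n",
               (unsigned long long)model_lvl_addr_mask(2),
               (unsigned long long)model_lvl_offset_mask(2));
        return 0;
}
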
@@ -53,8 +55,8 @@
 #error Invalid PTTYPE value
 #endif
 
-#define gpte_to_gfn FNAME(gpte_to_gfn)
-#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
+#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
+#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
 
 /*
  * The guest_walker structure emulates the behavior of the hardware page
@@ -71,14 +73,9 @@ struct guest_walker {
         u32 error_code;
 };
 
-static gfn_t gpte_to_gfn(pt_element_t gpte)
+static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 {
-        return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
-}
-
-static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
-{
-        return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
 }
 
 static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
@@ -125,14 +122,16 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
         gpa_t pte_gpa;
         int rsvd_fault = 0;
 
-        pgprintk("%s: addr %lx\n", __func__, addr);
+        trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
+                                     fetch_fault);
 walk:
         walker->level = vcpu->arch.mmu.root_level;
         pte = vcpu->arch.cr3;
 #if PTTYPE == 64
         if (!is_long_mode(vcpu)) {
-                pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
-                if (!is_present_pte(pte))
+                pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
+                trace_kvm_mmu_paging_element(pte, walker->level);
+                if (!is_present_gpte(pte))
                         goto not_present;
                 --walker->level;
         }
@@ -150,12 +149,11 @@ walk:
                 pte_gpa += index * sizeof(pt_element_t);
                 walker->table_gfn[walker->level - 1] = table_gfn;
                 walker->pte_gpa[walker->level - 1] = pte_gpa;
-                pgprintk("%s: table_gfn[%d] %lx\n", __func__,
-                         walker->level - 1, table_gfn);
 
                 kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
+                trace_kvm_mmu_paging_element(pte, walker->level);
 
-                if (!is_present_pte(pte))
+                if (!is_present_gpte(pte))
                         goto not_present;
 
                 rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
@@ -175,6 +173,8 @@ walk:
 #endif
 
                 if (!(pte & PT_ACCESSED_MASK)) {
+                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
+                                                       sizeof(pte));
                         mark_page_dirty(vcpu->kvm, table_gfn);
                         if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
                             index, pte, pte|PT_ACCESSED_MASK))
@@ -186,18 +186,24 @@ walk:
 
                 walker->ptes[walker->level - 1] = pte;
 
-                if (walker->level == PT_PAGE_TABLE_LEVEL) {
-                        walker->gfn = gpte_to_gfn(pte);
-                        break;
-                }
-
-                if (walker->level == PT_DIRECTORY_LEVEL
-                    && (pte & PT_PAGE_SIZE_MASK)
-                    && (PTTYPE == 64 || is_pse(vcpu))) {
-                        walker->gfn = gpte_to_gfn_pde(pte);
-                        walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
-                        if (PTTYPE == 32 && is_cpuid_PSE36())
+                if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
+                    ((walker->level == PT_DIRECTORY_LEVEL) &&
+                    (pte & PT_PAGE_SIZE_MASK) &&
+                    (PTTYPE == 64 || is_pse(vcpu))) ||
+                    ((walker->level == PT_PDPE_LEVEL) &&
+                    (pte & PT_PAGE_SIZE_MASK) &&
+                    is_long_mode(vcpu))) {
+                        int lvl = walker->level;
+
+                        walker->gfn = gpte_to_gfn_lvl(pte, lvl);
+                        walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
+                                        >> PAGE_SHIFT;
+
+                        if (PTTYPE == 32 &&
+                            walker->level == PT_DIRECTORY_LEVEL &&
+                            is_cpuid_PSE36())
                                 walker->gfn += pse36_gfn_delta(pte);
+
                         break;
                 }
 
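When the walk terminates at a large gpte, the guest frame number is no longer just the pte's base: it is the large page's base frame plus the index of the faulting 4KB page within it, which is what the PT_LVL_OFFSET_MASK term adds (the PSE36 correction still applies only to 32-bit PDEs). A self-contained sketch of that arithmetic with made-up numbers, again assuming 4KB pages and 9 index bits per level; this is illustration, not KVM code:

#include <stdint.h>
#include <stdio.h>

/* illustrative equivalent of the walker's large-page gfn computation;
 * PSE36 is ignored and the constants are not taken from the headers */
static uint64_t model_large_page_gfn(uint64_t gpte, uint64_t addr, int lvl)
{
        uint64_t low = 12 + (uint64_t)(lvl - 1) * 9;
        uint64_t base_gfn = (gpte & ~((1ULL << low) - 1)) >> 12;              /* PT_LVL_ADDR_MASK   */
        uint64_t page_idx = (addr & (((1ULL << low) - 1) & ~0xfffULL)) >> 12; /* PT_LVL_OFFSET_MASK */
        return base_gfn + page_idx;
}

int main(void)
{
        /* hypothetical 2MB guest PDE based at 0x40200000, fault at 0xc0123456:
         * base gfn 0x40200 plus in-page index 0x123 -> gfn 0x40323 */
        printf("%#llx\n",
               (unsigned long long)model_large_page_gfn(0x40200000, 0xc0123456, 2));
        return 0;
}
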
@@ -205,9 +211,10 @@ walk:
                 --walker->level;
         }
 
-        if (write_fault && !is_dirty_pte(pte)) {
+        if (write_fault && !is_dirty_gpte(pte)) {
                 bool ret;
 
+                trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                 mark_page_dirty(vcpu->kvm, table_gfn);
                 ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
                             pte|PT_DIRTY_MASK);
@@ -239,6 +246,7 @@ err:
         walker->error_code |= PFERR_FETCH_MASK;
         if (rsvd_fault)
                 walker->error_code |= PFERR_RSVD_MASK;
+        trace_kvm_mmu_walker_error(walker->error_code);
         return 0;
 }
 
@@ -248,12 +256,11 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
         pt_element_t gpte;
         unsigned pte_access;
         pfn_t pfn;
-        int largepage = vcpu->arch.update_pte.largepage;
 
         gpte = *(const pt_element_t *)pte;
         if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-                if (!is_present_pte(gpte))
-                        set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
+                if (!is_present_gpte(gpte))
+                        __set_spte(spte, shadow_notrap_nonpresent_pte);
                 return;
         }
         pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -267,7 +274,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
                 return;
         kvm_get_pfn(pfn);
         mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-                     gpte & PT_DIRTY_MASK, NULL, largepage,
+                     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
                      gpte_to_gfn(gpte), pfn, true);
 }
 
@@ -276,7 +283,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                          struct guest_walker *gw,
-                         int user_fault, int write_fault, int largepage,
+                         int user_fault, int write_fault, int hlevel,
                          int *ptwrite, pfn_t pfn)
 {
         unsigned access = gw->pt_access;
@@ -289,19 +296,18 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
         pt_element_t curr_pte;
         struct kvm_shadow_walk_iterator iterator;
 
-        if (!is_present_pte(gw->ptes[gw->level - 1]))
+        if (!is_present_gpte(gw->ptes[gw->level - 1]))
                 return NULL;
 
         for_each_shadow_entry(vcpu, addr, iterator) {
                 level = iterator.level;
                 sptep = iterator.sptep;
-                if (level == PT_PAGE_TABLE_LEVEL
-                    || (largepage && level == PT_DIRECTORY_LEVEL)) {
+                if (iterator.level == hlevel) {
                         mmu_set_spte(vcpu, sptep, access,
                                      gw->pte_access & access,
                                      user_fault, write_fault,
                                      gw->ptes[gw->level-1] & PT_DIRTY_MASK,
-                                     ptwrite, largepage,
+                                     ptwrite, level,
                                      gw->gfn, pfn, false);
                         break;
                 }
@@ -311,16 +317,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
                 if (is_large_pte(*sptep)) {
                         rmap_remove(vcpu->kvm, sptep);
-                        set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+                        __set_spte(sptep, shadow_trap_nonpresent_pte);
                         kvm_flush_remote_tlbs(vcpu->kvm);
                 }
 
-                if (level == PT_DIRECTORY_LEVEL
-                    && gw->level == PT_DIRECTORY_LEVEL) {
+                if (level <= gw->level) {
+                        int delta = level - gw->level + 1;
                         direct = 1;
-                        if (!is_dirty_pte(gw->ptes[level - 1]))
+                        if (!is_dirty_gpte(gw->ptes[level - delta]))
                                 access &= ~ACC_WRITE_MASK;
-                        table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+                        table_gfn = gpte_to_gfn(gw->ptes[level - delta]);
+                        /* advance table_gfn when emulating 1gb pages with 4k */
+                        if (delta == 0)
+                                table_gfn += PT_INDEX(addr, level);
                 } else {
                         direct = 0;
                         table_gfn = gw->table_gfn[level - 2];
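The delta arithmetic above is what lets fetch() shadow a guest large page with smaller host pages: for any shadow level at or below the guest mapping level, level - delta always lands on the guest pte that does the mapping (slot gw->level - 1), and when delta reaches 0 (a 1GB guest page shadowed at the 2MB level) the direct page's table_gfn is stepped forward by the directory index so each shadow directory covers the right 2MB slice. A small sketch of just that index arithmetic (hypothetical helper, not KVM code):

#include <assert.h>

enum { PT_PAGE_TABLE_LEVEL = 1, PT_DIRECTORY_LEVEL = 2, PT_PDPE_LEVEL = 3 };

/* which gw->ptes[] slot the shadow walk consults at a given shadow level */
static int guest_pte_slot(int shadow_level, int guest_level)
{
        int delta = shadow_level - guest_level + 1;
        return shadow_level - delta;          /* always guest_level - 1 */
}

int main(void)
{
        /* guest 2MB page, shadow walk at the directory level: the PDE slot */
        assert(guest_pte_slot(PT_DIRECTORY_LEVEL, PT_DIRECTORY_LEVEL) == 1);
        /* guest 1GB page shadowed at the directory level: the PDPE slot,
         * and delta == 0 means table_gfn is advanced by PT_INDEX(addr, 2) */
        assert(guest_pte_slot(PT_DIRECTORY_LEVEL, PT_PDPE_LEVEL) == 2);
        return 0;
}
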
@@ -369,11 +378,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         int user_fault = error_code & PFERR_USER_MASK;
         int fetch_fault = error_code & PFERR_FETCH_MASK;
         struct guest_walker walker;
-        u64 *shadow_pte;
+        u64 *sptep;
         int write_pt = 0;
         int r;
         pfn_t pfn;
-        int largepage = 0;
+        int level = PT_PAGE_TABLE_LEVEL;
         unsigned long mmu_seq;
 
         pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -399,14 +408,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                 return 0;
         }
 
-        if (walker.level == PT_DIRECTORY_LEVEL) {
-                gfn_t large_gfn;
-                large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
-                if (is_largepage_backed(vcpu, large_gfn)) {
-                        walker.gfn = large_gfn;
-                        largepage = 1;
-                }
+        if (walker.level >= PT_DIRECTORY_LEVEL) {
+                level = min(walker.level, mapping_level(vcpu, walker.gfn));
+                walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
         }
+
         mmu_seq = vcpu->kvm->mmu_notifier_seq;
         smp_rmb();
         pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
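page_fault() now chooses a host mapping level instead of a boolean largepage flag: the guest's own level capped by whatever the backing memory can provide, with the gfn rounded down to that level's boundary before the pfn lookup. A hypothetical sketch of the clamping; mapping_level() is stood in for by a plain parameter, and the page counts assume the usual 4KB/2MB/1GB levels:

#include <assert.h>

enum { PT_PAGE_TABLE_LEVEL = 1, PT_DIRECTORY_LEVEL = 2, PT_PDPE_LEVEL = 3 };

/* 4KB pages covered by one mapping at 'level': 1, 512, 512 * 512 */
static unsigned long pages_per_hpage(int level)
{
        unsigned long n = 1;
        while (--level)
                n *= 512;
        return n;
}

static unsigned long clamp_mapping(int guest_level, int host_level,
                                   unsigned long gfn, int *level)
{
        *level = guest_level < host_level ? guest_level : host_level;
        return gfn & ~(pages_per_hpage(*level) - 1);    /* align to the level */
}

int main(void)
{
        int level;
        /* guest maps a 1GB page but the host backing only supports 2MB */
        unsigned long gfn = clamp_mapping(PT_PDPE_LEVEL, PT_DIRECTORY_LEVEL,
                                          0x40323, &level);
        assert(level == PT_DIRECTORY_LEVEL && gfn == 0x40200);
        return 0;
}
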
@@ -422,11 +428,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         if (mmu_notifier_retry(vcpu, mmu_seq))
                 goto out_unlock;
         kvm_mmu_free_some_pages(vcpu);
-        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-                                  largepage, &write_pt, pfn);
-
+        sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+                             level, &write_pt, pfn);
         pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
-                 shadow_pte, *shadow_pte, write_pt);
+                 sptep, *sptep, write_pt);
 
         if (!write_pt)
                 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
@@ -459,8 +464,9 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                 sptep = iterator.sptep;
 
                 /* FIXME: properly handle invlpg on large guest pages */
                 if (level == PT_PAGE_TABLE_LEVEL ||
-                    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
+                    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
+                    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
                         struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
                         pte_gpa = (sp->gfn << PAGE_SHIFT);
@@ -472,7 +478,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                                 --vcpu->kvm->stat.lpages;
                                 need_flush = 1;
                         }
-                        set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+                        __set_spte(sptep, shadow_trap_nonpresent_pte);
                         break;
                 }
 
@@ -489,7 +495,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
         if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                   sizeof(pt_element_t)))
                 return;
-        if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+        if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
                 if (mmu_topup_memory_caches(vcpu))
                         return;
                 kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
@@ -536,7 +542,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                 r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
                 pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
                 for (j = 0; j < ARRAY_SIZE(pt); ++j)
-                        if (r || is_present_pte(pt[j]))
+                        if (r || is_present_gpte(pt[j]))
                                 sp->spt[i+j] = shadow_trap_nonpresent_pte;
                         else
                                 sp->spt[i+j] = shadow_notrap_nonpresent_pte;
@@ -574,23 +580,23 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                                           sizeof(pt_element_t)))
                         return -EINVAL;
 
-                if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
+                if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
                     !(gpte & PT_ACCESSED_MASK)) {
                         u64 nonpresent;
 
                         rmap_remove(vcpu->kvm, &sp->spt[i]);
-                        if (is_present_pte(gpte))
+                        if (is_present_gpte(gpte))
                                 nonpresent = shadow_trap_nonpresent_pte;
                         else
                                 nonpresent = shadow_notrap_nonpresent_pte;
-                        set_shadow_pte(&sp->spt[i], nonpresent);
+                        __set_spte(&sp->spt[i], nonpresent);
                         continue;
                 }
 
                 nr_present++;
                 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-                         is_dirty_pte(gpte), 0, gfn,
+                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
                          spte_to_pfn(sp->spt[i]), true, false);
         }
 
@@ -603,9 +609,10 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX
 #undef PT_LEVEL_MASK
-#undef PT_DIR_BASE_ADDR_MASK
+#undef PT_LVL_ADDR_MASK
+#undef PT_LVL_OFFSET_MASK
 #undef PT_LEVEL_BITS
 #undef PT_MAX_FULL_LEVELS
 #undef gpte_to_gfn
-#undef gpte_to_gfn_pde
+#undef gpte_to_gfn_lvl
 #undef CMPXCHG