aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm/paging_tmpl.h
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2007-05-30 12:31:17 -0400
committerAvi Kivity <avi@qumranet.com>2007-07-16 05:05:43 -0400
commite60d75ea292071e7ab33c10ca73fdd33fcbbe501 (patch)
tree85ca6a1b9f0dca02ec166acca4627127312ecf8e /drivers/kvm/paging_tmpl.h
parentef0197e8d9273ad8fbfb1bbd30e46e42a32c79e8 (diff)
KVM: MMU: Move set_pte_common() to pte width dependent code
In preparation for some modifications. Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/paging_tmpl.h')
-rw-r--r--drivers/kvm/paging_tmpl.h56
1 file changed, 52 insertions, 4 deletions
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index e094a8ba17a8..65763007f04d 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -192,14 +192,62 @@ static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
192 mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]); 192 mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
193} 193}
194 194
/*
 * FNAME(set_pte_common) - finish filling in a shadow pte for a guest mapping.
 *
 * @vcpu:        vcpu whose shadow page tables are being built
 * @shadow_pte:  shadow pte to complete; the caller has already seeded it
 *               with the PT_PTE_COPY_MASK bits of the guest pte
 * @gaddr:       guest physical address being mapped
 * @dirty:       dirty bit from the guest pte (nonzero if set)
 * @access_bits: access rights derived from the guest page-table walk
 * @gfn:         guest frame number of the mapped page
 *
 * Translates @gaddr to a host physical address and merges address,
 * access rights, and bookkeeping bits into *@shadow_pte.  Handles
 * three special cases: MMIO (no backing host page), write-protecting
 * pages that are themselves shadowed guest page tables, and marking
 * the guest page dirty when a writable mapping is created.
 */
195static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
196 u64 *shadow_pte,
197 gpa_t gaddr,
198 int dirty,
199 u64 access_bits,
200 gfn_t gfn)
201{
202 hpa_t paddr;
203
/* Stash the guest access rights in the shadow-reserved bit range. */
204 *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
/*
 * Guest pte not dirty: drop the writable bit so the first write
 * faults — presumably so the guest dirty bit can then be emulated;
 * standard shadow-paging dirty tracking (confirm against the
 * write-fault path).
 */
205 if (!dirty)
206 access_bits &= ~PT_WRITABLE_MASK;
207
208 paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
209
210 *shadow_pte |= access_bits;
211
/*
 * No host page backs this guest address: record the guest address
 * and the I/O marker in the (non-present) pte so the fault handler
 * can recognize it as MMIO later.
 */
212 if (is_error_hpa(paddr)) {
213 *shadow_pte |= gaddr;
214 *shadow_pte |= PT_SHADOW_IO_MARK;
215 *shadow_pte &= ~PT_PRESENT_MASK;
216 return;
217 }
218
219 *shadow_pte |= paddr;
220
/*
 * If the page we are mapping writable is itself shadowed as a guest
 * page table, write-protect it so guest page-table writes keep
 * trapping; flush the TLB only if a writable translation may already
 * be cached.
 */
221 if (access_bits & PT_WRITABLE_MASK) {
222 struct kvm_mmu_page *shadow;
223
224 shadow = kvm_mmu_lookup_page(vcpu, gfn);
225 if (shadow) {
226 pgprintk("%s: found shadow page for %lx, marking ro\n",
227 __FUNCTION__, gfn);
228 access_bits &= ~PT_WRITABLE_MASK;
229 if (is_writeble_pte(*shadow_pte)) {
230 *shadow_pte &= ~PT_WRITABLE_MASK;
231 kvm_arch_ops->tlb_flush(vcpu);
232 }
233 }
234 }
235
/* Still writable after the checks above: the page may be modified. */
236 if (access_bits & PT_WRITABLE_MASK)
237 mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
238
/* Bookkeeping: remember the memslot and add a reverse mapping. */
239 page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
240 rmap_add(vcpu, shadow_pte);
241}
242
/*
 * FNAME(set_pte) - initialize a shadow pte from a guest pte.
 * Masks the requested access rights with what the guest pte grants,
 * seeds the shadow pte with the directly-copied guest bits, then
 * defers to set_pte_common() for translation and bookkeeping.
 * (Diff: the call becomes FNAME(set_pte_common) now that the helper
 * is pte-width dependent.)
 */
195static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, 243static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
196 u64 *shadow_pte, u64 access_bits, gfn_t gfn) 244 u64 *shadow_pte, u64 access_bits, gfn_t gfn)
197{ 245{
198 ASSERT(*shadow_pte == 0); 246 ASSERT(*shadow_pte == 0);
199 access_bits &= guest_pte; 247 access_bits &= guest_pte;
200 *shadow_pte = (guest_pte & PT_PTE_COPY_MASK); 248 *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
201 set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK, 249 FNAME(set_pte_common)(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
202 guest_pte & PT_DIRTY_MASK, access_bits, gfn); 250 guest_pte & PT_DIRTY_MASK, access_bits, gfn);
203} 251}
204 252
205static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, 253static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
@@ -229,8 +277,8 @@ static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
229 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) << 277 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
230 (32 - PT32_DIR_PSE36_SHIFT); 278 (32 - PT32_DIR_PSE36_SHIFT);
231 *shadow_pte = guest_pde & PT_PTE_COPY_MASK; 279 *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
232 set_pte_common(vcpu, shadow_pte, gaddr, 280 FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
233 guest_pde & PT_DIRTY_MASK, access_bits, gfn); 281 guest_pde & PT_DIRTY_MASK, access_bits, gfn);
234} 282}
235 283
236/* 284/*