author     Avi Kivity <avi@qumranet.com>             2006-12-13 03:34:02 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-13 12:05:47 -0500
commit     8c7bb723b4e36dbd4b144176116d126104dc65e0 (patch)
tree       d5f13bce4876887c8892297e2a31339efa5e6268 /drivers
parent     0770b19b94ed8fc97e1fcac91c320ec738919628 (diff)
[PATCH] KVM: MMU: Ignore pcd, pwt, and pat bits on ptes
The pcd, pwt, and pat bits on page table entries affect the cpu cache. Since
the cache is a host resource, the guest should not be able to control it.
Moreover, the meaning of these bits changes depending on whether pat is
enabled or not. So, force these bits to zero on shadow page table entries at
all times.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
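As background (not part of the commit), a minimal user-space C sketch of the approach the patch takes: the shadow entry is built by masking the guest entry with a copy mask that simply omits the pwt, pcd, and pat bits, so those cache-control bits are always zero in the shadow page tables. The bit positions follow the architectural x86 PTE layout and the mask names mirror KVM's, but the program itself is only an illustration, not KVM code.

#include <stdio.h>
#include <stdint.h>

/* x86 PTE bit positions (architectural). */
#define PT_PRESENT_MASK   (1ULL << 0)
#define PT_WRITABLE_MASK  (1ULL << 1)
#define PT_USER_MASK      (1ULL << 2)
#define PT_PWT_MASK       (1ULL << 3)   /* page write-through */
#define PT_PCD_MASK       (1ULL << 4)   /* page cache disable */
#define PT_ACCESSED_MASK  (1ULL << 5)
#define PT_DIRTY_MASK     (1ULL << 6)
#define PT_PAT_MASK       (1ULL << 7)   /* PAT index bit in a 4K PTE */
#define PT_GLOBAL_MASK    (1ULL << 8)

/* Copy mask in the spirit of the patched PT32_PTE_COPY_MASK:
 * the cache-control bits are deliberately absent. */
#define PTE_COPY_MASK \
        (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)

int main(void)
{
        /* Hypothetical guest PTE that tries to disable caching. */
        uint64_t guest_pte = PT_PRESENT_MASK | PT_WRITABLE_MASK |
                             PT_PWT_MASK | PT_PCD_MASK | PT_PAT_MASK;

        /* Only the whitelisted bits survive into the shadow PTE. */
        uint64_t shadow_pte = guest_pte & PTE_COPY_MASK;

        printf("guest  pte = %#llx\n", (unsigned long long)guest_pte);
        printf("shadow pte = %#llx (pwt/pcd/pat cleared)\n",
               (unsigned long long)shadow_pte);
        return 0;
}

Compiled with any C99 compiler, this prints a guest PTE that requests cache-disable and a shadow PTE with those bits forced to zero.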
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/kvm/mmu.c          | 17
-rw-r--r--  drivers/kvm/paging_tmpl.h  | 20
2 files changed, 9 insertions(+), 28 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 4e29d9b7211c..3d367cbfe1f9 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -61,22 +61,9 @@
 
 
 #define PT32_PTE_COPY_MASK \
-        (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
-        PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_PAT_MASK | \
-        PT_GLOBAL_MASK )
-
-#define PT32_NON_PTE_COPY_MASK \
-        (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
-        PT_ACCESSED_MASK | PT_DIRTY_MASK)
-
-
-#define PT64_PTE_COPY_MASK \
-        (PT64_NX_MASK | PT32_PTE_COPY_MASK)
-
-#define PT64_NON_PTE_COPY_MASK \
-        (PT64_NX_MASK | PT32_NON_PTE_COPY_MASK)
-
+        (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
 
+#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 765c2e1a048e..a9771b4c5bb8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -32,7 +32,6 @@
         #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
         #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
         #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
-        #define PT_NON_PTE_COPY_MASK PT64_NON_PTE_COPY_MASK
 #elif PTTYPE == 32
         #define pt_element_t u32
         #define guest_walker guest_walker32
@@ -43,7 +42,6 @@
         #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
         #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
         #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
-        #define PT_NON_PTE_COPY_MASK PT32_NON_PTE_COPY_MASK
 #else
         #error Invalid PTTYPE value
 #endif
@@ -105,9 +103,7 @@ static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
         if (PTTYPE == 32 && is_cpuid_PSE36())
                 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
                         (32 - PT32_DIR_PSE36_SHIFT);
-        *shadow_pte = (guest_pde & (PT_NON_PTE_COPY_MASK | PT_GLOBAL_MASK)) |
-                ((guest_pde & PT_DIR_PAT_MASK) >>
-                (PT_DIR_PAT_SHIFT - PT_PAT_SHIFT));
+        *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
         set_pte_common(vcpu, shadow_pte, gaddr,
                        guest_pde & PT_DIRTY_MASK, access_bits);
 }
@@ -162,6 +158,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 u32 index = SHADOW_PT_INDEX(addr, level);
                 u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                 pt_element_t *guest_ent;
+                u64 shadow_pte;
 
                 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
                         if (level == PT_PAGE_TABLE_LEVEL)
@@ -204,14 +201,11 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
                 if (!VALID_PAGE(shadow_addr))
                         return ERR_PTR(-ENOMEM);
-                if (!kvm_arch_ops->is_long_mode(vcpu) && level == 3)
-                        *shadow_ent = shadow_addr |
-                        (*guest_ent & (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK));
-                else {
-                        *shadow_ent = shadow_addr |
-                        (*guest_ent & PT_NON_PTE_COPY_MASK);
-                        *shadow_ent |= (PT_WRITABLE_MASK | PT_USER_MASK);
-                }
+                shadow_pte = shadow_addr | PT_PRESENT_MASK;
+                if (vcpu->mmu.root_level > 3 || level != 3)
+                        shadow_pte |= PT_ACCESSED_MASK
+                                | PT_WRITABLE_MASK | PT_USER_MASK;
+                *shadow_ent = shadow_pte;
                 prev_shadow_ent = shadow_ent;
         }
 }