diff options
author | Gleb Natapov <gleb@redhat.com> | 2013-08-05 04:07:11 -0400 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2013-08-07 09:57:37 -0400 |
commit | 61719a8fff3da865cdda57dd62974e561e16315d (patch) | |
tree | 61cec4f5d28d4a7ce092884c0cd1c0f117de132c | |
parent | d8089baca4f6895ce9c7bdabd2fca48a23feee79 (diff) |
nEPT: Support shadow paging for guest paging without A/D bits
Some guest paging modes do not support A/D bits. Add support for such
modes in shadow page code. For such modes, PT_GUEST_DIRTY_MASK,
PT_GUEST_ACCESSED_MASK, PT_GUEST_DIRTY_SHIFT and PT_GUEST_ACCESSED_SHIFT
should be set to zero.
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 16 |
1 file changed, 13 insertions, 3 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 83702a68e987..656f7fae312a 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -92,6 +92,10 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte) | |||
92 | { | 92 | { |
93 | unsigned mask; | 93 | unsigned mask; |
94 | 94 | ||
95 | /* dirty bit is not supported, so no need to track it */ | ||
96 | if (!PT_GUEST_DIRTY_MASK) | ||
97 | return; | ||
98 | |||
95 | BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); | 99 | BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); |
96 | 100 | ||
97 | mask = (unsigned)~ACC_WRITE_MASK; | 101 | mask = (unsigned)~ACC_WRITE_MASK; |
@@ -147,7 +151,8 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, | |||
147 | if (!FNAME(is_present_gpte)(gpte)) | 151 | if (!FNAME(is_present_gpte)(gpte)) |
148 | goto no_present; | 152 | goto no_present; |
149 | 153 | ||
150 | if (!(gpte & PT_GUEST_ACCESSED_MASK)) | 154 | /* if accessed bit is not supported prefetch non accessed gpte */ |
155 | if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK)) | ||
151 | goto no_present; | 156 | goto no_present; |
152 | 157 | ||
153 | return false; | 158 | return false; |
@@ -178,6 +183,10 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, | |||
178 | gfn_t table_gfn; | 183 | gfn_t table_gfn; |
179 | int ret; | 184 | int ret; |
180 | 185 | ||
186 | /* dirty/accessed bits are not supported, so no need to update them */ | ||
187 | if (!PT_GUEST_DIRTY_MASK) | ||
188 | return 0; | ||
189 | |||
181 | for (level = walker->max_level; level >= walker->level; --level) { | 190 | for (level = walker->max_level; level >= walker->level; --level) { |
182 | pte = orig_pte = walker->ptes[level - 1]; | 191 | pte = orig_pte = walker->ptes[level - 1]; |
183 | table_gfn = walker->table_gfn[level - 1]; | 192 | table_gfn = walker->table_gfn[level - 1]; |
@@ -316,8 +325,9 @@ retry_walk: | |||
316 | FNAME(protect_clean_gpte)(&pte_access, pte); | 325 | FNAME(protect_clean_gpte)(&pte_access, pte); |
317 | else | 326 | else |
318 | /* | 327 | /* |
319 | * On a write fault, fold the dirty bit into accessed_dirty by | 328 | * On a write fault, fold the dirty bit into accessed_dirty. |
320 | * shifting it one place right. | 329 | * For modes without A/D bits support accessed_dirty will be |
330 | * always clear. | ||
321 | */ | 331 | */ |
322 | accessed_dirty &= pte >> | 332 | accessed_dirty &= pte >> |
323 | (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); | 333 | (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); |