author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>   2013-06-07 04:51:24 -0400
committer  Gleb Natapov <gleb@redhat.com>                      2013-06-27 07:18:15 -0400
commit     f2fd125d32822ee32779551a70d256a7c27dbe40 (patch)
tree       76c09c50170f32db0587b3e5883f8e1b1b4f245e
parent     885032b91042288f98d3888c2aaf3a108d348d5c (diff)
KVM: MMU: store generation-number into mmio spte
Store the generation-number into bit 3 ~ bit 11 and bit 52 ~ bit 61 of the mmio spte; in total 19 bits can be used, which should be enough for nearly all common cases.

In this patch the generation-number is always 0; it will be changed in a later patch.

[Gleb: masking generation bits from spte in get_mmio_spte_gfn() and get_mmio_spte_access()]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--   arch/x86/kvm/mmu.c           64
-rw-r--r--   arch/x86/kvm/mmutrace.h      10
-rw-r--r--   arch/x86/kvm/paging_tmpl.h    3
3 files changed, 60 insertions, 17 deletions
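The bit layout described in the commit message can be checked in isolation. The sketch below is a standalone userspace illustration, not part of the patch: the constant names are copied from the diff, while pack_gen()/unpack_gen() are hypothetical helpers that mirror generation_mmio_spte_mask() and get_mmio_spte_generation(). It verifies that every 19-bit generation number survives the round trip through spte bits 3 ~ 11 (low 9 bits) and 52 ~ 61 (high 10 bits).

/* Userspace sketch of the generation packing described above; not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MMIO_SPTE_GEN_LOW_SHIFT   3              /* low bits  -> spte bits 3..11  */
#define MMIO_SPTE_GEN_HIGH_SHIFT  52             /* high bits -> spte bits 52..61 */
#define MMIO_GEN_LOW_SHIFT        9              /* 9 low + 10 high = 19 bits     */
#define MMIO_GEN_LOW_MASK         ((1u << MMIO_GEN_LOW_SHIFT) - 1)
#define MMIO_MAX_GEN              ((1u << 19) - 1)

/* Mirrors generation_mmio_spte_mask(): scatter a generation into spte bits. */
static uint64_t pack_gen(unsigned int gen)
{
        uint64_t mask;

        mask  = (uint64_t)(gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
        mask |= ((uint64_t)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
        return mask;
}

/*
 * Mirrors get_mmio_spte_generation(): gather the generation back out.
 * (The kernel version first strips shadow_mmio_mask; the sketch has no
 * such bits set, so the gather is exact.)
 */
static unsigned int unpack_gen(uint64_t spte)
{
        unsigned int gen;

        gen  = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
        gen |= (unsigned int)(spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
        return gen;
}

int main(void)
{
        /* Every representable generation number must survive the round trip. */
        for (unsigned int gen = 0; gen <= MMIO_MAX_GEN; gen++)
                assert(unpack_gen(pack_gen(gen)) == gen);

        printf("all %u generation values round-trip through the spte bits\n",
               MMIO_MAX_GEN + 1);
        return 0;
}

Since this patch always writes generation 0, mark_mmio_spte() below only needs generation_mmio_spte_mask(0); the read side already masks out the full generation range, so get_mmio_spte_gfn() and get_mmio_spte_access() stay correct once non-zero generations appear in a later patch.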
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6941fa74eb35..5d9a1fb108f5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -197,15 +197,52 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
-static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
+/*
+ * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
+ * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
+ * number.
+ */
+#define MMIO_SPTE_GEN_LOW_SHIFT         3
+#define MMIO_SPTE_GEN_HIGH_SHIFT        52
+
+#define MMIO_GEN_LOW_SHIFT      9
+#define MMIO_GEN_LOW_MASK       ((1 << MMIO_GEN_LOW_SHIFT) - 1)
+#define MMIO_MAX_GEN            ((1 << 19) - 1)
+
+static u64 generation_mmio_spte_mask(unsigned int gen)
+{
+        u64 mask;
+
+        WARN_ON(gen > MMIO_MAX_GEN);
+
+        mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
+        mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
+        return mask;
+}
+
+static unsigned int get_mmio_spte_generation(u64 spte)
+{
+        unsigned int gen;
+
+        spte &= ~shadow_mmio_mask;
+
+        gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
+        gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
+        return gen;
+}
+
+static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
+                           unsigned access)
 {
         struct kvm_mmu_page *sp = page_header(__pa(sptep));
+        u64 mask = generation_mmio_spte_mask(0);
 
         access &= ACC_WRITE_MASK | ACC_USER_MASK;
-
+        mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
         sp->mmio_cached = true;
-        trace_mark_mmio_spte(sptep, gfn, access);
-        mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
+
+        trace_mark_mmio_spte(sptep, gfn, access, 0);
+        mmu_spte_set(sptep, mask);
 }
 
 static bool is_mmio_spte(u64 spte)
@@ -215,18 +252,21 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-        return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
+        u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+        return (spte & ~mask) >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
 {
-        return (spte & ~shadow_mmio_mask) & ~PAGE_MASK;
+        u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+        return (spte & ~mask) & ~PAGE_MASK;
 }
 
-static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)
+static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+                          pfn_t pfn, unsigned access)
 {
         if (unlikely(is_noslot_pfn(pfn))) {
-                mark_mmio_spte(sptep, gfn, access);
+                mark_mmio_spte(kvm, sptep, gfn, access);
                 return true;
         }
 
@@ -2364,7 +2404,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         u64 spte;
         int ret = 0;
 
-        if (set_mmio_spte(sptep, gfn, pfn, pte_access))
+        if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access))
                 return 0;
 
         spte = PT_PRESENT_MASK;
@@ -3427,8 +3467,8 @@ static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
         *access &= mask;
 }
 
-static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
-                           int *nr_present)
+static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+                           unsigned access, int *nr_present)
 {
         if (unlikely(is_mmio_spte(*sptep))) {
                 if (gfn != get_mmio_spte_gfn(*sptep)) {
@@ -3437,7 +3477,7 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
                 }
 
                 (*nr_present)++;
-                mark_mmio_spte(sptep, gfn, access);
+                mark_mmio_spte(kvm, sptep, gfn, access);
                 return true;
         }
 
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index eb444dd374af..ad24757041ad 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -199,23 +199,25 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
 
 TRACE_EVENT(
         mark_mmio_spte,
-        TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
-        TP_ARGS(sptep, gfn, access),
+        TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
+        TP_ARGS(sptep, gfn, access, gen),
 
         TP_STRUCT__entry(
                 __field(void *, sptep)
                 __field(gfn_t, gfn)
                 __field(unsigned, access)
+                __field(unsigned int, gen)
         ),
 
         TP_fast_assign(
                 __entry->sptep = sptep;
                 __entry->gfn = gfn;
                 __entry->access = access;
+                __entry->gen = gen;
         ),
 
-        TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
-                  __entry->access)
+        TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
+                  __entry->gfn, __entry->access, __entry->gen)
 );
 
 TRACE_EVENT(
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index da20860b457a..fb50fa64f36c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -792,7 +792,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                 pte_access &= gpte_access(vcpu, gpte);
                 protect_clean_gpte(&pte_access, gpte);
 
-                if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
+                if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
+                                   &nr_present))
                         continue;
 
                 if (gfn != sp->gfns[i]) {