Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	86
1 file changed, 71 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 982718fe12a..a22b5fe9216 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -259,26 +259,82 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
 }
 
+#ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-	set_64bit(sptep, spte);
+	*sptep = spte;
 }
 
-static u64 __xchg_spte(u64 *sptep, u64 new_spte)
+static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-#ifdef CONFIG_X86_64
-	return xchg(sptep, new_spte);
+	*sptep = spte;
+}
+
+static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
+{
+	return xchg(sptep, spte);
+}
 #else
-	u64 old_spte;
+union split_spte {
+	struct {
+		u32 spte_low;
+		u32 spte_high;
+	};
+	u64 spte;
+};
 
-	do {
-		old_spte = *sptep;
-	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
+static void __set_spte(u64 *sptep, u64 spte)
+{
+	union split_spte *ssptep, sspte;
 
-	return old_spte;
-#endif
+	ssptep = (union split_spte *)sptep;
+	sspte = (union split_spte)spte;
+
+	ssptep->spte_high = sspte.spte_high;
+
+	/*
+	 * If we map the spte from nonpresent to present, we should store
+	 * the high bits first and only then set the present bit, so the
+	 * CPU cannot fetch this spte while we are setting it.
+	 */
+	smp_wmb();
+
+	ssptep->spte_low = sspte.spte_low;
 }
 
+static void __update_clear_spte_fast(u64 *sptep, u64 spte)
+{
+	union split_spte *ssptep, sspte;
+
+	ssptep = (union split_spte *)sptep;
+	sspte = (union split_spte)spte;
+
+	ssptep->spte_low = sspte.spte_low;
+
+	/*
+	 * If we map the spte from present to nonpresent, we should clear
+	 * the present bit first to keep the vcpu from fetching the old
+	 * high bits.
+	 */
+	smp_wmb();
+
+	ssptep->spte_high = sspte.spte_high;
+}
+
+static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
+{
+	union split_spte *ssptep, sspte, orig;
+
+	ssptep = (union split_spte *)sptep;
+	sspte = (union split_spte)spte;
+
+	/* xchg acts as a barrier before the setting of the high bits */
+	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
+	orig.spte_high = ssptep->spte_high = sspte.spte_high;
+
+	return orig.spte;
+}
+#endif
+
 static bool spte_has_volatile_bits(u64 spte)
 {
 	if (!shadow_accessed_mask)
@@ -330,9 +386,9 @@ static void mmu_spte_update(u64 *sptep, u64 new_spte)
 		mask |= shadow_dirty_mask;
 
 	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
-		__set_spte(sptep, new_spte);
+		__update_clear_spte_fast(sptep, new_spte);
 	else
-		old_spte = __xchg_spte(sptep, new_spte);
+		old_spte = __update_clear_spte_slow(sptep, new_spte);
 
 	if (!shadow_accessed_mask)
 		return;
@@ -354,9 +410,9 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	u64 old_spte = *sptep;
 
 	if (!spte_has_volatile_bits(old_spte))
-		__set_spte(sptep, 0ull);
+		__update_clear_spte_fast(sptep, 0ull);
 	else
-		old_spte = __xchg_spte(sptep, 0ull);
+		old_spte = __update_clear_spte_slow(sptep, 0ull);
 
 	if (!is_rmap_spte(old_spte))
 		return 0;
@@ -376,7 +432,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
  */
 static void mmu_spte_clear_no_track(u64 *sptep)
 {
-	__set_spte(sptep, 0ull);
+	__update_clear_spte_fast(sptep, 0ull);
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
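
For readers without a 32-bit PAE build at hand, here is a minimal, standalone userspace sketch of the split-write ordering that the #else (non-CONFIG_X86_64) path above relies on. The union layout and the high-before-low / low-before-high ordering mirror the patch; everything else is invented for illustration: the set_spte_split/clear_spte_split names, the C11 release fence standing in for the kernel's smp_wmb(), and the sample spte value in main().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the patch's union split_spte: one 64-bit spte seen as two 32-bit halves. */
union split_spte {
	struct {
		uint32_t spte_low;	/* low half carries the present bit (bit 0) */
		uint32_t spte_high;
	};
	uint64_t spte;
};

/* nonpresent -> present: publish the high half before the half holding the present bit. */
static void set_spte_split(volatile union split_spte *sptep, uint64_t spte)
{
	union split_spte sspte = { .spte = spte };

	sptep->spte_high = sspte.spte_high;
	/* Stand-in for the kernel's smp_wmb(): keeps the two half-stores ordered. */
	atomic_thread_fence(memory_order_release);
	sptep->spte_low = sspte.spte_low;
}

/* present -> nonpresent: retract the present bit before touching the high half. */
static void clear_spte_split(volatile union split_spte *sptep, uint64_t spte)
{
	union split_spte sspte = { .spte = spte };

	sptep->spte_low = sspte.spte_low;
	atomic_thread_fence(memory_order_release);	/* stand-in for smp_wmb() */
	sptep->spte_high = sspte.spte_high;
}

int main(void)
{
	union split_spte e = { .spte = 0 };

	set_spte_split(&e, 0x00000007deadb001ULL);	/* made-up spte value */
	printf("after set:   %#018llx\n", (unsigned long long)e.spte);
	clear_spte_split(&e, 0);
	printf("after clear: %#018llx\n", (unsigned long long)e.spte);
	return 0;
}

With this ordering a vcpu walking the page table concurrently never observes a present low half paired with stale high bits, which is exactly the property the comments in __set_spte() and __update_clear_spte_fast() describe.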