diff options
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 17 |
1 file changed, 14 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7113a0fb544c..f385a4cf4bfd 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -466,9 +466,20 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) | |||
466 | /* | 466 | /* |
467 | * The idea using the light way get the spte on x86_32 guest is from | 467 | * The idea using the light way get the spte on x86_32 guest is from |
468 | * gup_get_pte(arch/x86/mm/gup.c). | 468 | * gup_get_pte(arch/x86/mm/gup.c). |
469 | * The difference is we can not catch the spte tlb flush if we leave | 469 | * |
470 | * guest mode, so we emulate it by increase clear_spte_count when spte | 470 | * An spte tlb flush may be pending, because kvm_set_pte_rmapp |
471 | * is cleared. | 471 | * coalesces them and we are running out of the MMU lock. Therefore |
472 | * we need to protect against in-progress updates of the spte. | ||
473 | * | ||
474 | * Reading the spte while an update is in progress may get the old value | ||
475 | * for the high part of the spte. The race is fine for a present->non-present | ||
476 | * change (because the high part of the spte is ignored for non-present spte), | ||
477 | * but for a present->present change we must reread the spte. | ||
478 | * | ||
479 | * All such changes are done in two steps (present->non-present and | ||
480 | * non-present->present), hence it is enough to count the number of | ||
481 | * present->non-present updates: if it changed while reading the spte, | ||
482 | * we might have hit the race. This is done using clear_spte_count. | ||
472 | */ | 483 | */ |
473 | static u64 __get_spte_lockless(u64 *sptep) | 484 | static u64 __get_spte_lockless(u64 *sptep) |
474 | { | 485 | { |