author     Nadav Amit <nadav.amit@gmail.com>          2016-05-11 11:04:29 -0400
committer  Radim Krčmář <rkrcmar@redhat.com>          2016-06-02 11:38:50 -0400
commit     b19ee2ff3b287fea48a2896a381e31319394fe58 (patch)
tree       f1678e5632fa80ba384d77546a26fd2b6a963725
parent     13e98fd1efc7f65cab1bba6cfab7859840f9aa66 (diff)
KVM: x86: avoid write-tearing of TDP
In theory, nothing prevents the compiler from write-tearing PTEs, i.e.
splitting a single PTE write into multiple smaller writes. Such
partially-modified PTEs can be fetched by other cores and cause mayhem.
I have not actually encountered such a case in real life, but it does
seem possible.

For example, the compiler may try to do something creative in
kvm_set_pte_rmapp() and perform multiple writes to the PTE.
Signed-off-by: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
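To make the hazard concrete, here is a minimal standalone sketch (not part of
the patch) contrasting a plain 64-bit store, which the compiler is in
principle free to split, with a WRITE_ONCE()-style volatile store.
write_once_u64() is a hypothetical, simplified stand-in for the kernel's
WRITE_ONCE() macro, assuming a naturally aligned 64-bit value:

#include <stdint.h>

/*
 * Plain store: nothing in the C standard forbids the compiler from
 * emitting this as two 32-bit writes, or from using *pte as scratch
 * space, so another CPU walking the page tables could observe a
 * half-updated PTE.
 */
static void set_pte_plain(uint64_t *pte, uint64_t val)
{
	*pte = val;
}

/*
 * Volatile store: a simplified model of WRITE_ONCE().  The volatile
 * access tells the compiler to perform exactly one store of the full
 * value, which for a naturally aligned 64-bit word is not torn.
 */
static void write_once_u64(uint64_t *pte, uint64_t val)
{
	*(volatile uint64_t *)pte = val;
}

On x86-64 a single aligned 64-bit store is already atomic at the hardware
level; WRITE_ONCE() is about keeping the compiler from breaking that store
up, not about adding ordering.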
 arch/x86/kvm/mmu.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24e800116ab4..def97b3a392b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
 	 */
 	smp_wmb();
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 	ssptep = (union split_spte *)sptep;
 	sspte = (union split_spte)spte;
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
 	/*
 	 * If we map the spte from present to nonpresent, we should clear
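In the 32-bit (!CONFIG_X86_64) path touched by the last two hunks, the SPTE is
written as two halves, and the low half, which carries the present bit, is
written last, after smp_wmb(). The following is a hypothetical, simplified
model of that ordering, not the kernel source: split_spte_example and
set_spte_sketch() are made-up names, and atomic_thread_fence() stands in for
the kernel's smp_wmb():

#include <stdint.h>
#include <stdatomic.h>

union split_spte_example {
	struct {
		uint32_t spte_low;	/* carries the present bit */
		uint32_t spte_high;
	};
	uint64_t spte;
};

static void set_spte_sketch(union split_spte_example *sptep, uint64_t spte)
{
	union split_spte_example sspte = { .spte = spte };

	/* Publish the high half first ... */
	*(volatile uint32_t *)&sptep->spte_high = sspte.spte_high;

	/* ... make sure it is visible before the low half ... */
	atomic_thread_fence(memory_order_release);

	/*
	 * ... then set the low half (and thus the present bit) with a
	 * single, untorn store, which is the job WRITE_ONCE() does in
	 * the patched __set_spte().
	 */
	*(volatile uint32_t *)&sptep->spte_low = sspte.spte_low;
}

If that final store could be torn, a CPU doing a lockless page-table walk
could see the present bit set while the rest of the low word is still stale,
which is exactly the kind of mayhem the commit message describes.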