diff options
author | Andrea Arcangeli <aarcange@redhat.com> | 2011-01-13 18:47:10 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:46 -0500 |
commit | 8ee53820edfd1f3b6554c593f337148dd3d7fc91 (patch) | |
tree | ca69957e928cd3efa1b47f92dcfb00591702684c /virt/kvm | |
parent | 4b7167b9ff9b7f3f528cbc4c7d02ebd275b9b10c (diff) |
thp: mmu_notifier_test_young
For GRU and EPT, we need gup-fast to set referenced bit too (this is why
it's correct to return 0 when shadow_access_mask is zero, it requires
gup-fast to set the referenced bit). qemu-kvm access already sets the
young bit in the pte if it isn't zero-copy, if it's zero copy or a shadow
paging EPT minor fault we rely on gup-fast to signal the page is in
use...
We also need to check the young bits on the secondary pagetables for NPT
and not nested shadow mmu as the data may never get accessed again by the
primary pte.
Without this closer accuracy, we'd have to remove the heuristic that
avoids collapsing hugepages in hugepage virtual regions that have not even
a single subpage in use.
->test_young is fully backward compatible with GRU and other usages that
don't have young bits in pagetables set by the hardware and that should
nuke the secondary mmu mappings when ->clear_flush_young runs just like
EPT does.
Removing the heuristic that checks the young bit in
khugepaged/collapse_huge_page completely isn't so bad either probably but
I thought it was worth it and this makes it reliable.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'virt/kvm')
-rw-r--r-- | virt/kvm/kvm_main.c | 17 |
1 files changed, 17 insertions, 0 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 85ab7db0d366..4286d4766510 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -380,6 +380,22 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, | |||
380 | return young; | 380 | return young; |
381 | } | 381 | } |
382 | 382 | ||
383 | static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, | ||
384 | struct mm_struct *mm, | ||
385 | unsigned long address) | ||
386 | { | ||
387 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | ||
388 | int young, idx; | ||
389 | |||
390 | idx = srcu_read_lock(&kvm->srcu); | ||
391 | spin_lock(&kvm->mmu_lock); | ||
392 | young = kvm_test_age_hva(kvm, address); | ||
393 | spin_unlock(&kvm->mmu_lock); | ||
394 | srcu_read_unlock(&kvm->srcu, idx); | ||
395 | |||
396 | return young; | ||
397 | } | ||
398 | |||
383 | static void kvm_mmu_notifier_release(struct mmu_notifier *mn, | 399 | static void kvm_mmu_notifier_release(struct mmu_notifier *mn, |
384 | struct mm_struct *mm) | 400 | struct mm_struct *mm) |
385 | { | 401 | { |
@@ -396,6 +412,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | |||
396 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, | 412 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, |
397 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, | 413 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, |
398 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, | 414 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, |
415 | .test_young = kvm_mmu_notifier_test_young, | ||
399 | .change_pte = kvm_mmu_notifier_change_pte, | 416 | .change_pte = kvm_mmu_notifier_change_pte, |
400 | .release = kvm_mmu_notifier_release, | 417 | .release = kvm_mmu_notifier_release, |
401 | }; | 418 | }; |