Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	39
1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7f686251f711..f29abeb6a912 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -104,8 +104,26 @@ static pfn_t fault_pfn;
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
-		struct page *page = compound_head(pfn_to_page(pfn));
-		return PageReserved(page);
+		int reserved;
+		struct page *tail = pfn_to_page(pfn);
+		struct page *head = compound_trans_head(tail);
+		reserved = PageReserved(head);
+		if (head != tail) {
+			/*
+			 * "head" is not a dangling pointer
+			 * (compound_trans_head takes care of that)
+			 * but the hugepage may have been split
+			 * from under us (and we may not hold a
+			 * reference count on the head page, so it can
+			 * be reused before we run PageReserved), so
+			 * we have to check PageTail before returning
+			 * what we just read.
+			 */
+			smp_rmb();
+			if (PageTail(tail))
+				return reserved;
+		}
+		return PageReserved(tail);
 	}
 
 	return true;
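
The hunk above replaces the plain compound_head() lookup with compound_trans_head() plus a re-check: with transparent hugepages the compound page can be split at any moment, and the old head page may even be freed and reused, so PageReserved(head) is only trustworthy if the page is still a tail after a read barrier. The following user-space sketch illustrates the same snapshot/barrier/revalidate pattern; every name in it (fake_page, split_and_recycle, ...) is hypothetical and not a kernel interface, and the C11 fences merely stand in for smp_wmb()/smp_rmb().

/*
 * Illustration only: a user-space analogue of the lockless check above.
 * All names here are made up for the example and are not kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	atomic_bool is_tail;	/* plays the role of PageTail()         */
	atomic_int  head_flags;	/* plays the role of PageReserved(head) */
};

/* Writer: a "split" clears tail status before the head may be recycled. */
static void split_and_recycle(struct fake_page *p)
{
	atomic_store_explicit(&p->is_tail, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* akin to smp_wmb() */
	atomic_store_explicit(&p->head_flags, -1, memory_order_relaxed);
}

/*
 * Reader: snapshot the head flags first, then re-check tail status.
 * If the page is still a tail, the snapshot was taken before any
 * recycling of the head could have happened, so it can be trusted.
 */
static bool head_flags_if_still_tail(struct fake_page *p, int *flags)
{
	int snapshot = atomic_load_explicit(&p->head_flags,
					    memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* akin to smp_rmb() */

	if (atomic_load_explicit(&p->is_tail, memory_order_relaxed)) {
		*flags = snapshot;
		return true;
	}
	return false;	/* split under us: snapshot must be discarded */
}

int main(void)
{
	struct fake_page p = { .is_tail = true, .head_flags = 1 };
	int flags;

	if (head_flags_if_still_tail(&p, &flags))
		printf("still a tail, trusted flags = %d\n", flags);

	split_and_recycle(&p);
	if (!head_flags_if_still_tail(&p, &flags))
		printf("split under us, snapshot discarded\n");
	return 0;
}

When the re-check fails, a caller falls back to the page's own flags, which is exactly what the final return PageReserved(tail); in the hunk does.
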
@@ -352,6 +370,22 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 	return young;
 }
 
+static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
+				       struct mm_struct *mm,
+				       unsigned long address)
+{
+	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	int young, idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+	young = kvm_test_age_hva(kvm, address);
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	return young;
+}
+
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 				     struct mm_struct *mm)
 {
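
The new callback is the non-destructive counterpart of clear_flush_young: it reports whether the page backing address is young in KVM's secondary page tables, but does not clear the accessed bit or flush TLBs, so core MM can poll access information cheaply. The sketch below shows how another secondary-MMU user might wire up the same hook, following the pattern above; struct my_mmu and its helpers are hypothetical, and only struct mmu_notifier_ops, mmu_notifier_register() and the .test_young signature are taken from the kernel.

/*
 * Sketch of a minimal secondary-MMU user implementing .test_young.
 * "my_mmu" and its helpers are hypothetical.
 */
#include <linux/mmu_notifier.h>
#include <linux/spinlock.h>

struct my_mmu {
	struct mmu_notifier mn;
	spinlock_t lock;
	/* ... private page-table state would live here ... */
};

/* Hypothetical stub: walk the private mappings for @address. */
static int my_mmu_page_was_accessed(struct my_mmu *m, unsigned long address)
{
	return 0;
}

static int my_mmu_test_young(struct mmu_notifier *mn,
			     struct mm_struct *mm,
			     unsigned long address)
{
	struct my_mmu *m = container_of(mn, struct my_mmu, mn);
	int young;

	/*
	 * Like the KVM version above: report whether the page is young in
	 * the secondary page tables, but do not clear the accessed bit and
	 * do not flush any TLBs.
	 */
	spin_lock(&m->lock);
	young = my_mmu_page_was_accessed(m, address);
	spin_unlock(&m->lock);

	return young;
}

static const struct mmu_notifier_ops my_mmu_notifier_ops = {
	.test_young	= my_mmu_test_young,
};

/* Attach to an mm, e.g. from a driver's open path. */
static int my_mmu_attach(struct my_mmu *m, struct mm_struct *mm)
{
	spin_lock_init(&m->lock);
	m->mn.ops = &my_mmu_notifier_ops;
	return mmu_notifier_register(&m->mn, mm);
}

KVM additionally brackets the lookup with srcu_read_lock(&kvm->srcu) because its memslot array is SRCU-protected; a simpler user that keeps all of its state under a single lock does not need that extra layer.
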
@@ -368,6 +402,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
+	.test_young		= kvm_mmu_notifier_test_young,
 	.change_pte		= kvm_mmu_notifier_change_pte,
 	.release		= kvm_mmu_notifier_release,
 };