diff options
author | Andres Lagar-Cavilla <andreslc@google.com> | 2014-09-22 17:54:42 -0400 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-09-24 08:07:58 -0400 |
commit | 57128468080a8b6ea452223036d3e417f748af55 (patch) | |
tree | e89cfc349a9c39710cfab4e387119365a0d64958 /include/linux/mmu_notifier.h | |
parent | 8a9522d2fe6a1b643d3aef5ab7f097f73c601e7a (diff) |
kvm: Fix page ageing bugs
1. We were calling clear_flush_young_notify in unmap_one, but we are
within an mmu notifier invalidate range scope. The spte exists no more
(due to range_start) and the accessed bit info has already been
propagated (due to kvm_set_pfn_accessed). Simply call
clear_flush_young.
2. We clear_flush_young on a primary MMU PMD, but this may be mapped
as a collection of PTEs by the secondary MMU (e.g. during log-dirty).
This required expanding the interface of the clear_flush_young mmu
notifier, so a lot of code has been trivially touched.
3. In the absence of shadow_accessed_mask (e.g. EPT A bit), we emulate
the access bit by blowing the spte. This requires proper synchronizing
with MMU notifier consumers, like every other removal of sptes does.
Signed-off-by: Andres Lagar-Cavilla <andreslc@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'include/linux/mmu_notifier.h')
-rw-r--r-- | include/linux/mmu_notifier.h | 24 |
1 file changed, 17 insertions, 7 deletions
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 27288692241e..88787bb4b3b9 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
@@ -57,10 +57,13 @@ struct mmu_notifier_ops { | |||
57 | * pte. This way the VM will provide proper aging to the | 57 | * pte. This way the VM will provide proper aging to the |
58 | * accesses to the page through the secondary MMUs and not | 58 | * accesses to the page through the secondary MMUs and not |
59 | * only to the ones through the Linux pte. | 59 | * only to the ones through the Linux pte. |
60 | * Start-end is necessary in case the secondary MMU is mapping the page | ||
61 | * at a smaller granularity than the primary MMU. | ||
60 | */ | 62 | */ |
61 | int (*clear_flush_young)(struct mmu_notifier *mn, | 63 | int (*clear_flush_young)(struct mmu_notifier *mn, |
62 | struct mm_struct *mm, | 64 | struct mm_struct *mm, |
63 | unsigned long address); | 65 | unsigned long start, |
66 | unsigned long end); | ||
64 | 67 | ||
65 | /* | 68 | /* |
66 | * test_young is called to check the young/accessed bitflag in | 69 | * test_young is called to check the young/accessed bitflag in |
@@ -175,7 +178,8 @@ extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn, | |||
175 | extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); | 178 | extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); |
176 | extern void __mmu_notifier_release(struct mm_struct *mm); | 179 | extern void __mmu_notifier_release(struct mm_struct *mm); |
177 | extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, | 180 | extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, |
178 | unsigned long address); | 181 | unsigned long start, |
182 | unsigned long end); | ||
179 | extern int __mmu_notifier_test_young(struct mm_struct *mm, | 183 | extern int __mmu_notifier_test_young(struct mm_struct *mm, |
180 | unsigned long address); | 184 | unsigned long address); |
181 | extern void __mmu_notifier_change_pte(struct mm_struct *mm, | 185 | extern void __mmu_notifier_change_pte(struct mm_struct *mm, |
@@ -194,10 +198,11 @@ static inline void mmu_notifier_release(struct mm_struct *mm) | |||
194 | } | 198 | } |
195 | 199 | ||
196 | static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, | 200 | static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, |
197 | unsigned long address) | 201 | unsigned long start, |
202 | unsigned long end) | ||
198 | { | 203 | { |
199 | if (mm_has_notifiers(mm)) | 204 | if (mm_has_notifiers(mm)) |
200 | return __mmu_notifier_clear_flush_young(mm, address); | 205 | return __mmu_notifier_clear_flush_young(mm, start, end); |
201 | return 0; | 206 | return 0; |
202 | } | 207 | } |
203 | 208 | ||
@@ -255,7 +260,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
255 | unsigned long ___address = __address; \ | 260 | unsigned long ___address = __address; \ |
256 | __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ | 261 | __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ |
257 | __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ | 262 | __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ |
258 | ___address); \ | 263 | ___address, \ |
264 | ___address + \ | ||
265 | PAGE_SIZE); \ | ||
259 | __young; \ | 266 | __young; \ |
260 | }) | 267 | }) |
261 | 268 | ||
@@ -266,7 +273,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
266 | unsigned long ___address = __address; \ | 273 | unsigned long ___address = __address; \ |
267 | __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ | 274 | __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ |
268 | __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ | 275 | __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ |
269 | ___address); \ | 276 | ___address, \ |
277 | ___address + \ | ||
278 | PMD_SIZE); \ | ||
270 | __young; \ | 279 | __young; \ |
271 | }) | 280 | }) |
272 | 281 | ||
@@ -301,7 +310,8 @@ static inline void mmu_notifier_release(struct mm_struct *mm) | |||
301 | } | 310 | } |
302 | 311 | ||
303 | static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, | 312 | static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, |
304 | unsigned long address) | 313 | unsigned long start, |
314 | unsigned long end) | ||
305 | { | 315 | { |
306 | return 0; | 316 | return 0; |
307 | } | 317 | } |