author | Andrea Arcangeli <aarcange@redhat.com> | 2011-01-13 18:47:10 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:46 -0500 |
commit | 8ee53820edfd1f3b6554c593f337148dd3d7fc91 (patch) | |
tree | ca69957e928cd3efa1b47f92dcfb00591702684c /include/linux/mmu_notifier.h | |
parent | 4b7167b9ff9b7f3f528cbc4c7d02ebd275b9b10c (diff) |
thp: mmu_notifier_test_young
For GRU and EPT, we need gup-fast to set the referenced bit too (this is why
it's correct to return 0 when shadow_access_mask is zero: it then requires
gup-fast to set the referenced bit). A qemu-kvm access already sets the
young bit in the pte if it isn't zero-copy; if it is zero-copy, or a
shadow-paging EPT minor fault, we rely on gup-fast to signal that the page
is in use.
We also need to check the young bits on the secondary pagetables for NPT
(and not for nested shadow mmu), as the data may never be accessed again
through the primary pte.
Without this added accuracy, we'd have to remove the heuristic that avoids
collapsing hugepages in hugepage-sized virtual regions that don't have even
a single subpage in use.
->test_young is fully backwards compatible with GRU and other users that
don't have young bits set in their pagetables by the hardware, and that
should nuke their secondary mmu mappings when ->clear_flush_young runs,
just as EPT does.
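
As a purely illustrative sketch of that compatibility (not part of this
patch; the example_* names are made up): such a driver simply leaves
.test_young unset in its mmu_notifier_ops, and the notifier core then
reports its pages as not young, so nothing changes for it.

/*
 * Hypothetical driver without hardware accessed bits in its secondary
 * pagetables: it registers only the callbacks it already had.  Leaving
 * .test_young NULL means "not young" as far as the notifier core is
 * concerned, so ->clear_flush_young tearing down the mapping remains
 * its only usage signal, exactly as before.
 */
static const struct mmu_notifier_ops example_gru_like_ops = {
	.release		= example_release,
	.clear_flush_young	= example_clear_flush_young,
	/* .test_young deliberately left unset */
};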
Completely removing the heuristic that checks the young bit in
khugepaged/collapse_huge_page probably wouldn't be so bad either, but I
thought it was worth keeping, and this change makes it reliable.
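
The heuristic in question boils down to something like the following
simplified sketch (not the literal khugepaged code; the helper name is
hypothetical): a subpage only counts as in use if either the primary pte
or a secondary MMU has seen it accessed.

/*
 * Simplified sketch of the collapse heuristic: a subpage is considered
 * in use if the primary pte carries the accessed bit or a secondary
 * MMU (EPT/NPT) reports it via the new notifier hook.
 */
static inline bool subpage_in_use(struct mm_struct *mm, pte_t pteval,
				  unsigned long address)
{
	return pte_young(pteval) ||
	       mmu_notifier_test_young(mm, address);
}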
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mmu_notifier.h')
-rw-r--r-- | include/linux/mmu_notifier.h | 26 |
1 files changed, 26 insertions, 0 deletions
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index cbfab1e9957d..cc2e7dfea9d7 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -62,6 +62,16 @@ struct mmu_notifier_ops {
 				 unsigned long address);
 
 	/*
+	 * test_young is called to check the young/accessed bitflag in
+	 * the secondary pte. This is used to know if the page is
+	 * frequently used without actually clearing the flag or tearing
+	 * down the secondary mapping on the page.
+	 */
+	int (*test_young)(struct mmu_notifier *mn,
+			  struct mm_struct *mm,
+			  unsigned long address);
+
+	/*
 	 * change_pte is called in cases that pte mapping to page is changed:
 	 * for example, when ksm remaps pte to point to a new shared page.
 	 */
@@ -163,6 +173,8 @@ extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
 extern void __mmu_notifier_release(struct mm_struct *mm);
 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 					  unsigned long address);
+extern int __mmu_notifier_test_young(struct mm_struct *mm,
+				     unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
 				      unsigned long address, pte_t pte);
 extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
@@ -186,6 +198,14 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
 	return 0;
 }
 
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	if (mm_has_notifiers(mm))
+		return __mmu_notifier_test_young(mm, address);
+	return 0;
+}
+
 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 					   unsigned long address, pte_t pte)
 {
@@ -313,6 +333,12 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
 	return 0;
 }
 
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	return 0;
+}
+
 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 					   unsigned long address, pte_t pte)
 {
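
On the implementing side, a hypervisor whose secondary pagetables do carry
hardware accessed bits (EPT with a non-zero shadow_access_mask, or NPT)
would wire the new hook up roughly as below. This is only an illustrative
sketch with hypothetical example_* types and helpers, not the actual KVM
implementation.

/* Hypothetical per-VM state holding the registered notifier. */
struct example_vm {
	struct mmu_notifier mmu_notifier;
	/* ... secondary pagetable state ... */
};

static int example_test_young(struct mmu_notifier *mn,
			      struct mm_struct *mm,
			      unsigned long address)
{
	struct example_vm *vm = container_of(mn, struct example_vm,
					     mmu_notifier);

	/*
	 * Report the accessed bit of the secondary pte covering
	 * @address without clearing it and without unmapping, so the
	 * check stays cheap and non-destructive.
	 */
	return example_secondary_pte_accessed(vm, address);
}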