author:    Jianyu Zhan <nasa4836@gmail.com>  2014-06-04 19:09:52 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:54:07 -0400
commit:    7ee07a44eb53374a73544ae14c71366a02d462e0 (patch)
tree:      dc05d5d45c001a691bacddbab90d1b26905ef675
parent:    bea04b073292b2acb522c7c1aa67a4fc58151530 (diff)
mm: fold mlocked_vma_newpage() into its only call site
A previous commit ("mm: use the light version __mod_zone_page_state in
mlocked_vma_newpage()") switched mlocked_vma_newpage() to the irq-unsafe
__mod_zone_page_state().  As suggested by Andrew, to reduce the risk that
new call sites use mlocked_vma_newpage() without realizing they would be
introducing a race, this patch folds mlocked_vma_newpage() into its only
call site, page_add_new_anon_rmap(), so the logic is open-coded and it is
obvious what is going on.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Suggested-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
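
For readers unfamiliar with the vmstat helpers referenced above, the distinction driving this change is that mod_zone_page_state() is the interrupt-safe variant and __mod_zone_page_state() the cheap one. Roughly, the safe variant is just the cheap per-cpu update bracketed by interrupt disabling. The following is a simplified sketch of that relationship, not a verbatim copy of mm/vmstat.c; the function name is illustrative:

```c
#include <linux/irqflags.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>

/*
 * Simplified sketch of the irq-safe wrapper: it exists to keep an
 * interrupt on the same CPU from racing with the non-atomic per-cpu
 * update done by __mod_zone_page_state().  When a call site can rule
 * that race out by other means -- here the pte spinlock, plus the fact
 * that NR_MLOCK is never modified from interrupt context -- the bare
 * __mod_zone_page_state() is sufficient and cheaper.
 */
static void sketch_mod_zone_page_state(struct zone *zone,
				       enum zone_stat_item item, int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
```

Because the pte lock is a spinlock, holding it also disables preemption, so the per-cpu access in the cheap variant cannot be interrupted by a task migration mid-update.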
-rw-r--r--  mm/internal.h | 29
-rw-r--r--  mm/rmap.c     | 20

2 files changed, 17 insertions, 32 deletions
```diff
diff --git a/mm/internal.h b/mm/internal.h
index e067984bafa0..802c3a4fc03a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -189,31 +189,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma.  If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
-{
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
-		 * counter is not modified from interrupt context, and the pte
-		 * lock is held(spinlock), which implies preemption disabled.
-		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
-/*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
 				struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
```
```diff
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1032,11 +1032,25 @@ void page_add_new_anon_rmap(struct page *page,
 	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
 			hpage_nr_pages(page));
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (!mlocked_vma_newpage(vma, page)) {
+
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
-	} else
-		add_page_to_unevictable_list(page);
+		return;
+	}
+
+	if (!TestSetPageMlocked(page)) {
+		/*
+		 * We use the irq-unsafe __mod_zone_page_stat because this
+		 * counter is not modified from interrupt context, and the pte
+		 * lock is held(spinlock), which implies preemption disabled.
+		 */
+		__mod_zone_page_state(page_zone(page), NR_MLOCK,
+				hpage_nr_pages(page));
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
+	add_page_to_unevictable_list(page);
 }
 
 /**
```
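
As a footnote on why misuse of the folded-away helper would have been subtle: the cheap stat update is, at its core, a plain read-modify-write of a per-cpu delta. The following is a hypothetical, deliberately simplified model of the lost-update hazard an unprotected caller would reintroduce; the function name and the pcpu_delta parameter are illustrative, not the actual mm/vmstat.c code:

```c
#include <linux/types.h>

/*
 * Hypothetical model of the unprotected per-cpu counter update hidden
 * behind __mod_zone_page_state().  "pcpu_delta" stands in for the
 * per-cpu differential that is periodically folded into the global
 * zone counter.
 */
static void sketch_unsafe_counter_update(s8 *pcpu_delta, int delta)
{
	s8 x = *pcpu_delta + delta;	/* load + add */
	/*
	 * If an interrupt handler or a preempting task on this CPU
	 * updated the same counter at this point, the store below would
	 * silently overwrite that update.  In page_add_new_anon_rmap()
	 * the pte spinlock forbids preemption and NR_MLOCK is never
	 * touched from interrupt context, so the race cannot happen --
	 * but a new, unaudited call site would have no such guarantee,
	 * which is why the helper was open-coded into its only user.
	 */
	*pcpu_delta = x;		/* store */
}
```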