aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJianyu Zhan <nasa4836@gmail.com>2014-06-04 19:09:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:54:07 -0400
commit7ee07a44eb53374a73544ae14c71366a02d462e0 (patch)
treedc05d5d45c001a691bacddbab90d1b26905ef675
parentbea04b073292b2acb522c7c1aa67a4fc58151530 (diff)
mm: fold mlocked_vma_newpage() into its only call site
In a previous commit (mm: use the light version __mod_zone_page_state in mlocked_vma_newpage()) an irq-unsafe __mod_zone_page_state is used. And as suggested by Andrew, to reduce the risks that new call sites incorrectly use mlocked_vma_newpage() without knowing they are adding races, this patch folds mlocked_vma_newpage() into its only call site, page_add_new_anon_rmap, to make it open-coded for people to know what is going on. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Jianyu Zhan <nasa4836@gmail.com> Suggested-by: Andrew Morton <akpm@linux-foundation.org> Suggested-by: Hugh Dickins <hughd@google.com> Acked-by: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/internal.h29
-rw-r--r--mm/rmap.c20
2 files changed, 17 insertions, 32 deletions
diff --git a/mm/internal.h b/mm/internal.h
index e067984bafa0..802c3a4fc03a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -189,31 +189,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
189} 189}
190 190
191/* 191/*
192 * Called only in fault path, to determine if a new page is being
193 * mapped into a LOCKED vma. If it is, mark page as mlocked.
194 */
195static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
196 struct page *page)
197{
198 VM_BUG_ON_PAGE(PageLRU(page), page);
199
200 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
201 return 0;
202
203 if (!TestSetPageMlocked(page)) {
204 /*
205 * We use the irq-unsafe __mod_zone_page_stat because this
206 * counter is not modified from interrupt context, and the pte
207 * lock is held(spinlock), which implies preemption disabled.
208 */
209 __mod_zone_page_state(page_zone(page), NR_MLOCK,
210 hpage_nr_pages(page));
211 count_vm_event(UNEVICTABLE_PGMLOCKED);
212 }
213 return 1;
214}
215
216/*
217 * must be called with vma's mmap_sem held for read or write, and page locked. 192 * must be called with vma's mmap_sem held for read or write, and page locked.
218 */ 193 */
219extern void mlock_vma_page(struct page *page); 194extern void mlock_vma_page(struct page *page);
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
255 struct vm_area_struct *vma); 230 struct vm_area_struct *vma);
256#endif 231#endif
257#else /* !CONFIG_MMU */ 232#else /* !CONFIG_MMU */
258static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
259{
260 return 0;
261}
262static inline void clear_page_mlock(struct page *page) { } 233static inline void clear_page_mlock(struct page *page) { }
263static inline void mlock_vma_page(struct page *page) { } 234static inline void mlock_vma_page(struct page *page) { }
264static inline void mlock_migrate_page(struct page *new, struct page *old) { } 235static inline void mlock_migrate_page(struct page *new, struct page *old) { }
diff --git a/mm/rmap.c b/mm/rmap.c
index 4644e10248f0..e375ce4bd93e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1032,11 +1032,25 @@ void page_add_new_anon_rmap(struct page *page,
1032 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, 1032 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1033 hpage_nr_pages(page)); 1033 hpage_nr_pages(page));
1034 __page_set_anon_rmap(page, vma, address, 1); 1034 __page_set_anon_rmap(page, vma, address, 1);
1035 if (!mlocked_vma_newpage(vma, page)) { 1035
1036 VM_BUG_ON_PAGE(PageLRU(page), page);
1037 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
1036 SetPageActive(page); 1038 SetPageActive(page);
1037 lru_cache_add(page); 1039 lru_cache_add(page);
1038 } else 1040 return;
1039 add_page_to_unevictable_list(page); 1041 }
1042
1043 if (!TestSetPageMlocked(page)) {
1044 /*
1045 * We use the irq-unsafe __mod_zone_page_stat because this
1046 * counter is not modified from interrupt context, and the pte
1047 * lock is held(spinlock), which implies preemption disabled.
1048 */
1049 __mod_zone_page_state(page_zone(page), NR_MLOCK,
1050 hpage_nr_pages(page));
1051 count_vm_event(UNEVICTABLE_PGMLOCKED);
1052 }
1053 add_page_to_unevictable_list(page);
1040} 1054}
1041 1055
1042/** 1056/**