author    Jianyu Zhan <nasa4836@gmail.com>    2014-06-04 19:09:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-06-04 19:54:07 -0400
commit    7ee07a44eb53374a73544ae14c71366a02d462e0 (patch)
tree      dc05d5d45c001a691bacddbab90d1b26905ef675 /mm/internal.h
parent    bea04b073292b2acb522c7c1aa67a4fc58151530 (diff)
mm: fold mlocked_vma_newpage() into its only call site
In a previous commit ("mm: use the light version __mod_zone_page_state in mlocked_vma_newpage()") an irq-unsafe __mod_zone_page_state is used.  As suggested by Andrew, to reduce the risk that new call sites use mlocked_vma_newpage() without realizing they are adding a race, this patch folds mlocked_vma_newpage() into its only call site, page_add_new_anon_rmap(), making the check open-coded so people can see what is going on.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Suggested-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
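For illustration only, a rough sketch of what the open-coded result in page_add_new_anon_rmap() (mm/rmap.c) could look like after the fold. The helper body comes from the code deleted below; the surrounding anon-rmap setup and LRU handling are assumptions based on the pre-existing caller and may not match the exact resulting code in the tree:

/*
 * Sketch (not part of this patch's diff to mm/internal.h): the tail of
 * page_add_new_anon_rmap() after mlocked_vma_newpage() is folded in.
 * The SetPageActive()/lru_cache_add()/add_page_to_unevictable_list()
 * surroundings are assumed from the pre-existing caller.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/* ... anon rmap accounting and setup elided ... */
	__page_set_anon_rmap(page, vma, address, 1);

	VM_BUG_ON_PAGE(PageLRU(page), page);
	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		/* Not a new page in a LOCKED vma: put it on the active LRU. */
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * The irq-unsafe __mod_zone_page_state is safe here: NR_MLOCK
		 * is not modified from interrupt context, and the pte lock is
		 * held (a spinlock), which implies preemption is disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				      hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}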
Diffstat (limited to 'mm/internal.h')
-rw-r--r--  mm/internal.h  29
1 file changed, 0 insertions(+), 29 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e067984bafa0..802c3a4fc03a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -189,31 +189,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma.  If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
-{
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
-		 * counter is not modified from interrupt context, and the pte
-		 * lock is held(spinlock), which implies preemption disabled.
-		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
-/*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }