path: root/mm/rmap.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-08 14:31:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-08 14:31:16 -0400
commit	3f17ea6dea8ba5668873afa54628a91aaa3fb1c0 (patch)
tree	afbeb2accd4c2199ddd705ae943995b143a0af02 /mm/rmap.c
parent	1860e379875dfe7271c649058aeddffe5afd9d0d (diff)
parent	1a5700bc2d10cd379a795fd2bb377a190af5acd4 (diff)
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the
merge window.

* accumulated work in next: (6809 commits)
  ufs: sb mutex merge + mutex_destroy
  powerpc: update comments for generic idle conversion
  cris: update comments for generic idle conversion
  idle: remove cpu_idle() forward declarations
  nbd: zero from and len fields in NBD_CMD_DISCONNECT.
  mm: convert some level-less printks to pr_*
  MAINTAINERS: adi-buildroot-devel is moderated
  MAINTAINERS: add linux-api for review of API/ABI changes
  mm/kmemleak-test.c: use pr_fmt for logging
  fs/dlm/debug_fs.c: replace seq_printf by seq_puts
  fs/dlm/lockspace.c: convert simple_str to kstr
  fs/dlm/config.c: convert simple_str to kstr
  mm: mark remap_file_pages() syscall as deprecated
  mm: memcontrol: remove unnecessary memcg argument from soft limit functions
  mm: memcontrol: clean up memcg zoneinfo lookup
  mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
  mm/mempool.c: update the kmemleak stack trace for mempool allocations
  lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
  mm: introduce kmemleak_update_trace()
  mm/kmemleak.c: use %u to print ->checksum
  ...
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	55
1 file changed, 41 insertions, 14 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 83bfafabb47b..bf05fc872ae8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 	 * LOCK should suffice since the actual taking of the lock must
 	 * happen _before_ what follows.
 	 */
+	might_sleep();
 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
 		anon_vma_lock_write(anon_vma);
 		anon_vma_unlock_write(anon_vma);
@@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
 	 * above cannot corrupt).
 	 */
 	if (!page_mapped(page)) {
+		rcu_read_unlock();
 		put_anon_vma(anon_vma);
-		anon_vma = NULL;
+		return NULL;
 	}
 out:
 	rcu_read_unlock();
@@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 	}
 
 	if (!page_mapped(page)) {
+		rcu_read_unlock();
 		put_anon_vma(anon_vma);
-		anon_vma = NULL;
-		goto out;
+		return NULL;
 	}
 
 	/* we pinned the anon_vma, its safe to sleep */
@@ -669,7 +671,7 @@ struct page_referenced_arg {
 /*
  * arg: page_referenced_arg will be passed
  */
-int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -986,6 +988,12 @@ void do_page_add_anon_rmap(struct page *page,
 {
 	int first = atomic_inc_and_test(&page->_mapcount);
 	if (first) {
+		/*
+		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+		 * these counters are not modified in interrupt context, and
+		 * pte lock(a spinlock) is held, which implies preemption
+		 * disabled.
+		 */
 		if (PageTransHuge(page))
 			__inc_zone_page_state(page,
 					      NR_ANON_TRANSPARENT_HUGEPAGES);
@@ -1024,11 +1032,25 @@ void page_add_new_anon_rmap(struct page *page,
 	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
 			hpage_nr_pages(page));
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (!mlocked_vma_newpage(vma, page)) {
+
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
-	} else
-		add_page_to_unevictable_list(page);
+		return;
+	}
+
+	if (!TestSetPageMlocked(page)) {
+		/*
+		 * We use the irq-unsafe __mod_zone_page_stat because this
+		 * counter is not modified from interrupt context, and the pte
+		 * lock is held(spinlock), which implies preemption disabled.
+		 */
+		__mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
+	add_page_to_unevictable_list(page);
 }
 
 /**
@@ -1077,6 +1099,11 @@ void page_remove_rmap(struct page *page)
 	/*
 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
 	 * and not charged by memcg for now.
+	 *
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	if (unlikely(PageHuge(page)))
 		goto out;
@@ -1112,7 +1139,7 @@ out:
 /*
  * @arg: enum ttu_flags will be passed to this argument
  */
-int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -1135,7 +1162,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		if (vma->vm_flags & VM_LOCKED)
 			goto out_mlock;
 
-		if (TTU_ACTION(flags) == TTU_MUNLOCK)
+		if (flags & TTU_MUNLOCK)
 			goto out_unmap;
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -1203,7 +1230,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * pte. do_swap_page() will wait until the migration
 			 * pte is removed and then restart fault handling.
 			 */
-			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
+			BUG_ON(!(flags & TTU_MIGRATION));
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
 		swp_pte = swp_entry_to_pte(entry);
@@ -1212,7 +1239,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		set_pte_at(mm, address, pte, swp_pte);
 		BUG_ON(pte_file(*pte));
 	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
-		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
+		   (flags & TTU_MIGRATION)) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
@@ -1225,7 +1252,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
-	if (ret != SWAP_FAIL)
+	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
 		mmu_notifier_invalidate_page(mm, address);
 out:
 	return ret;
@@ -1359,7 +1386,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		if (page->index != linear_page_index(vma, address)) {
 			pte_t ptfile = pgoff_to_pte(page->index);
 			if (pte_soft_dirty(pteval))
-				pte_file_mksoft_dirty(ptfile);
+				ptfile = pte_file_mksoft_dirty(ptfile);
 			set_pte_at(mm, address, pte, ptfile);
 		}
 
@@ -1512,7 +1539,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	 * locking requirements of exec(), migration skips
 	 * temporary VMAs until after exec() completes.
 	 */
-	if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
 		rwc.invalid_vma = invalid_migration_vma;
 
 	ret = rmap_walk(page, &rwc);
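
Several hunks above drop the TTU_ACTION() equality comparisons in favour of plain
bitmask tests (e.g. "flags & TTU_MUNLOCK", "!(flags & TTU_MIGRATION)"). Below is a
minimal userspace sketch of the two styles; the flag values and the TTU_ACTION()
definition here are simplified stand-ins for illustration, not the real enum
ttu_flags from include/linux/rmap.h.

#include <stdio.h>

/* Stand-in flag bits for illustration only. */
#define TTU_UNMAP		(1 << 0)
#define TTU_MIGRATION		(1 << 1)
#define TTU_MUNLOCK		(1 << 2)
#define TTU_IGNORE_MLOCK	(1 << 8)	/* a "modifier" bit */

/* Old style: mask off the modifier bits, then compare the action value. */
#define TTU_ACTION(x)	((x) & ((1 << 8) - 1))

int main(void)
{
	int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;

	/*
	 * With mutually exclusive action bits both forms give the same
	 * answer; the bit test reads more simply and keeps working even
	 * if callers combine more than one action bit.
	 */
	printf("TTU_ACTION(flags) == TTU_MIGRATION : %d\n",
	       TTU_ACTION(flags) == TTU_MIGRATION);
	printf("flags & TTU_MIGRATION              : %d\n",
	       !!(flags & TTU_MIGRATION));
	return 0;
}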